From bad5df60cd53f774b59edd997ec3ff30fba67cac Mon Sep 17 00:00:00 2001
From: Akira Kakuto
Date: Thu, 18 Feb 2016 15:19:40 +0000
Subject: libs/luajit: New convention

git-svn-id: svn://tug.org/texlive/trunk@39765 c570f23f-e606-0410-a88d-b1316a301751
---
 Build/source/libs/luajit/ChangeLog | 4 + .../luajit/LuaJIT-2.1.0-beta1-PATCHES/ChangeLog | 61 - .../LuaJIT-2.1.0-beta1-PATCHES/patch-01-LuaJITTeX | 175 - .../LuaJIT-2.1.0-beta1-PATCHES/patch-05-LuaJITTeX | 108 - .../LuaJIT-2.1.0-beta1-PATCHES/patch-06-ppc-darwin | 24 - .../patch-07-x86_64-cygwin | 59 - .../libs/luajit/LuaJIT-2.1.0-beta1/COPYRIGHT | 56 - .../source/libs/luajit/LuaJIT-2.1.0-beta1/Makefile | 159 - Build/source/libs/luajit/LuaJIT-2.1.0-beta1/README | 16 - .../LuaJIT-2.1.0-beta1/doc/bluequad-print.css | 166 - .../luajit/LuaJIT-2.1.0-beta1/doc/bluequad.css | 325 - .../luajit/LuaJIT-2.1.0-beta1/doc/changes.html | 804 -- .../luajit/LuaJIT-2.1.0-beta1/doc/contact.html | 104 - .../luajit/LuaJIT-2.1.0-beta1/doc/ext_c_api.html | 189 - .../luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi.html | 332 - .../luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_api.html | 570 -- .../LuaJIT-2.1.0-beta1/doc/ext_ffi_semantics.html | 1261 ---- .../LuaJIT-2.1.0-beta1/doc/ext_ffi_tutorial.html | 603 -- .../luajit/LuaJIT-2.1.0-beta1/doc/ext_jit.html | 201 - .../LuaJIT-2.1.0-beta1/doc/ext_profiler.html | 365 - .../luajit/LuaJIT-2.1.0-beta1/doc/extensions.html | 448 -- .../libs/luajit/LuaJIT-2.1.0-beta1/doc/faq.html | 186 - .../luajit/LuaJIT-2.1.0-beta1/doc/img/contact.png | Bin 1340 -> 0 bytes .../luajit/LuaJIT-2.1.0-beta1/doc/install.html | 659 -- .../libs/luajit/LuaJIT-2.1.0-beta1/doc/luajit.html | 236 - .../luajit/LuaJIT-2.1.0-beta1/doc/running.html | 309 - .../libs/luajit/LuaJIT-2.1.0-beta1/doc/status.html | 118 - .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.h | 456 -- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.lua | 1125 --- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.h | 518 -- .../LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.lua | 1166 --- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.h | 416 -- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.lua | 953 --- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.h | 419 -- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.lua | 1919 ----- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_proto.h | 83 - .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x64.lua | 12 - .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.h | 471 -- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.lua | 1945 ----- .../luajit/LuaJIT-2.1.0-beta1/dynasm/dynasm.lua | 1094 --- .../libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.1 | 88 - .../libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.pc | 25 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile | 697 -- .../luajit/LuaJIT-2.1.0-beta1/src/Makefile.dep | 245 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/host/README | 4 - .../luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.c | 518 -- .../luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.h | 105 - .../LuaJIT-2.1.0-beta1/src/host/buildvm_asm.c | 345 - .../LuaJIT-2.1.0-beta1/src/host/buildvm_fold.c | 229 - .../LuaJIT-2.1.0-beta1/src/host/buildvm_lib.c | 457 -- .../LuaJIT-2.1.0-beta1/src/host/buildvm_libbc.h | 45 - .../LuaJIT-2.1.0-beta1/src/host/buildvm_peobj.c | 368 - .../LuaJIT-2.1.0-beta1/src/host/genlibbc.lua | 197 - .../LuaJIT-2.1.0-beta1/src/host/genminilua.lua | 428 -- .../luajit/LuaJIT-2.1.0-beta1/src/host/minilua.c | 7770 -------------------- .../libs/luajit/LuaJIT-2.1.0-beta1/src/jit/bc.lua | 190 - .../luajit/LuaJIT-2.1.0-beta1/src/jit/bcsave.lua | 661 -- .../luajit/LuaJIT-2.1.0-beta1/src/jit/dis_arm.lua | 689 
-- .../luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mips.lua | 428 -- .../LuaJIT-2.1.0-beta1/src/jit/dis_mipsel.lua | 17 - .../luajit/LuaJIT-2.1.0-beta1/src/jit/dis_ppc.lua | 591 -- .../luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x64.lua | 17 - .../luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x86.lua | 838 --- .../luajit/LuaJIT-2.1.0-beta1/src/jit/dump.lua | 706 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/jit/p.lua | 310 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/jit/v.lua | 170 - .../luajit/LuaJIT-2.1.0-beta1/src/jit/zone.lua | 45 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lauxlib.h | 193 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_aux.c | 356 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_base.c | 664 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_bit.c | 180 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_debug.c | 405 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_ffi.c | 872 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_init.c | 56 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_io.c | 541 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_jit.c | 767 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_math.c | 230 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_os.c | 292 - .../luajit/LuaJIT-2.1.0-beta1/src/lib_package.c | 648 -- .../luajit/LuaJIT-2.1.0-beta1/src/lib_string.c | 752 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lib_table.c | 307 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj.supp | 26 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.c | 1398 ---- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.h | 17 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_api.c | 1210 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_arch.h | 513 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.c | 2278 ------ .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.h | 17 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_asm_arm.h | 2217 ------ .../luajit/LuaJIT-2.1.0-beta1/src/lj_asm_mips.h | 1833 ----- .../luajit/LuaJIT-2.1.0-beta1/src/lj_asm_ppc.h | 2015 ----- .../luajit/LuaJIT-2.1.0-beta1/src/lj_asm_x86.h | 2634 ------- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.c | 14 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.h | 265 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcdump.h | 68 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcread.c | 457 -- .../luajit/LuaJIT-2.1.0-beta1/src/lj_bcwrite.c | 361 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.c | 234 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.h | 105 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.c | 429 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.h | 37 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.c | 984 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.h | 178 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.c | 712 -- .../luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.h | 25 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.c | 752 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.h | 70 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.c | 288 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.h | 76 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.c | 43 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.h | 42 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.c | 418 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.h | 29 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.c | 1862 ----- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.h | 65 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.c | 1834 ----- .../luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.h | 38 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.c | 635 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.h | 461 -- 
.../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.c | 699 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.h | 65 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_def.h | 365 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.c | 557 -- .../luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.h | 138 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_emit_arm.h | 356 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_emit_mips.h | 211 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_emit_ppc.h | 238 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_emit_x86.h | 462 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.c | 796 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.h | 41 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_errmsg.h | 190 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ff.h | 18 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.c | 1247 ---- .../luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.h | 24 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_frame.h | 259 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.c | 185 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.h | 24 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.c | 845 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.h | 134 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.c | 787 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.h | 22 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.c | 505 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.h | 577 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ircall.h | 321 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_iropt.h | 161 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_jit.h | 440 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.c | 482 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.h | 86 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.c | 303 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.h | 115 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_load.c | 168 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.c | 386 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.h | 30 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.c | 477 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.h | 38 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.c | 50 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.h | 976 --- .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_dce.c | 78 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_fold.c | 2488 ------- .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_loop.c | 449 -- .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_mem.c | 935 --- .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_narrow.c | 652 -- .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_sink.c | 245 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_opt_split.c | 856 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.c | 2725 ------- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.h | 18 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_profile.c | 368 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_profile.h | 21 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.c | 2554 ------- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.h | 45 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.c | 870 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.h | 34 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.c | 300 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.h | 35 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.c | 224 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.h | 27 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.c | 554 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.h | 125 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.c | 547 -- .../luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.h | 39 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.c | 666 -- 
.../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.h | 73 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target.h | 164 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm.h | 270 - .../LuaJIT-2.1.0-beta1/src/lj_target_arm64.h | 97 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_target_mips.h | 260 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_target_ppc.h | 280 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_target_x86.h | 345 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.c | 859 --- .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.h | 54 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_traceerr.h | 63 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.c | 34 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.h | 14 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vm.h | 114 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.c | 58 - .../luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.h | 59 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmmath.c | 152 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/ljamalg.c | 96 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lua.h | 406 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lua.hpp | 9 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/luaconf.h | 156 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.c | 570 -- .../libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.h | 79 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/lualib.h | 47 - .../luajit/LuaJIT-2.1.0-beta1/src/msvcbuild.bat | 114 - .../luajit/LuaJIT-2.1.0-beta1/src/ps4build.bat | 103 - .../luajit/LuaJIT-2.1.0-beta1/src/psvitabuild.bat | 93 - .../libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm.dasc | 4585 ------------ .../luajit/LuaJIT-2.1.0-beta1/src/vm_arm64.dasc | 3764 ---------- .../luajit/LuaJIT-2.1.0-beta1/src/vm_mips.dasc | 4355 ----------- .../libs/luajit/LuaJIT-2.1.0-beta1/src/vm_ppc.dasc | 5252 ------------- .../libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x64.dasc | 4902 ------------ .../libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x86.dasc | 5710 -------------- .../luajit/LuaJIT-2.1.0-beta1/src/xb1build.bat | 101 - .../luajit/LuaJIT-2.1.0-beta1/src/xedkbuild.bat | 92 - Build/source/libs/luajit/LuaJIT-PATCHES/ChangeLog | 61 + .../libs/luajit/LuaJIT-PATCHES/patch-01-LuaJITTeX | 175 + .../libs/luajit/LuaJIT-PATCHES/patch-05-LuaJITTeX | 108 + .../libs/luajit/LuaJIT-PATCHES/patch-06-ppc-darwin | 24 + .../luajit/LuaJIT-PATCHES/patch-07-x86_64-cygwin | 59 + Build/source/libs/luajit/LuaJIT-src/COPYRIGHT | 56 + Build/source/libs/luajit/LuaJIT-src/Makefile | 159 + Build/source/libs/luajit/LuaJIT-src/README | 16 + .../libs/luajit/LuaJIT-src/doc/bluequad-print.css | 166 + .../source/libs/luajit/LuaJIT-src/doc/bluequad.css | 325 + .../source/libs/luajit/LuaJIT-src/doc/changes.html | 804 ++ .../source/libs/luajit/LuaJIT-src/doc/contact.html | 104 + .../libs/luajit/LuaJIT-src/doc/ext_c_api.html | 189 + .../source/libs/luajit/LuaJIT-src/doc/ext_ffi.html | 332 + .../libs/luajit/LuaJIT-src/doc/ext_ffi_api.html | 570 ++ .../luajit/LuaJIT-src/doc/ext_ffi_semantics.html | 1261 ++++ .../luajit/LuaJIT-src/doc/ext_ffi_tutorial.html | 603 ++ .../source/libs/luajit/LuaJIT-src/doc/ext_jit.html | 201 + .../libs/luajit/LuaJIT-src/doc/ext_profiler.html | 365 + .../libs/luajit/LuaJIT-src/doc/extensions.html | 448 ++ Build/source/libs/luajit/LuaJIT-src/doc/faq.html | 186 + .../libs/luajit/LuaJIT-src/doc/img/contact.png | Bin 0 -> 1340 bytes .../source/libs/luajit/LuaJIT-src/doc/install.html | 659 ++ .../source/libs/luajit/LuaJIT-src/doc/luajit.html | 236 + .../source/libs/luajit/LuaJIT-src/doc/running.html | 309 + .../source/libs/luajit/LuaJIT-src/doc/status.html | 118 + .../libs/luajit/LuaJIT-src/dynasm/dasm_arm.h | 456 ++ 
.../libs/luajit/LuaJIT-src/dynasm/dasm_arm.lua | 1125 +++ .../libs/luajit/LuaJIT-src/dynasm/dasm_arm64.h | 518 ++ .../libs/luajit/LuaJIT-src/dynasm/dasm_arm64.lua | 1166 +++ .../libs/luajit/LuaJIT-src/dynasm/dasm_mips.h | 416 ++ .../libs/luajit/LuaJIT-src/dynasm/dasm_mips.lua | 953 +++ .../libs/luajit/LuaJIT-src/dynasm/dasm_ppc.h | 419 ++ .../libs/luajit/LuaJIT-src/dynasm/dasm_ppc.lua | 1919 +++++ .../libs/luajit/LuaJIT-src/dynasm/dasm_proto.h | 83 + .../libs/luajit/LuaJIT-src/dynasm/dasm_x64.lua | 12 + .../libs/luajit/LuaJIT-src/dynasm/dasm_x86.h | 471 ++ .../libs/luajit/LuaJIT-src/dynasm/dasm_x86.lua | 1945 +++++ .../libs/luajit/LuaJIT-src/dynasm/dynasm.lua | 1094 +++ Build/source/libs/luajit/LuaJIT-src/etc/luajit.1 | 88 + Build/source/libs/luajit/LuaJIT-src/etc/luajit.pc | 25 + Build/source/libs/luajit/LuaJIT-src/src/Makefile | 697 ++ .../source/libs/luajit/LuaJIT-src/src/Makefile.dep | 245 + .../source/libs/luajit/LuaJIT-src/src/host/README | 4 + .../libs/luajit/LuaJIT-src/src/host/buildvm.c | 518 ++ .../libs/luajit/LuaJIT-src/src/host/buildvm.h | 105 + .../libs/luajit/LuaJIT-src/src/host/buildvm_asm.c | 345 + .../libs/luajit/LuaJIT-src/src/host/buildvm_fold.c | 229 + .../libs/luajit/LuaJIT-src/src/host/buildvm_lib.c | 457 ++ .../luajit/LuaJIT-src/src/host/buildvm_libbc.h | 45 + .../luajit/LuaJIT-src/src/host/buildvm_peobj.c | 368 + .../libs/luajit/LuaJIT-src/src/host/genlibbc.lua | 197 + .../libs/luajit/LuaJIT-src/src/host/genminilua.lua | 428 ++ .../libs/luajit/LuaJIT-src/src/host/minilua.c | 7770 ++++++++++++++++++++ Build/source/libs/luajit/LuaJIT-src/src/jit/bc.lua | 190 + .../libs/luajit/LuaJIT-src/src/jit/bcsave.lua | 661 ++ .../libs/luajit/LuaJIT-src/src/jit/dis_arm.lua | 689 ++ .../libs/luajit/LuaJIT-src/src/jit/dis_mips.lua | 428 ++ .../libs/luajit/LuaJIT-src/src/jit/dis_mipsel.lua | 17 + .../libs/luajit/LuaJIT-src/src/jit/dis_ppc.lua | 591 ++ .../libs/luajit/LuaJIT-src/src/jit/dis_x64.lua | 17 + .../libs/luajit/LuaJIT-src/src/jit/dis_x86.lua | 838 +++ .../source/libs/luajit/LuaJIT-src/src/jit/dump.lua | 706 ++ Build/source/libs/luajit/LuaJIT-src/src/jit/p.lua | 310 + Build/source/libs/luajit/LuaJIT-src/src/jit/v.lua | 170 + .../source/libs/luajit/LuaJIT-src/src/jit/zone.lua | 45 + Build/source/libs/luajit/LuaJIT-src/src/lauxlib.h | 193 + Build/source/libs/luajit/LuaJIT-src/src/lib_aux.c | 356 + Build/source/libs/luajit/LuaJIT-src/src/lib_base.c | 664 ++ Build/source/libs/luajit/LuaJIT-src/src/lib_bit.c | 180 + .../source/libs/luajit/LuaJIT-src/src/lib_debug.c | 405 + Build/source/libs/luajit/LuaJIT-src/src/lib_ffi.c | 872 +++ Build/source/libs/luajit/LuaJIT-src/src/lib_init.c | 56 + Build/source/libs/luajit/LuaJIT-src/src/lib_io.c | 541 ++ Build/source/libs/luajit/LuaJIT-src/src/lib_jit.c | 767 ++ Build/source/libs/luajit/LuaJIT-src/src/lib_math.c | 230 + Build/source/libs/luajit/LuaJIT-src/src/lib_os.c | 292 + .../libs/luajit/LuaJIT-src/src/lib_package.c | 648 ++ .../source/libs/luajit/LuaJIT-src/src/lib_string.c | 752 ++ .../source/libs/luajit/LuaJIT-src/src/lib_table.c | 307 + Build/source/libs/luajit/LuaJIT-src/src/lj.supp | 26 + Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.c | 1398 ++++ Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.h | 17 + Build/source/libs/luajit/LuaJIT-src/src/lj_api.c | 1210 +++ Build/source/libs/luajit/LuaJIT-src/src/lj_arch.h | 513 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_asm.c | 2278 ++++++ Build/source/libs/luajit/LuaJIT-src/src/lj_asm.h | 17 + .../source/libs/luajit/LuaJIT-src/src/lj_asm_arm.h | 2217 ++++++ 
.../libs/luajit/LuaJIT-src/src/lj_asm_mips.h | 1833 +++++ .../source/libs/luajit/LuaJIT-src/src/lj_asm_ppc.h | 2015 +++++ .../source/libs/luajit/LuaJIT-src/src/lj_asm_x86.h | 2634 +++++++ Build/source/libs/luajit/LuaJIT-src/src/lj_bc.c | 14 + Build/source/libs/luajit/LuaJIT-src/src/lj_bc.h | 265 + .../source/libs/luajit/LuaJIT-src/src/lj_bcdump.h | 68 + .../source/libs/luajit/LuaJIT-src/src/lj_bcread.c | 457 ++ .../source/libs/luajit/LuaJIT-src/src/lj_bcwrite.c | 361 + Build/source/libs/luajit/LuaJIT-src/src/lj_buf.c | 234 + Build/source/libs/luajit/LuaJIT-src/src/lj_buf.h | 105 + .../source/libs/luajit/LuaJIT-src/src/lj_carith.c | 429 ++ .../source/libs/luajit/LuaJIT-src/src/lj_carith.h | 37 + Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.c | 984 +++ Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.h | 178 + .../libs/luajit/LuaJIT-src/src/lj_ccallback.c | 712 ++ .../libs/luajit/LuaJIT-src/src/lj_ccallback.h | 25 + Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.c | 752 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.h | 70 + Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.c | 288 + Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.h | 76 + Build/source/libs/luajit/LuaJIT-src/src/lj_char.c | 43 + Build/source/libs/luajit/LuaJIT-src/src/lj_char.h | 42 + Build/source/libs/luajit/LuaJIT-src/src/lj_clib.c | 418 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_clib.h | 29 + .../source/libs/luajit/LuaJIT-src/src/lj_cparse.c | 1862 +++++ .../source/libs/luajit/LuaJIT-src/src/lj_cparse.h | 65 + .../source/libs/luajit/LuaJIT-src/src/lj_crecord.c | 1834 +++++ .../source/libs/luajit/LuaJIT-src/src/lj_crecord.h | 38 + Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.c | 635 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.h | 461 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_debug.c | 699 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_debug.h | 65 + Build/source/libs/luajit/LuaJIT-src/src/lj_def.h | 365 + .../libs/luajit/LuaJIT-src/src/lj_dispatch.c | 557 ++ .../libs/luajit/LuaJIT-src/src/lj_dispatch.h | 138 + .../libs/luajit/LuaJIT-src/src/lj_emit_arm.h | 356 + .../libs/luajit/LuaJIT-src/src/lj_emit_mips.h | 211 + .../libs/luajit/LuaJIT-src/src/lj_emit_ppc.h | 238 + .../libs/luajit/LuaJIT-src/src/lj_emit_x86.h | 462 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_err.c | 796 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_err.h | 41 + .../source/libs/luajit/LuaJIT-src/src/lj_errmsg.h | 190 + Build/source/libs/luajit/LuaJIT-src/src/lj_ff.h | 18 + .../libs/luajit/LuaJIT-src/src/lj_ffrecord.c | 1247 ++++ .../libs/luajit/LuaJIT-src/src/lj_ffrecord.h | 24 + Build/source/libs/luajit/LuaJIT-src/src/lj_frame.h | 259 + Build/source/libs/luajit/LuaJIT-src/src/lj_func.c | 185 + Build/source/libs/luajit/LuaJIT-src/src/lj_func.h | 24 + Build/source/libs/luajit/LuaJIT-src/src/lj_gc.c | 845 +++ Build/source/libs/luajit/LuaJIT-src/src/lj_gc.h | 134 + .../source/libs/luajit/LuaJIT-src/src/lj_gdbjit.c | 787 ++ .../source/libs/luajit/LuaJIT-src/src/lj_gdbjit.h | 22 + Build/source/libs/luajit/LuaJIT-src/src/lj_ir.c | 505 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_ir.h | 577 ++ .../source/libs/luajit/LuaJIT-src/src/lj_ircall.h | 321 + Build/source/libs/luajit/LuaJIT-src/src/lj_iropt.h | 161 + Build/source/libs/luajit/LuaJIT-src/src/lj_jit.h | 440 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_lex.c | 482 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_lex.h | 86 + Build/source/libs/luajit/LuaJIT-src/src/lj_lib.c | 303 + Build/source/libs/luajit/LuaJIT-src/src/lj_lib.h | 115 + 
Build/source/libs/luajit/LuaJIT-src/src/lj_load.c | 168 + Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.c | 386 + Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.h | 30 + Build/source/libs/luajit/LuaJIT-src/src/lj_meta.c | 477 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_meta.h | 38 + Build/source/libs/luajit/LuaJIT-src/src/lj_obj.c | 50 + Build/source/libs/luajit/LuaJIT-src/src/lj_obj.h | 976 +++ .../source/libs/luajit/LuaJIT-src/src/lj_opt_dce.c | 78 + .../libs/luajit/LuaJIT-src/src/lj_opt_fold.c | 2488 +++++++ .../libs/luajit/LuaJIT-src/src/lj_opt_loop.c | 449 ++ .../source/libs/luajit/LuaJIT-src/src/lj_opt_mem.c | 935 +++ .../libs/luajit/LuaJIT-src/src/lj_opt_narrow.c | 652 ++ .../libs/luajit/LuaJIT-src/src/lj_opt_sink.c | 245 + .../libs/luajit/LuaJIT-src/src/lj_opt_split.c | 856 +++ Build/source/libs/luajit/LuaJIT-src/src/lj_parse.c | 2725 +++++++ Build/source/libs/luajit/LuaJIT-src/src/lj_parse.h | 18 + .../source/libs/luajit/LuaJIT-src/src/lj_profile.c | 368 + .../source/libs/luajit/LuaJIT-src/src/lj_profile.h | 21 + .../source/libs/luajit/LuaJIT-src/src/lj_record.c | 2554 +++++++ .../source/libs/luajit/LuaJIT-src/src/lj_record.h | 45 + Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c | 870 +++ Build/source/libs/luajit/LuaJIT-src/src/lj_snap.h | 34 + Build/source/libs/luajit/LuaJIT-src/src/lj_state.c | 300 + Build/source/libs/luajit/LuaJIT-src/src/lj_state.h | 35 + Build/source/libs/luajit/LuaJIT-src/src/lj_str.c | 224 + Build/source/libs/luajit/LuaJIT-src/src/lj_str.h | 27 + .../source/libs/luajit/LuaJIT-src/src/lj_strfmt.c | 554 ++ .../source/libs/luajit/LuaJIT-src/src/lj_strfmt.h | 125 + .../source/libs/luajit/LuaJIT-src/src/lj_strscan.c | 547 ++ .../source/libs/luajit/LuaJIT-src/src/lj_strscan.h | 39 + Build/source/libs/luajit/LuaJIT-src/src/lj_tab.c | 666 ++ Build/source/libs/luajit/LuaJIT-src/src/lj_tab.h | 73 + .../source/libs/luajit/LuaJIT-src/src/lj_target.h | 164 + .../libs/luajit/LuaJIT-src/src/lj_target_arm.h | 270 + .../libs/luajit/LuaJIT-src/src/lj_target_arm64.h | 97 + .../libs/luajit/LuaJIT-src/src/lj_target_mips.h | 260 + .../libs/luajit/LuaJIT-src/src/lj_target_ppc.h | 280 + .../libs/luajit/LuaJIT-src/src/lj_target_x86.h | 345 + Build/source/libs/luajit/LuaJIT-src/src/lj_trace.c | 859 +++ Build/source/libs/luajit/LuaJIT-src/src/lj_trace.h | 54 + .../libs/luajit/LuaJIT-src/src/lj_traceerr.h | 63 + Build/source/libs/luajit/LuaJIT-src/src/lj_udata.c | 34 + Build/source/libs/luajit/LuaJIT-src/src/lj_udata.h | 14 + Build/source/libs/luajit/LuaJIT-src/src/lj_vm.h | 114 + .../source/libs/luajit/LuaJIT-src/src/lj_vmevent.c | 58 + .../source/libs/luajit/LuaJIT-src/src/lj_vmevent.h | 59 + .../source/libs/luajit/LuaJIT-src/src/lj_vmmath.c | 152 + Build/source/libs/luajit/LuaJIT-src/src/ljamalg.c | 96 + Build/source/libs/luajit/LuaJIT-src/src/lua.h | 406 + Build/source/libs/luajit/LuaJIT-src/src/lua.hpp | 9 + Build/source/libs/luajit/LuaJIT-src/src/luaconf.h | 156 + Build/source/libs/luajit/LuaJIT-src/src/luajit.c | 570 ++ Build/source/libs/luajit/LuaJIT-src/src/luajit.h | 79 + Build/source/libs/luajit/LuaJIT-src/src/lualib.h | 47 + .../libs/luajit/LuaJIT-src/src/msvcbuild.bat | 114 + .../source/libs/luajit/LuaJIT-src/src/ps4build.bat | 103 + .../libs/luajit/LuaJIT-src/src/psvitabuild.bat | 93 + .../source/libs/luajit/LuaJIT-src/src/vm_arm.dasc | 4585 ++++++++++++ .../libs/luajit/LuaJIT-src/src/vm_arm64.dasc | 3764 ++++++++++ .../source/libs/luajit/LuaJIT-src/src/vm_mips.dasc | 4355 +++++++++++ .../source/libs/luajit/LuaJIT-src/src/vm_ppc.dasc | 5252 +++++++++++++ 
.../source/libs/luajit/LuaJIT-src/src/vm_x64.dasc | 4902 ++++++++++++ .../source/libs/luajit/LuaJIT-src/src/vm_x86.dasc | 5710 ++++++++++++++ .../source/libs/luajit/LuaJIT-src/src/xb1build.bat | 101 + .../libs/luajit/LuaJIT-src/src/xedkbuild.bat | 92 + Build/source/libs/luajit/Makefile.am | 2 +- Build/source/libs/luajit/Makefile.in | 2 +- Build/source/libs/luajit/configure | 6 +- Build/source/libs/luajit/configure.ac | 4 +- Build/source/libs/luajit/native/configure | 4 +- Build/source/libs/luajit/native/configure.ac | 4 +- 435 files changed, 126175 insertions(+), 126171 deletions(-) delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/ChangeLog delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-01-LuaJITTeX delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-05-LuaJITTeX delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-06-ppc-darwin delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-07-x86_64-cygwin delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/COPYRIGHT delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/Makefile delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/README delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad-print.css delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad.css delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/changes.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/contact.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_c_api.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_api.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_semantics.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_tutorial.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_jit.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_profiler.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/extensions.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/faq.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/img/contact.png delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/install.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/luajit.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/running.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/status.html delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_proto.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x64.lua 
delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dynasm.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.1 delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.pc delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile.dep delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/README delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_asm.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_fold.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_lib.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_libbc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_peobj.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genlibbc.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genminilua.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/minilua.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/bc.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/bcsave.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_arm.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mips.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mipsel.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_ppc.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x64.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x86.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dump.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/p.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/v.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/zone.lua delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lauxlib.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_aux.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_base.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_bit.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_debug.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_ffi.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_init.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_io.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_jit.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_math.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_os.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_package.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_string.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_table.c 
delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj.supp delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_api.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_arch.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_arm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_mips.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_ppc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_x86.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcdump.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcread.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcwrite.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_def.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_arm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_mips.h delete mode 100644 
Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_ppc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_x86.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_errmsg.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ff.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_frame.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ircall.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_iropt.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_jit.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_load.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_dce.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_fold.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_loop.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_mem.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_narrow.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_sink.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_split.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.c delete mode 100644 
Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm64.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_mips.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_ppc.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_x86.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_traceerr.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vm.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmmath.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ljamalg.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.hpp delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luaconf.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.c delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.h delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lualib.h delete mode 100755 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/msvcbuild.bat delete mode 100755 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ps4build.bat delete mode 100755 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/psvitabuild.bat delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm.dasc delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm64.dasc delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_mips.dasc delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_ppc.dasc delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x64.dasc delete mode 100644 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x86.dasc delete mode 100755 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xb1build.bat delete mode 100755 Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xedkbuild.bat create mode 100644 Build/source/libs/luajit/LuaJIT-PATCHES/ChangeLog create mode 100644 
Build/source/libs/luajit/LuaJIT-PATCHES/patch-01-LuaJITTeX create mode 100644 Build/source/libs/luajit/LuaJIT-PATCHES/patch-05-LuaJITTeX create mode 100644 Build/source/libs/luajit/LuaJIT-PATCHES/patch-06-ppc-darwin create mode 100644 Build/source/libs/luajit/LuaJIT-PATCHES/patch-07-x86_64-cygwin create mode 100644 Build/source/libs/luajit/LuaJIT-src/COPYRIGHT create mode 100644 Build/source/libs/luajit/LuaJIT-src/Makefile create mode 100644 Build/source/libs/luajit/LuaJIT-src/README create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/bluequad-print.css create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/bluequad.css create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/changes.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/contact.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_c_api.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_api.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_semantics.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_tutorial.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_jit.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/ext_profiler.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/extensions.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/faq.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/img/contact.png create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/install.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/luajit.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/running.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/doc/status.html create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_proto.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x64.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/dynasm/dynasm.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/etc/luajit.1 create mode 100644 Build/source/libs/luajit/LuaJIT-src/etc/luajit.pc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/Makefile create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/Makefile.dep create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/README create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_asm.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_fold.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_lib.c create mode 100644 
Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_libbc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_peobj.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/genlibbc.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/genminilua.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/host/minilua.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/bc.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/bcsave.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dis_arm.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mips.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mipsel.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dis_ppc.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x64.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x86.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/dump.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/p.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/v.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/jit/zone.lua create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lauxlib.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_aux.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_base.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_bit.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_debug.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_ffi.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_init.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_io.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_jit.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_math.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_os.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_package.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_string.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lib_table.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj.supp create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_api.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_arch.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_asm.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_asm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_asm_arm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_asm_mips.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_asm_ppc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_asm_x86.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_bc.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_bc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_bcdump.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_bcread.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_bcwrite.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_buf.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_buf.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_carith.c 
create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_carith.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_char.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_char.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_clib.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_clib.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_debug.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_debug.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_def.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_emit_arm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_emit_mips.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_emit_ppc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_emit_x86.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_err.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_err.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_errmsg.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ff.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_frame.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_func.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_func.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_gc.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_gc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ir.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ir.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_ircall.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_iropt.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_jit.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_lex.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_lex.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_lib.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_lib.h create mode 100644 
Build/source/libs/luajit/LuaJIT-src/src/lj_load.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_meta.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_meta.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_obj.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_obj.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_dce.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_fold.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_loop.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_mem.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_narrow.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_sink.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_opt_split.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_parse.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_parse.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_profile.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_profile.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_record.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_record.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_snap.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_state.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_state.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_str.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_str.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_tab.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_tab.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_target.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm64.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_target_mips.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_target_ppc.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_target_x86.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_trace.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_trace.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_traceerr.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_udata.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_udata.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_vm.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lj_vmmath.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/ljamalg.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lua.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lua.hpp create mode 100644 
Build/source/libs/luajit/LuaJIT-src/src/luaconf.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/luajit.c create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/luajit.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/lualib.h create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/msvcbuild.bat create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/ps4build.bat create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/psvitabuild.bat create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/vm_arm.dasc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/vm_arm64.dasc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/vm_mips.dasc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/vm_ppc.dasc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/vm_x64.dasc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/vm_x86.dasc create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/xb1build.bat create mode 100644 Build/source/libs/luajit/LuaJIT-src/src/xedkbuild.bat

diff --git a/Build/source/libs/luajit/ChangeLog b/Build/source/libs/luajit/ChangeLog
index 5d02d9a8dd2..0cc770cbd0e 100644
--- a/Build/source/libs/luajit/ChangeLog
+++ b/Build/source/libs/luajit/ChangeLog
@@ -1,3 +1,7 @@
+2016-02-18 Akira Kakuto
+
+	* Makefile.am, configure.ac: New convention.
+
 2015-09-28 Peter Breitenlohner
 
 	* Makefile.am, configure.ac, m4/lj-system.m4: Try to stay
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/ChangeLog b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/ChangeLog
deleted file mode 100644
index 0306037354a..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/ChangeLog
+++ /dev/null
@@ -1,61 +0,0 @@
-2015-09-27 Peter Breitenlohner
-
-	* patch-07-x86_64-cygwin (new): Backport from upstream git.
-
-2015-09-07 Peter Breitenlohner
-
-	Import LuaJIT-2.1.0-beta1.
-
-	* patch-01-LuaJITTeX: Adapted.
-	* patch-05-LuaJITTeX: Adapted.
-	* patch-06-ppc-darwin: Adapted.
-
-2015-06-12 Peter Breitenlohner
-
-	Import LuaJIT-2.0.4.
-
-	* patch-01-LuaJITTeX: Adapted.
-	* patch-05-LuaJITTeX: Adapted.
-	* patch-06-ppc-darwin: Adapted.
-
-2015-01-19 Peter Breitenlohner
-
-	* patch-06-ppc-darwin (new): Build fix for PPC OS X.
-	From http://permalink.gmane.org/gmane.comp.lang.lua.luajit/5936.
-
-2014-06-01 Peter Breitenlohner
-
-	* patch-05-LuaJITTeX (lj_str_new): Replace '6' by LUAI_HASHLIMIT
-	and use '#define LUAI_HASHLIMIT 5' as default as in
-	../../libs/lua52/lua-5.2.3/src/lstring.c.
-
-2014-03-14 Peter Breitenlohner
-
-	Import LuaJIT-2.0.3.
-
-	* patch-01-LuaJITTeX: Adapted.
-	* patch-05-LuaJITTeX (new): More changes from Luigi and Taco.
-
-	* patch-02-unwind, patch-03-OSX-build, patch-04-NetBSD64-build
-	(removed): Now in distribution.
-
-2014-02-23 Peter Breitenlohner
-
-	* patch-04-NetBSD64-build (new): Fix NetBSD/amd64 build from
-	Marc Baudoin .
-
-2014-02-07 Peter Breitenlohner
-
-	* patch-03-OSX-build (new): Backport from upstream git.
-	Fix OSX build issue by Mike Pall.
-
-2014-02-06 Peter Breitenlohner
-
-	* patch-02-unwind (new): Bug fix.
-
-2014-02-05 Peter Breitenlohner
-
-	Import LuaJIT-2.0.2.
-
-	* patch-01-LuaJITTeX (new): Adapt for use in LuaJITTeX.
- diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-01-LuaJITTeX b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-01-LuaJITTeX deleted file mode 100644 index 4853f00cc52..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-01-LuaJITTeX +++ /dev/null @@ -1,175 +0,0 @@ -diff -ur LuaJIT-2.1.0-beta1.orig/src/lauxlib.h LuaJIT-2.1.0-beta1/src/lauxlib.h ---- LuaJIT-2.1.0-beta1.orig/src/lauxlib.h 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lauxlib.h 2015-09-04 08:42:39.000000000 +0200 -@@ -86,6 +86,32 @@ - int level); - - -+ -+/* -+** {====================================================== -+** File handles for IO library -+** ======================================================= -+*/ -+ -+/* -+** A file handle is a userdata with metatable 'LUA_FILEHANDLE' and -+** initial structure 'luaL_Stream' (it may contain other fields -+** after that initial structure). -+*/ -+ -+#define LUA_FILEHANDLE "FILE*" -+ -+ -+typedef struct luaL_Stream { -+ FILE *f; /* stream (NULL for incompletely created streams) */ -+ lua_CFunction closef; /* to close stream (NULL for closed streams) */ -+} luaL_Stream; -+ -+/* }====================================================== */ -+ -+ -+ -+ - /* - ** =============================================================== - ** some useful macros -diff -ur LuaJIT-2.1.0-beta1.orig/src/lib_init.c LuaJIT-2.1.0-beta1/src/lib_init.c ---- LuaJIT-2.1.0-beta1.orig/src/lib_init.c 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lib_init.c 2015-09-04 08:42:39.000000000 +0200 -@@ -26,6 +26,7 @@ - { LUA_DBLIBNAME, luaopen_debug }, - { LUA_BITLIBNAME, luaopen_bit }, - { LUA_JITLIBNAME, luaopen_jit }, -+ { LUA_BITLIBNAME_32, luaopen_bit32 }, - { NULL, NULL } - }; - -diff -ur LuaJIT-2.1.0-beta1.orig/src/lib_package.c LuaJIT-2.1.0-beta1/src/lib_package.c ---- LuaJIT-2.1.0-beta1.orig/src/lib_package.c 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lib_package.c 2015-09-04 08:42:39.000000000 +0200 -@@ -362,6 +362,29 @@ - return 1; /* library loaded successfully */ - } - -+#define LUA_POF "luaopen_" -+#define LUA_OFSEP "_" -+#define POF LUA_POF -+ -+static const char *mkfuncname (lua_State *L, const char *modname) { -+ const char *funcname; -+ const char *mark = strchr(modname, *LUA_IGMARK); -+ if (mark) modname = mark + 1; -+ funcname = luaL_gsub(L, modname, ".", LUA_OFSEP); -+ funcname = lua_pushfstring(L, POF"%s", funcname); -+ lua_remove(L, -2); /* remove 'gsub' result */ -+ return funcname; -+} -+ -+ -+int loader_C_luatex (lua_State *L, const char *name, const char *filename) { -+ const char *funcname; -+ funcname = mkfuncname(L, name); -+ if (ll_loadfunc(L, filename, funcname,0) != 0) -+ loaderror(L, filename); -+ return 1; /* library loaded successfully */ -+} -+ - static int lj_cf_package_loader_croot(lua_State *L) - { - const char *filename; -@@ -381,6 +404,21 @@ - return 1; - } - -+int loader_Call_luatex (lua_State *L, const char *name, const char *filename) { -+ const char *funcname; -+ int stat; -+ if (filename == NULL) return 1; /* root not found */ -+ funcname = mkfuncname(L, name); -+ if ((stat = ll_loadfunc(L, filename, funcname,0)) != 0) { -+ if (stat != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ -+ lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, -+ name, filename); -+ return 1; /* function not found */ -+ } -+ return 1; /* library loaded successfully */ -+} -+ -+ - static int lj_cf_package_loader_preload(lua_State *L) - { - const char *name = 
luaL_checkstring(L, 1); -diff -ur LuaJIT-2.1.0-beta1.orig/src/lua.h LuaJIT-2.1.0-beta1/src/lua.h ---- LuaJIT-2.1.0-beta1.orig/src/lua.h 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lua.h 2015-09-04 08:42:39.000000000 +0200 -@@ -348,6 +348,16 @@ - const char *chunkname, const char *mode); - - -+#define LUA_OPEQ 0 -+#define LUA_OPLT 1 -+#define LUA_OPLE 2 -+#define LUA_OK 0 -+ -+/* see http://comments.gmane.org/gmane.comp.programming.swig/18673 */ -+# define lua_rawlen lua_objlen -+ -+ -+ - struct lua_Debug { - int event; - const char *name; /* (n) */ -diff -ur LuaJIT-2.1.0-beta1.orig/src/lualib.h LuaJIT-2.1.0-beta1/src/lualib.h ---- LuaJIT-2.1.0-beta1.orig/src/lualib.h 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lualib.h 2015-09-04 08:42:39.000000000 +0200 -@@ -22,6 +22,8 @@ - #define LUA_JITLIBNAME "jit" - #define LUA_FFILIBNAME "ffi" - -+#define LUA_BITLIBNAME_32 "bit32" -+ - LUALIB_API int luaopen_base(lua_State *L); - LUALIB_API int luaopen_math(lua_State *L); - LUALIB_API int luaopen_string(lua_State *L); -@@ -34,6 +36,8 @@ - LUALIB_API int luaopen_jit(lua_State *L); - LUALIB_API int luaopen_ffi(lua_State *L); - -+LUALIB_API int luaopen_bit32(lua_State *L); -+ - LUALIB_API void luaL_openlibs(lua_State *L); - - #ifndef lua_assert -diff -ur LuaJIT-2.1.0-beta1.orig/src/Makefile LuaJIT-2.1.0-beta1/src/Makefile ---- LuaJIT-2.1.0-beta1.orig/src/Makefile 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/Makefile 2015-09-04 08:42:39.000000000 +0200 -@@ -97,7 +97,7 @@ - # enabled by default. Some other features that *might* break some existing - # code (e.g. __pairs or os.execute() return values) can be enabled here. - # Note: this does not provide full compatibility with Lua 5.2 at this time. --#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT -+XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT - # - # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. 
- #XCFLAGS+= -DLUAJIT_DISABLE_JIT -@@ -456,7 +456,7 @@ - LJVM_BOUT= $(LJVM_S) - LJVM_MODE= elfasm - --LJLIB_O= lib_base.o lib_math.o lib_bit.o lib_string.o lib_table.o \ -+LJLIB_O= lib_base.o lib_math.o lbitlib.o lib_bit.o lib_string.o lib_table.o \ - lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o - LJLIB_C= $(LJLIB_O:.o=.c) - -diff -ur LuaJIT-2.1.0-beta1.orig/src/Makefile.dep LuaJIT-2.1.0-beta1/src/Makefile.dep ---- LuaJIT-2.1.0-beta1.orig/src/Makefile.dep 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/Makefile.dep 2015-09-04 08:46:10.000000000 +0200 -@@ -6,6 +6,7 @@ - lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \ - lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \ - lj_strfmt.h lj_lib.h lj_libdef.h -+lbitlib.o: lbitlib.c lua.h luaconf.h lauxlib.h lualib.h - lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ - lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_strscan.h \ - lj_strfmt.h lj_ctype.h lj_cdata.h lj_cconv.h lj_carith.h lj_ff.h \ diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-05-LuaJITTeX b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-05-LuaJITTeX deleted file mode 100644 index 3a05a52d896..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-05-LuaJITTeX +++ /dev/null @@ -1,108 +0,0 @@ -diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_def.h LuaJIT-2.1.0-beta1/src/lj_def.h ---- LuaJIT-2.1.0-beta1.orig/src/lj_def.h 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lj_def.h 2015-09-04 08:51:52.000000000 +0200 -@@ -66,7 +66,7 @@ - #define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */ - #define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */ - #define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */ --#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */ -+#define LJ_MAX_UPVAL 249 /* Max. # of upvalues. */ - - #define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */ - #define LJ_STACK_EXTRA (5+2*LJ_FR2) /* Extra stack space (metamethods). */ -diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_str.c LuaJIT-2.1.0-beta1/src/lj_str.c ---- LuaJIT-2.1.0-beta1.orig/src/lj_str.c 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lj_str.c 2015-09-04 08:51:52.000000000 +0200 -@@ -118,6 +118,16 @@ - g->strhash = newhash; - } - -+/* -+** Lua will use at most ~(2^LUAI_HASHLIMIT) bytes from a string to -+** compute its hash -+*/ -+#if !defined(LUAI_HASHLIMIT) -+#define LUAI_HASHLIMIT 5 -+#endif -+ -+#define cast(t, exp) ((t)(exp)) -+int luajittex_choose_hash_function = 0 ; - /* Intern a string and return string object. */ - GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx) - { -@@ -126,27 +136,44 @@ - GCobj *o; - MSize len = (MSize)lenx; - MSize a, b, h = len; -+ size_t step ; -+ size_t l1 ; - if (lenx >= LJ_MAX_STR) - lj_err_msg(L, LJ_ERR_STROV); - g = G(L); -+ -+ if (len==0) -+ return &g->strempty; -+ if (luajittex_choose_hash_function==0) { -+ /* Lua 5.1.5 hash function */ -+ /* for 5.2 max methods we also need to patch the vm eq */ -+ step = (len>>LUAI_HASHLIMIT)+1; /* if string is too long, don't hash all its chars */ -+ for (l1=len; l1>=step; l1-=step) /* compute hash */ -+ h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1])); -+ } else { -+ /* LuaJIT 2.0.2 hash function */ - /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */ -- if (len >= 4) { /* Caveat: unaligned access! 
*/ -- a = lj_getu32(str); -- h ^= lj_getu32(str+len-4); -- b = lj_getu32(str+(len>>1)-2); -- h ^= b; h -= lj_rol(b, 14); -- b += lj_getu32(str+(len>>2)-1); -- } else if (len > 0) { -- a = *(const uint8_t *)str; -- h ^= *(const uint8_t *)(str+len-1); -- b = *(const uint8_t *)(str+(len>>1)); -- h ^= b; h -= lj_rol(b, 14); -- } else { -- return &g->strempty; -- } -- a ^= h; a -= lj_rol(h, 11); -- b ^= a; b -= lj_rol(a, 25); -- h ^= b; h -= lj_rol(b, 16); -+ if (len >= 4) { /* Caveat: unaligned access! */ -+ a = lj_getu32(str); -+ h ^= lj_getu32(str+len-4); -+ b = lj_getu32(str+(len>>1)-2); -+ h ^= b; h -= lj_rol(b, 14); -+ b += lj_getu32(str+(len>>2)-1); -+ } else if (len > 0) { -+ a = *(const uint8_t *)str; -+ h ^= *(const uint8_t *)(str+len-1); -+ b = *(const uint8_t *)(str+(len>>1)); -+ h ^= b; h -= lj_rol(b, 14); -+ } else { -+ /* Already done, kept for reference */ -+ return &g->strempty; -+ } -+ a ^= h; a -= lj_rol(h, 11); -+ b ^= a; b -= lj_rol(a, 25); -+ h ^= b; h -= lj_rol(b, 16); -+ } -+ -+ - /* Check if the string has already been interned. */ - o = gcref(g->strhash[h & g->strmask]); - if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) { -Only in LuaJIT-2.1.0-beta1/src: lj_str.c.orig -diff -ur LuaJIT-2.1.0-beta1.orig/src/lua.h LuaJIT-2.1.0-beta1/src/lua.h ---- LuaJIT-2.1.0-beta1.orig/src/lua.h 2015-09-04 08:42:39.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lua.h 2015-09-04 08:51:52.000000000 +0200 -@@ -103,6 +103,9 @@ - typedef LUA_INTEGER lua_Integer; - - -+/* communication with LuaJiTTeX */ -+LUA_API int luajittex_choose_hash_function; -+ - - /* - ** state manipulation diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-06-ppc-darwin b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-06-ppc-darwin deleted file mode 100644 index 17f7e4a1c08..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-06-ppc-darwin +++ /dev/null @@ -1,24 +0,0 @@ -diff -ur LuaJIT-2.1.0-beta1.orig/src/host/buildvm.c LuaJIT-2.1.0-beta1/src/host/buildvm.c ---- LuaJIT-2.1.0-beta1.orig/src/host/buildvm.c 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/host/buildvm.c 2015-09-04 09:03:00.000000000 +0200 -@@ -113,7 +113,7 @@ - name[0] = '@'; - else - *p = '\0'; --#elif LJ_TARGET_PPC && !LJ_TARGET_CONSOLE -+#elif LJ_TARGET_PPC && !LJ_TARGET_OSX && !LJ_TARGET_CONSOLE - /* Keep @plt etc. 
*/ - #else - *p = '\0'; -diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_arch.h LuaJIT-2.1.0-beta1/src/lj_arch.h ---- LuaJIT-2.1.0-beta1.orig/src/lj_arch.h 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lj_arch.h 2015-09-04 08:58:07.000000000 +0200 -@@ -324,7 +324,7 @@ - #if __GNUC__ < 4 - #error "Need at least GCC 4.0 or newer" - #endif --#elif LJ_TARGET_ARM -+#elif LJ_TARGET_ARM || LJ_TARGET_PPC - #if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2) - #error "Need at least GCC 4.2 or newer" - #endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-07-x86_64-cygwin b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-07-x86_64-cygwin deleted file mode 100644 index b3bdead9dd6..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1-PATCHES/patch-07-x86_64-cygwin +++ /dev/null @@ -1,59 +0,0 @@ -diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_alloc.c LuaJIT-2.1.0-beta1/src/lj_alloc.c ---- LuaJIT-2.1.0-beta1.orig/src/lj_alloc.c 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lj_alloc.c 2015-09-27 19:05:37.000000000 +0200 -@@ -196,7 +196,7 @@ - return ptr; - } - --#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) -+#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) || defined(__CYGWIN__) - - /* OSX and FreeBSD mmap() use a naive first-fit linear search. - ** That's perfect for us. Except that -pagezero_size must be set for OSX, -diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_arch.h LuaJIT-2.1.0-beta1/src/lj_arch.h ---- LuaJIT-2.1.0-beta1.orig/src/lj_arch.h 2015-09-04 08:58:07.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lj_arch.h 2015-09-27 19:05:37.000000000 +0200 -@@ -155,7 +155,11 @@ - #define LJ_ARCH_NAME "x64" - #define LJ_ARCH_BITS 64 - #define LJ_ARCH_ENDIAN LUAJIT_LE --#define LJ_ABI_WIN LJ_TARGET_WINDOWS -+#if LJ_TARGET_WINDOWS || __CYGWIN__ -+#define LJ_ABI_WIN 1 -+#else -+#define LJ_ABI_WIN 0 -+#endif - #define LJ_TARGET_X64 1 - #define LJ_TARGET_X86ORX64 1 - #define LJ_TARGET_EHRETREG 0 -diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_err.c LuaJIT-2.1.0-beta1/src/lj_err.c ---- LuaJIT-2.1.0-beta1.orig/src/lj_err.c 2015-08-25 23:35:00.000000000 +0200 -+++ LuaJIT-2.1.0-beta1/src/lj_err.c 2015-09-27 19:05:37.000000000 +0200 -@@ -183,7 +183,7 @@ - - /* -- External frame unwinding -------------------------------------------- */ - --#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_TARGET_WINDOWS -+#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_ABI_WIN - - /* - ** We have to use our own definitions instead of the mandatory (!) unwind.h, -@@ -349,7 +349,7 @@ - - #endif - --#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS -+#elif LJ_TARGET_X64 && LJ_ABI_WIN - - /* - ** Someone in Redmond owes me several days of my life. A lot of this is -@@ -414,7 +414,9 @@ - if (cf2) { /* We catch it, so start unwinding the upper frames. */ - if (rec->ExceptionCode == LJ_MSVC_EXCODE || - rec->ExceptionCode == LJ_GCC_EXCODE) { -+#if LJ_TARGET_WINDOWS - __DestructExceptionObject(rec, 1); -+#endif - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); - } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) { - /* Don't catch access violations etc. 
*/ diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/COPYRIGHT b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/COPYRIGHT deleted file mode 100644 index 1ef7df62446..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/COPYRIGHT +++ /dev/null @@ -1,56 +0,0 @@ -=============================================================================== -LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ - -Copyright (C) 2005-2015 Mike Pall. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -[ MIT license: http://www.opensource.org/licenses/mit-license.php ] - -=============================================================================== -[ LuaJIT includes code from Lua 5.1/5.2, which has this license statement: ] - -Copyright (C) 1994-2012 Lua.org, PUC-Rio. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- -=============================================================================== -[ LuaJIT includes code from dlmalloc, which has this license statement: ] - -This is a version (aka dlmalloc) of malloc/free/realloc written by -Doug Lea and released to the public domain, as explained at -http://creativecommons.org/licenses/publicdomain - -=============================================================================== diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/Makefile b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/Makefile deleted file mode 100644 index 8ce773e3936..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/Makefile +++ /dev/null @@ -1,159 +0,0 @@ -############################################################################## -# LuaJIT top level Makefile for installation. Requires GNU Make. -# -# Please read doc/install.html before changing any variables! -# -# Suitable for POSIX platforms (Linux, *BSD, OSX etc.). -# Note: src/Makefile has many more configurable options. -# -# ##### This Makefile is NOT useful for Windows! ##### -# For MSVC, please follow the instructions given in src/msvcbuild.bat. -# For MinGW and Cygwin, cd to src and run make with the Makefile there. -# -# Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -############################################################################## - -MAJVER= 2 -MINVER= 1 -RELVER= 0 -PREREL= -beta1 -VERSION= $(MAJVER).$(MINVER).$(RELVER)$(PREREL) -ABIVER= 5.1 - -############################################################################## -# -# Change the installation path as needed. This automatically adjusts -# the paths in src/luaconf.h, too. Note: PREFIX must be an absolute path! -# -export PREFIX= /usr/local -export MULTILIB= lib -############################################################################## - -DPREFIX= $(DESTDIR)$(PREFIX) -INSTALL_BIN= $(DPREFIX)/bin -INSTALL_LIB= $(DPREFIX)/$(MULTILIB) -INSTALL_SHARE= $(DPREFIX)/share -INSTALL_INC= $(DPREFIX)/include/luajit-$(MAJVER).$(MINVER) - -INSTALL_LJLIBD= $(INSTALL_SHARE)/luajit-$(VERSION) -INSTALL_JITLIB= $(INSTALL_LJLIBD)/jit -INSTALL_LMODD= $(INSTALL_SHARE)/lua -INSTALL_LMOD= $(INSTALL_LMODD)/$(ABIVER) -INSTALL_CMODD= $(INSTALL_LIB)/lua -INSTALL_CMOD= $(INSTALL_CMODD)/$(ABIVER) -INSTALL_MAN= $(INSTALL_SHARE)/man/man1 -INSTALL_PKGCONFIG= $(INSTALL_LIB)/pkgconfig - -INSTALL_TNAME= luajit-$(VERSION) -INSTALL_TSYMNAME= luajit -INSTALL_ANAME= libluajit-$(ABIVER).a -INSTALL_SONAME= libluajit-$(ABIVER).so.$(MAJVER).$(MINVER).$(RELVER) -INSTALL_SOSHORT= libluajit-$(ABIVER).so -INSTALL_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).$(MINVER).$(RELVER).dylib -INSTALL_DYLIBSHORT1= libluajit-$(ABIVER).dylib -INSTALL_DYLIBSHORT2= libluajit-$(ABIVER).$(MAJVER).dylib -INSTALL_PCNAME= luajit.pc - -INSTALL_STATIC= $(INSTALL_LIB)/$(INSTALL_ANAME) -INSTALL_DYN= $(INSTALL_LIB)/$(INSTALL_SONAME) -INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_SOSHORT) -INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_SOSHORT) -INSTALL_T= $(INSTALL_BIN)/$(INSTALL_TNAME) -INSTALL_TSYM= $(INSTALL_BIN)/$(INSTALL_TSYMNAME) -INSTALL_PC= $(INSTALL_PKGCONFIG)/$(INSTALL_PCNAME) - -INSTALL_DIRS= $(INSTALL_BIN) $(INSTALL_LIB) $(INSTALL_INC) $(INSTALL_MAN) \ - $(INSTALL_PKGCONFIG) $(INSTALL_JITLIB) $(INSTALL_LMOD) $(INSTALL_CMOD) -UNINSTALL_DIRS= $(INSTALL_JITLIB) $(INSTALL_LJLIBD) $(INSTALL_INC) \ - $(INSTALL_LMOD) $(INSTALL_LMODD) $(INSTALL_CMOD) $(INSTALL_CMODD) - -RM= rm -f -MKDIR= mkdir -p -RMDIR= rmdir 2>/dev/null -SYMLINK= ln -sf -INSTALL_X= install -m 0755 -INSTALL_F= install 
-m 0644 -UNINSTALL= $(RM) -LDCONFIG= ldconfig -n -SED_PC= sed -e "s|^prefix=.*|prefix=$(PREFIX)|" \ - -e "s|^multilib=.*|multilib=$(MULTILIB)|" - -FILE_T= luajit -FILE_A= libluajit.a -FILE_SO= libluajit.so -FILE_MAN= luajit.1 -FILE_PC= luajit.pc -FILES_INC= lua.h lualib.h lauxlib.h luaconf.h lua.hpp luajit.h -FILES_JITLIB= bc.lua bcsave.lua dump.lua p.lua v.lua zone.lua \ - dis_x86.lua dis_x64.lua dis_arm.lua dis_ppc.lua \ - dis_mips.lua dis_mipsel.lua vmdef.lua - -ifeq (,$(findstring Windows,$(OS))) - ifeq (Darwin,$(shell uname -s)) - INSTALL_SONAME= $(INSTALL_DYLIBNAME) - INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT1) - INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT2) - LDCONFIG= : - endif -endif - -############################################################################## - -INSTALL_DEP= src/luajit - -default all $(INSTALL_DEP): - @echo "==== Building LuaJIT $(VERSION) ====" - $(MAKE) -C src - @echo "==== Successfully built LuaJIT $(VERSION) ====" - -install: $(INSTALL_DEP) - @echo "==== Installing LuaJIT $(VERSION) to $(PREFIX) ====" - $(MKDIR) $(INSTALL_DIRS) - cd src && $(INSTALL_X) $(FILE_T) $(INSTALL_T) - cd src && test -f $(FILE_A) && $(INSTALL_F) $(FILE_A) $(INSTALL_STATIC) || : - $(RM) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2) - cd src && test -f $(FILE_SO) && \ - $(INSTALL_X) $(FILE_SO) $(INSTALL_DYN) && \ - $(LDCONFIG) $(INSTALL_LIB) && \ - $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT1) && \ - $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT2) || : - cd etc && $(INSTALL_F) $(FILE_MAN) $(INSTALL_MAN) - cd etc && $(SED_PC) $(FILE_PC) > $(FILE_PC).tmp && \ - $(INSTALL_F) $(FILE_PC).tmp $(INSTALL_PC) && \ - $(RM) $(FILE_PC).tmp - cd src && $(INSTALL_F) $(FILES_INC) $(INSTALL_INC) - cd src/jit && $(INSTALL_F) $(FILES_JITLIB) $(INSTALL_JITLIB) - @echo "==== Successfully installed LuaJIT $(VERSION) to $(PREFIX) ====" - @echo "" - @echo "Note: the development releases deliberately do NOT install a symlink for luajit" - @echo "You can do this now by running this command (with sudo):" - @echo "" - @echo " $(SYMLINK) $(INSTALL_TNAME) $(INSTALL_TSYM)" - @echo "" - - -uninstall: - @echo "==== Uninstalling LuaJIT $(VERSION) from $(PREFIX) ====" - $(UNINSTALL) $(INSTALL_T) $(INSTALL_STATIC) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2) $(INSTALL_MAN)/$(FILE_MAN) $(INSTALL_PC) - for file in $(FILES_JITLIB); do \ - $(UNINSTALL) $(INSTALL_JITLIB)/$$file; \ - done - for file in $(FILES_INC); do \ - $(UNINSTALL) $(INSTALL_INC)/$$file; \ - done - $(LDCONFIG) $(INSTALL_LIB) - $(RMDIR) $(UNINSTALL_DIRS) || : - @echo "==== Successfully uninstalled LuaJIT $(VERSION) from $(PREFIX) ====" - -############################################################################## - -amalg: - @echo "Building LuaJIT $(VERSION)" - $(MAKE) -C src amalg - -clean: - $(MAKE) -C src clean - -.PHONY: all install amalg clean - -############################################################################## diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/README b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/README deleted file mode 100644 index ca70dd8eddf..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/README +++ /dev/null @@ -1,16 +0,0 @@ -README for LuaJIT 2.1.0-beta1 ------------------------------ - -LuaJIT is a Just-In-Time (JIT) compiler for the Lua programming language. - -Project Homepage: http://luajit.org/ - -LuaJIT is Copyright (C) 2005-2015 Mike Pall. -LuaJIT is free software, released under the MIT license. 
-See full Copyright Notice in the COPYRIGHT file or in luajit.h. - -Documentation for LuaJIT is available in HTML format. -Please point your favorite browser to: - - doc/luajit.html - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad-print.css b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad-print.css deleted file mode 100644 index 07f5c84a5f6..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad-print.css +++ /dev/null @@ -1,166 +0,0 @@ -/* Copyright (C) 2004-2015 Mike Pall. - * - * You are welcome to use the general ideas of this design for your own sites. - * But please do not steal the stylesheet, the layout or the color scheme. - */ -body { - font-family: serif; - font-size: 11pt; - margin: 0 3em; - padding: 0; - border: none; -} -a:link, a:visited, a:hover, a:active { - text-decoration: none; - background: transparent; - color: #0000ff; -} -h1, h2, h3 { - font-family: sans-serif; - font-weight: bold; - text-align: left; - margin: 0.5em 0; - padding: 0; -} -h1 { - font-size: 200%; -} -h2 { - font-size: 150%; -} -h3 { - font-size: 125%; -} -p { - margin: 0 0 0.5em 0; - padding: 0; -} -ul, ol { - margin: 0.5em 0; - padding: 0 0 0 2em; -} -ul { - list-style: outside square; -} -ol { - list-style: outside decimal; -} -li { - margin: 0; - padding: 0; -} -dl { - margin: 1em 0; - padding: 1em; - border: 1px solid black; -} -dt { - font-weight: bold; - margin: 0; - padding: 0; -} -dt sup { - float: right; - margin-left: 1em; -} -dd { - margin: 0.5em 0 0 2em; - padding: 0; -} -table { - table-layout: fixed; - width: 100%; - margin: 1em 0; - padding: 0; - border: 1px solid black; - border-spacing: 0; - border-collapse: collapse; -} -tr { - margin: 0; - padding: 0; - border: none; -} -td { - text-align: left; - margin: 0; - padding: 0.2em 0.5em; - border-top: 1px solid black; - border-bottom: 1px solid black; -} -tr.separate td { - border-top: double; -} -tt, pre, code, kbd, samp { - font-family: monospace; - font-size: 75%; -} -kbd { - font-weight: bolder; -} -blockquote, pre { - margin: 1em 2em; - padding: 0; -} -img { - border: none; - vertical-align: baseline; - margin: 0; - padding: 0; -} -img.left { - float: left; - margin: 0.5em 1em 0.5em 0; -} -img.right { - float: right; - margin: 0.5em 0 0.5em 1em; -} -.flush { - clear: both; - visibility: hidden; -} -.hide, .noprint, #nav { - display: none !important; -} -.pagebreak { - page-break-before: always; -} -#site { - text-align: right; - font-family: sans-serif; - font-weight: bold; - margin: 0 1em; - border-bottom: 1pt solid black; -} -#site a { - font-size: 1.2em; -} -#site a:link, #site a:visited { - text-decoration: none; - font-weight: bold; - background: transparent; - color: #ffffff; -} -#logo { - color: #ff8000; -} -#head { - clear: both; - margin: 0 1em; -} -#main { - line-height: 1.3; - text-align: justify; - margin: 1em; -} -#foot { - clear: both; - font-size: 80%; - text-align: center; - margin: 0 1.25em; - padding: 0.5em 0 0 0; - border-top: 1pt solid black; - page-break-before: avoid; - page-break-after: avoid; -} diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad.css b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad.css deleted file mode 100644 index ae531430d19..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/bluequad.css +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (C) 2004-2015 Mike Pall. - * - * You are welcome to use the general ideas of this design for your own sites. 
- * But please do not steal the stylesheet, the layout or the color scheme. - */ -/* colorscheme: - * - * site | head #4162bf/white | #6078bf/#e6ecff - * ------+------ ----------------+------------------- - * nav | main #bfcfff | #e6ecff/black - * - * nav: hiback loback #c5d5ff #b9c9f9 - * hiborder loborder #e6ecff #97a7d7 - * link hover #2142bf #ff0000 - * - * link: link visited hover #2142bf #8122bf #ff0000 - * - * main: boxback boxborder #f0f4ff #bfcfff - */ -body { - font-family: Verdana, Arial, Helvetica, sans-serif; - font-size: 10pt; - margin: 0; - padding: 0; - border: none; - background: #e0e0e0; - color: #000000; -} -a:link { - text-decoration: none; - background: transparent; - color: #2142bf; -} -a:visited { - text-decoration: none; - background: transparent; - color: #8122bf; -} -a:hover, a:active { - text-decoration: underline; - background: transparent; - color: #ff0000; -} -h1, h2, h3 { - font-weight: bold; - text-align: left; - margin: 0.5em 0; - padding: 0; - background: transparent; -} -h1 { - font-size: 200%; - line-height: 3em; /* really 6em relative to body, match #site span */ - margin: 0; -} -h2 { - font-size: 150%; - color: #606060; -} -h3 { - font-size: 125%; - color: #404040; -} -p { - max-width: 600px; - margin: 0 0 0.5em 0; - padding: 0; -} -b { - color: #404040; -} -ul, ol { - max-width: 600px; - margin: 0.5em 0; - padding: 0 0 0 2em; -} -ul { - list-style: outside square; -} -ol { - list-style: outside decimal; -} -li { - margin: 0; - padding: 0; -} -dl { - max-width: 600px; - margin: 1em 0; - padding: 1em; - border: 1px solid #bfcfff; - background: #f0f4ff; -} -dt { - font-weight: bold; - margin: 0; - padding: 0; -} -dt sup { - float: right; - margin-left: 1em; - color: #808080; -} -dt a:visited { - text-decoration: none; - color: #2142bf; -} -dt a:hover, dt a:active { - text-decoration: none; - color: #ff0000; -} -dd { - margin: 0.5em 0 0 2em; - padding: 0; -} -div.tablewrap { /* for IE *sigh* */ - max-width: 600px; -} -table { - table-layout: fixed; - border-spacing: 0; - border-collapse: collapse; - max-width: 600px; - width: 100%; - margin: 1em 0; - padding: 0; - border: 1px solid #bfcfff; -} -tr { - margin: 0; - padding: 0; - border: none; -} -tr.odd { - background: #f0f4ff; -} -tr.separate td { - border-top: 1px solid #bfcfff; -} -td { - text-align: left; - margin: 0; - padding: 0.2em 0.5em; - border: none; -} -tt, code, kbd, samp { - font-family: Courier New, Courier, monospace; - line-height: 1.2; - font-size: 110%; -} -kbd { - font-weight: bolder; -} -blockquote, pre { - max-width: 600px; - margin: 1em 2em; - padding: 0; -} -pre { - line-height: 1.1; -} -pre.code { - line-height: 1.4; - margin: 0.5em 0 1em 0.5em; - padding: 0.5em 1em; - border: 1px solid #bfcfff; - background: #f0f4ff; -} -pre.mark { - padding-left: 2em; -} -span.codemark { - position:absolute; - left: 16em; - color: #4040c0; -} -span.mark { - color: #4040c0; - font-family: Courier New, Courier, monospace; - line-height: 1.1; -} -img { - border: none; - vertical-align: baseline; - margin: 0; - padding: 0; -} -img.left { - float: left; - margin: 0.5em 1em 0.5em 0; -} -img.right { - float: right; - margin: 0.5em 0 0.5em 1em; -} -.indent { - padding-left: 1em; -} -.flush { - clear: both; - visibility: hidden; -} -.hide, .noscreen { - display: none !important; -} -.ext { - color: #ff8000; -} -.new { - font-size: 6pt; - vertical-align: middle; - background: #ff8000; - color: #ffffff; -} -#site { - clear: both; - float: left; - width: 13em; - text-align: center; - font-weight: bold; - 
margin: 0; - padding: 0; - background: transparent; - color: #ffffff; -} -#site a { - font-size: 200%; -} -#site a:link, #site a:visited { - text-decoration: none; - font-weight: bold; - background: transparent; - color: #ffffff; -} -#site span { - line-height: 3em; /* really 6em relative to body, match h1 */ -} -#logo { - color: #ffb380; -} -#head { - margin: 0; - padding: 0 0 0 2em; - border-left: solid 13em #4162bf; - border-right: solid 3em #6078bf; - background: #6078bf; - color: #e6ecff; -} -#nav { - clear: both; - float: left; - overflow: hidden; - text-align: left; - line-height: 1.5; - width: 13em; - padding-top: 1em; - background: transparent; -} -#nav ul { - list-style: none outside; - margin: 0; - padding: 0; -} -#nav li { - margin: 0; - padding: 0; -} -#nav a { - display: block; - text-decoration: none; - font-weight: bold; - margin: 0; - padding: 2px 1em; - border-top: 1px solid transparent; - border-bottom: 1px solid transparent; - background: transparent; - color: #2142bf; -} -#nav a:hover, #nav a:active { - text-decoration: none; - border-top: 1px solid #97a7d7; - border-bottom: 1px solid #e6ecff; - background: #b9c9f9; - color: #ff0000; -} -#nav a.current, #nav a.current:hover, #nav a.current:active { - border-top: 1px solid #e6ecff; - border-bottom: 1px solid #97a7d7; - background: #c5d5ff; - color: #2142bf; -} -#nav ul ul a { - padding: 0 1em 0 1.7em; -} -#nav ul ul ul a { - padding: 0 0.5em 0 2.4em; -} -#main { - line-height: 1.5; - text-align: left; - margin: 0; - padding: 1em 2em; - border-left: solid 13em #bfcfff; - border-right: solid 3em #e6ecff; - background: #e6ecff; -} -#foot { - clear: both; - font-size: 80%; - text-align: center; - margin: 0; - padding: 0.5em; - background: #6078bf; - color: #ffffff; -} -#foot a:link, #foot a:visited { - text-decoration: underline; - background: transparent; - color: #ffffff; -} -#foot a:hover, #foot a:active { - text-decoration: underline; - background: transparent; - color: #bfcfff; -} diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/changes.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/changes.html deleted file mode 100644 index 64dc4c2adc1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/changes.html +++ /dev/null @@ -1,804 +0,0 @@ - - - -LuaJIT Change History - - - - - - - - - -
This is a list of changes between the released versions of LuaJIT.
The current stable version is LuaJIT 2.0.4.

Please check the » Online Change History to see whether newer versions are available.

LuaJIT 2.1.0-beta1 — 2015-08-25

This is a brief summary of the major changes in LuaJIT 2.1 compared to 2.0.
Please take a look at the commit history for more details.

  • Changes to the VM core:
    • Add low-overhead profiler (-jp).
    • Add LJ_GC64 mode: 64 bit GC object references (really: 47 bit). Interpreter-only for now.
    • Add LJ_FR2 mode: Two-slot frame info. Required by LJ_GC64 mode.
    • Add table.new() and table.clear().
    • Parse binary number literals (0bxxx).
  • Improvements to the JIT compiler:
    • Add trace stitching (disabled for now).
    • Compile various builtins: string.char(), string.reverse(), string.lower(), string.upper(), string.rep(), string.format(), table.concat(), bit.tohex(), getfenv(0), debug.getmetatable().
    • Compile string.find() for fixed string searches (no patterns).
    • Compile BC_TSETM, e.g. {1,2,3,f()}.
    • Compile string concatenations (BC_CAT).
    • Compile __concat metamethod.
    • Various minor optimizations.
  • Internal Changes:
    • Add support for embedding LuaJIT bytecode for builtins.
    • Replace various builtins with embedded bytecode.
    • Refactor string buffers and string formatting.
    • Remove obsolete non-truncating number to integer conversions.
  • Ports:
    • Add Xbox One port (LJ_GC64 mode).
    • ARM64: Add port of the interpreter (LJ_GC64 mode).
    • x64: Add separate port of the interpreter to LJ_GC64 mode.
    • x86/x64: Drop internal x87 math functions. Use libm functions.
    • x86: Remove x87 support from interpreter. SSE2 is mandatory now.
    • PPC/e500: Drop support for this architecture.
  • FFI library:
    • FFI: Add 64 bit bitwise operations.
    • FFI: Compile VLA/VLS and large cdata allocations with default initialization.
    • FFI: Compile conversions from functions to function pointers.
    • FFI: Compile lightuserdata to void * conversion.
    • FFI: Compile ffi.gc(cdata, nil), too.
    • FFI: Add ffi.typeinfo().
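A minimal Lua sketch of some of the 2.1 additions listed above (table.new(), table.clear(), binary number literals); illustrative only, not part of the patch, and it assumes a LuaJIT 2.1 binary where the two table extensions are loaded via require():

    local newtab = require("table.new")    -- preallocating table constructor
    local clear  = require("table.clear")  -- empty a table but keep its storage
    local t = newtab(100, 0)               -- room for 100 array slots, no hash part
    for i = 1, 100 do t[i] = i * i end
    clear(t)                               -- t is empty again, allocation reused
    local mask = 0b1010                    -- binary number literal, value 10
    print(#t, mask)                        --> 0   10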

LuaJIT 2.0.4 — 2015-05-14

  • Fix stack check in narrowing optimization.
  • Fix Lua/C API typecheck error for special indexes.
  • Fix string to number conversion.
  • Fix lexer error for chunks without tokens.
  • Don't compile IR_RETF after CALLT to ff with-side effects.
  • Fix BC_UCLO/BC_JMP join optimization in Lua parser.
  • Fix corner case in string to number conversion.
  • Gracefully handle lua_error() for a suspended coroutine.
  • Avoid error messages when building with Clang.
  • Fix snapshot #0 handling for traces with a stack check on entry.
  • Fix fused constant loads under high register pressure.
  • Invalidate backpropagation cache after DCE.
  • Fix ABC elimination.
  • Fix debug info for main chunk of stripped bytecode.
  • Fix FOLD rule for string.sub(s, ...) == k.
  • Fix FOLD rule for STRREF of SNEW.
  • Fix frame traversal while searching for error function.
  • Prevent GC estimate miscalculation due to buffer growth.
  • Prevent adding side traces for stack checks.
  • Fix top slot calculation for snapshots with continuations.
  • Fix check for reuse of SCEV results in FORL.
  • Add PS Vita port.
  • Fix compatibility issues with Illumos.
  • Fix DragonFly build (unsupported).
  • OpenBSD/x86: Better executable memory allocation for W^X mode.
  • x86: Fix argument checks for ipairs() iterator.
  • x86: lj_math_random_step() clobbers XMM regs on OSX Clang.
  • x86: Fix code generation for unused result of math.random().
  • x64: Allow building with LUAJIT_USE_SYSMALLOC and LUAJIT_USE_VALGRIND.
  • x86/x64: Fix argument check for bit shifts.
  • x86/x64: Fix code generation for fused test/arith ops.
  • ARM: Fix write barrier check in BC_USETS.
  • PPC: Fix red zone overflow in machine code generation.
  • PPC: Don't use mcrxr on PPE.
  • Various archs: Fix excess stack growth in interpreter.
  • FFI: Fix FOLD rule for TOBIT + CONV num.u32.
  • FFI: Prevent DSE across ffi.string().
  • FFI: No meta fallback when indexing pointer to incomplete struct.
  • FFI: Fix initialization of unions of subtypes.
  • FFI: Fix cdata vs. non-cdata arithmetic and comparisons.
  • FFI: Fix __index/__newindex metamethod resolution for ctypes.
  • FFI: Fix compilation of reference field access.
  • FFI: Fix frame traversal for backtraces with FFI callbacks.
  • FFI: Fix recording of indexing a struct pointer ctype object itself.
  • FFI: Allow non-scalar cdata to be compared for equality by address.
  • FFI: Fix pseudo type conversions for type punning.

LuaJIT 2.0.3 — 2014-03-12

  • Add PS4 port.
  • Add support for multilib distro builds.
  • Fix OSX build.
  • Fix MinGW build.
  • Fix Xbox 360 build.
  • Improve ULOAD forwarding for open upvalues.
  • Fix GC steps threshold handling when called by JIT-compiled code.
  • Fix argument checks for math.deg() and math.rad().
  • Fix jit.flush(func|true).
  • Respect jit.off(func) when returning to a function, too.
  • Fix compilation of string.byte(s, nil, n).
  • Fix line number for relocated bytecode after closure fixup.
  • Fix frame traversal for backtraces.
  • Fix ABC elimination.
  • Fix handling of redundant PHIs.
  • Fix snapshot restore for exit to function header.
  • Fix type punning alias analysis for constified pointers.
  • Fix call unroll checks in the presence of metamethod frames.
  • Fix initial maxslot for down-recursive traces.
  • Prevent BASE register coalescing if parent uses IR_RETF.
  • Don't purge modified function from stack slots in BC_RET.
  • Fix recording of BC_VARG.
  • Don't access dangling reference to reallocated IR.
  • Fix frame depth display for bytecode dump in -jdump.
  • ARM: Fix register allocation when rematerializing FPRs.
  • x64: Fix store to upvalue for lightuserdata values.
  • FFI: Add missing GC steps for callback argument conversions.
  • FFI: Properly unload loaded DLLs.
  • FFI: Fix argument checks for ffi.string().
  • FFI/x64: Fix passing of vector arguments to calls.
  • FFI: Rehash finalizer table after GC cycle, if needed.
  • FFI: Fix cts->L for cdata unsinking in snapshot restore.

LuaJIT 2.0.2 — 2013-06-03

  • Fix memory access check for fast string interning.
  • Fix MSVC intrinsics for older versions.
  • Add missing GC steps for io.* functions.
  • Fix spurious red zone overflows in machine code generation.
  • Fix jump-range constrained mcode allocation.
  • Inhibit DSE for implicit loads via calls.
  • Fix builtin string to number conversion for overflow digits.
  • Fix optional argument handling while recording builtins.
  • Fix optional argument handling in table.concat().
  • Add partial support for building with MingW64 GCC 4.8-SEH.
  • Add missing PHI barrier to string.sub(str, a, b) == kstr FOLD rule.
  • Fix compatibility issues with Illumos.
  • ARM: Fix cache flush/sync for exit stubs of JIT-compiled code.
  • MIPS: Fix cache flush/sync for JIT-compiled code jump area.
  • PPC: Add plt suffix for external calls from assembler code.
  • FFI: Fix snapshot substitution in SPLIT pass.
  • FFI/x86: Fix register allocation for 64 bit comparisons.
  • FFI: Fix tailcall in lowest frame to C function with bool result.
  • FFI: Ignore long type specifier in ffi.istype().
  • FFI: Fix calling conventions for 32 bit OSX and iOS simulator (struct returns).
  • FFI: Fix calling conventions for ARM hard-float EABI (nested structs).
  • FFI: Improve error messages for arithmetic and comparison operators.
  • FFI: Insert no-op type conversion for pointer to integer cast.
  • FFI: Fix unroll limit for ffi.fill().
  • FFI: Must sink XBAR together with XSTOREs.
  • FFI: Preserve intermediate string for const char * conversion.

LuaJIT 2.0.1 — 2013-02-19

  • Don't clear frame for out-of-memory error.
  • Leave hook when resume catches error thrown from hook.
  • Add missing GC steps for template table creation.
  • Fix discharge order of comparisons in Lua parser.
  • Improve buffer handling for io.read().
  • OSX: Add support for Mach-O object files to -b option.
  • Fix PS3 port.
  • Fix/enable Xbox 360 port.
  • x86/x64: Always mark ref for shift count as non-weak.
  • x64: Don't fuse implicitly 32-to-64 extended operands.
  • ARM: Fix armhf call argument handling.
  • ARM: Fix code generation for integer math.min/math.max.
  • PPC/e500: Fix lj_vm_floor() for Inf/NaN.
  • FFI: Change priority of table initializer variants for structs.
  • FFI: Fix code generation for bool call result check on x86/x64.
  • FFI: Load FFI library on-demand for bytecode with cdata literals.
  • FFI: Fix handling of qualified transparent structs/unions.

LuaJIT 2.0.0 — 2012-11-08

  • Correctness and completeness:
    • Fix Android/x86 build.
    • Fix recording of equality comparisons with __eq metamethods.
    • Fix detection of immutable upvalues.
    • Replace error with PANIC for callbacks from JIT-compiled code.
    • Fix builtin string to number conversion for INT_MIN.
    • Don't create unneeded array part for template tables.
    • Fix CONV.num.int sinking.
    • Don't propagate implicitly widened number to index metamethods.
    • ARM: Fix ordered comparisons of number vs. non-number.
    • FFI: Fix code generation for replay of sunk float fields.
    • FFI: Fix signedness of bool.
    • FFI: Fix recording of bool call result check on x86/x64.
    • FFI: Fix stack-adjustment for __thiscall callbacks.

LuaJIT 2.0.0-beta11 — 2012-10-16

  • New features:
    • Use ARM VFP instructions, if available (build-time detection).
    • Add support for ARM hard-float EABI (armhf).
    • Add PS3 port.
    • Add many features from Lua 5.2, e.g. goto/labels. Refer to this list.
    • FFI: Add parameterized C types.
    • FFI: Add support for copy constructors.
    • FFI: Equality comparisons never raise an error (treat as unequal instead).
    • FFI: Box all accessed or returned enums.
    • FFI: Check for __new metamethod when calling a constructor.
    • FFI: Handle __pairs/__ipairs metamethods for cdata objects.
    • FFI: Convert io.* file handle to FILE * pointer (but as a void *).
    • FFI: Detect and support type punning through unions.
    • FFI: Improve various error messages.
  • Build-system reorganization:
    • Reorganize directory layout:
      - lib/* → src/jit/*
      - src/buildvm_*.dasc → src/vm_*.dasc
      - src/buildvm_*.h → removed
      - src/buildvm* → src/host/*
    • Add minified Lua interpreter plus Lua BitOp (minilua) to run DynASM.
    • Change DynASM bit operations to use Lua BitOp.
    • Translate only vm_*.dasc for detected target architecture.
    • Improve target detection for msvcbuild.bat.
    • Fix build issues on Cygwin and MinGW with optional MSys.
    • Handle cross-compiles with FPU/no-FPU or hard-fp/soft-fp ABI mismatch.
    • Remove some library functions for no-JIT/no-FFI builds.
    • Add uninstall target to top-level Makefile.
  • Correctness and completeness:
    • Preserve snapshot #0 PC for all traces.
    • Fix argument checks for coroutine.create().
    • Command line prints version and JIT status to stdout, not stderr.
    • Fix userdata __gc separations at Lua state close.
    • Fix TDUP to HLOAD forwarding for LJ_DUALNUM builds.
    • Fix buffer check in bytecode writer.
    • Make os.date() thread-safe.
    • Add missing declarations for MSVC intrinsics.
    • Fix dispatch table modifications for return hooks.
    • Workaround for MSVC conversion bug (double → uint32_t → int32_t).
    • Fix FOLD rule (i-j)-i => 0-j.
    • Never use DWARF unwinder on Windows.
    • Fix shrinking of direct mapped blocks in builtin allocator.
    • Limit recursion depth in string.match() et al.
    • Fix late despecialization of ITERN after loop has been entered.
    • Fix 'f' and 'L' options for debug.getinfo() and lua_getinfo().
    • Fix package.searchpath().
    • OSX: Change dylib names to be consistent with other platforms.
    • Android: Workaround for broken sprintf("%g", -0.0).
    • x86: Remove support for ancient CPUs without CMOV (before Pentium Pro).
    • x86: Fix register allocation for calls returning register pair.
    • x86/x64: Fix fusion of unsigned byte comparisons with swapped operands.
    • ARM: Fix tonumber() argument check.
    • ARM: Fix modulo operator and math.floor()/math.ceil() for inf/nan.
    • ARM: Invoke SPLIT pass for leftover IR_TOBIT.
    • ARM: Fix BASE register coalescing.
    • PPC: Fix interpreter state setup in callbacks.
    • PPC: Fix string.sub() range check.
    • MIPS: Support generation of MIPS/MIPSEL bytecode object files.
    • MIPS: Fix calls to floor()/ceil()/trunc().
    • ARM/PPC: Detect more target architecture variants.
    • ARM/PPC/e500/MIPS: Fix tailcalls from fast functions, esp. tostring().
    • ARM/PPC/MIPS: Fix rematerialization of FP constants.
    • FFI: Don't call FreeLibrary() on our own EXE/DLL.
    • FFI: Resolve metamethods for constructors, too.
    • FFI: Properly disable callbacks on iOS (would require executable memory).
    • FFI: Fix cdecl string parsing during recording.
    • FFI: Show address pointed to for tostring(ref), too.
    • FFI: Fix alignment of C call argument/return structure.
    • FFI: Initialize all fields of standard types.
    • FFI: Fix callback handling when new C types are declared in callback.
    • FFI: Fix recording of constructors for pointers.
    • FFI: Always resolve metamethods for pointers to structs.
    • FFI: Correctly propagate alignment when interning nested types.
  • Structural and performance enhancements:
    • Add allocation sinking and store sinking optimization.
    • Constify immutable upvalues.
    • Add builtin string to integer or FP number conversion. Improves cross-platform consistency and correctness.
    • Create string hash slots in template tables for non-const values, too. Avoids later table resizes.
    • Eliminate HREFK guard for template table references.
    • Add various new FOLD rules.
    • Don't use stack unwinding for lua_yield() (slow on x64).
    • ARM, PPC, MIPS: Improve XLOAD operand fusion and register hinting.
    • PPC, MIPS: Compile math.sqrt() to sqrt instruction, if available.
    • FFI: Fold KPTR + constant offset in SPLIT pass.
    • FFI: Optimize/inline ffi.copy() and ffi.fill().
    • FFI: Compile and optimize array/struct copies.
    • FFI: Compile ffi.typeof(cdata|ctype), ffi.sizeof(), ffi.alignof(), ffi.offsetof() and ffi.gc().
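The goto/labels feature adopted from Lua 5.2 (listed under the beta11 new features above) in a minimal, illustrative Lua sketch; any LuaJIT from 2.0.0-beta11 on should run it as is:

    -- Skip even numbers by jumping to a label, Lua 5.2 style.
    for i = 1, 10 do
      if i % 2 == 0 then goto continue end
      print(i)            -- prints 1, 3, 5, 7, 9
      ::continue::
    end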

LuaJIT 2.0.0-beta10 — 2012-05-09

  • New features:
    • The MIPS port of LuaJIT is complete. It requires a CPU conforming to the MIPS32 R1 architecture with hardware FPU. O32 hard-fp ABI, little-endian or big-endian.
    • Auto-detect target arch via cross-compiler. No need for TARGET=arch anymore.
    • Make DynASM compatible with Lua 5.2.
    • From Lua 5.2: Try __tostring metamethod on non-string error messages.
  • Correctness and completeness:
    • Fix parsing of hex literals with exponents.
    • Fix bytecode dump for certain number constants.
    • Fix argument type in error message for relative arguments.
    • Fix argument error handling on Lua stacks without a frame.
    • Add missing mcode limit check in assembler backend.
    • Fix compilation on OpenBSD.
    • Avoid recursive GC steps after GC-triggered trace exit.
    • Replace <unwind.h> definitions with our own.
    • Fix OSX build issues. Bump minimum required OSX version to 10.4.
    • Fix discharge order of comparisons in Lua parser.
    • Ensure running __gc of userdata created in __gc at state close.
    • Limit number of userdata __gc separations at state close.
    • Fix bytecode JMP slot range when optimizing and/or with constant LHS.
    • Fix DSE of USTORE.
    • Make lua_concat() work from C hook with partial frame.
    • Add required PHIs for implicit conversions, e.g. via XREF forwarding.
    • Add more comparison variants to Valgrind suppressions file.
    • Disable loading bytecode with an extra header (BOM or #!).
    • Fix PHI stack slot syncing.
    • ARM: Reorder type/value tests to silence Valgrind.
    • ARM: Fix register allocation for ldrd-optimized HREFK.
    • ARM: Fix conditional branch fixup for OBAR.
    • ARM: Invoke SPLIT pass for double args in FFI call.
    • ARM: Handle all CALL* ops with double results in SPLIT pass.
    • ARM: Fix rejoin of POW in SPLIT pass.
    • ARM: Fix compilation of math.sinh, math.cosh, math.tanh.
    • ARM, PPC: Avoid pointless arg clearing in BC_IFUNCF.
    • PPC: Fix resume after yield from hook.
    • PPC: Fix argument checking for rawget().
    • PPC: Fix fusion of floating-point XLOAD/XSTORE.
    • PPC: Fix HREFK code generation for huge tables.
    • PPC: Use builtin D-Cache/I-Cache sync code.
  • FFI library:
    • Ignore empty statements in ffi.cdef().
    • Ignore number parsing errors while skipping definitions.
    • Don't touch frame in callbacks with tailcalls to fast functions.
    • Fix library unloading on POSIX systems.
    • Finalize cdata before userdata when closing the state.
    • Change ffi.load() library name resolution for Cygwin.
    • Fix resolving of function name redirects on Windows/x86.
    • Fix symbol resolving error messages on Windows.
    • Fix blacklisting of C functions calling callbacks.
    • Fix result type of pointer difference.
    • Use correct PC in FFI metamethod error message.
    • Allow 'typedef _Bool int BOOL;' for the Windows API.
    • Don't record test for bool result of call, if ignored.

LuaJIT 2.0.0-beta9 — 2011-12-14

  • New features:
    • PPC port of LuaJIT is complete. Default is the dual-number port (usually faster). Single-number port selectable via src/Makefile at build time.
    • Add FFI callback support.
    • Extend -b to generate .c, .h or .obj/.o files with embedded bytecode.
    • Allow loading embedded bytecode with require().
    • From Lua 5.2: Change to '\z' escape. Reject undefined escape sequences.
  • Correctness and completeness:
    • Fix OSX 10.7 build. Fix install_name and versioning on OSX.
    • Fix iOS build.
    • Install dis_arm.lua, too.
    • Mark installed shared library as executable.
    • Add debug option to msvcbuild.bat and improve error handling.
    • Fix data-flow analysis for iterators.
    • Fix forced unwinding triggered by external unwinder.
    • Record missing for loop slot loads (return to lower frame).
    • Always use ANSI variants of Windows system functions.
    • Fix GC barrier for multi-result table constructor (TSETM).
    • Fix/add various FOLD rules.
    • Add potential PHI for number conversions due to type instability.
    • Do not eliminate PHIs only referenced from other PHIs.
    • Correctly anchor implicit number to string conversions in Lua/C API.
    • Fix various stack limit checks.
    • x64: Use thread-safe exceptions for external unwinding (GCC platforms).
    • x64: Fix result type of cdata index conversions.
    • x64: Fix math.random() and bit.bswap() code generation.
    • x64: Fix lightuserdata comparisons.
    • x64: Always extend stack-passed arguments to pointer size.
    • ARM: Many fixes to code generation backend.
    • PPC/e500: Fix dispatch for binop metamethods.
    • PPC/e500: Save/restore condition registers when entering/leaving the VM.
    • PPC/e500: Fix write barrier in stores of strings to upvalues.
  • FFI library:
    • Fix C comment parsing.
    • Fix snapshot optimization for cdata comparisons.
    • Fix recording of const/enum lookups in namespaces.
    • Fix call argument and return handling for I8/U8/I16/U16 types.
    • Fix unfused loads of float fields.
    • Fix ffi.string() recording.
    • Save GetLastError() around ffi.load() and symbol resolving, too.
    • Improve ld script detection in ffi.load().
    • Record loads/stores to external variables in namespaces.
    • Compile calls to stdcall, fastcall and vararg functions.
    • Treat function ctypes like pointers in comparisons.
    • Resolve __call metamethod for pointers, too.
    • Record C function calls with bool return values.
    • Record ffi.errno().
    • x86: Fix number to uint32_t conversion rounding.
    • x86: Fix 64 bit arithmetic in assembler backend.
    • x64: Fix struct-by-value calling conventions.
    • ARM: Ensure invocation of SPLIT pass for float conversions.
  • Structural and performance enhancements:
    • Display trace types with -jv and -jdump.
    • Record isolated calls. But prefer recording loops over calls.
    • Specialize to prototype for non-monomorphic functions. Solves the trace-explosion problem for closure-heavy programming styles.
    • Always generate a portable vmdef.lua. Easier for distros.
- -

LuaJIT 2.0.0-beta8 — 2011-06-23

  • New features:
    • Soft-float ARM port of LuaJIT is complete.
    • Add support for bytecode loading/saving and -b command line option.
    • From Lua 5.2: __len metamethod for tables (disabled by default).
  • Correctness and completeness:
    • ARM: Misc. fixes for interpreter.
    • x86/x64: Fix bit.* argument checking in interpreter.
    • Catch early out-of-memory in memory allocator initialization.
    • Fix data-flow analysis for paths leading to an upvalue close.
    • Fix check for missing arguments in string.format().
    • Fix Solaris/x86 build (note: not a supported target).
    • Fix recording of loops with unstable directions in side traces.
    • x86/x64: Fix fusion of comparisons with u8/u16 XLOAD.
    • x86/x64: Fix register allocation for variable shifts.
  • FFI library:
    • Add ffi.errno(). Save errno/GetLastError() around allocations etc.
    • Fix __gc for VLA/VLS cdata objects.
    • Fix recording of casts from 32 bit cdata pointers to integers.
    • tonumber(cdata) returns nil for non-numbers.
    • Show address pointed to for tostring(pointer).
    • Print NULL pointers as "cdata<... *>: NULL".
    • Support __tostring metamethod for pointers to structs, too.
  • Structural and performance enhancements:
    • More tuning for loop unrolling heuristics.
    • Flatten and compress in-memory debug info (saves ~70%).

LuaJIT 2.0.0-beta7 — 2011-05-05

  • New features:
    • ARM port of the LuaJIT interpreter is complete.
    • FFI library: Add ffi.gc(), ffi.metatype(), ffi.istype().
    • FFI library: Resolve ld script redirection in ffi.load().
    • From Lua 5.2: package.searchpath(), fp:read("*L"), load(string).
    • From Lua 5.2, disabled by default: empty statement, table.unpack(), modified coroutine.running().
  • Correctness and completeness:
    • FFI library: numerous fixes.
    • Fix type mismatches in store-to-load forwarding.
    • Fix error handling within metamethods.
    • Fix table.maxn().
    • Improve accuracy of x^-k on x64.
    • Fix code generation for Intel Atom in x64 mode.
    • Fix narrowing of POW.
    • Fix recording of retried fast functions.
    • Fix code generation for bit.bnot() and multiplies.
    • Fix error location within cpcall frames.
    • Add workaround for old libgcc unwind bug.
    • Fix lua_yield() and getmetatable(lightuserdata) on x64.
    • Misc. fixes for PPC/e500 interpreter.
    • Fix stack slot updates for down-recursion.
  • Structural and performance enhancements:
    • Add dual-number mode (int/double) for the VM. Enabled for ARM.
    • Improve narrowing of arithmetic operators and for loops.
    • Tune loop unrolling heuristics and increase trace recorder limits.
    • Eliminate dead slots in snapshots using bytecode data-flow analysis.
    • Avoid phantom stores to proxy tables.
    • Optimize lookups in empty proxy tables.
    • Improve bytecode optimization of and/or operators.

LuaJIT 2.0.0-beta6 — 2011-02-11

  • New features:
    • PowerPC/e500v2 port of the LuaJIT interpreter is complete.
    • Various minor features from Lua 5.2: Hex escapes in literals, '\*' escape, reversible string.format("%q",s), "%g" pattern, table.sort checks callbacks, os.exit(status|true|false[,close]).
    • Lua 5.2 __pairs and __ipairs metamethods (disabled by default).
    • Initial release of the FFI library.
  • Correctness and completeness:
    • Fix string.format() for non-finite numbers.
    • Fix memory leak when compiled to use the built-in allocator.
    • x86/x64: Fix unnecessary resize in TSETM bytecode.
    • Fix various GC issues with traces and jit.flush().
    • x64: Fix fusion of indexes for array references.
    • x86/x64: Fix stack overflow handling for coroutine results.
    • Enable low-2GB memory allocation on FreeBSD/x64.
    • Fix collectgarbage("count") result if more than 2GB is in use.
    • Fix parsing of hex floats.
    • x86/x64: Fix loop branch inversion with trailing HREF+NE/EQ.
    • Add jit.os string.
    • coroutine.create() permits running C functions, too.
    • Fix OSX build to work with newer ld64 versions.
    • Fix bytecode optimization of and/or operators.
  • Structural and performance enhancements:
    • Emit specialized bytecode for pairs()/next().
    • Improve bytecode coalescing of nil constants.
    • Compile calls to vararg functions.
    • Compile select().
    • Improve alias analysis, esp. for loads from allocations.
    • Tuning of various compiler heuristics.
    • Refactor and extend IR conversion instructions.
    • x86/x64: Various backend enhancements related to the FFI.
    • Add SPLIT pass to split 64 bit IR instructions for 32 bit CPUs.

LuaJIT 2.0.0-beta5 — 2010-08-24

  • Correctness and completeness:
    • Fix trace exit dispatch to function headers.
    • Fix Windows and OSX builds with LUAJIT_DISABLE_JIT.
    • Reorganize and fix placement of generated machine code on x64.
    • Fix TNEW in x64 interpreter.
    • Do not eliminate PHIs for values only referenced from side exits.
    • OS-independent canonicalization of strings for non-finite numbers.
    • Fix string.char() range check on x64.
    • Fix tostring() resolving within print().
    • Fix error handling for next().
    • Fix passing of constant arguments to external calls on x64.
    • Fix interpreter argument check for two-argument SSE math functions.
    • Fix C frame chain corruption caused by lua_cpcall().
    • Fix return from pcall() within active hook.
  • Structural and performance enhancements:
    • Replace on-trace GC frame syncing with interpreter exit.
    • Improve hash lookup specialization by not removing dead keys during GC.
    • Turn traces into true GC objects.
    • Avoid starting a GC cycle immediately after library init.
    • Add weak guards to improve dead-code elimination.
    • Speed up string interning.

LuaJIT 2.0.0-beta4 — 2010-03-28

  • Correctness and completeness:
    • Fix precondition for on-trace creation of table keys.
    • Fix {f()} on x64 when table is resized.
    • Fix folding of ordered comparisons with same references.
    • Fix snapshot restores for multi-result bytecodes.
    • Fix potential hang when recording bytecode with nested closures.
    • Fix recording of getmetatable(), tonumber() and bad argument types.
    • Fix SLOAD fusion across returns to lower frames.
  • Structural and performance enhancements:
    • Add array bounds check elimination. -Oabc is enabled by default.
    • More tuning for x64, e.g. smaller table objects.

LuaJIT 2.0.0-beta3 — 2010-03-07

  • LuaJIT x64 port:
    • Port integrated memory allocator to Linux/x64, Windows/x64 and OSX/x64.
    • Port interpreter and JIT compiler to x64.
    • Port DynASM to x64.
    • Many 32/64 bit cleanups in the VM.
    • Allow building the interpreter with either x87 or SSE2 arithmetics.
    • Add external unwinding and C++ exception interop (default on x64).
  • Correctness and completeness:
    • Fix constructor bytecode generation for certain conditional values.
    • Fix some cases of ordered string comparisons.
    • Fix lua_tocfunction().
    • Fix cutoff register in JMP bytecode for some conditional expressions.
    • Fix PHI marking algorithm for references from variant slots.
    • Fix package.cpath for non-default PREFIX.
    • Fix DWARF2 frame unwind information for interpreter on OSX.
    • Drive the GC forward on string allocations in the parser.
    • Implement call/return hooks (zero-cost if disabled).
    • Implement yield from C hooks.
    • Disable JIT compiler on older non-SSE2 CPUs instead of aborting.
  • Structural and performance enhancements:
    • Compile recursive code (tail-, up- and down-recursion).
    • Improve heuristics for bytecode penalties and blacklisting.
    • Split CALL/FUNC recording and clean up fast function call semantics.
    • Major redesign of internal function call handling.
    • Improve FOR loop const specialization and integerness checks.
    • Switch to pre-initialized stacks. Avoid frame-clearing.
    • Colocation of prototypes and related data: bytecode, constants, debug info.
    • Cleanup parser and streamline bytecode generation.
    • Add support for weak IR references to register allocator.
    • Switch to compressed, extensible snapshots.
    • Compile returns to frames below the start frame.
    • Improve alias analysis of upvalues using a disambiguation hash value.
    • Compile floor/ceil/trunc to SSE2 helper calls or SSE4.1 instructions.
    • Add generic C call handling to IR and backend.
    • Improve KNUM fuse vs. load heuristics.
    • Compile various io.*() functions.
    • Compile math.sinh(), math.cosh(), math.tanh() and math.random().

LuaJIT 2.0.0-beta2 — 2009-11-09

  • Reorganize build system. Build static+shared library on POSIX.
  • Allow C++ exception conversion on all platforms using a wrapper function.
  • Automatically catch C++ exceptions and rethrow Lua error (DWARF2 only).
  • Check for the correct x87 FPU precision at strategic points.
  • Always use wrappers for libm functions.
  • Resurrect metamethod name strings before copying them.
  • Mark current trace, even if compiler is idle.
  • Ensure FILE metatable is created only once.
  • Fix type comparisons when different integer types are involved.
  • Fix getmetatable() recording.
  • Fix TDUP with dead keys in template table.
  • jit.flush(tr) returns status. Prevent manual flush of a trace that's still linked.
  • Improve register allocation heuristics for invariant references.
  • Compile the push/pop variants of table.insert() and table.remove().
  • Compatibility with MSVC link /debug.
  • Fix lua_iscfunction().
  • Fix math.random() when compiled with -fpic (OSX).
  • Fix table.maxn().
  • Bump MACOSX_DEPLOYMENT_TARGET to 10.4.
  • luaL_check*() and luaL_opt*() now support negative arguments, too. This matches the behavior of Lua 5.1, but not the specification.

LuaJIT 2.0.0-beta1 — 2009-10-31

  • This is the first public release of LuaJIT 2.0.
  • The whole VM has been rewritten from the ground up, so there's no point in listing differences over earlier versions.
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/contact.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/contact.html
deleted file mode 100644
index d92c3e38fb3..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/contact.html
+++ /dev/null
@@ -1,104 +0,0 @@

Contact
Please send general questions to the LuaJIT mailing list. You can also send any questions you have directly to me:

Copyright

All documentation is Copyright © 2005-2015 Mike Pall.
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_c_api.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_c_api.html
deleted file mode 100644
index 91dd9efbd84..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_c_api.html
+++ /dev/null
@@ -1,189 +0,0 @@

Lua/C API Extensions

LuaJIT adds some extensions to the standard Lua/C API. The LuaJIT include directory must be in the compiler search path (-Ipath) to be able to include the required header for C code:
  #include "luajit.h"

Or for C++ code:

  #include "lua.hpp"

luaJIT_setmode(L, idx, mode) — Control VM
This is a C API extension to allow control of the VM from C code. The full prototype of luaJIT_setmode is:

  LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);

The returned status is either success (1) or failure (0). The second argument is either 0 or a stack index (similar to the other Lua/C API functions).

The third argument specifies the mode, which is 'or'ed with a flag. The flag can be LUAJIT_MODE_OFF to turn a feature off, LUAJIT_MODE_ON to turn a feature on, or LUAJIT_MODE_FLUSH to flush cached code.

The following modes are defined:

luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|flag)
Turn the whole JIT compiler on or off or flush the whole cache of compiled code.

luaJIT_setmode(L, idx, LUAJIT_MODE_FUNC|flag)
luaJIT_setmode(L, idx, LUAJIT_MODE_ALLFUNC|flag)
luaJIT_setmode(L, idx, LUAJIT_MODE_ALLSUBFUNC|flag)
This sets the mode for the function at the stack index idx or the parent of the calling function (idx = 0). It either enables JIT compilation for a function, disables it and flushes any already compiled code or only flushes already compiled code. This applies recursively to all sub-functions of the function with LUAJIT_MODE_ALLFUNC or only to the sub-functions with LUAJIT_MODE_ALLSUBFUNC.

luaJIT_setmode(L, trace, LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH)
Flushes the specified root trace and all of its side traces from the cache. The code for the trace will be retained as long as there are any other traces which link to it.

luaJIT_setmode(L, idx, LUAJIT_MODE_WRAPCFUNC|flag)
This mode defines a wrapper function for calls to C functions. If called with LUAJIT_MODE_ON, the stack index at idx must be a lightuserdata object holding a pointer to the wrapper function. From now on all C functions are called through the wrapper function. If called with LUAJIT_MODE_OFF this mode is turned off and all C functions are directly called.

The wrapper function can be used for debugging purposes or to catch and convert foreign exceptions. But please read the section on C++ exception interoperability first. Recommended usage can be seen in this C++ code excerpt:
  #include <exception>
  #include "lua.hpp"

  // Catch C++ exceptions and convert them to Lua error messages.
  // Customize as needed for your own exception classes.
  static int wrap_exceptions(lua_State *L, lua_CFunction f)
  {
    try {
      return f(L);  // Call wrapped function and return result.
    } catch (const char *s) {  // Catch and convert exceptions.
      lua_pushstring(L, s);
    } catch (std::exception& e) {
      lua_pushstring(L, e.what());
    } catch (...) {
      lua_pushliteral(L, "caught (...)");
    }
    return lua_error(L);  // Rethrow as a Lua error.
  }

  static int myinit(lua_State *L)
  {
    ...
    // Define wrapper function and enable it.
    lua_pushlightuserdata(L, (void *)wrap_exceptions);
    luaJIT_setmode(L, -1, LUAJIT_MODE_WRAPCFUNC|LUAJIT_MODE_ON);
    lua_pop(L, 1);
    ...
  }
Note that you can only define a single global wrapper function, so be careful when using this mechanism from multiple C++ modules. Also note that this mechanism is not without overhead.
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi.html
deleted file mode 100644
index 1ff22363484..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi.html
+++ /dev/null
@@ -1,332 +0,0 @@

FFI Library
The FFI library allows calling external C functions and using C data structures from pure Lua code.

The FFI library largely obviates the need to write tedious manual Lua/C bindings in C. No need to learn a separate binding language — it parses plain C declarations! These can be cut-n-pasted from C header files or reference manuals. It's up to the task of binding large libraries without the need for dealing with fragile binding generators.

The FFI library is tightly integrated into LuaJIT (it's not available as a separate module). The code generated by the JIT-compiler for accesses to C data structures from Lua code is on par with the code a C compiler would generate. Calls to C functions can be inlined in JIT-compiled code, unlike calls to functions bound via the classic Lua/C API.

This page gives a short introduction to the usage of the FFI library. Please use the FFI sub-topics in the navigation bar to learn more.

Motivating Example: Calling External C Functions

It's really easy to call an external C library function:
  local ffi = require("ffi")                  -- ①
  ffi.cdef[[                                  -- ②
  int printf(const char *fmt, ...);
  ]]
  ffi.C.printf("Hello %s!", "world")          -- ③
So, let's pick that apart:

① Load the FFI library.

② Add a C declaration for the function. The part inside the double-brackets is just standard C syntax.

③ Call the named C function — Yes, it's that simple!
Actually, what goes on behind the scenes is far from simple: ③ makes use of the standard C library namespace ffi.C. Indexing this namespace with a symbol name ("printf") automatically binds it to the standard C library. The result is a special kind of object which, when called, runs the printf function. The arguments passed to this function are automatically converted from Lua objects to the corresponding C types.

Ok, so maybe the use of printf() wasn't such a spectacular example. You could have done that with io.write() and string.format(), too. But you get the idea ...

So here's something to pop up a message box on Windows:
  local ffi = require("ffi")
  ffi.cdef[[
  int MessageBoxA(void *w, const char *txt, const char *cap, int type);
  ]]
  ffi.C.MessageBoxA(nil, "Hello world!", "Test", 0)

Bing! Again, that was far too easy, no?

Compare this with the effort required to bind that function using the classic Lua/C API: create an extra C file, add a C function that retrieves and checks the argument types passed from Lua and calls the actual C function, add a list of module functions and their names, add a luaopen_* function and register all module functions, compile and link it into a shared library (DLL), move it to the proper path, add Lua code that loads the module aaaand ... finally call the binding function. Phew!

Motivating Example: Using C Data Structures

The FFI library allows you to create and access C data structures. Of course the main use for this is for interfacing with C functions. But they can be used stand-alone, too.

Lua is built upon high-level data types. They are flexible, extensible and dynamic. That's why we all love Lua so much. Alas, this can be inefficient for certain tasks, where you'd really want a low-level data type. E.g. a large array of a fixed structure needs to be implemented with a big table holding lots of tiny tables. This imposes both a substantial memory overhead as well as a performance overhead.

Here's a sketch of a library that operates on color images plus a simple benchmark. First, the plain Lua version:
  local floor = math.floor

  local function image_ramp_green(n)
    local img = {}
    local f = 255/(n-1)
    for i=1,n do
      img[i] = { red = 0, green = floor((i-1)*f), blue = 0, alpha = 255 }
    end
    return img
  end

  local function image_to_grey(img, n)
    for i=1,n do
      local y = floor(0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue)
      img[i].red = y; img[i].green = y; img[i].blue = y
    end
  end

  local N = 400*400
  local img = image_ramp_green(N)
  for i=1,1000 do
    image_to_grey(img, N)
  end

This creates a table with 160,000 pixels, each of which is a table holding four number values in the range of 0-255. First an image with a green ramp is created (1D for simplicity), then the image is converted to greyscale 1000 times. Yes, that's silly, but I was in need of a simple example ...

And here's the FFI version. The modified parts are annotated ① to ⑤:
  local ffi = require("ffi")                                -- ①
  ffi.cdef[[
  typedef struct { uint8_t red, green, blue, alpha; } rgba_pixel;
  ]]

  local function image_ramp_green(n)
    local img = ffi.new("rgba_pixel[?]", n)                 -- ②
    local f = 255/(n-1)
    for i=0,n-1 do                                          -- ③
      img[i].green = i*f                                    -- ④
      img[i].alpha = 255
    end
    return img
  end

  local function image_to_grey(img, n)
    for i=0,n-1 do                                          -- ③
      local y = 0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue  -- ⑤
      img[i].red = y; img[i].green = y; img[i].blue = y
    end
  end

  local N = 400*400
  local img = image_ramp_green(N)
  for i=1,1000 do
    image_to_grey(img, N)
  end

-Ok, so that wasn't too difficult: -

-

- First, load the FFI -library and declare the low-level data type. Here we choose a -struct which holds four byte fields, one for each component -of a 4x8 bit RGBA pixel. -

-

- Creating the data -structure with ffi.new() is straightforward — the -'?' is a placeholder for the number of elements of a -variable-length array. -

-

- C arrays are -zero-based, so the indexes have to run from 0 to -n-1. One might want to allocate one more element instead to -simplify converting legacy code. -

-

- Since ffi.new() -zero-fills the array by default, we only need to set the green and the -alpha fields. -

-

- The calls to -math.floor() can be omitted here, because floating-point -numbers are already truncated towards zero when converting them to an -integer. This happens implicitly when the number is stored in the -fields of each pixel. -

-

Now let's have a look at the impact of the changes: first, memory consumption for the image is down from 22 Megabytes to 640 Kilobytes (400*400*4 bytes). That's a factor of 35x less! So, yes, tables do have a noticeable overhead. BTW: The original program would consume 40 Megabytes in plain Lua (on x64).

Next, performance: the pure Lua version runs in 9.57 seconds (52.9 seconds with the Lua interpreter) and the FFI version runs in 0.48 seconds on my machine (YMMV). That's a factor of 20x faster (110x faster than the Lua interpreter).

The avid reader may notice that converting the pure Lua version over to use array indexes for the colors ([1] instead of .red, [2] instead of .green etc.) ought to be more compact and faster. This is certainly true (by a factor of ~1.7x). Switching to a struct-of-arrays would help, too.

However the resulting code would be less idiomatic and rather error-prone. And it still doesn't get even close to the performance of the FFI version of the code. Also, high-level data structures cannot be easily passed to other C functions, especially I/O functions, without undue conversion penalties.
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_api.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_api.html
deleted file mode 100644
index b095c05ff3d..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_api.html
+++ /dev/null
@@ -1,570 +0,0 @@

ffi.* API Functions
This page describes the API functions provided by the FFI library in detail. It's recommended to read through the introduction and the FFI tutorial first.

Glossary
  • cdecl — An abstract C type declaration (a Lua string).
  • ctype — A C type object. This is a special kind of cdata returned by ffi.typeof(). It serves as a cdata constructor when called.
  • cdata — A C data object. It holds a value of the corresponding ctype.
  • ct — A C type specification which can be used for most of the API functions. Either a cdecl, a ctype or a cdata serving as a template type.
  • cb — A callback object. This is a C data object holding a special function pointer. Calling this function from C code runs an associated Lua function.
  • VLA — A variable-length array is declared with a ? instead of the number of elements, e.g. "int[?]". The number of elements (nelem) must be given when it's created.
  • VLS — A variable-length struct is a struct C type where the last element is a VLA. The same rules for declaration and creation apply.

Declaring and Accessing External Symbols
External symbols must be declared first and can then be accessed by indexing a C library namespace, which automatically binds the symbol to a specific library.

ffi.cdef(def)
Adds multiple C declarations for types or external symbols (named variables or functions). def must be a Lua string. It's recommended to use the syntactic sugar for string arguments as follows:
  ffi.cdef[[
  typedef struct foo { int a, b; } foo_t;  // Declare a struct and typedef.
  int dofoo(foo_t *f, int n);  /* Declare an external C function. */
  ]]
The contents of the string must be a sequence of C declarations, separated by semicolons. The trailing semicolon for a single declaration may be omitted.

Please note that external symbols are only declared, but they are not bound to any specific address, yet. Binding is achieved with C library namespaces (see below).

C declarations are not passed through a C pre-processor, yet. No pre-processor tokens are allowed, except for #pragma pack. Replace #define in existing C header files with enum, static const or typedef and/or pass the files through an external C pre-processor (once). Be careful not to include unneeded or redundant declarations from unrelated header files.

ffi.C
This is the default C library namespace — note the uppercase 'C'. It binds to the default set of symbols or libraries on the target system. These are more or less the same as a C compiler would offer by default, without specifying extra link libraries.

On POSIX systems, this binds to symbols in the default or global namespace. This includes all exported symbols from the executable and any libraries loaded into the global namespace. This includes at least libc, libm, libdl (on Linux), libgcc (if compiled with GCC), as well as any exported symbols from the Lua/C API provided by LuaJIT itself.

On Windows systems, this binds to symbols exported from the *.exe, the lua51.dll (i.e. the Lua/C API provided by LuaJIT itself), the C runtime library LuaJIT was linked with (msvcrt*.dll), kernel32.dll, user32.dll and gdi32.dll.

clib = ffi.load(name [,global])
This loads the dynamic library given by name and returns a new C library namespace which binds to its symbols. On POSIX systems, if global is true, the library symbols are loaded into the global namespace, too.

If name is a path, the library is loaded from this path. Otherwise name is canonicalized in a system-dependent way and searched in the default search path for dynamic libraries:

On POSIX systems, if the name contains no dot, the extension .so is appended. Also, the lib prefix is prepended if necessary. So ffi.load("z") looks for "libz.so" in the default shared library search path.

On Windows systems, if the name contains no dot, the extension .dll is appended. So ffi.load("ws2_32") looks for "ws2_32.dll" in the default DLL search path.
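For illustration, here is a hedged sketch of that lookup using zlib. It assumes a zlib shared library is installed and resolvable under the canonical name "z" (on Windows the DLL usually has a different name, so an explicit name or path may be needed); compressBound() is a standard zlib function:

  local ffi = require("ffi")
  ffi.cdef[[
  unsigned long compressBound(unsigned long sourceLen);
  ]]
  local zlib = ffi.load("z")          -- POSIX: searches for "libz.so"
  print(zlib.compressBound(1024))     -- worst-case compressed size for 1024 bytes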

- -

Creating cdata Objects

-

The following API functions create cdata objects (type() returns "cdata"). All created cdata objects are garbage collected.

cdata = ffi.new(ct [,nelem] [,init...])
cdata = ctype([nelem,] [init...])

Creates a cdata object for the given ct. VLA/VLS types require the nelem argument. The second syntax uses a ctype as a constructor and is otherwise fully equivalent.

The cdata object is initialized according to the rules for initializers, using the optional init arguments. Excess initializers cause an error.

Performance notice: if you want to create many objects of one kind, parse the cdecl only once and get its ctype with ffi.typeof(). Then use the ctype as a constructor repeatedly.

Please note that an anonymous struct declaration implicitly creates a new and distinguished ctype every time you use it for ffi.new(). This is probably not what you want, especially if you create more than one cdata object. Different anonymous structs are not considered assignment-compatible by the C standard, even though they may have the same fields! Also, they are considered different types by the JIT-compiler, which may cause an excessive number of traces. It's strongly suggested to either declare a named struct or typedef with ffi.cdef() or to create a single ctype object for an anonymous struct with ffi.typeof().
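As a sketch of the recommended pattern (the point_t declaration is invented for this example):

  local ffi = require("ffi")
  ffi.cdef[[
  typedef struct { double x, y; } point_t;
  ]]

  local point = ffi.typeof("point_t")   -- parse the cdecl once

  local pts = {}
  for i = 1, 100 do
    pts[i] = point(i, 2*i)              -- same as ffi.new("point_t", i, 2*i)
  end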

- -

ctype = ffi.typeof(ct)

-

Creates a ctype object for the given ct.

This function is especially useful to parse a cdecl only once and then use the resulting ctype object as a constructor.

cdata = ffi.cast(ct, init)
Creates a scalar cdata object for the given ct. The cdata object is initialized with init using the "cast" variant of the C type conversion rules.

This function is mainly useful to override the pointer compatibility checks or to convert pointers to addresses or vice versa.
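A short sketch of both directions (note that converting a 64 bit address to a Lua number can lose precision):

  local ffi = require("ffi")
  local buf = ffi.new("int[1]", 42)
  local addr = tonumber(ffi.cast("uintptr_t", buf))   -- pointer to integer address
  local p = ffi.cast("int *", addr)                   -- integer address back to pointer
  print(p[0])  --> 42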

- -

ctype = ffi.metatype(ct, metatable)

-

Creates a ctype object for the given ct and associates it with a metatable. Only struct/union types, complex numbers and vectors are allowed. Other types may be wrapped in a struct, if needed.

The association with a metatable is permanent and cannot be changed afterwards. Neither the contents of the metatable nor the contents of an __index table (if any) may be modified afterwards. The associated metatable automatically applies to all uses of this type, no matter how the objects are created or where they originate from. Note that pre-defined operations on types have precedence (e.g. declared field names cannot be overridden).

All standard Lua metamethods are implemented. These are called directly, without shortcuts and on any mix of types. For binary operations, the left operand is checked first for a valid ctype metamethod. The __gc metamethod only applies to struct/union types and performs an implicit ffi.gc() call during creation of an instance.
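A minimal sketch of the usual idiom (the vec2_t type and its methods are invented for this example):

  local ffi = require("ffi")
  ffi.cdef[[
  typedef struct { double x, y; } vec2_t;
  ]]

  local vec2
  local mt = {
    __add = function(a, b) return vec2(a.x + b.x, a.y + b.y) end,
    __index = {
      length = function(v) return math.sqrt(v.x*v.x + v.y*v.y) end,
    },
  }
  vec2 = ffi.metatype("vec2_t", mt)

  local v = vec2(3, 4) + vec2(1, 2)
  print(v:length())   --> ~7.2111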

- -

cdata = ffi.gc(cdata, finalizer)

-

Associates a finalizer with a pointer or aggregate cdata object. The cdata object is returned unchanged.

This function allows safe integration of unmanaged resources into the automatic memory management of the LuaJIT garbage collector. Typical usage:

  local p = ffi.gc(ffi.C.malloc(n), ffi.C.free)
  ...
  p = nil -- Last reference to p is gone.
  -- GC will eventually run finalizer: ffi.C.free(p)

A cdata finalizer works like the __gc metamethod for userdata objects: when the last reference to a cdata object is gone, the associated finalizer is called with the cdata object as an argument. The finalizer can be a Lua function or a cdata function or cdata function pointer. An existing finalizer can be removed by setting a nil finalizer, e.g. right before explicitly deleting a resource:

  ffi.C.free(ffi.gc(p, nil)) -- Manually free the memory.

C Type Information
The following API functions return information about C types. They are most useful for inspecting cdata objects.

size = ffi.sizeof(ct [,nelem])

Returns the size of ct in bytes. Returns nil if the size is not known (e.g. for "void" or function types). Requires nelem for VLA/VLS types, except for cdata objects.

align = ffi.alignof(ct)

Returns the minimum required alignment for ct in bytes.

ofs [,bpos,bsize] = ffi.offsetof(ct, field)

Returns the offset (in bytes) of field relative to the start of ct, which must be a struct. Additionally returns the position and the field size (in bits) for bit fields.

status = ffi.istype(ct, obj)

Returns true if obj has the C type given by ct. Returns false otherwise.

C type qualifiers (const etc.) are ignored. Pointers are checked with the standard pointer compatibility rules, but without any special treatment for void *. If ct specifies a struct/union, then a pointer to this type is accepted, too. Otherwise the types must match exactly.

Note: this function accepts all kinds of Lua objects for the obj argument, but always returns false for non-cdata objects.
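A small sketch showing these functions together; the printed values assume a typical x64 ABI and may differ on other targets (cell_t is invented for this example):

  local ffi = require("ffi")
  ffi.cdef[[
  typedef struct { char tag; double value; } cell_t;
  ]]

  print(ffi.sizeof("cell_t"))             --> 16 (padding included) on x64
  print(ffi.alignof("cell_t"))            --> 8 on x64
  print(ffi.offsetof("cell_t", "value"))  --> 8 on x64
  local c = ffi.new("cell_t")
  print(ffi.istype("cell_t", c))          --> true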

- -

Utility Functions

- -

err = ffi.errno([newerr])

-

Returns the error number set by the last C function call which indicated an error condition. If the optional newerr argument is present, the error number is set to the new value and the previous value is returned.

This function offers a portable and OS-independent way to get and set the error number. Note that only some C functions set the error number. And it's only significant if the function actually indicated an error condition (e.g. with a return value of -1 or NULL). Otherwise, it may or may not contain any previously set value.

You're advised to call this function only when needed and as close as possible after the return of the related C function. The errno value is preserved across hooks, memory allocations, invocations of the JIT compiler and other internal VM activity. The same applies to the value returned by GetLastError() on Windows, but you need to declare and call it yourself.
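A hedged, POSIX-only sketch of that advice (open() is declared ad hoc here; the error number is platform-specific):

  local ffi = require("ffi")
  ffi.cdef[[
  int open(const char *pathname, int flags);
  ]]

  local fd = ffi.C.open("/no/such/file", 0)
  if fd == -1 then
    print("errno:", ffi.errno())   -- e.g. 2 (ENOENT) on most POSIX systems
  end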

- -

str = ffi.string(ptr [,len])

-

Creates an interned Lua string from the data pointed to by ptr.

If the optional argument len is missing, ptr is converted to a "char *" and the data is assumed to be zero-terminated. The length of the string is computed with strlen().

Otherwise ptr is converted to a "void *" and len gives the length of the data. The data may contain embedded zeros and need not be byte-oriented (though this may cause endianness issues).

This function is mainly useful to convert (temporary) "const char *" pointers returned by C functions to Lua strings and store them or pass them to other functions expecting a Lua string. The Lua string is an (interned) copy of the data and bears no relation to the original data area anymore. Lua strings are 8 bit clean and may be used to hold arbitrary, non-character data.

Performance notice: it's faster to pass the length of the string, if it's known. E.g. when the length is returned by a C call like sprintf().
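A short sketch using the C library function strstr(), which returns a pointer into the string:

  local ffi = require("ffi")
  ffi.cdef[[
  const char *strstr(const char *haystack, const char *needle);
  ]]

  local p = ffi.C.strstr("hello world", "wor")
  print(ffi.string(p))      --> world   (zero-terminated, length found via strlen())
  print(ffi.string(p, 3))   --> wor     (explicit length)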

- -

ffi.copy(dst, src, len)
ffi.copy(dst, str)

Copies the data pointed to by src to dst. dst is converted to a "void *" and src is converted to a "const void *".

In the first syntax, len gives the number of bytes to copy. Caveat: if src is a Lua string, then len must not exceed #src+1.

In the second syntax, the source of the copy must be a Lua string. All bytes of the string plus a zero-terminator are copied to dst (i.e. #src+1 bytes).

Performance notice: ffi.copy() may be used as a faster (inlinable) replacement for the C library functions memcpy(), strcpy() and strncpy().

ffi.fill(dst, len [,c])

Fills the data pointed to by dst with len constant bytes, given by c. If c is omitted, the data is zero-filled.

Performance notice: ffi.fill() may be used as a faster (inlinable) replacement for the C library function memset(dst, c, len). Please note the different order of arguments!
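A tiny sketch of both functions on a scratch buffer:

  local ffi = require("ffi")
  local buf = ffi.new("uint8_t[?]", 16)

  ffi.fill(buf, 16)            -- zero-fill all 16 bytes
  ffi.copy(buf, "abc")         -- copies 'a', 'b', 'c' plus the zero-terminator
  print(ffi.string(buf))       --> abc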

- -

Target-specific Information

- -

status = ffi.abi(param)

-

Returns true if param (a Lua string) applies for the target ABI (Application Binary Interface). Returns false otherwise. The following parameters are currently defined:

  Parameter  Description
  32bit      32 bit architecture
  64bit      64 bit architecture
  le         Little-endian architecture
  be         Big-endian architecture
  fpu        Target has a hardware FPU
  softfp     softfp calling conventions
  hardfp     hardfp calling conventions
  eabi       EABI variant of the standard ABI
  win        Windows variant of the standard ABI
  gc64       64 bit GC references

ffi.os
Contains the target OS name. Same contents as jit.os.

ffi.arch

Contains the target architecture name. Same contents as jit.arch.

Methods for Callbacks
The C types for callbacks have some extra methods:

cb:free()

Free the resources associated with a callback. The associated Lua function is unanchored and may be garbage collected. The callback function pointer is no longer valid and must not be called anymore (it may be reused by a subsequently created callback).

cb:set(func)

Associate a new Lua function with a callback. The C type of the callback and the callback function pointer are unchanged.

This method is useful to dynamically switch the receiver of callbacks without creating a new callback each time and registering it again (e.g. with a GUI library).
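A hedged sketch of the callback lifecycle (the cmp_cb function pointer type is invented here; a real use would hand cb to C code such as a sort or event API):

  local ffi = require("ffi")
  ffi.cdef[[
  typedef int (*cmp_cb)(int a, int b);
  ]]

  local cb = ffi.cast("cmp_cb", function(a, b) return a - b end)
  -- ... pass cb to C code expecting a cmp_cb ...
  cb:set(function(a, b) return b - a end)  -- swap the Lua receiver, pointer unchanged
  cb:free()                                -- release the callback slot when done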

- -

Extended Standard Library Functions

-

The following standard library functions have been extended to work with cdata objects:

n = tonumber(cdata)

Converts a number cdata object to a double and returns it as a Lua number. This is particularly useful for boxed 64 bit integer values. Caveat: this conversion may incur a precision loss.

s = tostring(cdata)

Returns a string representation of the value of 64 bit integers ("nnnLL" or "nnnULL") or complex numbers ("re±imi"). Otherwise returns a string representation of the C type of a ctype object ("ctype<type>") or a cdata object ("cdata<type>: address"), unless you override it with a __tostring metamethod (see ffi.metatype()).

iter, obj, start = pairs(cdata)
iter, obj, start = ipairs(cdata)

Calls the __pairs or __ipairs metamethod of the corresponding ctype.

Extensions to the Lua Parser
The parser for Lua source code treats numeric literals with the suffixes LL or ULL as signed or unsigned 64 bit integers. Case doesn't matter, but uppercase is recommended for readability. It handles decimal (42LL), hexadecimal (0x2aLL) and binary (0b101010LL) literals.

The imaginary part of complex numbers can be specified by suffixing number literals with i or I, e.g. 12.5i. Caveat: you'll need to use 1i to get an imaginary part with the value one, since i itself still refers to a variable named i.
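A few illustrative lines (the printed forms follow the tostring() rules described earlier):

  local a = 42LL               -- signed 64 bit integer cdata
  local b = 0x2aULL            -- unsigned 64 bit integer cdata
  print(tostring(a))           --> 42LL
  print(tostring(b))           --> 42ULL
  print(tostring(a + 1))       --> 43LL  (64 bit arithmetic stays "sticky")
  print(tostring(12.5i))       --> printed in re±imi form, e.g. 0+12.5i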

-
-
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_semantics.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_semantics.html
deleted file mode 100644
index 889d44d8235..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_semantics.html
+++ /dev/null
@@ -1,1261 +0,0 @@

FFI Semantics
This page describes the detailed semantics underlying the FFI library and its interaction with both Lua and C code.

Given that the FFI library is designed to interface with C code and that declarations can be written in plain C syntax, it closely follows the C language semantics, wherever possible. Some minor concessions are needed for smoother interoperation with Lua language semantics.

Please don't be overwhelmed by the contents of this page — this is a reference and you may need to consult it, if in doubt. It doesn't hurt to skim this page, but most of the semantics "just work" as you'd expect them to work. It should be straightforward to write applications using the LuaJIT FFI for developers with a C or C++ background.

C Language Support
The FFI library has a built-in C parser with a minimal memory footprint. It's used by the ffi.* library functions to declare C types or external symbols.

Its only purpose is to parse C declarations, as found e.g. in C header files. Although it does evaluate constant expressions, it's not a C compiler. The body of inline C function definitions is simply ignored.

Also, this is not a validating C parser. It expects and accepts correctly formed C declarations, but it may choose to ignore bad declarations or show rather generic error messages. If in doubt, please check the input against your favorite C compiler.

The C parser complies with the C99 language standard plus the following extensions:
  • The '\e' escape in character and string literals.
  • The C99/C++ boolean type, declared with the keywords bool or _Bool.
  • Complex numbers, declared with the keywords complex or _Complex.
  • Two complex number types: complex (aka complex double) and complex float.
  • Vector types, declared with the GCC mode or vector_size attribute.
  • Unnamed ('transparent') struct/union fields inside a struct/union.
  • Incomplete enum declarations, handled like incomplete struct declarations.
  • Unnamed enum fields inside a struct/union. This is similar to a scoped C++ enum, except that declared constants are visible in the global namespace, too.
  • Scoped static const declarations inside a struct/union (from C++).
  • Zero-length arrays ([0]), empty struct/union, variable-length arrays (VLA, [?]) and variable-length structs (VLS, with a trailing VLA).
  • C++ reference types (int &x).
  • Alternate GCC keywords with '__', e.g. __const__.
  • GCC __attribute__ with the following attributes: aligned, packed, mode, vector_size, cdecl, fastcall, stdcall, thiscall.
  • The GCC __extension__ keyword and the GCC __alignof__ operator.
  • GCC __asm__("symname") symbol name redirection for function declarations.
  • MSVC keywords for fixed-length types: __int8, __int16, __int32 and __int64.
  • MSVC __cdecl, __fastcall, __stdcall, __thiscall, __ptr32, __ptr64, __declspec(align(n)) and #pragma pack.
  • All other GCC/MSVC-specific attributes are ignored.
The following C types are pre-defined by the C parser (like a typedef, except re-declarations will be ignored):

  • Vararg handling: va_list, __builtin_va_list, __gnuc_va_list.
  • From <stddef.h>: ptrdiff_t, size_t, wchar_t.
  • From <stdint.h>: int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, intptr_t, uintptr_t.
You're encouraged to use these types in preference to compiler-specific extensions or target-dependent standard types. E.g. char differs in signedness and long differs in size, depending on the target architecture and platform ABI.

The following C features are not supported:
  • A declaration must always have a type specifier; it doesn't default to an int type.
  • Old-style empty function declarations (K&R) are not allowed. All C functions must have a proper prototype declaration. A function declared without parameters (int foo();) is treated as a function taking zero arguments, like in C++.
  • The long double C type is parsed correctly, but there's no support for the related conversions, accesses or arithmetic operations.
  • Wide character strings and character literals are not supported.
  • See below for features that are currently not implemented.

C Type Conversion Rules

Conversions from C types to Lua objects
These conversion rules apply for read accesses to C types: indexing pointers, arrays or struct/union types; reading external variables or constant values; retrieving return values from C calls:
  Input              Conversion                     Output
  int8_t, int16_t    sign-ext int32_t → double      number
  uint8_t, uint16_t  zero-ext int32_t → double      number
  int32_t, uint32_t  → double                       number
  int64_t, uint64_t  boxed value                    64 bit int cdata
  double, float      → double                       number
  bool               0 → false, otherwise true      boolean
  enum               boxed value                    enum cdata
  Complex number     boxed value                    complex cdata
  Vector             boxed value                    vector cdata
  Pointer            boxed value                    pointer cdata
  Array              boxed reference                reference cdata
  struct/union       boxed reference                reference cdata
Bitfields are treated like their underlying type.

Reference types are dereferenced before a conversion can take place — the conversion is applied to the C type pointed to by the reference.

Conversions from Lua objects to C types
These conversion rules apply for write accesses to C types: indexing pointers, arrays or struct/union types; initializing cdata objects; casts to C types; writing to external variables; passing arguments to C calls:
  Input          Conversion                      Output
  number         →                               double
  boolean        false → 0, true → 1             bool
  nil            NULL →                          (void *)
  lightuserdata  lightuserdata address →         (void *)
  userdata       userdata payload →              (void *)
  io.* file      get FILE * handle →             (void *)
  string         match against enum constant     enum
  string         copy string data + zero-byte    int8_t[], uint8_t[]
  string         string data →                   const char[]
  function       create callback →               C function type
  table          table initializer               Array
  table          table initializer               struct/union
  cdata          cdata payload →                 C type
If the result type of this conversion doesn't match the C type of the destination, the conversion rules between C types are applied.

Reference types are immutable after initialization ("no re-seating of references"). For initialization purposes or when passing values to reference parameters, they are treated like pointers. Note that unlike in C++, there's no way to implement automatic reference generation of variables under the Lua language semantics. If you want to call a function with a reference parameter, you need to explicitly pass a one-element array.

Conversions between C types
These conversion rules are more or less the same as the standard C conversion rules. Some rules only apply to casts, or require pointer or type compatibility:
  Input             Conversion                      Output
  Signed integer    narrow or sign-extend           Integer
  Unsigned integer  narrow or zero-extend           Integer
  Integer           round                           double, float
  double, float     trunc → int32_t → narrow        (u)int8_t, (u)int16_t
  double, float     trunc                           (u)int32_t, (u)int64_t
  double, float     round                           float, double
  Number            n == 0 → 0, otherwise 1         bool
  bool              false → 0, true → 1             Number
  Complex number    convert real part               Number
  Number            convert real part, imag = 0     Complex number
  Complex number    convert real and imag part      Complex number
  Number            convert scalar and replicate    Vector
  Vector            copy (same size)                Vector
  struct/union      take base address (compat)      Pointer
  Array             take base address (compat)      Pointer
  Function          take function address           Function pointer
  Number            convert via uintptr_t (cast)    Pointer
  Pointer           convert address (compat/cast)   Pointer
  Pointer           convert address (cast)          Integer
  Array             convert base address (cast)     Integer
  Array             copy (compat)                   Array
  struct/union      copy (identical type)           struct/union
Bitfields or enum types are treated like their underlying type.

Conversions not listed above will raise an error. E.g. it's not possible to convert a pointer to a complex number or vice versa.

Conversions for vararg C function arguments
The following default conversion rules apply when passing Lua objects to the variable argument part of vararg C functions:
  Input               Conversion                 Output
  number              →                          double
  boolean             false → 0, true → 1        bool
  nil                 NULL →                     (void *)
  userdata            userdata payload →         (void *)
  lightuserdata       lightuserdata address →    (void *)
  string              string data →              const char *
  float cdata         →                          double
  Array cdata         take base address          Element pointer
  struct/union cdata  take base address          struct/union pointer
  Function cdata      take function address      Function pointer
  Any other cdata     no conversion              C type
To pass a Lua object, other than a cdata object, as a specific type, you need to override the conversion rules: create a temporary cdata object with a constructor or a cast and initialize it with the value to pass.

Assuming x is a Lua number, here's how to pass it as an integer to a vararg function:
  ffi.cdef[[
  int printf(const char *fmt, ...);
  ]]
  ffi.C.printf("integer value: %d\n", ffi.new("int", x))
If you don't do this, the default Lua number → double conversion rule applies. A vararg C function expecting an integer will see a garbled or uninitialized value.

Initializers
Creating a cdata object with ffi.new() or the equivalent constructor syntax always initializes its contents, too. Different rules apply, depending on the number of optional initializers and the C types involved:
  • If no initializers are given, the object is filled with zero bytes.
  • Scalar types (numbers and pointers) accept a single initializer. The Lua object is converted to the scalar C type.
  • Valarrays (complex numbers and vectors) are treated like scalars when a single initializer is given. Otherwise they are treated like regular arrays.
  • Aggregate types (arrays and structs) accept either a single cdata initializer of the same type (copy constructor), a single table initializer, or a flat list of initializers.
  • The elements of an array are initialized, starting at index zero. If a single initializer is given for an array, it's repeated for all remaining elements. This doesn't happen if two or more initializers are given: all remaining uninitialized elements are filled with zero bytes.
  • Byte arrays may also be initialized with a Lua string. This copies the whole string plus a terminating zero-byte. The copy stops early only if the array has a known, fixed size.
  • The fields of a struct are initialized in the order of their declaration. Uninitialized fields are filled with zero bytes.
  • Only the first field of a union can be initialized with a flat initializer.
  • Elements or fields which are aggregates themselves are initialized with a single initializer, but this may be a table initializer or a compatible aggregate.
  • Excess initializers cause an error.

Table Initializers
The following rules apply if a Lua table is used to initialize an Array or a struct/union:
  • If the table index [0] is non-nil, then the table is assumed to be zero-based. Otherwise it's assumed to be one-based.
  • Array elements, starting at index zero, are initialized one-by-one with the consecutive table elements, starting at either index [0] or [1]. This process stops at the first nil table element.
  • If exactly one array element was initialized, it's repeated for all the remaining elements. Otherwise all remaining uninitialized elements are filled with zero bytes.
  • The above logic only applies to arrays with a known fixed size. A VLA is only initialized with the element(s) given in the table. Depending on the use case, you may need to explicitly add a NULL or 0 terminator to a VLA.
  • A struct/union can be initialized in the order of the declaration of its fields. Each field is initialized with consecutive table elements, starting at either index [0] or [1]. This process stops at the first nil table element.
  • Otherwise, if neither index [0] nor [1] is present, a struct/union is initialized by looking up each field name (as a string key) in the table. Each non-nil value is used to initialize the corresponding field.
  • Uninitialized fields of a struct are filled with zero bytes, except for the trailing VLA of a VLS.
  • Initialization of a union stops after one field has been initialized. If no field has been initialized, the union is filled with zero bytes.
  • Elements or fields which are aggregates themselves are initialized with a single initializer, but this may be a nested table initializer (or a compatible aggregate).
  • Excess initializers for an array cause an error. Excess initializers for a struct/union are ignored. Unrelated table entries are ignored, too.
Example:

  local ffi = require("ffi")

  ffi.cdef[[
  struct foo { int a, b; };
  union bar { int i; double d; };
  struct nested { int x; struct foo y; };
  ]]

  ffi.new("int[3]", {})            --> 0, 0, 0
  ffi.new("int[3]", {1})           --> 1, 1, 1
  ffi.new("int[3]", {1,2})         --> 1, 2, 0
  ffi.new("int[3]", {1,2,3})       --> 1, 2, 3
  ffi.new("int[3]", {[0]=1})       --> 1, 1, 1
  ffi.new("int[3]", {[0]=1,2})     --> 1, 2, 0
  ffi.new("int[3]", {[0]=1,2,3})   --> 1, 2, 3
  ffi.new("int[3]", {[0]=1,2,3,4}) --> error: too many initializers

  ffi.new("struct foo", {})            --> a = 0, b = 0
  ffi.new("struct foo", {1})           --> a = 1, b = 0
  ffi.new("struct foo", {1,2})         --> a = 1, b = 2
  ffi.new("struct foo", {[0]=1,2})     --> a = 1, b = 2
  ffi.new("struct foo", {b=2})         --> a = 0, b = 2
  ffi.new("struct foo", {a=1,b=2,c=3}) --> a = 1, b = 2  'c' is ignored

  ffi.new("union bar", {})        --> i = 0, d = 0.0
  ffi.new("union bar", {1})       --> i = 1, d = ?
  ffi.new("union bar", {[0]=1,2}) --> i = 1, d = ?    '2' is ignored
  ffi.new("union bar", {d=2})     --> i = ?, d = 2.0

  ffi.new("struct nested", {1,{2,3}})     --> x = 1, y.a = 2, y.b = 3
  ffi.new("struct nested", {x=1,y={2,3}}) --> x = 1, y.a = 2, y.b = 3

Operations on cdata Objects
All of the standard Lua operators can be applied to cdata objects or a mix of a cdata object and another Lua object. The following list shows the pre-defined operations.

Reference types are dereferenced before performing each of the operations below — the operation is applied to the C type pointed to by the reference.

The pre-defined operations are always tried first before deferring to a metamethod or index table (if any) for the corresponding ctype (except for __new). An error is raised if the metamethod lookup or index table lookup fails.

Indexing a cdata object
  • Indexing a pointer/array: a cdata pointer/array can be indexed by a cdata number or a Lua number. The element address is computed as the base address plus the number value multiplied by the element size in bytes. A read access loads the element value and converts it to a Lua object. A write access converts a Lua object to the element type and stores the converted value to the element. An error is raised if the element size is undefined or a write access to a constant element is attempted.
  • Dereferencing a struct/union field: a cdata struct/union or a pointer to a struct/union can be dereferenced by a string key, giving the field name. The field address is computed as the base address plus the relative offset of the field. A read access loads the field value and converts it to a Lua object. A write access converts a Lua object to the field type and stores the converted value to the field. An error is raised if a write access to a constant struct/union or a constant field is attempted. Scoped enum constants or static constants are treated like a constant field.
  • Indexing a complex number: a complex number can be indexed either by a cdata number or a Lua number with the values 0 or 1, or by the strings "re" or "im". A read access loads the real part ([0], .re) or the imaginary part ([1], .im) of a complex number and converts it to a Lua number. The sub-parts of a complex number are immutable — assigning to an index of a complex number raises an error. Accessing out-of-bound indexes returns unspecified results, but is guaranteed not to trigger memory access violations.
  • Indexing a vector: a vector is treated like an array for indexing purposes, except the vector elements are immutable — assigning to an index of a vector raises an error.
  • - -
-

-A ctype object can be indexed with a string key, too. The only -pre-defined operation is reading scoped constants of -struct/union types. All other accesses defer -to the corresponding metamethods or index tables (if any). -

-

-Note: since there's (deliberately) no address-of operator, a cdata -object holding a value type is effectively immutable after -initialization. The JIT compiler benefits from this fact when applying -certain optimizations. -

-

-As a consequence, the elements of complex numbers and -vectors are immutable. But the elements of an aggregate holding these -types may be modified of course. I.e. you cannot assign to -foo.c.im, but you can assign a (newly created) complex number -to foo.c. -
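-
-A minimal sketch of this rule (the struct type foo_t with a complex field c is a hypothetical example; it assumes a complex value can be created from two numbers, as per the initializer rules above):
-
-local ffi = require("ffi")
-ffi.cdef[[typedef struct { complex c; } foo_t;]]
-local foo = ffi.new("foo_t")
---- foo.c.im = 2                  --> would raise an error: sub-parts are immutable
-foo.c = ffi.new("complex", 1, 2)  -- assigning a whole, newly created complex value is fine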

-

-The JIT compiler implements strict aliasing rules: accesses to different -types do not alias, except for differences in signedness (this -applies even to char pointers, unlike C99). Type punning -through unions is explicitly detected and allowed. -
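-
-For example, reinterpreting the bits of a float through a union is detected and allowed (a small sketch):
-
-local ffi = require("ffi")
-local bit = require("bit")
-local u = ffi.new("union { int32_t i; float f; }")
-u.f = 1.0
-print(bit.tohex(u.i))  --> 3f800000  (the float's bit pattern, read back as an int32_t)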

- -

Calling a cdata object

-
    - -
  • Constructor: a ctype object can be called and used as a -constructor. This is equivalent -to ffi.new(ct, ...), unless a __new metamethod is -defined. The __new metamethod is called with the ctype object -plus any other arguments passed to the constructor. Note that you have to -use ffi.new inside of it, since calling ct(...) would -cause infinite recursion.
  • - -
  • C function call: a cdata function or cdata function -pointer can be called. The passed arguments are -converted to the C types of the -parameters given by the function declaration. Arguments passed to the -variable argument part of vararg C function use -special conversion rules. This -C function is called and the return value (if any) is -converted to a Lua object.
    -On Windows/x86 systems, __stdcall functions are automatically -detected and a function declared as __cdecl (the default) is -silently fixed up after the first call.
  • - -
- -

Arithmetic on cdata objects

-
    - -
  • Pointer arithmetic: a cdata pointer/array and a cdata -number or a Lua number can be added or subtracted. The number must be -on the right hand side for a subtraction. The result is a pointer of -the same type with an address plus or minus the number value -multiplied by the element size in bytes. An error is raised if the -element size is undefined.
  • - -
  • Pointer difference: two compatible cdata pointers/arrays -can be subtracted. The result is the difference between their -addresses, divided by the element size in bytes. An error is raised if -the element size is undefined or zero.
  • - -
  • 64 bit integer arithmetic: the standard arithmetic -operators (+ - * / % ^ and unary -minus) can be applied to two cdata numbers, or a cdata number and a -Lua number. If one of them is a uint64_t, the other side is -converted to a uint64_t and an unsigned arithmetic operation -is performed. Otherwise both sides are converted to an -int64_t and a signed arithmetic operation is performed. The -result is a boxed 64 bit cdata object.
    - -If one of the operands is an enum and the other operand is a -string, the string is converted to the value of a matching enum -constant before the above conversion.
    - -These rules ensure that 64 bit integers are "sticky". Any -expression involving at least one 64 bit integer operand results -in another one. The undefined cases for the division, modulo and power -operators return 2LL ^ 63 or -2ULL ^ 63.
    - -You'll have to explicitly convert a 64 bit integer to a Lua -number (e.g. for regular floating-point calculations) with -tonumber(). But note this may incur a precision loss (see the sketch after this list).
  • - -
  • 64 bit bitwise operations: the rules for 64 bit -arithmetic operators apply analogously.
    - -Unlike the other bit.* operations, bit.tobit() -converts a cdata number via int64_t to int32_t and -returns a Lua number.
    - -For bit.band(), bit.bor() and bit.bxor(), the -conversion to int64_t or uint64_t applies to -all arguments, if any argument is a cdata number.
    - -For all other operations, only the first argument is used to determine -the output type. This implies that a cdata number as a shift count for -shifts and rotates is accepted, but that alone does not cause -a cdata number output. - -
- -
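-
-A short sketch of the 64 bit arithmetic and conversion rules described in the list above:
-
-local x = 10LL            -- boxed 64 bit integer (int64_t cdata)
-local y = x + 1           -- the Lua number is converted; the result stays an int64_t
-print(y)                  --> 11LL
-print(2LL ^ 10)           --> 1024LL  (64 bit integers are "sticky")
-print(tonumber(y) / 4)    --> 2.75    (explicit conversion for floating-point math)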

Comparisons of cdata objects

-
    - -
  • Pointer comparison: two compatible cdata pointers/arrays -can be compared. The result is the same as an unsigned comparison of -their addresses. nil is treated like a NULL pointer, -which is compatible with any other pointer type.
  • - -
  • 64 bit integer comparison: two cdata numbers, or a -cdata number and a Lua number can be compared with each other. If one -of them is a uint64_t, the other side is converted to a -uint64_t and an unsigned comparison is performed. Otherwise -both sides are converted to an int64_t and a signed -comparison is performed.
    - -If one of the operands is an enum and the other operand is a -string, the string is converted to the value of a matching enum -constant before the above conversion.
    - -
  • Comparisons for equality/inequality never raise an error. -Even incompatible pointers can be compared for equality by address. Any -other incompatible comparison (also with non-cdata objects) treats the -two sides as unequal.
  • - -
- -

cdata objects as table keys

-

-Lua tables may be indexed by cdata objects, but this doesn't provide -any useful semantics — cdata objects are unsuitable as table -keys! -

-

-A cdata object is treated like any other garbage-collected object and -is hashed and compared by its address for table indexing. Since -there's no interning for cdata value types, the same value may be -boxed in different cdata objects with different addresses. Thus -t[1LL+1LL] and t[2LL] usually do not point to -the same hash slot and they certainly do not point to the same -hash slot as t[2]. -
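-
-A small illustration of this behavior (as noted above, hashing is by address, so "usually" applies):
-
-local t = {}
-t[2LL] = "x"
-print(t[1LL+1LL])        --> nil (usually): a different boxed cdata object
-print(t[2])              --> nil: the plain Lua number 2 is yet another key
-t[tonumber(2LL)] = "y"
-print(t[2])              --> y: tonumber() yields a plain Lua number key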

-

-It would seriously drive up implementation complexity and slow down -the common case, if one were to add extra handling for by-value -hashing and comparisons to Lua tables. Given the ubiquity of their use -inside the VM, this is not acceptable. -

-

-There are three viable alternatives, if you really need to use cdata -objects as keys: -

-
    - -
  • If you can get by with the precision of Lua numbers -(52 bits), then use tonumber() on a cdata number or -combine multiple fields of a cdata aggregate to a Lua number. Then use -the resulting Lua number as a key when indexing tables.
    -One obvious benefit: t[tonumber(2LL)] does point to -the same slot as t[2].
  • - -
  • Otherwise use either tostring() on 64 bit integers -or complex numbers or combine multiple fields of a cdata aggregate to -a Lua string (e.g. with -ffi.string()). Then -use the resulting Lua string as a key when indexing tables.
  • - -
  • Create your own specialized hash table implementation using the -C types provided by the FFI library, just like you would in -C code. Ultimately this may give much better performance than the -other alternatives or what a generic by-value hash table could -possibly provide.
  • - -
- -

Parameterized Types

-

-To facilitate some abstractions, the two functions -ffi.typeof and -ffi.cdef support -parameterized types in C declarations. Note: none of the other API -functions taking a cdecl allow this. -

-

-Any place you can write a typedef name, an -identifier or a number in a declaration, you can write -$ (the dollar sign) instead. These placeholders are replaced in -order of appearance with the arguments following the cdecl string: -

-
--- Declare a struct with a parameterized field type and name:
-ffi.cdef([[
-typedef struct { $ $; } foo_t;
-]], type1, name1)
-
--- Anonymous struct with dynamic names:
-local bar_t = ffi.typeof("struct { int $, $; }", name1, name2)
--- Derived pointer type:
-local bar_ptr_t = ffi.typeof("$ *", bar_t)
-
--- Parameterized dimensions work even where a VLA won't work:
-local matrix_t = ffi.typeof("uint8_t[$][$]", width, height)
-
-

-Caveat: this is not simple text substitution! A passed ctype or -cdata object is treated like the underlying type, a passed string is -considered an identifier and a number is considered a number. You must -not mix this up: e.g. passing "int" as a string doesn't work in -place of a type, you'd need to use ffi.typeof("int") instead. -

-

-The main use for parameterized types is libraries implementing abstract -data types -(» example), -similar to what can be achieved with C++ template metaprogramming. -Another use case is derived types of anonymous structs, which avoids -pollution of the global struct namespace. -

-

-Please note that parameterized types are a nice tool and indispensable -for certain use cases. But you'll want to use them sparingly in regular -code, e.g. when all types are actually fixed. -

- -

Garbage Collection of cdata Objects

-

-All explicitly (ffi.new(), ffi.cast() etc.) or -implicitly (accessors) created cdata objects are garbage collected. -You need to ensure to retain valid references to cdata objects -somewhere on a Lua stack, an upvalue or in a Lua table while they are -still in use. Once the last reference to a cdata object is gone, the -garbage collector will automatically free the memory used by it (at -the end of the next GC cycle). -

-

-Please note that pointers themselves are cdata objects, however they -are not followed by the garbage collector. So e.g. if you -assign a cdata array to a pointer, you must keep the cdata object -holding the array alive as long as the pointer is still in use: -

-
-ffi.cdef[[
-typedef struct { int *a; } foo_t;
-]]
-
-local s = ffi.new("foo_t", ffi.new("int[10]")) -- WRONG!
-
-local a = ffi.new("int[10]") -- OK
-local s = ffi.new("foo_t", a)
--- Now do something with 's', but keep 'a' alive until you're done.
-
-

-Similar rules apply for Lua strings which are implicitly converted to -"const char *": the string object itself must be -referenced somewhere or it'll be garbage collected eventually. The -pointer will then point to stale data, which may have already been -overwritten. Note that string literals are automatically kept -alive as long as the function containing it (actually its prototype) -is not garbage collected. -
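-
-A hedged sketch of this pitfall (make_name is a hypothetical helper that builds strings dynamically):
-
-local ffi = require("ffi")
-local function make_name(i)
-  return "item" .. i                               -- dynamically created Lua string
-end
-local p = ffi.cast("const char *", make_name(1))   -- WRONG: nothing anchors the string
-local s = make_name(2)
-local q = ffi.cast("const char *", s)              -- OK as long as 's' stays referenced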

-

-Objects which are passed as an argument to an external C function -are kept alive until the call returns. So it's generally safe to -create temporary cdata objects in argument lists. This is a common -idiom for passing specific C types to -vararg functions. -

-

-Memory areas returned by C functions (e.g. from malloc()) -must be manually managed, of course (or use -ffi.gc()). Pointers to -cdata objects are indistinguishable from pointers returned by C -functions (which is one of the reasons why the GC cannot follow them). -

- -

Callbacks

-

-The LuaJIT FFI automatically generates special callback functions -whenever a Lua function is converted to a C function pointer. This -associates the generated callback function pointer with the C type -of the function pointer and the Lua function object (closure). -

-

-This can happen implicitly due to the usual conversions, e.g. when -passing a Lua function to a function pointer argument. Or you can use -ffi.cast() to explicitly cast a Lua function to a -C function pointer. -

-

-Currently only certain C function types can be used as callback -functions. Neither C vararg functions nor functions with -pass-by-value aggregate argument or result types are supported. There -are no restrictions for the kind of Lua functions that can be called -from the callback — no checks for the proper number of arguments -are made. The return value of the Lua function will be converted to the -result type and an error will be thrown for invalid conversions. -

-

-It's allowed to throw errors across a callback invocation, but it's not -advisable in general. Do this only if you know that the C function that -called the callback copes with the forced stack unwinding and doesn't -leak resources. -

-

-One thing that's not allowed is to let an FFI call into a C function -get JIT-compiled when that C function in turn invokes a callback that -calls back into Lua. -Usually this attempt is caught by the interpreter first and the -C function is blacklisted for compilation. -

-

-However, this heuristic may fail under specific circumstances: e.g. a -message polling function might not run Lua callbacks right away and the call -gets JIT-compiled. If it later happens to call back into Lua (e.g. a rarely -invoked error callback), you'll get a VM PANIC with the message -"bad callback". Then you'll need to manually turn off -JIT-compilation with -jit.off() for the -surrounding Lua function that invokes such a message polling function (or -similar). -
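-
-A hedged sketch of the suggested workaround (lib.poll_events stands in for a hypothetical C polling function from an ffi.load()ed namespace):
-
-local jit = require("jit")
-local function pump_messages()
-  lib.poll_events()       -- hypothetical: may rarely invoke a Lua callback
-end
-jit.off(pump_messages)    -- keep the wrapper interpreted to avoid "bad callback"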

- -

Callback resource handling

-

-Callbacks take up resources — you can only have a limited number -of them at the same time (500 - 1000, depending on the -architecture). The associated Lua functions are anchored to prevent -garbage collection, too. -

-

-Callbacks due to implicit conversions are permanent! There is no -way to guess their lifetime, since the C side might store the -function pointer for later use (typical for GUI toolkits). The associated -resources cannot be reclaimed until termination: -

-
-ffi.cdef[[
-typedef int (__stdcall *WNDENUMPROC)(void *hwnd, intptr_t l);
-int EnumWindows(WNDENUMPROC func, intptr_t l);
-]]
-
--- Implicit conversion to a callback via function pointer argument.
-local count = 0
-ffi.C.EnumWindows(function(hwnd, l)
-  count = count + 1
-  return true
-end, 0)
--- The callback is permanent and its resources cannot be reclaimed!
--- Ok, so this may not be a problem, if you do this only once.
-
-

-Note: this example shows that you must properly declare -__stdcall callbacks on Windows/x86 systems. The calling -convention cannot be automatically detected, unlike for -__stdcall calls to Windows functions. -

-

-For some use cases it's necessary to free up the resources or to -dynamically redirect callbacks. Use an explicit cast to a -C function pointer and keep the resulting cdata object. Then use -the cb:free() -or cb:set() methods -on the cdata object: -

-
--- Explicitly convert to a callback via cast.
-local count = 0
-local cb = ffi.cast("WNDENUMPROC", function(hwnd, l)
-  count = count + 1
-  return true
-end)
-
--- Pass it to a C function.
-ffi.C.EnumWindows(cb, 0)
--- EnumWindows doesn't need the callback after it returns, so free it.
-
-cb:free()
--- The callback function pointer is no longer valid and its resources
--- will be reclaimed. The created Lua closure will be garbage collected.
-
- -

Callback performance

-

-Callbacks are slow! First, the C to Lua transition itself -has an unavoidable cost, similar to a lua_call() or -lua_pcall(). Argument and result marshalling add to that cost. -And finally, neither the C compiler nor LuaJIT can inline or -optimize across the language barrier and hoist repeated computations out -of a callback function. -

-

-Do not use callbacks for performance-sensitive work: e.g. consider a -numerical integration routine which takes a user-defined function to -integrate over. It's a bad idea to call a user-defined Lua function from -C code millions of times. The callback overhead will be absolutely -detrimental for performance. -

-

-It's considerably faster to write the numerical integration routine -itself in Lua — the JIT compiler will be able to inline the -user-defined function and optimize it together with its calling context, -with very competitive performance. -

-

-As a general guideline: use callbacks only when you must, because -of existing C APIs. E.g. callback performance is irrelevant for a -GUI application, which waits for user input most of the time, anyway. -

-

-For new designs avoid push-style APIs: a C function repeatedly -calling a callback for each result. Instead use pull-style APIs: -call a C function repeatedly to get a new result. Calls from Lua -to C via the FFI are much faster than the other way round. Most well-designed -libraries already use pull-style APIs (read/write, get/put). -
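-
-A hedged sketch of a pull-style loop, assuming a POSIX read() declared with a simplified signature; 'fd' and 'consume' are hypothetical caller-supplied values:
-
-local ffi = require("ffi")
-ffi.cdef[[long read(int fd, void *buf, unsigned long nbytes);]]  -- simplified POSIX declaration
-local buf = ffi.new("uint8_t[4096]")
-local function read_all(fd, consume)
-  while true do
-    local n = tonumber(ffi.C.read(fd, buf, 4096))
-    if n <= 0 then break end
-    consume(ffi.string(buf, n))          -- Lua pulls each chunk from C
-  end
-end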

- -

C Library Namespaces

-

-A C library namespace is a special kind of object which allows -access to the symbols contained in shared libraries or the default -symbol namespace. The default -ffi.C namespace is -automatically created when the FFI library is loaded. C library -namespaces for specific shared libraries may be created with the -ffi.load() API -function. -

-

-Indexing a C library namespace object with a symbol name (a Lua -string) automatically binds it to the library. First the symbol type -is resolved — it must have been declared with -ffi.cdef. Then the -symbol address is resolved by searching for the symbol name in the -associated shared libraries or the default symbol namespace. Finally, -the resulting binding between the symbol name, the symbol type and its -address is cached. Missing symbol declarations or nonexistent symbol -names cause an error. -

-

-This is what happens on a read access for the different kinds of -symbols: -

-
    - -
  • External functions: a cdata object with the type of the function -and its address is returned.
  • - -
  • External variables: the symbol address is dereferenced and the -loaded value is converted to a Lua object -and returned.
  • - -
  • Constant values (static const or enum -constants): the constant is converted to a -Lua object and returned.
  • - -
-

-This is what happens on a write access: -

-
    - -
  • External variables: the value to be written is -converted to the C type of the -variable and then stored at the symbol address.
  • - -
  • Writing to constant variables or to any other symbol type causes -an error, like any other attempted write to a constant location.
  • - -
-

-C library namespaces themselves are garbage collected objects. If -the last reference to the namespace object is gone, the garbage -collector will eventually release the shared library reference and -remove all memory associated with the namespace. Since this may -trigger the removal of the shared library from the memory of the -running process, it's generally not safe to use function -cdata objects obtained from a library if the namespace object may be -unreferenced. -

-

-Performance notice: the JIT compiler specializes to the identity of -namespace objects and to the strings used to index it. This -effectively turns function cdata objects into constants. It's not -useful and actually counter-productive to explicitly cache these -function objects, e.g. local strlen = ffi.C.strlen. OTOH it -is useful to cache the namespace itself, e.g. local C = -ffi.C. -

- -

No Hand-holding!

-

-The FFI library has been designed as a low-level library. The -goal is to interface with C code and C data types with a -minimum of overhead. This means you can do anything you can do -from C: access all memory, overwrite anything in memory, call -machine code at any memory address and so on. -

-

-The FFI library provides no memory safety, unlike regular Lua -code. It will happily allow you to dereference a NULL -pointer, to access arrays out of bounds or to misdeclare -C functions. If you make a mistake, your application might crash, -just like equivalent C code would. -

-

-This behavior is inevitable, since the goal is to provide full -interoperability with C code. Adding extra safety measures, like -bounds checks, would be futile. There's no way to detect -misdeclarations of C functions, since shared libraries only -provide symbol names, but no type information. Likewise there's no way -to infer the valid range of indexes for a returned pointer. -

-

-Again: the FFI library is a low-level library. This implies it needs -to be used with care, but its flexibility and performance often -outweigh this concern. If you're a C or C++ developer, it'll be easy -to apply your existing knowledge. OTOH writing code for the FFI -library is not for the faint of heart and probably shouldn't be the -first exercise for someone with little experience in Lua, C or C++. -

-

-As a corollary of the above, the FFI library is not safe for use by -untrusted Lua code. If you're sandboxing untrusted Lua code, you -definitely don't want to give this code access to the FFI library or -to any cdata object (except 64 bit integers or complex -numbers). Any properly engineered Lua sandbox needs to provide safety -wrappers for many of the standard Lua library functions — -similar wrappers need to be written for high-level operations on FFI -data types, too. -

- -

Current Status

-

-The initial release of the FFI library has some limitations and is -missing some features. Most of these will be fixed in future releases. -

-

-C language support is -currently incomplete: -

-
    -
  • C declarations are not passed through a C pre-processor, -yet.
  • -
  • The C parser is able to evaluate most constant expressions -commonly found in C header files. However it doesn't handle the -full range of C expression semantics and may fail for some -obscure constructs.
  • -
  • static const declarations only work for integer types -up to 32 bits. Neither declaring string constants nor -floating-point constants is supported.
  • -
  • Packed struct bitfields that cross container boundaries -are not implemented.
  • -
  • Native vector types may be defined with the GCC mode or -vector_size attribute. But no operations other than loading, -storing and initializing them are supported, yet.
  • -
  • The volatile type qualifier is currently ignored by -compiled code.
  • -
  • ffi.cdef silently -ignores most re-declarations. Note: avoid re-declarations which do not -conform to C99. The implementation will eventually be changed to -perform strict checks.
  • -
-

-The JIT compiler already handles a large subset of all FFI operations. -It automatically falls back to the interpreter for unimplemented -operations (you can check for this with the --jv command line option). -The following operations are currently not compiled and may exhibit -suboptimal performance, especially when used in inner loops: -

-
    -
  • Bitfield accesses and initializations.
  • -
  • Vector operations.
  • -
  • Table initializers.
  • -
  • Initialization of nested struct/union types.
  • -
  • Non-default initialization of VLA/VLS or large C types -(> 128 bytes or > 16 array elements).
  • -
  • Conversions from lightuserdata to void *.
  • -
  • Pointer differences for element sizes that are not a power of -two.
  • -
  • Calls to C functions with aggregates passed or returned by -value.
  • -
  • Calls to ctype metamethods which are not plain functions.
  • -
  • ctype __newindex tables and non-string lookups in ctype -__index tables.
  • -
  • tostring() for cdata types.
  • -
  • Calls to ffi.cdef(), ffi.load() and -ffi.metatype().
  • -
-

-Other missing features: -

-
    -
  • Arithmetic for complex numbers.
  • -
  • Passing structs by value to vararg C functions.
  • -
  • C++ exception interoperability -does not extend to C functions called via the FFI, if the call is -compiled.
  • -
-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_tutorial.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_tutorial.html deleted file mode 100644 index e3f01460a80..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_ffi_tutorial.html +++ /dev/null @@ -1,603 +0,0 @@ - - - -FFI Tutorial - - - - - - - - - -
-Lua -
- - -
-

-This page is intended to give you an overview of the features of the FFI -library by presenting a few use cases and guidelines. -

-

-This page makes no attempt to explain all of the FFI library, though. -You'll want to have a look at the ffi.* API -function reference and the FFI -semantics to learn more. -

- -

Loading the FFI Library

-

-The FFI library is built into LuaJIT by default, but it's not loaded -and initialized by default. The suggested way to use the FFI library -is to add the following to the start of every Lua file that needs one -of its functions: -

-
-local ffi = require("ffi")
-
-

-Please note this doesn't define an ffi variable in the table -of globals — you really need to use the local variable. The -require function ensures the library is only loaded once. -

-

-Note: If you want to experiment with the FFI from the interactive prompt -of the command line executable, omit the local, as it doesn't -preserve local variables across lines. -

- -

Accessing Standard System Functions

-

-The following code explains how to access standard system functions. -We slowly print two lines of dots by sleeping for 10 milliseconds -after each dot: -

-
-
-local ffi = require("ffi")
-ffi.cdef[[
-void Sleep(int ms);
-int poll(struct pollfd *fds, unsigned long nfds, int timeout);
-]]
-
-local sleep
-if ffi.os == "Windows" then
-  function sleep(s)
-    ffi.C.Sleep(s*1000)
-  end
-else
-  function sleep(s)
-    ffi.C.poll(nil, 0, s*1000)
-  end
-end
-
-for i=1,160 do
-  io.write("."); io.flush()
-  sleep(0.01)
-end
-io.write("\n")
-
-

-Here's the step-by-step explanation: -

-

- This defines the -C library functions we're going to use. The part inside the -double-brackets (in green) is just standard C syntax. You can -usually get this info from the C header files or the -documentation provided by each C library or C compiler. -

-

- The difficulty we're -facing here is that there are different standards to choose from. -Windows has a simple Sleep() function. On other systems there -are a variety of functions available to achieve sub-second sleeps, but -with no clear consensus. Thankfully poll() can be used for -this task, too, and it's present on most non-Windows systems. The -check for ffi.os makes sure we use the Windows-specific -function only on Windows systems. -

-

- Here we're wrapping the -call to the C function in a Lua function. This isn't strictly -necessary, but it's helpful to deal with system-specific issues only -in one part of the code. The way we're wrapping it ensures the check -for the OS is only done during initialization and not for every call. -

-

- A more subtle point is -that we defined our sleep() function (for the sake of this -example) as taking the number of seconds, but accepting fractional -seconds. Multiplying this by 1000 gets us milliseconds, but that still -leaves it a Lua number, which is a floating-point value. Alas, the -Sleep() function only accepts an integer value. Luckily for -us, the FFI library automatically performs the conversion when calling -the function (truncating the FP value towards zero, like in C). -

-

-Some readers will notice that Sleep() is part of -KERNEL32.DLL and is also a stdcall function. So how -can this possibly work? The FFI library provides the ffi.C -default C library namespace, which allows calling functions from -the default set of libraries, like a C compiler would. Also, the -FFI library automatically detects stdcall functions, so you -don't need to declare them as such. -

-

- The poll() -function takes a couple more arguments we're not going to use. You can -simply use nil to pass a NULL pointer and 0 -for the nfds parameter. Please note that the -number 0 does not convert to a pointer value, -unlike in C++. You really have to pass pointers to pointer arguments -and numbers to number arguments. -

-

-The page on FFI semantics has all -of the gory details about -conversions between Lua -objects and C types. For the most part you don't have to deal -with this, as it's performed automatically and it's carefully designed -to bridge the semantic differences between Lua and C. -

-

- Now that we have defined -our own sleep() function, we can just call it from plain Lua -code. That wasn't so bad, huh? Turning these boring animated dots into -a fascinating best-selling game is left as an exercise for the reader. -:-) -

- -

Accessing the zlib Compression Library

-

-The following code shows how to access the zlib compression library from Lua code. -We'll define two convenience wrapper functions that take a string and -compress or uncompress it to another string: -

-
-
-local ffi = require("ffi")
-ffi.cdef[[
-unsigned long compressBound(unsigned long sourceLen);
-int compress2(uint8_t *dest, unsigned long *destLen,
-	      const uint8_t *source, unsigned long sourceLen, int level);
-int uncompress(uint8_t *dest, unsigned long *destLen,
-	       const uint8_t *source, unsigned long sourceLen);
-]]
-local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")
-
-local function compress(txt)
-  local n = zlib.compressBound(#txt)
-  local buf = ffi.new("uint8_t[?]", n)
-  local buflen = ffi.new("unsigned long[1]", n)
-  local res = zlib.compress2(buf, buflen, txt, #txt, 9)
-  assert(res == 0)
-  return ffi.string(buf, buflen[0])
-end
-
-local function uncompress(comp, n)
-  local buf = ffi.new("uint8_t[?]", n)
-  local buflen = ffi.new("unsigned long[1]", n)
-  local res = zlib.uncompress(buf, buflen, comp, #comp)
-  assert(res == 0)
-  return ffi.string(buf, buflen[0])
-end
-
--- Simple test code.
-local txt = string.rep("abcd", 1000)
-print("Uncompressed size: ", #txt)
-local c = compress(txt)
-print("Compressed size: ", #c)
-local txt2 = uncompress(c, #txt)
-assert(txt2 == txt)
-
-

-Here's the step-by-step explanation: -

-

- This defines some of the -C functions provided by zlib. For the sake of this example, some -type indirections have been reduced and it uses the pre-defined -fixed-size integer types, while still adhering to the zlib API/ABI. -

-

- This loads the zlib shared -library. On POSIX systems it's named libz.so and usually -comes pre-installed. Since ffi.load() automatically adds any -missing standard prefixes/suffixes, we can simply load the -"z" library. On Windows it's named zlib1.dll and -you'll have to download it first from the -» zlib site. The check for -ffi.os makes sure we pass the right name to -ffi.load(). -

-

- First, the maximum size of -the compression buffer is obtained by calling the -zlib.compressBound function with the length of the -uncompressed string. The next line allocates a byte buffer of this -size. The [?] in the type specification indicates a -variable-length array (VLA). The actual number of elements of this -array is given as the 2nd argument to ffi.new(). -

-

- This may look strange at -first, but have a look at the declaration of the compress2 -function from zlib: the destination length is defined as a pointer! -This is because you pass in the maximum buffer size and get back the -actual length that was used. -

-

-In C you'd pass in the address of a local variable -(&buflen). But since there's no address-of operator in -Lua, we'll just pass in a one-element array. Conveniently it can be -initialized with the maximum buffer size in one step. Calling the -actual zlib.compress2 function is then straightforward. -

-

- We want to return the -compressed data as a Lua string, so we'll use ffi.string(). -It needs a pointer to the start of the data and the actual length. The -length has been returned in the buflen array, so we'll just -get it from there. -

-

-Note that since the function returns now, the buf and -buflen variables will eventually be garbage collected. This -is fine, because ffi.string() has copied the contents to a -newly created (interned) Lua string. If you plan to call this function -lots of times, consider reusing the buffers and/or handing back the -results in buffers instead of strings. This will reduce the overhead -for garbage collection and string interning. -
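-
-A sketch of such buffer reuse, assuming the ffi and zlib locals from the example above:
-
-local bufsize = 1024 * 1024
-local buf = ffi.new("uint8_t[?]", bufsize)
-local buflen = ffi.new("unsigned long[1]")
-local function compress_reusing_buffers(txt)
-  assert(tonumber(zlib.compressBound(#txt)) <= bufsize, "scratch buffer too small")
-  buflen[0] = bufsize                    -- in: buffer capacity, out: compressed length
-  assert(zlib.compress2(buf, buflen, txt, #txt, 9) == 0)
-  return ffi.string(buf, buflen[0])
-end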

-

- The uncompress -function does the exact opposite of the compress function. -The compressed data doesn't include the size of the original string, -so this needs to be passed in. Otherwise no surprises here. -

-

- The code that makes use -of the functions we just defined is just plain Lua code. It doesn't -need to know anything about the LuaJIT FFI — the convenience -wrapper functions completely hide it. -

-

-One major advantage of the LuaJIT FFI is that you are now able to -write those wrappers in Lua. And at a fraction of the time it -would cost you to create an extra C module using the Lua/C API. -Many of the simpler C functions can probably be used directly -from your Lua code, without any wrappers. -

-

-Side note: the zlib API uses the long type for passing -lengths and sizes around. But all those zlib functions actually only -deal with 32 bit values. This is an unfortunate choice for a -public API, but may be explained by zlib's history — we'll just -have to deal with it. -

-

-First, you should know that a long is a 64 bit type e.g. -on POSIX/x64 systems, but a 32 bit type on Windows/x64 and on -32 bit systems. Thus a long result can be either a plain -Lua number or a boxed 64 bit integer cdata object, depending on -the target system. -

-

-Ok, so the ffi.* functions generally accept cdata objects -wherever you'd want to use a number. That's why we get away with -passing n to ffi.string() above. But other Lua -library functions or modules don't know how to deal with this. So for -maximum portability one needs to use tonumber() on returned -long results before passing them on. Otherwise the -application might work on some systems, but would fail in a POSIX/x64 -environment. -
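-
-For example, when the result needs to travel beyond the FFI-aware code (some_plain_lua_module is a hypothetical consumer that expects a plain number):
-
-local bound = tonumber(zlib.compressBound(#txt))  -- plain Lua number on all platforms
-some_plain_lua_module.reserve(bound)              -- hypothetical consumer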

- -

Defining Metamethods for a C Type

-

-The following code explains how to define metamethods for a C type. -We define a simple point type and add some operations to it: -

-
-
-local ffi = require("ffi")
-ffi.cdef[[
-typedef struct { double x, y; } point_t;
-]]
-
-local point
-local mt = {
-  __add = function(a, b) return point(a.x+b.x, a.y+b.y) end,
-  __len = function(a) return math.sqrt(a.x*a.x + a.y*a.y) end,
-  __index = {
-    area = function(a) return a.x*a.x + a.y*a.y end,
-  },
-}
-point = ffi.metatype("point_t", mt)
-
-local a = point(3, 4)
-print(a.x, a.y)  --> 3  4
-print(#a)        --> 5
-print(a:area())  --> 25
-local b = a + point(0.5, 8)
-print(#b)        --> 12.5
-
-

-Here's the step-by-step explanation: -

-

- This defines the C type for a -two-dimensional point object. -

-

- We have to declare the variable -holding the point constructor first, because it's used inside of a -metamethod. -

-

- Let's define an __add -metamethod which adds the coordinates of two points and creates a new -point object. For simplicity, this function assumes that both arguments -are points. But it could be any mix of objects, if at least one operand -is of the required type (e.g. adding a point plus a number or vice -versa). Our __len metamethod returns the distance of a point to -the origin. -

-

- If we run out of operators, we can -define named methods, too. Here the __index table defines an -area function. For custom indexing needs, one might want to -define __index and __newindex functions instead. -

-

- This associates the metamethods with -our C type. This only needs to be done once. For convenience, a -constructor is returned by -ffi.metatype(). -We're not required to use it, though. The original C type can still -be used e.g. to create an array of points. The metamethods automatically -apply to any and all uses of this type. -

-

-Please note that the association with a metatable is permanent and -the metatable must not be modified afterwards! Ditto for the -__index table. -

-

- Here are some simple usage examples -for the point type and their expected results. The pre-defined -operations (such as a.x) can be freely mixed with the newly -defined metamethods. Note that area is a method and must be -called with the Lua syntax for methods: a:area(), not -a.area(). -

-

-The C type metamethod mechanism is most useful when used in -conjunction with C libraries that are written in an object-oriented -style. Creators return a pointer to a new instance and methods take an -instance pointer as the first argument. Sometimes you can just point -__index to the library namespace and __gc to the -destructor and you're done. But often enough you'll want to add -convenience wrappers, e.g. to return actual Lua strings or when -returning multiple values. -

-

-Some C libraries only declare instance pointers as an opaque -void * type. In this case you can use a fake type for all -declarations, e.g. a pointer to a named (incomplete) struct will do: -typedef struct foo_type *foo_handle. The C side doesn't -know what you declare with the LuaJIT FFI, but as long as the underlying -types are compatible, everything still works. -
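-
-A hedged sketch of this fake-type idiom (the foo library and its functions are hypothetical):
-
-local ffi = require("ffi")
-ffi.cdef[[
-typedef struct foo_type *foo_handle;  /* opaque: the real layout stays hidden */
-foo_handle foo_new(void);
-void foo_free(foo_handle h);
-int foo_frob(foo_handle h, int x);
-]]
-local lib = ffi.load("foo")                    -- hypothetical shared library
-local h = ffi.gc(lib.foo_new(), lib.foo_free)  -- tie the destructor to this handle
-lib.foo_frob(h, 42)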

- -

Translating C Idioms

-

-Here's a list of common C idioms and their translation to the -LuaJIT FFI: -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Idiom                         C code                         Lua code
----------------------------  -----------------------------  --------------------------
Pointer dereference           int *p;
                              x = *p;                        x = p[0]
                              *p = y;                        p[0] = y

Pointer indexing              int i, *p;
                              x = p[i];                      x = p[i]
                              p[i+1] = y;                    p[i+1] = y

Array indexing                int i, a[];
                              x = a[i];                      x = a[i]
                              a[i+1] = y;                    a[i+1] = y

struct/union dereference      struct foo s;
                              x = s.field;                   x = s.field
                              s.field = y;                   s.field = y

struct/union pointer deref.   struct foo *sp;
                              x = sp->field;                 x = sp.field
                              sp->field = y;                 sp.field = y

Pointer arithmetic            int i, *p;
                              x = p + i;                     x = p + i
                              y = p - i;                     y = p - i

Pointer difference            int *p1, *p2;
                              x = p1 - p2;                   x = p1 - p2

Array element pointer         int i, a[];
                              x = &a[i];                     x = a+i

Cast pointer to address       int *p;
                              x = (intptr_t)p;               x = tonumber(
                                                               ffi.cast("intptr_t", p))

Functions with outargs        void foo(int *inoutlen);       local len =
                              int len = x;                     ffi.new("int[1]", x)
                              foo(&len);                     foo(len)
                              y = len;                       y = len[0]

Vararg conversions            int printf(char *fmt, ...);
                              printf("%g", 1.0);             printf("%g", 1);
                              printf("%d", 1);               printf("%d",
                                                               ffi.new("int", 1))
- -

To Cache or Not to Cache

-

-It's a common Lua idiom to cache library functions in local variables -or upvalues, e.g.: -

-
-local byte, char = string.byte, string.char
-local function foo(x)
-  return char(byte(x)+1)
-end
-
-

-This replaces several hash-table lookups with a (faster) direct use of -a local or an upvalue. This is less important with LuaJIT, since the -JIT compiler optimizes hash-table lookups a lot and is even able to -hoist most of them out of the inner loops. It can't eliminate -all of them, though, and it saves some typing for often-used -functions. So there's still a place for this, even with LuaJIT. -

-

-The situation is a bit different with C function calls via the -FFI library. The JIT compiler has special logic to eliminate all -of the lookup overhead for functions resolved from a -C library namespace! -Thus it's not helpful and actually counter-productive to cache -individual C functions like this: -

-
-local funca, funcb = ffi.C.funca, ffi.C.funcb -- Not helpful!
-local function foo(x, n)
-  for i=1,n do funcb(funca(x, i), 1) end
-end
-
-

-This turns them into indirect calls and generates bigger and slower -machine code. Instead you'll want to cache the namespace itself and -rely on the JIT compiler to eliminate the lookups: -

-
-local C = ffi.C          -- Instead use this!
-local function foo(x, n)
-  for i=1,n do C.funcb(C.funca(x, i), 1) end
-end
-
-

-This generates both shorter and faster code. So don't cache -C functions, but do cache namespaces! Most often the -namespace is already in a local variable at an outer scope, e.g. from -local lib = ffi.load(...). Note that copying -it to a local variable in the function scope is unnecessary. -

-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_jit.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_jit.html deleted file mode 100644 index a569dd570e4..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_jit.html +++ /dev/null @@ -1,201 +0,0 @@ - - - -jit.* Library - - - - - - - - -
-Lua -
- - -
-

-The functions in this built-in module control the behavior of the JIT -compiler engine. Note that JIT-compilation is fully automatic — -you probably won't need to use any of the following functions unless -you have special needs. -

- -

jit.on()
-jit.off()

-

-Turns the whole JIT compiler on (default) or off. -

-

-These functions are typically used with the command line options --j on or -j off. -

- -

jit.flush()

-

-Flushes the whole cache of compiled code. -

- -

jit.on(func|true [,true|false])
-jit.off(func|true [,true|false])
-jit.flush(func|true [,true|false])

-

-jit.on enables JIT compilation for a Lua function (this is -the default). -

-

-jit.off disables JIT compilation for a Lua function and -flushes any already compiled code from the code cache. -

-

-jit.flush flushes the code, but doesn't affect the -enable/disable status. -

-

-The current function, i.e. the Lua function calling this library -function, can also be specified by passing true as the first -argument. -

-

-If the second argument is true, JIT compilation is also -enabled, disabled or flushed recursively for all sub-functions of a -function. With false only the sub-functions are affected. -

-

-The jit.on and jit.off functions only set a flag -which is checked when the function is about to be compiled. They do -not trigger immediate compilation. -

-

-Typical usage is jit.off(true, true) in the main chunk -of a module to turn off JIT compilation for the whole module for -debugging purposes. -
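-
-For example, at the top of a module you want to debug without the JIT compiler (a minimal sketch):
-
-local jit = require("jit")
-jit.off(true, true)   -- this main chunk and all of its sub-functions stay interpreted
---- ... rest of the module ...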

- -

jit.flush(tr)

-

-Flushes the root trace, specified by its number, and all of its side -traces from the cache. The code for the trace will be retained as long -as there are any other traces which link to it. -

- -

status, ... = jit.status()

-

-Returns the current status of the JIT compiler. The first result is -either true or false if the JIT compiler is turned -on or off. The remaining results are strings for CPU-specific features -and enabled optimizations. -

- -

jit.version

-

-Contains the LuaJIT version string. -

- -

jit.version_num

-

-Contains the version number of the LuaJIT core. Version xx.yy.zz -is represented by the decimal number xxyyzz. -
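-
-For example, to guard a feature check (a minimal sketch):
-
-local jit = require("jit")
-if jit.version_num >= 20100 then  -- at least LuaJIT 2.1.0 (xxyyzz encoding)
-  -- use 2.1-only features here
-end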

- -

jit.os

-

-Contains the target OS name: -"Windows", "Linux", "OSX", "BSD", "POSIX" or "Other". -

- -

jit.arch

-

-Contains the target architecture name: -"x86", "x64", "arm", "ppc", or "mips". -

- -

jit.opt.* — JIT compiler optimization control

-

-This sub-module provides the backend for the -O command line -option. -

-

-You can also use it programmatically, e.g.: -

-
-jit.opt.start(2) -- same as -O2
-jit.opt.start("-dce")
-jit.opt.start("hotloop=10", "hotexit=2")
-
-

-Unlike in LuaJIT 1.x, the module is built-in and -optimization is turned on by default! -It's no longer necessary to run require("jit.opt").start(), -which was one of the ways to enable optimization. -

- -

jit.util.* — JIT compiler introspection

-

-This sub-module holds functions to introspect the bytecode, generated -traces, the IR and the generated machine code. The functionality -provided by this module is still in flux and therefore undocumented. -

-

-The debug modules -jbc, -jv and -jdump make -extensive use of these functions. Please check out their source code, -if you want to know more. -

-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_profiler.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_profiler.html deleted file mode 100644 index be63662b755..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/ext_profiler.html +++ /dev/null @@ -1,365 +0,0 @@ - - - -Profiler - - - - - - - - -
-Lua -
- - -
-

-LuaJIT has an integrated statistical profiler with very low overhead. It -allows sampling the currently executing stack and other parameters in -regular intervals. -

-

-The integrated profiler can be accessed from three levels: -

- - -

High-Level Profiler

-

-The bundled high-level profiler offers basic profiling functionality. It -generates simple textual summaries or source code annotations. It can be -accessed with the -jp command line option -or from Lua code by loading the underlying jit.p module. -

-

-To cut to the chase — run this to get a CPU usage profile by -function name: -

-
-luajit -jp myapp.lua
-
-

-It's not a stated goal of the bundled profiler to add every -possible option or to cater for special profiling needs. The low-level -profiler APIs are documented below. They may be used by third-party -authors to implement advanced functionality, e.g. IDE integration or -graphical profilers. -

-

-Note: Sampling works for both interpreted and JIT-compiled code. The -results for JIT-compiled code may sometimes be surprising. LuaJIT -heavily optimizes and inlines Lua code — there's no simple -one-to-one correspondence between source code lines and the sampled -machine code. -

- -

-jp=[options[,output]]

-

-The -jp command line option starts the high-level profiler. -When the application run by the command line terminates, the profiler -stops and writes the results to stdout or to the specified -output file. -

-

-The options argument specifies how the profiling is to be -performed: -

-
    -
  • f — Stack dump: function name, otherwise module:line. -This is the default mode.
  • -
  • F — Stack dump: ditto, but dump module:name.
  • -
  • l — Stack dump: module:line.
  • -
  • <number> — stack dump depth (callee ← -caller). Default: 1.
  • -
  • -<number> — Inverse stack dump depth (caller -→ callee).
  • -
  • s — Split stack dump after first stack level. Implies -depth ≥ 2 or depth ≤ -2.
  • -
  • p — Show full path for module names.
  • -
  • v — Show VM states.
  • -
  • z — Show zones.
  • -
  • r — Show raw sample counts. Default: show percentages.
  • -
  • a — Annotate excerpts from source code files.
  • -
  • A — Annotate complete source code files.
  • -
  • G — Produce raw output suitable for graphical tools.
  • -
  • m<number> — Minimum sample percentage to be shown. -Default: 3%.
  • -
  • i<number> — Sampling interval in milliseconds. -Default: 10ms.
    -Note: The actual sampling precision is OS-dependent.
  • -
-

-The default output for -jp is a list of the most CPU consuming -spots in the application. Increasing the stack dump depth with (say) --jp=2 may help to point out the main callers or callees of -hotspots. But sample aggregation is still flat per unique stack dump. -

-

-To get a two-level view (split view) of callers/callees, use --jp=s or -jp=-s. The percentages shown for the second -level are relative to the first level. -

-

-To see how much time is spent in each line relative to a function, use --jp=fl. -

-

-To see how much time is spent in different VM states or -zones, use -jp=v or -jp=z. -

-

-Combinations of v/z with f/F/l produce two-level -views, e.g. -jp=vf or -jp=fv. This shows the time -spent in a VM state or zone vs. hotspots. This can be used to answer -questions like "Which time consuming functions are only interpreted?" or -"What's the garbage collector overhead for a specific function?". -

-

-Multiple options can be combined — but not all combinations make -sense, see above. E.g. -jp=3si4m1 samples three stack levels -deep in 4ms intervals and shows a split view of the CPU consuming -functions and their callers with a 1% threshold. -

-

-Source code annotations produced by -jp=a or -jp=A are -always flat and at the line level. Obviously, the source code files need -to be readable by the profiler script. -

-

-The high-level profiler can also be started and stopped from Lua code with: -

-
-require("jit.p").start(options, output)
-...
-require("jit.p").stop()
-
- -

jit.zone — Zones

-

-Zones can be used to provide information about different parts of an -application to the high-level profiler. E.g. a game could make use of an -"AI" zone, a "PHYS" zone, etc. Zones are hierarchical, -organized as a stack. -

-

-The jit.zone module needs to be loaded explicitly: -

-
-local zone = require("jit.zone")
-
-
    -
  • zone("name") pushes a named zone to the zone stack.
  • -
  • zone() pops the current zone from the zone stack and -returns its name.
  • -
  • zone:get() returns the current zone name or nil.
  • -
  • zone:flush() flushes the zone stack.
  • -
-

-To show the time spent in each zone use -jp=z. To show the time -spent relative to hotspots use e.g. -jp=zf or -jp=fz. -

- -

Low-level Lua API

-

-The jit.profile module gives access to the low-level API of the -profiler from Lua code. This module needs to be loaded explicitly: -

-local profile = require("jit.profile")
-
-

-This module can be used to implement your own higher-level profiler. -A typical profiling run starts the profiler, captures stack dumps in -the profiler callback, adds them to a hash table to aggregate the number -of samples, stops the profiler and then analyzes all of the captured -stack dumps. Other parameters can be sampled in the profiler callback, -too. But it's important not to spend too much time in the callback, -since this may skew the statistics. -
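-
-A minimal sketch of such a run, aggregating per-line sample counts with the functions documented below (run_workload is a hypothetical placeholder for the code being profiled):
-
-local profile = require("jit.profile")
-local counts = {}
-profile.start("l", function(thread, samples, vmstate)
-  local key = profile.dumpstack(thread, "l", 1)   -- "module:line" of the topmost frame
-  counts[key] = (counts[key] or 0) + samples
-end)
-run_workload()                                    -- hypothetical
-profile.stop()
-for loc, n in pairs(counts) do print(n, loc) end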

- -

profile.start(mode, cb) -— Start profiler

-

-This function starts the profiler. The mode argument is a -string holding options: -

-
    -
  • f — Profile with precision down to the function level.
  • -
  • l — Profile with precision down to the line level.
  • -
  • i<number> — Sampling interval in milliseconds (default -10ms).
    -Note: The actual sampling precision is OS-dependent. -
  • -
-

-The cb argument is a callback function which is called with -three arguments: (thread, samples, vmstate). The callback is -called on a separate coroutine, the thread argument is the -state that holds the stack to sample for profiling. Note: do -not modify the stack of that state or call functions on it. -

-

-samples gives the number of accumulated samples since the last -callback (usually 1). -

-

-vmstate holds the VM state at the time the profiling timer -triggered. This may or may not correspond to the state of the VM when -the profiling callback is called. The state is either 'N' -native (compiled) code, 'I' interpreted code, 'C' -C code, 'G' the garbage collector, or 'J' the JIT -compiler. -

- -

profile.stop() -— Stop profiler

-

-This function stops the profiler. -

- -

dump = profile.dumpstack([thread,] fmt, depth) -— Dump stack

-

-This function allows taking stack dumps in an efficient manner. It -returns a string with a stack dump for the thread (coroutine), -formatted according to the fmt argument: -

-
    -
  • p — Preserve the full path for module names. Otherwise -only the file name is used.
  • -
  • f — Dump the function name if it can be derived. Otherwise -use module:line.
  • -
  • F — Ditto, but dump module:name.
  • -
  • l — Dump module:line.
  • -
  • Z — Zap the following characters for the last dumped -frame.
  • -
  • All other characters are added verbatim to the output string.
  • -
-

-The depth argument gives the number of frames to dump, starting -at the topmost frame of the thread. A negative number dumps the frames in -inverse order. -

-

-The first example prints a list of the current module names and line -numbers of up to 10 frames in separate lines. The second example prints -semicolon-separated function names for all frames (up to 100) in inverse -order: -

-
-print(profile.dumpstack(thread, "l\n", 10))
-print(profile.dumpstack(thread, "lZ;", -100))
-
- -

Low-level C API

-

-The profiler can be controlled directly from C code, e.g. for -use by IDEs. The declarations are in "luajit.h" (see -Lua/C API extensions). -

- -

luaJIT_profile_start(L, mode, cb, data) -— Start profiler

-

-This function starts the profiler. See -above for a description of the mode argument. -

-

-The cb argument is a callback function with the following -declaration: -

-
-typedef void (*luaJIT_profile_callback)(void *data, lua_State *L,
-                                        int samples, int vmstate);
-
-

-data is available for use by the callback. L is the -state that holds the stack to sample for profiling. Note: do -not modify this stack or call functions on this stack — -use a separate coroutine for this purpose. See -above for a description of samples and vmstate. -

- -

luaJIT_profile_stop(L) -— Stop profiler

-

-This function stops the profiler. -

- -

p = luaJIT_profile_dumpstack(L, fmt, depth, len) -— Dump stack

-

-This function allows taking stack dumps in an efficient manner. -See above for a description of fmt -and depth. -

-

-This function returns a const char * pointing to a -private string buffer of the profiler. The int *len -argument returns the length of the output string. The buffer is -overwritten on the next call and deallocated when the profiler stops. -You either need to consume the content immediately or copy it for later -use. -

-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/extensions.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/extensions.html deleted file mode 100644 index 84ca5ce46bf..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/extensions.html +++ /dev/null @@ -1,448 +0,0 @@ - - - -Extensions - - - - - - - - - -
-Lua -
- - -
-

-LuaJIT is fully upwards-compatible with Lua 5.1. It supports all -» standard Lua -library functions and the full set of -» Lua/C API -functions. -

-

-LuaJIT is also fully ABI-compatible to Lua 5.1 at the linker/dynamic -loader level. This means you can compile a C module against the -standard Lua headers and load the same shared library from either Lua -or LuaJIT. -

-

-LuaJIT extends the standard Lua VM with new functionality and adds -several extension modules. Please note this page is only about -functional enhancements and not about performance enhancements, -such as the optimized VM, the faster interpreter or the JIT compiler. -

- -

Extensions Modules

-

-LuaJIT comes with several built-in extension modules: -

- -

bit.* — Bitwise operations

-

-LuaJIT supports all bitwise operations as defined by -» Lua BitOp: -

-
-bit.tobit  bit.tohex  bit.bnot    bit.band bit.bor  bit.bxor
-bit.lshift bit.rshift bit.arshift bit.rol  bit.ror  bit.bswap
-
-

-This module is a LuaJIT built-in — you don't need to download or -install Lua BitOp. The Lua BitOp site has full documentation for all -» Lua BitOp API functions. -The FFI adds support for -64 bit bitwise operations, -using the same API functions. -

-

-Please make sure to require the module before using any of -its functions: -

-
-local bit = require("bit")
-
-

-An already installed Lua BitOp module is ignored by LuaJIT. -This way you can use bit operations from both Lua and LuaJIT on a -shared installation. -

- -

ffi.* — FFI library

-

-The FFI library allows calling external -C functions and the use of C data structures from pure Lua -code. -

- -

jit.* — JIT compiler control

-

-The functions in this module -control the behavior of the JIT compiler engine. -

- -

C API extensions

-

-LuaJIT adds some -extra functions to the Lua/C API. -

- -

Profiler

-

-LuaJIT has an integrated profiler. -

- -

Enhanced Standard Library Functions

- -

xpcall(f, err [,args...]) passes arguments

-

-Unlike the standard implementation in Lua 5.1, xpcall() -passes any arguments after the error function to the function -which is called in a protected context. -
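-
-A minimal example of the extended signature:
-
-local function handler(err) return "caught: " .. tostring(err) end
-local ok, res = xpcall(function(a, b) return a + b end, handler, 1, 2)
-print(ok, res)  --> true    3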

- -

loadfile() etc. handle UTF-8 source code

-

-Non-ASCII characters are handled transparently by the Lua source code parser. -This allows the use of UTF-8 characters in identifiers and strings. -A UTF-8 BOM is skipped at the start of the source code. -

- -

tostring() etc. canonicalize NaN and ±Inf

-

-All number-to-string conversions consistently convert non-finite numbers -to the same strings on all platforms. NaN results in "nan", -positive infinity results in "inf" and negative infinity results -in "-inf". -

- -

tonumber() etc. use builtin string to number conversion

-

-All string-to-number conversions consistently convert integer and -floating-point inputs in decimal, hexadecimal and binary on all platforms. -strtod() is not used anymore, which avoids numerous -problems with poor C library implementations. The builtin conversion -function provides full precision according to the IEEE-754 standard, it -works independently of the current locale and it supports hex floating-point -numbers (e.g. 0x1.5p-3). -

- -

string.dump(f [,strip]) generates portable bytecode

-

-An extra argument has been added to string.dump(). If set to -true, 'stripped' bytecode without debug information is -generated. This speeds up later bytecode loading and reduces memory -usage. See also the --b command line option. -
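-
-A minimal example of the strip argument:
-
-local f = assert(loadstring("return 1 + 2"))
-local bc = string.dump(f, true)     -- stripped bytecode, no debug info
-local g = assert(loadstring(bc))    -- load the dumped bytecode again
-print(g())                          --> 3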

-

-The generated bytecode is portable and can be loaded on any architecture -that LuaJIT supports, independent of word size or endianness. However, the -bytecode compatibility versions must match. Bytecode stays compatible -for dot releases (x.y.0 → x.y.1), but may change with major or -minor releases (2.0 → 2.1) or between any beta release. Foreign -bytecode (e.g. from Lua 5.1) is incompatible and cannot be loaded. -

-

-Note: LJ_GC64 mode requires a different frame layout, which implies -a different, incompatible bytecode format for ports that use this mode (e.g. -ARM64). This may be rectified in the future. -

- -

table.new(narray, nhash) allocates a pre-sized table

-

-An extra library function table.new() can be made available via -require("table.new"). This creates a pre-sized table, just like -the C API equivalent lua_createtable(). This is useful for big -tables if the final table size is known and automatic table resizing is -too expensive. -
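A minimal sketch; the pcall-wrapped require simply guards against running on an interpreter without this built-in module:
  local ok, new = pcall(require, "table.new")
  if ok then
    local t = new(1000, 0)           -- room for 1000 array slots, 0 hash slots
    for i = 1, 1000 do t[i] = i end  -- fill without triggering any resizes
  end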

- -

table.clear(tab) clears a table

-

-An extra library function table.clear() can be made available -via require("table.clear"). This clears all keys and values -from a table, but preserves the allocated array/hash sizes. This is -useful when a table, which is linked from multiple places, needs to be -cleared and/or when recycling a table for use by the same context. This -avoids managing backlinks, saves an allocation and the overhead of -incremental array/hash part growth. -

-

-Please note this function is meant for very specific situations. In most -cases it's better to replace the (usually single) link with a new table -and let the GC do its work. -
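A corresponding sketch for table.clear():
  local ok, clear = pcall(require, "table.clear")
  if ok then
    local buf = { 1, 2, 3, answer = 42 }
    clear(buf)           -- empties the table, keeps its allocated sizes
    print(next(buf))     --> nil
  end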

- -

Enhanced PRNG for math.random()

-

-LuaJIT uses a Tausworthe PRNG with period 2^223 to implement -math.random() and math.randomseed(). The quality of -the PRNG results is far superior to that of the standard Lua -implementation, which uses the platform-specific ANSI rand(). -

-

-The PRNG generates the same sequences from the same seeds on all -platforms and makes use of all bits in the seed argument. -math.random() without arguments generates 52 pseudo-random bits -for every call. The result is uniformly distributed between 0.0 and 1.0. -It's correctly scaled up and rounded for math.random(n [,m]) to -preserve uniformity. -
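For example (the exact numbers depend on the seed, but are identical across platforms):
  math.randomseed(42)
  print(math.random())        -- pseudo-random number in the unit interval
  print(math.random(6))       -- uniform integer in 1..6
  print(math.random(10, 20))  -- uniform integer in 10..20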

- -

io.* functions handle 64 bit file offsets

-

-The file I/O functions in the standard io.* library handle -64 bit file offsets. In particular this means it's possible -to open files larger than 2 Gigabytes and to reposition or obtain -the current file position for offsets beyond 2 GB -(fp:seek() method). -
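A rough sketch; huge.dat stands in for a hypothetical file larger than 2 GB:
  local fp = io.open("huge.dat", "rb")  -- hypothetical large file
  if fp then
    fp:seek("set", 3 * 2^30)    -- reposition past the 2 GB boundary
    print(fp:seek())            -- current offset, e.g. 3221225472
    fp:close()
  end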

- -

debug.* functions identify metamethods

-

-debug.getinfo() and lua_getinfo() also return information -about invoked metamethods. The namewhat field is set to -"metamethod" and the name field has the name of -the corresponding metamethod (e.g. "__index"). -

- -

Fully Resumable VM

-

-The LuaJIT VM is fully resumable. This means you can yield from a -coroutine even across contexts where this would not be possible with -the standard Lua 5.1 VM: e.g. you can yield across pcall() -and xpcall(), across iterators and across metamethods. -
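A small sketch of yielding across pcall():
  local co = coroutine.create(function()
    local ok, msg = pcall(function()
      coroutine.yield("yielded from inside pcall")
      return "done"
    end)
    return ok, msg
  end)
  print(coroutine.resume(co))   --> true   yielded from inside pcall
  print(coroutine.resume(co))   --> true   true   done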

- -

Extensions from Lua 5.2

-

-LuaJIT supports some language and library extensions from Lua 5.2. -Features that are unlikely to break existing code are unconditionally -enabled: -

-
    -
  • goto and ::labels::.
  • -
  • Hex escapes '\x3F' and '\*' escape in strings.
  • -
  • load(string|reader [, chunkname [,mode [,env]]]).
  • -
  • loadstring() is an alias for load().
  • -
  • loadfile(filename [,mode [,env]]).
  • -
  • math.log(x [,base]). -
  • string.rep(s, n [,sep]). -
  • string.format(): %q reversible. -%s checks __tostring. -%a and %A added.
  • -
  • String matching pattern %g added.
  • -
  • io.read("*L").
  • -
  • io.lines() and file:lines() process -io.read() options.
  • -
  • os.exit(status|true|false [,close]).
  • -
  • package.searchpath(name, path [, sep [, rep]]).
  • -
  • package.loadlib(name, "*").
  • -
  • debug.getinfo() returns nparams and isvararg -for option "u".
  • -
  • debug.getlocal() accepts function instead of level.
  • -
  • debug.getlocal() and debug.setlocal() accept negative -indexes for varargs.
  • -
  • debug.getupvalue() and debug.setupvalue() handle -C functions.
  • -
  • debug.upvalueid() and debug.upvaluejoin().
  • -
  • Command line option -E.
  • -
  • Command line checks __tostring for errors.
  • -
-

-Other features are only enabled if LuaJIT is built with --DLUAJIT_ENABLE_LUA52COMPAT: -

-
    -
  • goto is a keyword and not a valid variable name anymore.
  • -
  • break can be placed anywhere. Empty statements (;;) -are allowed.
  • -
  • __lt, __le are invoked for mixed types.
  • -
  • __len for tables. rawlen() library function.
  • -
  • pairs() and ipairs() check for __pairs and -__ipairs.
  • -
  • coroutine.running() returns two results.
  • -
  • table.pack() and table.unpack() -(same as unpack()).
  • -
  • io.write() and file:write() return file handle -instead of true.
  • -
  • os.execute() and pipe:close() return detailed -exit status.
  • -
  • debug.setmetatable() returns object.
  • -
  • debug.getuservalue() and debug.setuservalue().
  • -
  • Remove math.mod(), string.gfind(). -
-

-Note: this provides only partial compatibility with Lua 5.2 at the -language and Lua library level. LuaJIT is API+ABI-compatible with -Lua 5.1, which prevents implementing features that would otherwise -break the Lua/C API and ABI (e.g. _ENV). -

- -

C++ Exception Interoperability

-

-LuaJIT has built-in support for interoperating with C++ exceptions. -The available range of features depends on the target platform and -the toolchain used to compile LuaJIT: -

-Platform                            Compiler           Interoperability
-POSIX/x64, DWARF2 unwinding         GCC 4.3+           Full
-Other platforms, DWARF2 unwinding   GCC                Limited
-Windows/x64                         MSVC or WinSDK     Full
-Windows/x86                         Any                No
-Other platforms                     Other compilers    No
-

-Full interoperability means: -

-
    -
  • C++ exceptions can be caught on the Lua side with pcall(), -lua_pcall() etc.
  • -
  • C++ exceptions will be converted to the generic Lua error -"C++ exception", unless you use the -C call wrapper feature.
  • -
  • It's safe to throw C++ exceptions across non-protected Lua frames -on the C stack. The contents of the C++ exception object -pass through unmodified.
  • -
  • Lua errors can be caught on the C++ side with catch(...). -The corresponding Lua error message can be retrieved from the Lua stack.
  • -
  • Throwing Lua errors across C++ frames is safe. C++ destructors -will be called.
  • -
-

-Limited interoperability means: -

-
    -
  • C++ exceptions can be caught on the Lua side with pcall(), -lua_pcall() etc.
  • -
  • C++ exceptions will be converted to the generic Lua error -"C++ exception", unless you use the -C call wrapper feature.
  • -
  • C++ exceptions will be caught by non-protected Lua frames and -are rethrown as a generic Lua error. The C++ exception object will -be destroyed.
  • -
  • Lua errors cannot be caught on the C++ side.
  • -
  • Throwing Lua errors across C++ frames will not call -C++ destructors.
  • -
- -

-No interoperability means: -

-
    -
  • It's not safe to throw C++ exceptions across Lua frames.
  • -
  • C++ exceptions cannot be caught on the Lua side.
  • -
  • Lua errors cannot be caught on the C++ side.
  • -
  • Throwing Lua errors across C++ frames will not call -C++ destructors.
  • -
  • Additionally, on Windows/x86 with SEH-based C++ exceptions: -it's not safe to throw a Lua error across any frames containing -a C++ function with any try/catch construct or using variables with -(implicit) destructors. This also applies to any functions which may be -inlined in such a function. It doesn't matter whether lua_error() -is called inside or outside of a try/catch or whether any object actually -needs to be destroyed: the SEH chain is corrupted and this will eventually -lead to the termination of the process.
  • -
-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/faq.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/faq.html deleted file mode 100644 index aebaef579a4..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/faq.html +++ /dev/null @@ -1,186 +0,0 @@ - - - -Frequently Asked Questions (FAQ) - - - - - - - - - -
-Lua -
- - -
-
-
Q: Where can I learn more about LuaJIT and Lua?
-
- -
- -
-
Q: Where can I learn more about the compiler technology used by LuaJIT?
-
-I'm planning to write more documentation about the internals of LuaJIT. -In the meantime, please use the following Google Scholar searches -to find relevant papers:
-Search for: » Trace Compiler
-Search for: » JIT Compiler
-Search for: » Dynamic Language Optimizations
-Search for: » SSA Form
-Search for: » Linear Scan Register Allocation
-Here is a list of the » innovative features in LuaJIT.
-And, you know, reading the source is of course the only way to enlightenment. :-) -
-
- -
-
Q: Why do I get this error: "attempt to index global 'arg' (a nil value)"?
-Q: My vararg functions fail after switching to LuaJIT!
-
LuaJIT is compatible with the Lua 5.1 language standard. It doesn't -support the implicit arg parameter for old-style vararg -functions from Lua 5.0.
Please convert your code to the -» Lua 5.1 -vararg syntax.
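A sketch of the conversion (the Lua 5.0 form is commented out; under LuaJIT it triggers exactly the error quoted above):
  -- Lua 5.0 style, relies on the implicit 'arg' table (not supported):
  -- function f(...) print(arg.n, arg[1]) end

  -- Lua 5.1 style, supported by LuaJIT:
  local function f(...)
    print(select("#", ...), (...))   -- number of varargs, then the first one
  end
  f("a", "b", "c")   --> 3   a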
-
- -
-
Q: Why do I get this error: "bad FPU precision"?
-
Q: I get weird behavior after initializing Direct3D.
-
Q: Some FPU operations crash after I load a Delphi DLL.
-
-
- -DirectX/Direct3D (up to version 9) sets the x87 FPU to single-precision -mode by default. This violates the Windows ABI and interferes with the -operation of many programs — LuaJIT is affected, too. Please make -sure you always use the D3DCREATE_FPU_PRESERVE flag when -initializing Direct3D.
-Direct3D version 10 or higher does not show this behavior anymore. -Consider testing your application with older versions, too.
- -Similarly, the Borland/Delphi runtime modifies the FPU control word and -enables FP exceptions. Of course this violates the Windows ABI, too. -Please check the Delphi docs for the Set8087CW method. - -
- -
-
Q: Sometimes Ctrl-C fails to stop my Lua program. Why?
-
The interrupt signal handler sets a Lua debug hook. But this is -currently ignored by compiled code (this will eventually be fixed). If -your program is running in a tight loop and never falls back to the -interpreter, the debug hook never runs and can't throw the -"interrupted!" error.
In the meantime, you have to press Ctrl-C -twice to stop your program. That's similar to when it's stuck -running inside a C function under the Lua interpreter.
-
- -
-
Q: Why doesn't my favorite power-patch for Lua apply against LuaJIT?
-
Because it's a completely redesigned VM and has very little code -in common with Lua anymore. Also, if the patch introduces changes to -the Lua semantics, these would need to be reflected everywhere in the -VM, from the interpreter up to all stages of the compiler.
Please -use only standard Lua language constructs. For many common needs you -can use source transformations or use wrapper or proxy functions. -The compiler will happily optimize away such indirections.
-
- -
-
Q: Lua runs everywhere. Why doesn't LuaJIT support my CPU?
-
Because it's a compiler — it needs to generate native -machine code. This means the code generator must be ported to each -architecture. And the fast interpreter is written in assembler and -must be ported, too. This is quite an undertaking.
-The install documentation shows the supported -architectures. Other architectures will follow based on sufficient user -demand and/or sponsoring.
-
- -
-
Q: When will feature X be added? When will the next version be released?
-
When it's ready.
-C'mon, it's open source — I'm doing it on my own time and you're -getting it for free. You can either contribute a patch or sponsor -the development of certain features, if they are important to you. -
-
-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/img/contact.png b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/img/contact.png deleted file mode 100644 index 9c73dc594ef..00000000000 Binary files a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/img/contact.png and /dev/null differ diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/install.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/install.html deleted file mode 100644 index b5df697b670..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/install.html +++ /dev/null @@ -1,659 +0,0 @@ - - - -Installation - - - - - - - - - -
-Lua -
- - -
-

-LuaJIT is only distributed as a source package. This page explains -how to build and install LuaJIT with different operating systems -and C compilers. -

-

-For the impatient (on POSIX systems): -

-
-make && sudo make install
-
-

-LuaJIT currently builds out-of-the box on most systems. -Here's the compatibility matrix for the supported combinations of -operating systems, CPUs and compilers: -

-CPU / OS         | Linux or Android | *BSD, Other              | OSX 10.4+ or iOS 3.0+ | Windows XP/Vista/7
-x86 (32 bit)     | GCC 4.x, GCC 3.4 | GCC 4.x, GCC 3.4         | GCC 4.x, GCC 3.4      | MSVC, MSVC/EE, WinSDK, MinGW, Cygwin
-x64 (64 bit)     | GCC 4.x          | ORBIS (PS4)              | GCC 4.x               | MSVC + SDK v7.0, WinSDK v7.0, Durango (Xbox One)
-ARMv5+, ARM9E+   | GCC 4.2+         | GCC 4.2+, PSP2 (PS VITA) | GCC 4.2+              |
-ARM64            | GCC 4.8+         |                          | Clang 3.5+            |
-PPC              | GCC 4.3+         | GCC 4.3+, GCC 4.1 (PS3)  |                       | XEDK (Xbox 360)
-MIPS             | GCC 4.3+         | GCC 4.3+                 |                       |

Configuring LuaJIT

-

-The standard configuration should work fine for most installations. -Usually there is no need to tweak the settings. The following files -hold all user-configurable settings: -

-
    -
  • src/luaconf.h sets some configuration variables.
  • -
  • Makefile has settings for installing LuaJIT (POSIX -only).
  • -
  • src/Makefile has settings for compiling LuaJIT -under POSIX, MinGW or Cygwin.
  • -
  • src/msvcbuild.bat has settings for compiling LuaJIT with -MSVC or WinSDK.
  • -
-

-Please read the instructions given in these files, before changing -any settings. -

- -

POSIX Systems (Linux, OSX, *BSD etc.)

-

Prerequisites

-

-Depending on your distribution, you may need to install a package for -GCC, the development headers and/or a complete SDK. E.g. on a current -Debian/Ubuntu, install libc6-dev with the package manager. -

-

-Download the current source package of LuaJIT (pick the .tar.gz), -if you haven't already done so. Move it to a directory of your choice, -open a terminal window and change to this directory. Now unpack the archive -and change to the newly created directory: -

-
-tar zxf LuaJIT-2.0.4.tar.gz
-cd LuaJIT-2.0.4
-

Building LuaJIT

-

-The supplied Makefiles try to auto-detect the settings needed for your -operating system and your compiler. They need to be run with GNU Make, -which is probably the default on your system, anyway. Simply run: -

-
-make
-
-

-This always builds a native x86, x64 or PPC binary, depending on the host OS -you're running this command on. Check the section on -cross-compilation for more options. -

-

-By default, modules are only searched under the prefix /usr/local. -You can add an extra prefix to the search paths by appending the -PREFIX option, e.g.: -

-
-make PREFIX=/home/myself/lj2
-
-

-Note for OSX: if the MACOSX_DEPLOYMENT_TARGET environment -variable is not set, then it's forced to 10.4. -

-

Installing LuaJIT

-

-The top-level Makefile installs LuaJIT by default under -/usr/local, i.e. the executable ends up in -/usr/local/bin and so on. You need root privileges -to write to this path. So, assuming sudo is installed on your system, -run the following command and enter your sudo password: -

-
-sudo make install
-
-

-Otherwise specify the directory prefix as an absolute path, e.g.: -

-
-make install PREFIX=/home/myself/lj2
-
-

-Obviously the prefixes given during build and installation need to be the same. -

- -

Windows Systems

-

Prerequisites

-

-Either install one of the open source SDKs -(» MinGW or -» Cygwin), which come with a modified -GCC plus the required development headers. -

-

-Or install Microsoft's Visual C++ (MSVC). The freely downloadable -» Express Edition -works just fine, but only contains an x86 compiler. -

-

-The freely downloadable -» Windows SDK -only comes with command line tools, but this is all you need to build LuaJIT. -It contains x86 and x64 compilers. -

-

-Next, download the source package and unpack it using an archive manager -(e.g. the Windows Explorer) to a directory of your choice. -

-

Building with MSVC

-

-Open a "Visual Studio .NET Command Prompt", cd to the -directory where you've unpacked the sources and run these commands: -

-
-cd src
-msvcbuild
-
-

-Then follow the installation instructions below. -

-

Building with the Windows SDK

-

-Open a "Windows SDK Command Shell" and select the x86 compiler: -

-
-setenv /release /x86
-
-

-Or select the x64 compiler: -

-
-setenv /release /x64
-
-

-Then cd to the directory where you've unpacked the sources -and run these commands: -

-
-cd src
-msvcbuild
-
-

-Then follow the installation instructions below. -

-

Building with MinGW or Cygwin

-

-Open a command prompt window and make sure the MinGW or Cygwin programs -are in your path. Then cd to the directory where -you've unpacked the sources and run this command for MinGW: -

-
-mingw32-make
-
-

-Or this command for Cygwin: -

-
-make
-
-

-Then follow the installation instructions below. -

-

Installing LuaJIT

-

-Copy luajit.exe and lua51.dll (built in the src -directory) to a newly created directory (any location is ok). -Add lua and lua\jit directories below it and copy -all Lua files from the src\jit directory of the distribution -to the latter directory. -

-

-There are no hardcoded -absolute path names — all modules are loaded relative to the -directory where luajit.exe is installed -(see src/luaconf.h). -

- -

Cross-compiling LuaJIT

-

-The GNU Makefile-based build system allows cross-compiling on any host -for any supported target, as long as both architectures have the same -pointer size. If you want to cross-compile to any 32 bit target on an -x64 OS, you need to install the multilib development package (e.g. -libc6-dev-i386 on Debian/Ubuntu) and build a 32 bit host part -(HOST_CC="gcc -m32"). -

-

-You need to specify TARGET_SYS whenever the host OS and the -target OS differ, or you'll get assembler or linker errors. E.g. if -you're compiling on a Windows or OSX host for embedded Linux or Android, -you need to add TARGET_SYS=Linux to the examples below. For a -minimal target OS, you may need to disable the built-in allocator in -src/Makefile and use TARGET_SYS=Other. The examples -below only show some popular targets — please check the comments -in src/Makefile for more details. -

-
-# Cross-compile to a 32 bit binary on a multilib x64 OS
-make CC="gcc -m32"
-
-# Cross-compile on Debian/Ubuntu for Windows (mingw32 package)
-make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows
-
-

-The CROSS prefix allows specifying a standard GNU cross-compile -toolchain (Binutils, GCC and a matching libc). The prefix may vary -depending on the --target the toolchain was built for (note the -CROSS prefix has a trailing "-"). The examples below -use the canonical toolchain triplets for Linux. -

-

-Since there's often no easy way to detect CPU features at runtime, it's -important to compile with the proper CPU or architecture settings. You -can specify these when building the toolchain yourself. Or add --mcpu=... or -march=... to TARGET_CFLAGS. For -ARM it's important to have the correct -mfloat-abi=... setting, -too. Otherwise LuaJIT may not run at the full performance of your target -CPU. -

-
-# ARM soft-float
-make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi- \
-     TARGET_CFLAGS="-mfloat-abi=soft"
-
-# ARM soft-float ABI with VFP (example for Cortex-A8)
-make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi- \
-     TARGET_CFLAGS="-mcpu=cortex-a8 -mfloat-abi=softfp"
-
-# ARM hard-float ABI with VFP (armhf, requires recent toolchain)
-make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabihf-
-
-# ARM64 (requires x64 host)
-make CROSS=aarch64-linux-
-
-# PPC
-make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu-
-
-# MIPS big-endian
-make HOST_CC="gcc -m32" CROSS=mips-linux-
-# MIPS little-endian
-make HOST_CC="gcc -m32" CROSS=mipsel-linux-
-
-

-You can cross-compile for Android using the » Android NDK. -The environment variables need to match the install locations and the -desired target platform. E.g. Android 4.0 corresponds to ABI level 14. -For details check the folder docs in the NDK directory. -

-

-Only a few common variations for the different CPUs, ABIs and platforms -are listed. Please use your own judgement for which combination you want -to build/deploy or which lowest common denominator you want to pick: -

-
-# Android/ARM, armeabi (ARMv5TE soft-float), Android 2.2+ (Froyo)
-NDK=/opt/android/ndk
-NDKABI=8
-NDKVER=$NDK/toolchains/arm-linux-androideabi-4.6
-NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
-NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
-make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
-
-# Android/ARM, armeabi-v7a (ARMv7 VFP), Android 4.0+ (ICS)
-NDK=/opt/android/ndk
-NDKABI=14
-NDKVER=$NDK/toolchains/arm-linux-androideabi-4.6
-NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
-NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
-NDKARCH="-march=armv7-a -mfloat-abi=softfp -Wl,--fix-cortex-a8"
-make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF $NDKARCH"
-
-# Android/MIPS, mips (MIPS32R1 hard-float), Android 4.0+ (ICS)
-NDK=/opt/android/ndk
-NDKABI=14
-NDKVER=$NDK/toolchains/mipsel-linux-android-4.6
-NDKP=$NDKVER/prebuilt/linux-x86/bin/mipsel-linux-android-
-NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-mips"
-make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
-
-# Android/x86, x86 (i686 SSE3), Android 4.0+ (ICS)
-NDK=/opt/android/ndk
-NDKABI=14
-NDKVER=$NDK/toolchains/x86-4.6
-NDKP=$NDKVER/prebuilt/linux-x86/bin/i686-linux-android-
-NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-x86"
-make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
-
-

-You can cross-compile for iOS 3.0+ (iPhone/iPad) using the » iOS SDK. -The environment variables need to match the iOS SDK version: -

-

-Note: the JIT compiler is disabled for iOS, because regular iOS Apps -are not allowed to generate code at runtime. You'll only get the performance -of the LuaJIT interpreter on iOS. This is still faster than plain Lua, but -much slower than the JIT compiler. Please complain to Apple, not me. -Or use Android. :-p -

-
-IXCODE=`xcode-select -print-path`
-ISDK=$IXCODE/Platforms/iPhoneOS.platform/Developer
-ISDKVER=iPhoneOS6.0.sdk
-ISDKP=$ISDK/usr/bin/
-ISDKF="-arch armv7 -isysroot $ISDK/SDKs/$ISDKVER"
-make HOST_CC="gcc -m32 -arch i386" CROSS=$ISDKP TARGET_FLAGS="$ISDKF" \
-     TARGET_SYS=iOS
-
- -

Cross-compiling for consoles

-

-Building LuaJIT for consoles requires both a supported host compiler -(x86 or x64) and a cross-compiler (to PPC or ARM) from the official -console SDK. -

-

-Due to restrictions on consoles, the JIT compiler is disabled and only -the fast interpreter is built. This is still faster than plain Lua, -but much slower than the JIT compiler. The FFI is disabled, too, since -it's not very useful in such an environment. -

-

-The following commands build a static library libluajit.a, -which can be linked against your game, just like the Lua library. -

-

-To cross-compile for PS3 from a Linux host (requires -32 bit GCC, i.e. multilib Linux/x64) or a Windows host (requires -32 bit MinGW), run this command: -

-
-make HOST_CC="gcc -m32" CROSS=ppu-lv2-
-
-

-To cross-compile for PS4 from a Windows host, -open a "Visual Studio .NET Command Prompt" (64 bit host compiler), -cd to the directory where you've unpacked the sources and -run the following commands: -

-
-cd src
-ps4build
-
-

-To cross-compile for PS Vita from a Windows host, -open a "Visual Studio .NET Command Prompt" (32 bit host compiler), -cd to the directory where you've unpacked the sources and -run the following commands: -

-
-cd src
-psvitabuild
-
-

-To cross-compile for Xbox 360 from a Windows host, -open a "Visual Studio .NET Command Prompt" (32 bit host compiler), -cd to the directory where you've unpacked the sources and run -the following commands: -

-
-cd src
-xedkbuild
-
-

-To cross-compile for Xbox One from a Windows host, -open a "Visual Studio .NET Command Prompt" (64 bit host compiler), -cd to the directory where you've unpacked the sources and run -the following commands: -

-
-cd src
-xb1build
-
- -

Embedding LuaJIT

-

-LuaJIT is API-compatible with Lua 5.1. If you've already embedded Lua -into your application, you probably don't need to do anything to switch -to LuaJIT, except link with a different library: -

-
    -
  • It's strongly suggested to build LuaJIT separately using the supplied -build system. Please do not attempt to integrate the individual -source files into your build tree. You'll most likely get the internal build -dependencies wrong or mess up the compiler flags. Treat LuaJIT like any -other external library and link your application with either the dynamic -or static library, depending on your needs.
  • -
  • If you want to load C modules compiled for plain Lua -with require(), you need to make sure the public symbols -(e.g. lua_pushnumber) are exported, too: -
    • On POSIX systems you can either link to the shared library -or link the static library into your application. In the latter case -you'll need to export all public symbols from your main executable -(e.g. -Wl,-E on Linux) and add the external dependencies -(e.g. -lm -ldl on Linux).
    • -
    • Since Windows symbols are bound to a specific DLL name, you need to -link to the lua51.dll created by the LuaJIT build (do not rename -the DLL). You may link LuaJIT statically on Windows only if you don't -intend to load Lua/C modules at runtime. -
    -
  • -
  • -If you're building a 64 bit application on OSX which links directly or -indirectly against LuaJIT, you need to link your main executable -with these flags: -
    --pagezero_size 10000 -image_base 100000000
    -
    -Also, it's recommended to rebase all (self-compiled) shared libraries -which are loaded at runtime on OSX/x64 (e.g. C extension modules for Lua). -See: man rebase -
  • -
-

Additional hints for initializing LuaJIT using the C API functions:

-
    -
  • Here's a -» simple example -for embedding Lua or LuaJIT into your application.
  • -
  • Make sure you use luaL_newstate. Avoid using -lua_newstate, since this uses the (slower) default memory -allocator from your system (no support for this on x64).
  • -
  • Make sure you use luaL_openlibs and not the old Lua 5.0 style -of calling luaopen_base etc. directly.
  • -
  • To change or extend the list of standard libraries to load, copy -src/lib_init.c to your project and modify it accordingly. -Make sure the jit library is loaded or the JIT compiler -will not be activated.
  • -
  • The bit.* module for bitwise operations -is already built-in. There's no need to statically link -» Lua BitOp to your application.
  • -
- -

Hints for Distribution Maintainers

-

-The LuaJIT build system has extra provisions for the needs of most -POSIX-based distributions. If you're a package maintainer for -a distribution, please make use of these features and -avoid patching, subverting, autotoolizing or messing up the build system -in unspeakable ways. -

-

-There should be absolutely no need to patch luaconf.h or any -of the Makefiles. And please do not hand-pick files for your packages — -simply use whatever make install creates. There's a reason -for all of the files and directories it creates. -

-

-The build system uses GNU make and auto-detects most settings based on -the host you're building it on. This should work fine for native builds, -even when sandboxed. You may need to pass some of the following flags to -both the make and the make install command lines -for a regular distribution build: -

-
    -
  • PREFIX overrides the installation path and should usually -be set to /usr. Setting this also changes the module paths and -the paths needed to locate the shared library.
  • -
  • DESTDIR is an absolute path which allows you to install -to a shadow tree instead of the root tree of the build system.
  • -
  • MULTILIB sets the architecture-specific library path component -for multilib systems. The default is lib.
  • -
  • Have a look at the top-level Makefile and src/Makefile -for additional variables to tweak. The following variables may be -overridden, but it's not recommended, except for special needs -like cross-builds: -BUILDMODE, CC, HOST_CC, STATIC_CC, DYNAMIC_CC, CFLAGS, HOST_CFLAGS, -TARGET_CFLAGS, LDFLAGS, HOST_LDFLAGS, TARGET_LDFLAGS, TARGET_SHLDFLAGS, -TARGET_FLAGS, LIBS, HOST_LIBS, TARGET_LIBS, CROSS, HOST_SYS, TARGET_SYS -
  • -
-

-The build system has a special target for an amalgamated build, i.e. -make amalg. This compiles the LuaJIT core as one huge C file -and allows GCC to generate faster and shorter code. Alas, this requires -lots of memory during the build. This may be a problem for some users, -which is why it's not enabled by default. But it shouldn't be a problem for -most build farms. It's recommended that binary distributions use this -target for their LuaJIT builds. -

-

-The tl;dr version of the above: -

-
-make amalg PREFIX=/usr && \
-make install PREFIX=/usr DESTDIR=/tmp/buildroot
-
-

-Finally, if you encounter any difficulties, please -contact me first, instead of releasing a broken -package onto unsuspecting users. Because they'll usually complain -to me (the upstream) and not to you (the package maintainer), anyway. -

-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/luajit.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/luajit.html deleted file mode 100644 index 8a653e2d1a1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/luajit.html +++ /dev/null @@ -1,236 +0,0 @@ - - - -LuaJIT - - - - - - - - - - -
-Lua -
- - -
-

-LuaJIT is a Just-In-Time Compiler (JIT) for the -» Lua programming language. -Lua is a powerful, dynamic and light-weight programming language. -It may be embedded or used as a general-purpose, stand-alone language. -

-

-LuaJIT is Copyright © 2005-2015 Mike Pall, released under the -» MIT open source license. -

-

-

- -

Compatibility

- - -
-Operating systems: Windows, Linux, BSD, OSX, POSIX
-Embedded, Android, iOS
-Consoles: PS3, PS4, PS Vita, Xbox 360, Xbox One
-Compilers: GCC, Clang/LLVM, MSVC
-CPUs: x86, x64, ARM, ARM64, PPC, MIPS
-Lua 5.1 API+ABI, + JIT, + BitOp, + FFI, drop-in DLL/.so
- -

Overview

- - - - - - - - - -
-3x - 100x | 115 KB VM | 90 KB JIT | 63 KLOC C | 24 KLOC ASM | 11 KLOC Lua
-

-LuaJIT has been successfully used as a scripting middleware in -games, appliances, network and graphics apps, numerical simulations, -trading platforms and many other specialty applications. It scales from -embedded devices, smartphones, desktops up to server farms. It combines -high flexibility with » high performance -and an unmatched low memory footprint. -

-

-LuaJIT has been in continuous development since 2005. It's widely -considered to be one of the fastest dynamic language -implementations. It has outperformed other dynamic languages on many -cross-language benchmarks since its first release — often by a -substantial margin. -

-

-For LuaJIT 2.0, the whole VM has been rewritten from the ground up -and relentlessly optimized for performance. It combines a high-speed -interpreter, written in assembler, with a state-of-the-art JIT -compiler. -

-

-An innovative trace compiler is integrated with advanced, -SSA-based optimizations and highly tuned code generation backends. -A substantial reduction of the overhead associated with dynamic languages -allows it to break into the performance range traditionally reserved for -offline, static language compilers. -

- -

More ...

-

-Please select a sub-topic in the navigation bar to learn more about LuaJIT. -

-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/running.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/running.html deleted file mode 100644 index 5ee67c9587c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/running.html +++ /dev/null @@ -1,309 +0,0 @@ - - - -Running LuaJIT - - - - - - - - - -
-Lua -
- - -
-

-LuaJIT has only a single stand-alone executable, called luajit on -POSIX systems or luajit.exe on Windows. It can be used to run simple -Lua statements or whole Lua applications from the command line. It has an -interactive mode, too. -

- -

Command Line Options

-

-The luajit stand-alone executable is just a slightly modified -version of the regular lua stand-alone executable. -It supports the same basic options, too. luajit -h -prints a short list of the available options. Please have a look at the -» Lua manual -for details. -

-

-LuaJIT has some additional options: -

- -

-b[options] input output

-

-This option saves or lists bytecode. The following additional options -are accepted: -

-
    -
  • -l — Only list bytecode.
  • -
  • -s — Strip debug info (this is the default).
  • -
  • -g — Keep debug info.
  • -
  • -n name — Set module name (default: auto-detect from input name)
  • -
  • -t type — Set output file type (default: auto-detect from output name).
  • -
  • -a arch — Override architecture for object files (default: native).
  • -
  • -o os — Override OS for object files (default: native).
  • -
  • -e chunk — Use chunk string as input.
  • -
  • - (a single minus sign) — Use stdin as input and/or stdout as output.
  • -
-

-The output file type is auto-detected from the extension of the output -file name: -

-
    -
  • c — C source file, exported bytecode data.
  • -
  • h — C header file, static bytecode data.
  • -
  • obj or o — Object file, exported bytecode data -(OS- and architecture-specific).
  • -
  • raw or any other extension — Raw bytecode file (portable). -
-

-Notes: -

-
    -
  • See also string.dump() -for information on bytecode portability and compatibility.
  • -
  • A file in raw bytecode format is auto-detected and can be loaded like -any Lua source file. E.g. directly from the command line or with -loadfile(), dofile() etc.
  • -
  • To statically embed the bytecode of a module in your application, -generate an object file and just link it with your application.
  • -
  • On most ELF-based systems (e.g. Linux) you need to explicitly export the -global symbols when linking your application, e.g. with: -Wl,-E
  • -
  • require() tries to load embedded bytecode data from exported -symbols (in *.exe or lua51.dll on Windows) and from -shared libraries in package.cpath.
  • -
-

-Typical usage examples: -

-
-luajit -b test.lua test.out                 # Save bytecode to test.out
-luajit -bg test.lua test.out                # Keep debug info
-luajit -be "print('hello world')" test.out  # Save cmdline script
-
-luajit -bl test.lua                         # List to stdout
-luajit -bl test.lua test.txt                # List to test.txt
-luajit -ble "print('hello world')"          # List cmdline script
-
-luajit -b test.lua test.obj                 # Generate object file
-# Link test.obj with your application and load it with require("test")
-
- -

-j cmd[=arg[,arg...]]

-

-This option performs a LuaJIT control command or activates one of the -loadable extension modules. The command is first looked up in the -jit.* library. If no matching function is found, a module -named jit.<cmd> is loaded and the start() -function of the module is called with the specified arguments (if -any). The space between -j and cmd is optional. -

-

-Here are the available LuaJIT control commands: -

-
    -
  • -jon — Turns the JIT compiler on (default).
  • -
  • -joff — Turns the JIT compiler off (only use the interpreter).
  • -
  • -jflush — Flushes the whole cache of compiled code.
  • -
  • -jv — Shows verbose information about the progress of the JIT compiler.
  • -
  • -jdump — Dumps the code and structures used in various compiler stages.
  • -
  • -jp — Start the integrated profiler.
  • -
-

-The -jv and -jdump commands are extension modules -written in Lua. They are mainly used for debugging the JIT compiler -itself. For a description of their options and output format, please -read the comment block at the start of their source. -They can be found in the lib directory of the source -distribution or installed under the jit directory. By default -this is /usr/local/share/luajit-2.0.4/jit on POSIX -systems. -

- -

-O[level]
--O[+]flag   -O-flag
--Oparam=value

-

-This option allows fine-tuned control of the optimizations used by -the JIT compiler. This is mainly intended for debugging LuaJIT itself. -Please note that the JIT compiler is extremely fast (we are talking -about the microsecond to millisecond range). Disabling optimizations -doesn't have any visible impact on its overhead, but usually generates -code that runs slower. -

-

-The first form sets an optimization level — this enables a -specific mix of optimization flags. -O0 turns off all -optimizations and higher numbers enable more optimizations. Omitting -the level (i.e. just -O) sets the default optimization level, -which is -O3 in the current version. -

-

-The second form adds or removes individual optimization flags. -The third form sets a parameter for the VM or the JIT compiler -to a specific value. -

-

-You can either use this option multiple times (like -Ocse --O-dce -Ohotloop=10) or separate several settings with a comma -(like -O+cse,-dce,hotloop=10). The settings are applied from -left to right and later settings override earlier ones. You can freely -mix the three forms, but note that setting an optimization level -overrides all earlier flags. -

-

-Here are the available flags and at what optimization levels they -are enabled: -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Flag     -O1  -O2  -O3   Description
-fold      •    •    •    Constant Folding, Simplifications and Reassociation
-cse       •    •    •    Common-Subexpression Elimination
-dce       •    •    •    Dead-Code Elimination
-narrow         •    •    Narrowing of numbers to integers
-loop           •    •    Loop Optimizations (code hoisting)
-fwd                 •    Load Forwarding (L2L) and Store Forwarding (S2L)
-dse                 •    Dead-Store Elimination
-abc                 •    Array Bounds Check Elimination
-sink                •    Allocation/Store Sinking
-fuse                •    Fusion of operands into instructions
-

-Here are the parameters and their default settings: -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Parameter    Default   Description
-maxtrace     1000      Max. number of traces in the cache
-maxrecord    4000      Max. number of recorded IR instructions
-maxirconst   500       Max. number of IR constants of a trace
-maxside      100       Max. number of side traces of a root trace
-maxsnap      500       Max. number of snapshots for a trace
-hotloop      56        Number of iterations to detect a hot loop or hot call
-hotexit      10        Number of taken exits to start a side trace
-tryside      4         Number of attempts to compile a side trace
-instunroll   4         Max. unroll factor for unstable loops
-loopunroll   15        Max. unroll factor for loop ops in side traces
-callunroll   3         Max. unroll factor for pseudo-recursive calls
-recunroll    2         Min. unroll factor for true recursion
-sizemcode    32        Size of each machine code area in KBytes (Windows: 64K)
-maxmcode     512       Max. total size of all machine code areas in KBytes
-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/status.html b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/status.html deleted file mode 100644 index 91ed9cba33a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/doc/status.html +++ /dev/null @@ -1,118 +0,0 @@ - - - -Status - - - - - - - - - -
-Lua -
- - -
-

-LuaJIT 2.0 is the current -stable branch. This branch is in -feature-freeze — new features will only be added to LuaJIT 2.1. -

- -

Current Status

-

-LuaJIT ought to run all Lua 5.1-compatible source code just fine. -It's considered a serious bug if the VM crashes or produces unexpected -results — please report this. -

-

-Known incompatibilities and issues in LuaJIT 2.0: -

-
    -
  • -There are some differences in implementation-defined behavior. -These either have a good reason, are arbitrary design choices -or are due to quirks in the VM. The latter cases may get fixed if a -demonstrable need is shown. -
  • -
  • -The Lua debug API is missing a couple of features (return -hooks for non-Lua functions) and shows slightly different behavior -in LuaJIT (no per-coroutine hooks, no tail call counting). -
  • -
  • -Some checks are missing in the JIT-compiled code for obscure situations -with open upvalues aliasing one of the SSA slots later on (or -vice versa). Bonus points, if you can find a real world test case for -this. -
  • -
  • -Currently some out-of-memory errors from on-trace code are not -handled correctly. The error may fall through an on-trace -pcall or it may be passed on to the function set with -lua_atpanic on x64. This issue will be fixed with the new -garbage collector. -
  • -
-
-
- - - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.h deleted file mode 100644 index 57e0116f5ac..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.h +++ /dev/null @@ -1,456 +0,0 @@ -/* -** DynASM ARM encoding engine. -** Copyright (C) 2005-2015 Mike Pall. All rights reserved. -** Released under the MIT license. See dynasm.lua for full copyright notice. -*/ - -#include -#include -#include -#include - -#define DASM_ARCH "arm" - -#ifndef DASM_EXTERN -#define DASM_EXTERN(a,b,c,d) 0 -#endif - -/* Action definitions. */ -enum { - DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, - /* The following actions need a buffer position. */ - DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, - /* The following actions also have an argument. */ - DASM_REL_PC, DASM_LABEL_PC, - DASM_IMM, DASM_IMM12, DASM_IMM16, DASM_IMML8, DASM_IMML12, DASM_IMMV8, - DASM__MAX -}; - -/* Maximum number of section buffer positions for a single dasm_put() call. */ -#define DASM_MAXSECPOS 25 - -/* DynASM encoder status codes. Action list offset or number are or'ed in. */ -#define DASM_S_OK 0x00000000 -#define DASM_S_NOMEM 0x01000000 -#define DASM_S_PHASE 0x02000000 -#define DASM_S_MATCH_SEC 0x03000000 -#define DASM_S_RANGE_I 0x11000000 -#define DASM_S_RANGE_SEC 0x12000000 -#define DASM_S_RANGE_LG 0x13000000 -#define DASM_S_RANGE_PC 0x14000000 -#define DASM_S_RANGE_REL 0x15000000 -#define DASM_S_UNDEF_LG 0x21000000 -#define DASM_S_UNDEF_PC 0x22000000 - -/* Macros to convert positions (8 bit section + 24 bit index). */ -#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) -#define DASM_POS2BIAS(pos) ((pos)&0xff000000) -#define DASM_SEC2POS(sec) ((sec)<<24) -#define DASM_POS2SEC(pos) ((pos)>>24) -#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) - -/* Action list type. */ -typedef const unsigned int *dasm_ActList; - -/* Per-section structure. */ -typedef struct dasm_Section { - int *rbuf; /* Biased buffer pointer (negative section bias). */ - int *buf; /* True buffer pointer. */ - size_t bsize; /* Buffer size in bytes. */ - int pos; /* Biased buffer position. */ - int epos; /* End of biased buffer position - max single put. */ - int ofs; /* Byte offset into section. */ -} dasm_Section; - -/* Core structure holding the DynASM encoding state. */ -struct dasm_State { - size_t psize; /* Allocated size of this structure. */ - dasm_ActList actionlist; /* Current actionlist pointer. */ - int *lglabels; /* Local/global chain/pos ptrs. */ - size_t lgsize; - int *pclabels; /* PC label chains/pos ptrs. */ - size_t pcsize; - void **globals; /* Array of globals (bias -10). */ - dasm_Section *section; /* Pointer to active section. */ - size_t codesize; /* Total size of all code sections. */ - int maxsection; /* 0 <= sectionidx < maxsection. */ - int status; /* Status code. */ - dasm_Section sections[1]; /* All sections. Alloc-extended. */ -}; - -/* The size of the core structure depends on the max. number of sections. */ -#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) - - -/* Initialize DynASM state. 
*/ -void dasm_init(Dst_DECL, int maxsection) -{ - dasm_State *D; - size_t psz = 0; - int i; - Dst_REF = NULL; - DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); - D = Dst_REF; - D->psize = psz; - D->lglabels = NULL; - D->lgsize = 0; - D->pclabels = NULL; - D->pcsize = 0; - D->globals = NULL; - D->maxsection = maxsection; - for (i = 0; i < maxsection; i++) { - D->sections[i].buf = NULL; /* Need this for pass3. */ - D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); - D->sections[i].bsize = 0; - D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ - } -} - -/* Free DynASM state. */ -void dasm_free(Dst_DECL) -{ - dasm_State *D = Dst_REF; - int i; - for (i = 0; i < D->maxsection; i++) - if (D->sections[i].buf) - DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); - if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); - if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); - DASM_M_FREE(Dst, D, D->psize); -} - -/* Setup global label array. Must be called before dasm_setup(). */ -void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) -{ - dasm_State *D = Dst_REF; - D->globals = gl - 10; /* Negative bias to compensate for locals. */ - DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); -} - -/* Grow PC label array. Can be called after dasm_setup(), too. */ -void dasm_growpc(Dst_DECL, unsigned int maxpc) -{ - dasm_State *D = Dst_REF; - size_t osz = D->pcsize; - DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); - memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); -} - -/* Setup encoder. */ -void dasm_setup(Dst_DECL, const void *actionlist) -{ - dasm_State *D = Dst_REF; - int i; - D->actionlist = (dasm_ActList)actionlist; - D->status = DASM_S_OK; - D->section = &D->sections[0]; - memset((void *)D->lglabels, 0, D->lgsize); - if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); - for (i = 0; i < D->maxsection; i++) { - D->sections[i].pos = DASM_SEC2POS(i); - D->sections[i].ofs = 0; - } -} - - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) { \ - D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) -#define CKPL(kind, st) \ - do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ - D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) -#else -#define CK(x, st) ((void)0) -#define CKPL(kind, st) ((void)0) -#endif - -static int dasm_imm12(unsigned int n) -{ - int i; - for (i = 0; i < 16; i++, n = (n << 2) | (n >> 30)) - if (n <= 255) return (int)(n + (i << 8)); - return -1; -} - -/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ -void dasm_put(Dst_DECL, int start, ...) -{ - va_list ap; - dasm_State *D = Dst_REF; - dasm_ActList p = D->actionlist + start; - dasm_Section *sec = D->section; - int pos = sec->pos, ofs = sec->ofs; - int *b; - - if (pos >= sec->epos) { - DASM_M_GROW(Dst, int, sec->buf, sec->bsize, - sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); - sec->rbuf = sec->buf - DASM_POS2BIAS(pos); - sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); - } - - b = sec->rbuf; - b[pos++] = start; - - va_start(ap, start); - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - if (action >= DASM__MAX) { - ofs += 4; - } else { - int *pl, n = action >= DASM_REL_PC ? 
va_arg(ap, int) : 0; - switch (action) { - case DASM_STOP: goto stop; - case DASM_SECTION: - n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); - D->section = &D->sections[n]; goto stop; - case DASM_ESC: p++; ofs += 4; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; - case DASM_REL_LG: - n = (ins & 2047) - 10; pl = D->lglabels + n; - /* Bkwd rel or global. */ - if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } - pl += 10; n = *pl; - if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ - goto linkrel; - case DASM_REL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putrel: - n = *pl; - if (n < 0) { /* Label exists. Get label pos and store it. */ - b[pos] = -n; - } else { - linkrel: - b[pos] = n; /* Else link to rel chain, anchored at label. */ - *pl = pos; - } - pos++; - break; - case DASM_LABEL_LG: - pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; - case DASM_LABEL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putlabel: - n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; - } - *pl = -pos; /* Label exists now. */ - b[pos++] = ofs; /* Store pass1 offset estimate. */ - break; - case DASM_IMM: - case DASM_IMM16: -#ifdef DASM_CHECKS - CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); - if ((ins & 0x8000)) - CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); - else - CK((n>>((ins>>5)&31)) == 0, RANGE_I); -#endif - b[pos++] = n; - break; - case DASM_IMMV8: - CK((n & 3) == 0, RANGE_I); - n >>= 2; - case DASM_IMML8: - case DASM_IMML12: - CK(n >= 0 ? ((n>>((ins>>5)&31)) == 0) : - (((-n)>>((ins>>5)&31)) == 0), RANGE_I); - b[pos++] = n; - break; - case DASM_IMM12: - CK(dasm_imm12((unsigned int)n) != -1, RANGE_I); - b[pos++] = n; - break; - } - } - } -stop: - va_end(ap); - sec->pos = pos; - sec->ofs = ofs; -} -#undef CK - -/* Pass 2: Link sections, shrink aligns, fix label offsets. */ -int dasm_link(Dst_DECL, size_t *szp) -{ - dasm_State *D = Dst_REF; - int secnum; - int ofs = 0; - -#ifdef DASM_CHECKS - *szp = 0; - if (D->status != DASM_S_OK) return D->status; - { - int pc; - for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) - if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; - } -#endif - - { /* Handle globals not defined in this translation unit. */ - int idx; - for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { - int n = D->lglabels[idx]; - /* Undefined label: Collapse rel chain and replace with marker (< 0). */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } - } - } - - /* Combine all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->rbuf; - int pos = DASM_SEC2POS(secnum); - int lastpos = sec->pos; - - while (pos != lastpos) { - dasm_ActList p = D->actionlist + b[pos++]; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: p++; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; - case DASM_REL_LG: case DASM_REL_PC: pos++; break; - case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; - case DASM_IMM: case DASM_IMM12: case DASM_IMM16: - case DASM_IMML8: case DASM_IMML12: case DASM_IMMV8: pos++; break; - } - } - stop: (void)0; - } - ofs += sec->ofs; /* Next section starts right after current section. 
*/ - } - - D->codesize = ofs; /* Total size of all code sections */ - *szp = ofs; - return DASM_S_OK; -} - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) -#else -#define CK(x, st) ((void)0) -#endif - -/* Pass 3: Encode sections. */ -int dasm_encode(Dst_DECL, void *buffer) -{ - dasm_State *D = Dst_REF; - char *base = (char *)buffer; - unsigned int *cp = (unsigned int *)buffer; - int secnum; - - /* Encode all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->buf; - int *endb = sec->rbuf + sec->pos; - - while (b != endb) { - dasm_ActList p = D->actionlist + *b++; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: *cp++ = *p++; break; - case DASM_REL_EXT: - n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048)); - goto patchrel; - case DASM_ALIGN: - ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000; - break; - case DASM_REL_LG: - CK(n >= 0, UNDEF_LG); - case DASM_REL_PC: - CK(n >= 0, UNDEF_PC); - n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) - 4; - patchrel: - if ((ins & 0x800) == 0) { - CK((n & 3) == 0 && ((n+0x02000000) >> 26) == 0, RANGE_REL); - cp[-1] |= ((n >> 2) & 0x00ffffff); - } else if ((ins & 0x1000)) { - CK((n & 3) == 0 && -256 <= n && n <= 256, RANGE_REL); - goto patchimml8; - } else if ((ins & 0x2000) == 0) { - CK((n & 3) == 0 && -4096 <= n && n <= 4096, RANGE_REL); - goto patchimml; - } else { - CK((n & 3) == 0 && -1020 <= n && n <= 1020, RANGE_REL); - n >>= 2; - goto patchimml; - } - break; - case DASM_LABEL_LG: - ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); - break; - case DASM_LABEL_PC: break; - case DASM_IMM: - cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31); - break; - case DASM_IMM12: - cp[-1] |= dasm_imm12((unsigned int)n); - break; - case DASM_IMM16: - cp[-1] |= ((n & 0xf000) << 4) | (n & 0x0fff); - break; - case DASM_IMML8: patchimml8: - cp[-1] |= n >= 0 ? (0x00800000 | (n & 0x0f) | ((n & 0xf0) << 4)) : - ((-n & 0x0f) | ((-n & 0xf0) << 4)); - break; - case DASM_IMML12: case DASM_IMMV8: patchimml: - cp[-1] |= n >= 0 ? (0x00800000 | n) : (-n); - break; - default: *cp++ = ins; break; - } - } - stop: (void)0; - } - } - - if (base + D->codesize != (char *)cp) /* Check for phase errors. */ - return DASM_S_PHASE; - return DASM_S_OK; -} -#undef CK - -/* Get PC label offset. */ -int dasm_getpclabel(Dst_DECL, unsigned int pc) -{ - dasm_State *D = Dst_REF; - if (pc*sizeof(int) < D->pcsize) { - int pos = D->pclabels[pc]; - if (pos < 0) return *DASM_POS2PTR(D, -pos); - if (pos > 0) return -1; /* Undefined. */ - } - return -2; /* Unused or out of range. */ -} - -#ifdef DASM_CHECKS -/* Optional sanity checker to call between isolated encoding steps. 
*/ -int dasm_checkstep(Dst_DECL, int secmatch) -{ - dasm_State *D = Dst_REF; - if (D->status == DASM_S_OK) { - int i; - for (i = 1; i <= 9; i++) { - if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } - D->lglabels[i] = 0; - } - } - if (D->status == DASM_S_OK && secmatch >= 0 && - D->section != &D->sections[secmatch]) - D->status = DASM_S_MATCH_SEC|(D->section-D->sections); - return D->status; -} -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.lua deleted file mode 100644 index 90a259c5c33..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm.lua +++ /dev/null @@ -1,1125 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM ARM module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See dynasm.lua for full copyright notice. ------------------------------------------------------------------------------- - --- Module information: -local _info = { - arch = "arm", - description = "DynASM ARM module", - version = "1.3.0", - vernum = 10300, - release = "2011-05-05", - author = "Mike Pall", - license = "MIT", -} - --- Exported glue functions for the arch-specific module. -local _M = { _info = _info } - --- Cache library functions. -local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs -local assert, setmetatable, rawget = assert, setmetatable, rawget -local _s = string -local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char -local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub -local concat, sort, insert = table.concat, table.sort, table.insert -local bit = bit or require("bit") -local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift -local ror, tohex = bit.ror, bit.tohex - --- Inherited tables and callbacks. -local g_opt, g_arch -local wline, werror, wfatal, wwarn - --- Action name list. --- CHECK: Keep this in sync with the C code! -local action_names = { - "STOP", "SECTION", "ESC", "REL_EXT", - "ALIGN", "REL_LG", "LABEL_LG", - "REL_PC", "LABEL_PC", "IMM", "IMM12", "IMM16", "IMML8", "IMML12", "IMMV8", -} - --- Maximum number of section buffer positions for dasm_put(). --- CHECK: Keep this in sync with the C code! -local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. - --- Action name -> action number. -local map_action = {} -for n,name in ipairs(action_names) do - map_action[name] = n-1 -end - --- Action list buffer. -local actlist = {} - --- Argument list for next dasm_put(). Start with offset 0 into action list. -local actargs = { 0 } - --- Current number of section buffer positions for dasm_put(). -local secpos = 1 - ------------------------------------------------------------------------------- - --- Dump action names and numbers. -local function dumpactions(out) - out:write("DynASM encoding engine action codes:\n") - for n,name in ipairs(action_names) do - local num = map_action[name] - out:write(format(" %-10s %02X %d\n", name, num, num)) - end - out:write("\n") -end - --- Write action list buffer as a huge static C array. 
-local function writeactions(out, name) - local nn = #actlist - if nn == 0 then nn = 1; actlist[0] = map_action.STOP end - out:write("static const unsigned int ", name, "[", nn, "] = {\n") - for i = 1,nn-1 do - assert(out:write("0x", tohex(actlist[i]), ",\n")) - end - assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) -end - ------------------------------------------------------------------------------- - --- Add word to action list. -local function wputxw(n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - actlist[#actlist+1] = n -end - --- Add action to list with optional arg. Advance buffer pos, too. -local function waction(action, val, a, num) - local w = assert(map_action[action], "bad action name `"..action.."'") - wputxw(w * 0x10000 + (val or 0)) - if a then actargs[#actargs+1] = a end - if a or num then secpos = secpos + (num or 1) end -end - --- Flush action list (intervening C code or buffer pos overflow). -local function wflush(term) - if #actlist == actargs[1] then return end -- Nothing to flush. - if not term then waction("STOP") end -- Terminate action list. - wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) - actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). - secpos = 1 -- The actionlist offset occupies a buffer position, too. -end - --- Put escaped word. -local function wputw(n) - if n <= 0x000fffff then waction("ESC") end - wputxw(n) -end - --- Reserve position for word. -local function wpos() - local pos = #actlist+1 - actlist[pos] = "" - return pos -end - --- Store word to reserved position. -local function wputpos(pos, n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - if n <= 0x000fffff then - insert(actlist, pos+1, n) - n = map_action.ESC * 0x10000 - end - actlist[pos] = n -end - ------------------------------------------------------------------------------- - --- Global label name -> global label number. With auto assignment on 1st use. -local next_global = 20 -local map_global = setmetatable({}, { __index = function(t, name) - if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end - local n = next_global - if n > 2047 then werror("too many global labels") end - next_global = n + 1 - t[name] = n - return n -end}) - --- Dump global labels. -local function dumpglobals(out, lvl) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("Global labels:\n") - for i=20,next_global-1 do - out:write(format(" %s\n", t[i])) - end - out:write("\n") -end - --- Write global label enum. -local function writeglobals(out, prefix) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("enum {\n") - for i=20,next_global-1 do - out:write(" ", prefix, t[i], ",\n") - end - out:write(" ", prefix, "_MAX\n};\n") -end - --- Write global label names. -local function writeglobalnames(out, name) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("static const char *const ", name, "[] = {\n") - for i=20,next_global-1 do - out:write(" \"", t[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Extern label name -> extern label number. With auto assignment on 1st use. -local next_extern = 0 -local map_extern_ = {} -local map_extern = setmetatable({}, { __index = function(t, name) - -- No restrictions on the name for now. 
- local n = next_extern - if n > 2047 then werror("too many extern labels") end - next_extern = n + 1 - t[name] = n - map_extern_[n] = name - return n -end}) - --- Dump extern labels. -local function dumpexterns(out, lvl) - out:write("Extern labels:\n") - for i=0,next_extern-1 do - out:write(format(" %s\n", map_extern_[i])) - end - out:write("\n") -end - --- Write extern label names. -local function writeexternnames(out, name) - out:write("static const char *const ", name, "[] = {\n") - for i=0,next_extern-1 do - out:write(" \"", map_extern_[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Arch-specific maps. - --- Ext. register name -> int. name. -local map_archdef = { sp = "r13", lr = "r14", pc = "r15", } - --- Int. register name -> ext. name. -local map_reg_rev = { r13 = "sp", r14 = "lr", r15 = "pc", } - -local map_type = {} -- Type name -> { ctype, reg } -local ctypenum = 0 -- Type number (for Dt... macros). - --- Reverse defines for registers. -function _M.revdef(s) - return map_reg_rev[s] or s -end - -local map_shift = { lsl = 0, lsr = 1, asr = 2, ror = 3, } - -local map_cond = { - eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7, - hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, - hs = 2, lo = 3, -} - ------------------------------------------------------------------------------- - --- Template strings for ARM instructions. -local map_op = { - -- Basic data processing instructions. - and_3 = "e0000000DNPs", - eor_3 = "e0200000DNPs", - sub_3 = "e0400000DNPs", - rsb_3 = "e0600000DNPs", - add_3 = "e0800000DNPs", - adc_3 = "e0a00000DNPs", - sbc_3 = "e0c00000DNPs", - rsc_3 = "e0e00000DNPs", - tst_2 = "e1100000NP", - teq_2 = "e1300000NP", - cmp_2 = "e1500000NP", - cmn_2 = "e1700000NP", - orr_3 = "e1800000DNPs", - mov_2 = "e1a00000DPs", - bic_3 = "e1c00000DNPs", - mvn_2 = "e1e00000DPs", - - and_4 = "e0000000DNMps", - eor_4 = "e0200000DNMps", - sub_4 = "e0400000DNMps", - rsb_4 = "e0600000DNMps", - add_4 = "e0800000DNMps", - adc_4 = "e0a00000DNMps", - sbc_4 = "e0c00000DNMps", - rsc_4 = "e0e00000DNMps", - tst_3 = "e1100000NMp", - teq_3 = "e1300000NMp", - cmp_3 = "e1500000NMp", - cmn_3 = "e1700000NMp", - orr_4 = "e1800000DNMps", - mov_3 = "e1a00000DMps", - bic_4 = "e1c00000DNMps", - mvn_3 = "e1e00000DMps", - - lsl_3 = "e1a00000DMws", - lsr_3 = "e1a00020DMws", - asr_3 = "e1a00040DMws", - ror_3 = "e1a00060DMws", - rrx_2 = "e1a00060DMs", - - -- Multiply and multiply-accumulate. - mul_3 = "e0000090NMSs", - mla_4 = "e0200090NMSDs", - umaal_4 = "e0400090DNMSs", -- v6 - mls_4 = "e0600090DNMSs", -- v6T2 - umull_4 = "e0800090DNMSs", - umlal_4 = "e0a00090DNMSs", - smull_4 = "e0c00090DNMSs", - smlal_4 = "e0e00090DNMSs", - - -- Halfword multiply and multiply-accumulate. - smlabb_4 = "e1000080NMSD", -- v5TE - smlatb_4 = "e10000a0NMSD", -- v5TE - smlabt_4 = "e10000c0NMSD", -- v5TE - smlatt_4 = "e10000e0NMSD", -- v5TE - smlawb_4 = "e1200080NMSD", -- v5TE - smulwb_3 = "e12000a0NMS", -- v5TE - smlawt_4 = "e12000c0NMSD", -- v5TE - smulwt_3 = "e12000e0NMS", -- v5TE - smlalbb_4 = "e1400080NMSD", -- v5TE - smlaltb_4 = "e14000a0NMSD", -- v5TE - smlalbt_4 = "e14000c0NMSD", -- v5TE - smlaltt_4 = "e14000e0NMSD", -- v5TE - smulbb_3 = "e1600080NMS", -- v5TE - smultb_3 = "e16000a0NMS", -- v5TE - smulbt_3 = "e16000c0NMS", -- v5TE - smultt_3 = "e16000e0NMS", -- v5TE - - -- Miscellaneous data processing instructions. 
- clz_2 = "e16f0f10DM", -- v5T - rev_2 = "e6bf0f30DM", -- v6 - rev16_2 = "e6bf0fb0DM", -- v6 - revsh_2 = "e6ff0fb0DM", -- v6 - sel_3 = "e6800fb0DNM", -- v6 - usad8_3 = "e780f010NMS", -- v6 - usada8_4 = "e7800010NMSD", -- v6 - rbit_2 = "e6ff0f30DM", -- v6T2 - movw_2 = "e3000000DW", -- v6T2 - movt_2 = "e3400000DW", -- v6T2 - -- Note: the X encodes width-1, not width. - sbfx_4 = "e7a00050DMvX", -- v6T2 - ubfx_4 = "e7e00050DMvX", -- v6T2 - -- Note: the X encodes the msb field, not the width. - bfc_3 = "e7c0001fDvX", -- v6T2 - bfi_4 = "e7c00010DMvX", -- v6T2 - - -- Packing and unpacking instructions. - pkhbt_3 = "e6800010DNM", pkhbt_4 = "e6800010DNMv", -- v6 - pkhtb_3 = "e6800050DNM", pkhtb_4 = "e6800050DNMv", -- v6 - sxtab_3 = "e6a00070DNM", sxtab_4 = "e6a00070DNMv", -- v6 - sxtab16_3 = "e6800070DNM", sxtab16_4 = "e6800070DNMv", -- v6 - sxtah_3 = "e6b00070DNM", sxtah_4 = "e6b00070DNMv", -- v6 - sxtb_2 = "e6af0070DM", sxtb_3 = "e6af0070DMv", -- v6 - sxtb16_2 = "e68f0070DM", sxtb16_3 = "e68f0070DMv", -- v6 - sxth_2 = "e6bf0070DM", sxth_3 = "e6bf0070DMv", -- v6 - uxtab_3 = "e6e00070DNM", uxtab_4 = "e6e00070DNMv", -- v6 - uxtab16_3 = "e6c00070DNM", uxtab16_4 = "e6c00070DNMv", -- v6 - uxtah_3 = "e6f00070DNM", uxtah_4 = "e6f00070DNMv", -- v6 - uxtb_2 = "e6ef0070DM", uxtb_3 = "e6ef0070DMv", -- v6 - uxtb16_2 = "e6cf0070DM", uxtb16_3 = "e6cf0070DMv", -- v6 - uxth_2 = "e6ff0070DM", uxth_3 = "e6ff0070DMv", -- v6 - - -- Saturating instructions. - qadd_3 = "e1000050DMN", -- v5TE - qsub_3 = "e1200050DMN", -- v5TE - qdadd_3 = "e1400050DMN", -- v5TE - qdsub_3 = "e1600050DMN", -- v5TE - -- Note: the X for ssat* encodes sat_imm-1, not sat_imm. - ssat_3 = "e6a00010DXM", ssat_4 = "e6a00010DXMp", -- v6 - usat_3 = "e6e00010DXM", usat_4 = "e6e00010DXMp", -- v6 - ssat16_3 = "e6a00f30DXM", -- v6 - usat16_3 = "e6e00f30DXM", -- v6 - - -- Parallel addition and subtraction. - sadd16_3 = "e6100f10DNM", -- v6 - sasx_3 = "e6100f30DNM", -- v6 - ssax_3 = "e6100f50DNM", -- v6 - ssub16_3 = "e6100f70DNM", -- v6 - sadd8_3 = "e6100f90DNM", -- v6 - ssub8_3 = "e6100ff0DNM", -- v6 - qadd16_3 = "e6200f10DNM", -- v6 - qasx_3 = "e6200f30DNM", -- v6 - qsax_3 = "e6200f50DNM", -- v6 - qsub16_3 = "e6200f70DNM", -- v6 - qadd8_3 = "e6200f90DNM", -- v6 - qsub8_3 = "e6200ff0DNM", -- v6 - shadd16_3 = "e6300f10DNM", -- v6 - shasx_3 = "e6300f30DNM", -- v6 - shsax_3 = "e6300f50DNM", -- v6 - shsub16_3 = "e6300f70DNM", -- v6 - shadd8_3 = "e6300f90DNM", -- v6 - shsub8_3 = "e6300ff0DNM", -- v6 - uadd16_3 = "e6500f10DNM", -- v6 - uasx_3 = "e6500f30DNM", -- v6 - usax_3 = "e6500f50DNM", -- v6 - usub16_3 = "e6500f70DNM", -- v6 - uadd8_3 = "e6500f90DNM", -- v6 - usub8_3 = "e6500ff0DNM", -- v6 - uqadd16_3 = "e6600f10DNM", -- v6 - uqasx_3 = "e6600f30DNM", -- v6 - uqsax_3 = "e6600f50DNM", -- v6 - uqsub16_3 = "e6600f70DNM", -- v6 - uqadd8_3 = "e6600f90DNM", -- v6 - uqsub8_3 = "e6600ff0DNM", -- v6 - uhadd16_3 = "e6700f10DNM", -- v6 - uhasx_3 = "e6700f30DNM", -- v6 - uhsax_3 = "e6700f50DNM", -- v6 - uhsub16_3 = "e6700f70DNM", -- v6 - uhadd8_3 = "e6700f90DNM", -- v6 - uhsub8_3 = "e6700ff0DNM", -- v6 - - -- Load/store instructions. 
- str_2 = "e4000000DL", str_3 = "e4000000DL", str_4 = "e4000000DL", - strb_2 = "e4400000DL", strb_3 = "e4400000DL", strb_4 = "e4400000DL", - ldr_2 = "e4100000DL", ldr_3 = "e4100000DL", ldr_4 = "e4100000DL", - ldrb_2 = "e4500000DL", ldrb_3 = "e4500000DL", ldrb_4 = "e4500000DL", - strh_2 = "e00000b0DL", strh_3 = "e00000b0DL", - ldrh_2 = "e01000b0DL", ldrh_3 = "e01000b0DL", - ldrd_2 = "e00000d0DL", ldrd_3 = "e00000d0DL", -- v5TE - ldrsb_2 = "e01000d0DL", ldrsb_3 = "e01000d0DL", - strd_2 = "e00000f0DL", strd_3 = "e00000f0DL", -- v5TE - ldrsh_2 = "e01000f0DL", ldrsh_3 = "e01000f0DL", - - ldm_2 = "e8900000oR", ldmia_2 = "e8900000oR", ldmfd_2 = "e8900000oR", - ldmda_2 = "e8100000oR", ldmfa_2 = "e8100000oR", - ldmdb_2 = "e9100000oR", ldmea_2 = "e9100000oR", - ldmib_2 = "e9900000oR", ldmed_2 = "e9900000oR", - stm_2 = "e8800000oR", stmia_2 = "e8800000oR", stmfd_2 = "e8800000oR", - stmda_2 = "e8000000oR", stmfa_2 = "e8000000oR", - stmdb_2 = "e9000000oR", stmea_2 = "e9000000oR", - stmib_2 = "e9800000oR", stmed_2 = "e9800000oR", - pop_1 = "e8bd0000R", push_1 = "e92d0000R", - - -- Branch instructions. - b_1 = "ea000000B", - bl_1 = "eb000000B", - blx_1 = "e12fff30C", - bx_1 = "e12fff10M", - - -- Miscellaneous instructions. - nop_0 = "e1a00000", - mrs_1 = "e10f0000D", - bkpt_1 = "e1200070K", -- v5T - svc_1 = "ef000000T", swi_1 = "ef000000T", - ud_0 = "e7f001f0", - - -- VFP instructions. - ["vadd.f32_3"] = "ee300a00dnm", - ["vadd.f64_3"] = "ee300b00Gdnm", - ["vsub.f32_3"] = "ee300a40dnm", - ["vsub.f64_3"] = "ee300b40Gdnm", - ["vmul.f32_3"] = "ee200a00dnm", - ["vmul.f64_3"] = "ee200b00Gdnm", - ["vnmul.f32_3"] = "ee200a40dnm", - ["vnmul.f64_3"] = "ee200b40Gdnm", - ["vmla.f32_3"] = "ee000a00dnm", - ["vmla.f64_3"] = "ee000b00Gdnm", - ["vmls.f32_3"] = "ee000a40dnm", - ["vmls.f64_3"] = "ee000b40Gdnm", - ["vnmla.f32_3"] = "ee100a40dnm", - ["vnmla.f64_3"] = "ee100b40Gdnm", - ["vnmls.f32_3"] = "ee100a00dnm", - ["vnmls.f64_3"] = "ee100b00Gdnm", - ["vdiv.f32_3"] = "ee800a00dnm", - ["vdiv.f64_3"] = "ee800b00Gdnm", - - ["vabs.f32_2"] = "eeb00ac0dm", - ["vabs.f64_2"] = "eeb00bc0Gdm", - ["vneg.f32_2"] = "eeb10a40dm", - ["vneg.f64_2"] = "eeb10b40Gdm", - ["vsqrt.f32_2"] = "eeb10ac0dm", - ["vsqrt.f64_2"] = "eeb10bc0Gdm", - ["vcmp.f32_2"] = "eeb40a40dm", - ["vcmp.f64_2"] = "eeb40b40Gdm", - ["vcmpe.f32_2"] = "eeb40ac0dm", - ["vcmpe.f64_2"] = "eeb40bc0Gdm", - ["vcmpz.f32_1"] = "eeb50a40d", - ["vcmpz.f64_1"] = "eeb50b40Gd", - ["vcmpze.f32_1"] = "eeb50ac0d", - ["vcmpze.f64_1"] = "eeb50bc0Gd", - - vldr_2 = "ed100a00dl|ed100b00Gdl", - vstr_2 = "ed000a00dl|ed000b00Gdl", - vldm_2 = "ec900a00or", - vldmia_2 = "ec900a00or", - vldmdb_2 = "ed100a00or", - vpop_1 = "ecbd0a00r", - vstm_2 = "ec800a00or", - vstmia_2 = "ec800a00or", - vstmdb_2 = "ed000a00or", - vpush_1 = "ed2d0a00r", - - ["vmov.f32_2"] = "eeb00a40dm|eeb00a00dY", -- #imm is VFPv3 only - ["vmov.f64_2"] = "eeb00b40Gdm|eeb00b00GdY", -- #imm is VFPv3 only - vmov_2 = "ee100a10Dn|ee000a10nD", - vmov_3 = "ec500a10DNm|ec400a10mDN|ec500b10GDNm|ec400b10GmDN", - - vmrs_0 = "eef1fa10", - vmrs_1 = "eef10a10D", - vmsr_1 = "eee10a10D", - - ["vcvt.s32.f32_2"] = "eebd0ac0dm", - ["vcvt.s32.f64_2"] = "eebd0bc0dGm", - ["vcvt.u32.f32_2"] = "eebc0ac0dm", - ["vcvt.u32.f64_2"] = "eebc0bc0dGm", - ["vcvtr.s32.f32_2"] = "eebd0a40dm", - ["vcvtr.s32.f64_2"] = "eebd0b40dGm", - ["vcvtr.u32.f32_2"] = "eebc0a40dm", - ["vcvtr.u32.f64_2"] = "eebc0b40dGm", - ["vcvt.f32.s32_2"] = "eeb80ac0dm", - ["vcvt.f64.s32_2"] = "eeb80bc0GdFm", - ["vcvt.f32.u32_2"] = "eeb80a40dm", - ["vcvt.f64.u32_2"] = "eeb80b40GdFm", - 
["vcvt.f32.f64_2"] = "eeb70bc0dGm", - ["vcvt.f64.f32_2"] = "eeb70ac0GdFm", - - -- VFPv4 only: - ["vfma.f32_3"] = "eea00a00dnm", - ["vfma.f64_3"] = "eea00b00Gdnm", - ["vfms.f32_3"] = "eea00a40dnm", - ["vfms.f64_3"] = "eea00b40Gdnm", - ["vfnma.f32_3"] = "ee900a40dnm", - ["vfnma.f64_3"] = "ee900b40Gdnm", - ["vfnms.f32_3"] = "ee900a00dnm", - ["vfnms.f64_3"] = "ee900b00Gdnm", - - -- NYI: Advanced SIMD instructions. - - -- NYI: I have no need for these instructions right now: - -- swp, swpb, strex, ldrex, strexd, ldrexd, strexb, ldrexb, strexh, ldrexh - -- msr, nopv6, yield, wfe, wfi, sev, dbg, bxj, smc, srs, rfe - -- cps, setend, pli, pld, pldw, clrex, dsb, dmb, isb - -- stc, ldc, mcr, mcr2, mrc, mrc2, mcrr, mcrr2, mrrc, mrrc2, cdp, cdp2 -} - --- Add mnemonics for "s" variants. -do - local t = {} - for k,v in pairs(map_op) do - if sub(v, -1) == "s" then - local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2) - t[sub(k, 1, -3).."s"..sub(k, -2)] = v2 - end - end - for k,v in pairs(t) do - map_op[k] = v - end -end - ------------------------------------------------------------------------------- - -local function parse_gpr(expr) - local tname, ovreg = match(expr, "^([%w_]+):(r1?[0-9])$") - local tp = map_type[tname or expr] - if tp then - local reg = ovreg or tp.reg - if not reg then - werror("type `"..(tname or expr).."' needs a register override") - end - expr = reg - end - local r = match(expr, "^r(1?[0-9])$") - if r then - r = tonumber(r) - if r <= 15 then return r, tp end - end - werror("bad register name `"..expr.."'") -end - -local function parse_gpr_pm(expr) - local pm, expr2 = match(expr, "^([+-]?)(.*)$") - return parse_gpr(expr2), (pm == "-") -end - -local function parse_vr(expr, tp) - local t, r = match(expr, "^([sd])([0-9]+)$") - if t == tp then - r = tonumber(r) - if r <= 31 then - if t == "s" then return shr(r, 1), band(r, 1) end - return band(r, 15), shr(r, 4) - end - end - werror("bad register name `"..expr.."'") -end - -local function parse_reglist(reglist) - reglist = match(reglist, "^{%s*([^}]*)}$") - if not reglist then werror("register list expected") end - local rr = 0 - for p in gmatch(reglist..",", "%s*([^,]*),") do - local rbit = shl(1, parse_gpr(gsub(p, "%s+$", ""))) - if band(rr, rbit) ~= 0 then - werror("duplicate register `"..p.."'") - end - rr = rr + rbit - end - return rr -end - -local function parse_vrlist(reglist) - local ta, ra, tb, rb = match(reglist, - "^{%s*([sd])([0-9]+)%s*%-%s*([sd])([0-9]+)%s*}$") - ra, rb = tonumber(ra), tonumber(rb) - if ta and ta == tb and ra and rb and ra <= 31 and rb <= 31 and ra <= rb then - local nr = rb+1 - ra - if ta == "s" then - return shl(shr(ra,1),12)+shl(band(ra,1),22) + nr - else - return shl(band(ra,15),12)+shl(shr(ra,4),22) + nr*2 + 0x100 - end - end - werror("register list expected") -end - -local function parse_imm(imm, bits, shift, scale, signed) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = tonumber(imm) - if n then - local m = sar(n, scale) - if shl(m, scale) == n then - if signed then - local s = sar(m, bits-1) - if s == 0 then return shl(m, shift) - elseif s == -1 then return shl(m + shl(1, bits), shift) end - else - if sar(m, bits) == 0 then return shl(m, shift) end - end - end - werror("out of range immediate `"..imm.."'") - else - waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) - return 0 - end -end - -local function parse_imm12(imm) - local n = tonumber(imm) - if n then - local m = band(n) - for i=0,-15,-1 do - if shr(m, 8) == 0 then 
return m + shl(band(i, 15), 8) end - m = ror(m, 2) - end - werror("out of range immediate `"..imm.."'") - else - waction("IMM12", 0, imm) - return 0 - end -end - -local function parse_imm16(imm) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = tonumber(imm) - if n then - if shr(n, 16) == 0 then return band(n, 0x0fff) + shl(band(n, 0xf000), 4) end - werror("out of range immediate `"..imm.."'") - else - waction("IMM16", 32*16, imm) - return 0 - end -end - -local function parse_imm_load(imm, ext) - local n = tonumber(imm) - if n then - if ext then - if n >= -255 and n <= 255 then - local up = 0x00800000 - if n < 0 then n = -n; up = 0 end - return shl(band(n, 0xf0), 4) + band(n, 0x0f) + up - end - else - if n >= -4095 and n <= 4095 then - if n >= 0 then return n+0x00800000 end - return -n - end - end - werror("out of range immediate `"..imm.."'") - else - waction(ext and "IMML8" or "IMML12", 32768 + shl(ext and 8 or 12, 5), imm) - return 0 - end -end - -local function parse_shift(shift, gprok) - if shift == "rrx" then - return 3 * 32 - else - local s, s2 = match(shift, "^(%S+)%s*(.*)$") - s = map_shift[s] - if not s then werror("expected shift operand") end - if sub(s2, 1, 1) == "#" then - return parse_imm(s2, 5, 7, 0, false) + shl(s, 5) - else - if not gprok then werror("expected immediate shift operand") end - return shl(parse_gpr(s2), 8) + shl(s, 5) + 16 - end - end -end - -local function parse_label(label, def) - local prefix = sub(label, 1, 2) - -- =>label (pc label reference) - if prefix == "=>" then - return "PC", 0, sub(label, 3) - end - -- ->name (global label reference) - if prefix == "->" then - return "LG", map_global[sub(label, 3)] - end - if def then - -- [1-9] (local label definition) - if match(label, "^[1-9]$") then - return "LG", 10+tonumber(label) - end - else - -- [<>][1-9] (local label reference) - local dir, lnum = match(label, "^([<>])([1-9])$") - if dir then -- Fwd: 1-9, Bkwd: 11-19. - return "LG", lnum + (dir == ">" and 0 or 10) - end - -- extern label (extern label reference) - local extname = match(label, "^extern%s+(%S+)$") - if extname then - return "EXT", map_extern[extname] - end - end - werror("bad label `"..label.."'") -end - -local function parse_load(params, nparams, n, op) - local oplo = band(op, 255) - local ext, ldrd = (oplo ~= 0), (oplo == 208) - local d - if (ldrd or oplo == 240) then - d = band(shr(op, 12), 15) - if band(d, 1) ~= 0 then werror("odd destination register") end - end - local pn = params[n] - local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") - local p2 = params[n+1] - if not p1 then - if not p2 then - if match(pn, "^[<>=%-]") or match(pn, "^extern%s+") then - local mode, n, s = parse_label(pn, false) - waction("REL_"..mode, n + (ext and 0x1800 or 0x0800), s, 1) - return op + 15 * 65536 + 0x01000000 + (ext and 0x00400000 or 0) - end - local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local d, tp = parse_gpr(reg) - if tp then - waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12), - format(tp.ctypefmt, tailr)) - return op + shl(d, 16) + 0x01000000 + (ext and 0x00400000 or 0) - end - end - end - werror("expected address operand") - end - if wb == "!" then op = op + 0x00200000 end - if p2 then - if wb == "!" 
then werror("bad use of '!'") end - local p3 = params[n+2] - op = op + shl(parse_gpr(p1), 16) - local imm = match(p2, "^#(.*)$") - if imm then - local m = parse_imm_load(imm, ext) - if p3 then werror("too many parameters") end - op = op + m + (ext and 0x00400000 or 0) - else - local m, neg = parse_gpr_pm(p2) - if ldrd and (m == d or m-1 == d) then werror("register conflict") end - op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000) - if p3 then op = op + parse_shift(p3) end - end - else - local p1a, p2 = match(p1, "^([^,%s]*)%s*(.*)$") - op = op + shl(parse_gpr(p1a), 16) + 0x01000000 - if p2 ~= "" then - local imm = match(p2, "^,%s*#(.*)$") - if imm then - local m = parse_imm_load(imm, ext) - op = op + m + (ext and 0x00400000 or 0) - else - local p2a, p3 = match(p2, "^,%s*([^,%s]*)%s*,?%s*(.*)$") - local m, neg = parse_gpr_pm(p2a) - if ldrd and (m == d or m-1 == d) then werror("register conflict") end - op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000) - if p3 ~= "" then - if ext then werror("too many parameters") end - op = op + parse_shift(p3) - end - end - else - if wb == "!" then werror("bad use of '!'") end - op = op + (ext and 0x00c00000 or 0x00800000) - end - end - return op -end - -local function parse_vload(q) - local reg, imm = match(q, "^%[%s*([^,%s]*)%s*(.*)%]$") - if reg then - local d = shl(parse_gpr(reg), 16) - if imm == "" then return d end - imm = match(imm, "^,%s*#(.*)$") - if imm then - local n = tonumber(imm) - if n then - if n >= -1020 and n <= 1020 and n%4 == 0 then - return d + (n >= 0 and n/4+0x00800000 or -n/4) - end - werror("out of range immediate `"..imm.."'") - else - waction("IMMV8", 32768 + 32*8, imm) - return d - end - end - else - if match(q, "^[<>=%-]") or match(q, "^extern%s+") then - local mode, n, s = parse_label(q, false) - waction("REL_"..mode, n + 0x2800, s, 1) - return 15 * 65536 - end - local reg, tailr = match(q, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local d, tp = parse_gpr(reg) - if tp then - waction("IMMV8", 32768 + 32*8, format(tp.ctypefmt, tailr)) - return shl(d, 16) - end - end - end - werror("expected address operand") -end - ------------------------------------------------------------------------------- - --- Handle opcodes defined with template strings. -local function parse_template(params, template, nparams, pos) - local op = tonumber(sub(template, 1, 8), 16) - local n = 1 - local vr = "s" - - -- Process each character. - for p in gmatch(sub(template, 9), ".") do - local q = params[n] - if p == "D" then - op = op + shl(parse_gpr(q), 12); n = n + 1 - elseif p == "N" then - op = op + shl(parse_gpr(q), 16); n = n + 1 - elseif p == "S" then - op = op + shl(parse_gpr(q), 8); n = n + 1 - elseif p == "M" then - op = op + parse_gpr(q); n = n + 1 - elseif p == "d" then - local r,h = parse_vr(q, vr); op = op+shl(r,12)+shl(h,22); n = n + 1 - elseif p == "n" then - local r,h = parse_vr(q, vr); op = op+shl(r,16)+shl(h,7); n = n + 1 - elseif p == "m" then - local r,h = parse_vr(q, vr); op = op+r+shl(h,5); n = n + 1 - elseif p == "P" then - local imm = match(q, "^#(.*)$") - if imm then - op = op + parse_imm12(imm) + 0x02000000 - else - op = op + parse_gpr(q) - end - n = n + 1 - elseif p == "p" then - op = op + parse_shift(q, true); n = n + 1 - elseif p == "L" then - op = parse_load(params, nparams, n, op) - elseif p == "l" then - op = op + parse_vload(q) - elseif p == "B" then - local mode, n, s = parse_label(q, false) - waction("REL_"..mode, n, s, 1) - elseif p == "C" then -- blx gpr vs. blx label. 
- if match(q, "^([%w_]+):(r1?[0-9])$") or match(q, "^r(1?[0-9])$") then - op = op + parse_gpr(q) - else - if op < 0xe0000000 then werror("unconditional instruction") end - local mode, n, s = parse_label(q, false) - waction("REL_"..mode, n, s, 1) - op = 0xfa000000 - end - elseif p == "F" then - vr = "s" - elseif p == "G" then - vr = "d" - elseif p == "o" then - local r, wb = match(q, "^([^!]*)(!?)$") - op = op + shl(parse_gpr(r), 16) + (wb == "!" and 0x00200000 or 0) - n = n + 1 - elseif p == "R" then - op = op + parse_reglist(q); n = n + 1 - elseif p == "r" then - op = op + parse_vrlist(q); n = n + 1 - elseif p == "W" then - op = op + parse_imm16(q); n = n + 1 - elseif p == "v" then - op = op + parse_imm(q, 5, 7, 0, false); n = n + 1 - elseif p == "w" then - local imm = match(q, "^#(.*)$") - if imm then - op = op + parse_imm(q, 5, 7, 0, false); n = n + 1 - else - op = op + shl(parse_gpr(q), 8) + 16 - end - elseif p == "X" then - op = op + parse_imm(q, 5, 16, 0, false); n = n + 1 - elseif p == "Y" then - local imm = tonumber(match(q, "^#(.*)$")); n = n + 1 - if not imm or shr(imm, 8) ~= 0 then - werror("bad immediate operand") - end - op = op + shl(band(imm, 0xf0), 12) + band(imm, 0x0f) - elseif p == "K" then - local imm = tonumber(match(q, "^#(.*)$")); n = n + 1 - if not imm or shr(imm, 16) ~= 0 then - werror("bad immediate operand") - end - op = op + shl(band(imm, 0xfff0), 4) + band(imm, 0x000f) - elseif p == "T" then - op = op + parse_imm(q, 24, 0, 0, false); n = n + 1 - elseif p == "s" then - -- Ignored. - else - assert(false) - end - end - wputpos(pos, op) -end - -map_op[".template__"] = function(params, template, nparams) - if not params then return template:gsub("%x%x%x%x%x%x%x%x", "") end - - -- Limit number of section buffer positions used by a single dasm_put(). - -- A single opcode needs a maximum of 3 positions. - if secpos+3 > maxsecpos then wflush() end - local pos = wpos() - local lpos, apos, spos = #actlist, #actargs, secpos - - local ok, err - for t in gmatch(template, "[^|]+") do - ok, err = pcall(parse_template, params, t, nparams, pos) - if ok then return end - secpos = spos - actlist[lpos+1] = nil - actlist[lpos+2] = nil - actlist[lpos+3] = nil - actargs[apos+1] = nil - actargs[apos+2] = nil - actargs[apos+3] = nil - end - error(err, 0) -end - ------------------------------------------------------------------------------- - --- Pseudo-opcode to mark the position where the action list is to be emitted. -map_op[".actionlist_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeactions(out, name) end) -end - --- Pseudo-opcode to mark the position where the global enum is to be emitted. -map_op[".globals_1"] = function(params) - if not params then return "prefix" end - local prefix = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobals(out, prefix) end) -end - --- Pseudo-opcode to mark the position where the global names are to be emitted. -map_op[".globalnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobalnames(out, name) end) -end - --- Pseudo-opcode to mark the position where the extern names are to be emitted. -map_op[".externnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. 
- wline(function(out) writeexternnames(out, name) end) -end - ------------------------------------------------------------------------------- - --- Label pseudo-opcode (converted from trailing colon form). -map_op[".label_1"] = function(params) - if not params then return "[1-9] | ->global | =>pcexpr" end - if secpos+1 > maxsecpos then wflush() end - local mode, n, s = parse_label(params[1], true) - if mode == "EXT" then werror("bad label definition") end - waction("LABEL_"..mode, n, s, 1) -end - ------------------------------------------------------------------------------- - --- Pseudo-opcodes for data storage. -map_op[".long_*"] = function(params) - if not params then return "imm..." end - for _,p in ipairs(params) do - local n = tonumber(p) - if not n then werror("bad immediate `"..p.."'") end - if n < 0 then n = n + 2^32 end - wputw(n) - if secpos+2 > maxsecpos then wflush() end - end -end - --- Alignment pseudo-opcode. -map_op[".align_1"] = function(params) - if not params then return "numpow2" end - if secpos+1 > maxsecpos then wflush() end - local align = tonumber(params[1]) - if align then - local x = align - -- Must be a power of 2 in the range (2 ... 256). - for i=1,8 do - x = x / 2 - if x == 1 then - waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. - return - end - end - end - werror("bad alignment") -end - ------------------------------------------------------------------------------- - --- Pseudo-opcode for (primitive) type definitions (map to C types). -map_op[".type_3"] = function(params, nparams) - if not params then - return nparams == 2 and "name, ctype" or "name, ctype, reg" - end - local name, ctype, reg = params[1], params[2], params[3] - if not match(name, "^[%a_][%w_]*$") then - werror("bad type name `"..name.."'") - end - local tp = map_type[name] - if tp then - werror("duplicate type `"..name.."'") - end - -- Add #type to defines. A bit unclean to put it in map_archdef. - map_archdef["#"..name] = "sizeof("..ctype..")" - -- Add new type and emit shortcut define. - local num = ctypenum + 1 - map_type[name] = { - ctype = ctype, - ctypefmt = format("Dt%X(%%s)", num), - reg = reg, - } - wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) - ctypenum = num -end -map_op[".type_2"] = map_op[".type_3"] - --- Dump type definitions. -local function dumptypes(out, lvl) - local t = {} - for name in pairs(map_type) do t[#t+1] = name end - sort(t) - out:write("Type definitions:\n") - for _,name in ipairs(t) do - local tp = map_type[name] - local reg = tp.reg or "" - out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Set the current section. -function _M.section(num) - waction("SECTION", num) - wflush(true) -- SECTION is a terminal action. -end - ------------------------------------------------------------------------------- - --- Dump architecture description. -function _M.dumparch(out) - out:write(format("DynASM %s version %s, released %s\n\n", - _info.arch, _info.version, _info.release)) - dumpactions(out) -end - --- Dump all user defined elements. -function _M.dumpdef(out, lvl) - dumptypes(out, lvl) - dumpglobals(out, lvl) - dumpexterns(out, lvl) -end - ------------------------------------------------------------------------------- - --- Pass callbacks from/to the DynASM core. -function _M.passcb(wl, we, wf, ww) - wline, werror, wfatal, wwarn = wl, we, wf, ww - return wflush -end - --- Setup the arch-specific module. 
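Each entry in map_op above pairs a 32-bit base opcode with operand letters that parse_template() ORs into the instruction word: D goes to bits 12-15, N to bits 16-19, S to bits 8-11, a register M or P to bits 0-3, v is a 5-bit immediate placed at bit 7, X a 5-bit immediate placed at bit 16, and so on. A hand-worked sketch of that arithmetic for two of the templates (illustrative only, not part of the patch):

/* Reproduces by hand what parse_template() computes for two ARM templates. */
#include <assert.h>

static unsigned int enc_add(unsigned int rd, unsigned int rn, unsigned int rm)
{
  /* add_3 = "e0800000DNPs", register form of P: Rd<<12 | Rn<<16 | Rm. */
  return 0xe0800000u | (rd << 12) | (rn << 16) | rm;
}

static unsigned int enc_ubfx(unsigned int rd, unsigned int rm,
                             unsigned int lsb, unsigned int width)
{
  /* ubfx_4 = "e7e00050DMvX": v is the lsb at bit 7, X carries width-1 at
  ** bit 16 (see the "X encodes width-1, not width" note above). */
  return 0xe7e00050u | (rd << 12) | rm | (lsb << 7) | ((width - 1) << 16);
}

int main(void)
{
  assert(enc_add(0, 1, 2) == 0xe0810002u);      /* add r0, r1, r2 */
  assert(enc_ubfx(0, 1, 4, 8) == 0xe7e70251u);  /* ubfx r0, r1, #4, #8 */
  return 0;
}

The _M.setup()/_M.mergemaps() glue that follows does no encoding of its own; it merely hands this table (plus condition-code variants of its keys) back to the DynASM core.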
-function _M.setup(arch, opt) - g_arch, g_opt = arch, opt -end - --- Merge the core maps and the arch-specific maps. -function _M.mergemaps(map_coreop, map_def) - setmetatable(map_op, { __index = function(t, k) - local v = map_coreop[k] - if v then return v end - local k1, cc, k2 = match(k, "^(.-)(..)([._].*)$") - local cv = map_cond[cc] - if cv then - local v = rawget(t, k1..k2) - if type(v) == "string" then - local scv = format("%x", cv) - return gsub(scv..sub(v, 2), "|e", "|"..scv) - end - end - end }) - setmetatable(map_def, { __index = map_archdef }) - return map_op, map_def -end - -return _M - ------------------------------------------------------------------------------- - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.h deleted file mode 100644 index d912e61dda7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.h +++ /dev/null @@ -1,518 +0,0 @@ -/* -** DynASM ARM64 encoding engine. -** Copyright (C) 2005-2015 Mike Pall. All rights reserved. -** Released under the MIT license. See dynasm.lua for full copyright notice. -*/ - -#include -#include -#include -#include - -#define DASM_ARCH "arm64" - -#ifndef DASM_EXTERN -#define DASM_EXTERN(a,b,c,d) 0 -#endif - -/* Action definitions. */ -enum { - DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, - /* The following actions need a buffer position. */ - DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, - /* The following actions also have an argument. */ - DASM_REL_PC, DASM_LABEL_PC, - DASM_IMM, DASM_IMM6, DASM_IMM12, DASM_IMM13W, DASM_IMM13X, DASM_IMML, - DASM__MAX -}; - -/* Maximum number of section buffer positions for a single dasm_put() call. */ -#define DASM_MAXSECPOS 25 - -/* DynASM encoder status codes. Action list offset or number are or'ed in. */ -#define DASM_S_OK 0x00000000 -#define DASM_S_NOMEM 0x01000000 -#define DASM_S_PHASE 0x02000000 -#define DASM_S_MATCH_SEC 0x03000000 -#define DASM_S_RANGE_I 0x11000000 -#define DASM_S_RANGE_SEC 0x12000000 -#define DASM_S_RANGE_LG 0x13000000 -#define DASM_S_RANGE_PC 0x14000000 -#define DASM_S_RANGE_REL 0x15000000 -#define DASM_S_UNDEF_LG 0x21000000 -#define DASM_S_UNDEF_PC 0x22000000 - -/* Macros to convert positions (8 bit section + 24 bit index). */ -#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) -#define DASM_POS2BIAS(pos) ((pos)&0xff000000) -#define DASM_SEC2POS(sec) ((sec)<<24) -#define DASM_POS2SEC(pos) ((pos)>>24) -#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) - -/* Action list type. */ -typedef const unsigned int *dasm_ActList; - -/* Per-section structure. */ -typedef struct dasm_Section { - int *rbuf; /* Biased buffer pointer (negative section bias). */ - int *buf; /* True buffer pointer. */ - size_t bsize; /* Buffer size in bytes. */ - int pos; /* Biased buffer position. */ - int epos; /* End of biased buffer position - max single put. */ - int ofs; /* Byte offset into section. */ -} dasm_Section; - -/* Core structure holding the DynASM encoding state. */ -struct dasm_State { - size_t psize; /* Allocated size of this structure. */ - dasm_ActList actionlist; /* Current actionlist pointer. */ - int *lglabels; /* Local/global chain/pos ptrs. */ - size_t lgsize; - int *pclabels; /* PC label chains/pos ptrs. */ - size_t pcsize; - void **globals; /* Array of globals (bias -10). */ - dasm_Section *section; /* Pointer to active section. */ - size_t codesize; /* Total size of all code sections. */ - int maxsection; /* 0 <= sectionidx < maxsection. 
*/ - int status; /* Status code. */ - dasm_Section sections[1]; /* All sections. Alloc-extended. */ -}; - -/* The size of the core structure depends on the max. number of sections. */ -#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) - - -/* Initialize DynASM state. */ -void dasm_init(Dst_DECL, int maxsection) -{ - dasm_State *D; - size_t psz = 0; - int i; - Dst_REF = NULL; - DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); - D = Dst_REF; - D->psize = psz; - D->lglabels = NULL; - D->lgsize = 0; - D->pclabels = NULL; - D->pcsize = 0; - D->globals = NULL; - D->maxsection = maxsection; - for (i = 0; i < maxsection; i++) { - D->sections[i].buf = NULL; /* Need this for pass3. */ - D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); - D->sections[i].bsize = 0; - D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ - } -} - -/* Free DynASM state. */ -void dasm_free(Dst_DECL) -{ - dasm_State *D = Dst_REF; - int i; - for (i = 0; i < D->maxsection; i++) - if (D->sections[i].buf) - DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); - if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); - if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); - DASM_M_FREE(Dst, D, D->psize); -} - -/* Setup global label array. Must be called before dasm_setup(). */ -void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) -{ - dasm_State *D = Dst_REF; - D->globals = gl - 10; /* Negative bias to compensate for locals. */ - DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); -} - -/* Grow PC label array. Can be called after dasm_setup(), too. */ -void dasm_growpc(Dst_DECL, unsigned int maxpc) -{ - dasm_State *D = Dst_REF; - size_t osz = D->pcsize; - DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); - memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); -} - -/* Setup encoder. 
*/ -void dasm_setup(Dst_DECL, const void *actionlist) -{ - dasm_State *D = Dst_REF; - int i; - D->actionlist = (dasm_ActList)actionlist; - D->status = DASM_S_OK; - D->section = &D->sections[0]; - memset((void *)D->lglabels, 0, D->lgsize); - if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); - for (i = 0; i < D->maxsection; i++) { - D->sections[i].pos = DASM_SEC2POS(i); - D->sections[i].ofs = 0; - } -} - - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) { \ - D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) -#define CKPL(kind, st) \ - do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ - D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) -#else -#define CK(x, st) ((void)0) -#define CKPL(kind, st) ((void)0) -#endif - -static int dasm_imm12(unsigned int n) -{ - if ((n >> 12) == 0) - return n; - else if ((n & 0xff000fff) == 0) - return (n >> 12) | 0x1000; - else - return -1; -} - -static int dasm_ffs(unsigned long long x) -{ - int n = -1; - while (x) { x >>= 1; n++; } - return n; -} - -static int dasm_imm13(int lo, int hi) -{ - int inv = 0, w = 64, s = 0xfff, xa, xb; - unsigned long long n = (((unsigned long long)hi) << 32) | (unsigned int)lo; - unsigned long long m = 1ULL, a, b, c; - if (n & 1) { n = ~n; inv = 1; } - a = n & -n; b = (n+a)&-(n+a); c = (n+a-b)&-(n+a-b); - xa = dasm_ffs(a); xb = dasm_ffs(b); - if (c) { - w = dasm_ffs(c) - xa; - if (w == 32) m = 0x0000000100000001UL; - else if (w == 16) m = 0x0001000100010001UL; - else if (w == 8) m = 0x0101010101010101UL; - else if (w == 4) m = 0x1111111111111111UL; - else if (w == 2) m = 0x5555555555555555UL; - else return -1; - s = (-2*w & 0x3f) - 1; - } else if (!a) { - return -1; - } else if (xb == -1) { - xb = 64; - } - if ((b-a) * m != n) return -1; - if (inv) { - return ((w - xb) << 6) | (s+w+xa-xb); - } else { - return ((w - xa) << 6) | (s+xb-xa); - } - return -1; -} - -/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ -void dasm_put(Dst_DECL, int start, ...) -{ - va_list ap; - dasm_State *D = Dst_REF; - dasm_ActList p = D->actionlist + start; - dasm_Section *sec = D->section; - int pos = sec->pos, ofs = sec->ofs; - int *b; - - if (pos >= sec->epos) { - DASM_M_GROW(Dst, int, sec->buf, sec->bsize, - sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); - sec->rbuf = sec->buf - DASM_POS2BIAS(pos); - sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); - } - - b = sec->rbuf; - b[pos++] = start; - - va_start(ap, start); - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - if (action >= DASM__MAX) { - ofs += 4; - } else { - int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; - switch (action) { - case DASM_STOP: goto stop; - case DASM_SECTION: - n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); - D->section = &D->sections[n]; goto stop; - case DASM_ESC: p++; ofs += 4; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; - case DASM_REL_LG: - n = (ins & 2047) - 10; pl = D->lglabels + n; - /* Bkwd rel or global. */ - if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } - pl += 10; n = *pl; - if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ - goto linkrel; - case DASM_REL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putrel: - n = *pl; - if (n < 0) { /* Label exists. Get label pos and store it. */ - b[pos] = -n; - } else { - linkrel: - b[pos] = n; /* Else link to rel chain, anchored at label. 
*/ - *pl = pos; - } - pos++; - break; - case DASM_LABEL_LG: - pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; - case DASM_LABEL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putlabel: - n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; - } - *pl = -pos; /* Label exists now. */ - b[pos++] = ofs; /* Store pass1 offset estimate. */ - break; - case DASM_IMM: - CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); - n >>= ((ins>>10)&31); -#ifdef DASM_CHECKS - if ((ins & 0x8000)) - CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); - else - CK((n>>((ins>>5)&31)) == 0, RANGE_I); -#endif - b[pos++] = n; - break; - case DASM_IMM6: - CK((n >> 6) == 0, RANGE_I); - b[pos++] = n; - break; - case DASM_IMM12: - CK(dasm_imm12((unsigned int)n) != -1, RANGE_I); - b[pos++] = n; - break; - case DASM_IMM13W: - CK(dasm_imm13(n, n) != -1, RANGE_I); - b[pos++] = n; - break; - case DASM_IMM13X: { - int m = va_arg(ap, int); - CK(dasm_imm13(n, m) != -1, RANGE_I); - b[pos++] = n; - b[pos++] = m; - break; - } - case DASM_IMML: { -#ifdef DASM_CHECKS - int scale = (p[-2] >> 30); - CK((!(n & ((1<<scale)-1)) && (unsigned int)(n>>scale) < 4096) || - (unsigned int)(n+256) < 512, RANGE_I); -#endif - b[pos++] = n; - break; - } - } - } - } -stop: - va_end(ap); - sec->pos = pos; - sec->ofs = ofs; -} -#undef CK - -/* Pass 2: Link sections, shrink aligns, fix label offsets. */ -int dasm_link(Dst_DECL, size_t *szp) -{ - dasm_State *D = Dst_REF; - int secnum; - int ofs = 0; - -#ifdef DASM_CHECKS - *szp = 0; - if (D->status != DASM_S_OK) return D->status; - { - int pc; - for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) - if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; - } -#endif - - { /* Handle globals not defined in this translation unit. */ - int idx; - for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { - int n = D->lglabels[idx]; - /* Undefined label: Collapse rel chain and replace with marker (< 0). */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } - } - } - - /* Combine all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->rbuf; - int pos = DASM_SEC2POS(secnum); - int lastpos = sec->pos; - - while (pos != lastpos) { - dasm_ActList p = D->actionlist + b[pos++]; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: p++; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; - case DASM_REL_LG: case DASM_REL_PC: pos++; break; - case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; - case DASM_IMM: case DASM_IMM6: case DASM_IMM12: case DASM_IMM13W: - case DASM_IMML: pos++; break; - case DASM_IMM13X: pos += 2; break; - } - } - stop: (void)0; - } - ofs += sec->ofs; /* Next section starts right after current section. */ - } - - D->codesize = ofs; /* Total size of all code sections */ - *szp = ofs; - return DASM_S_OK; -} - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) -#else -#define CK(x, st) ((void)0) -#endif - -/* Pass 3: Encode sections. */ -int dasm_encode(Dst_DECL, void *buffer) -{ - dasm_State *D = Dst_REF; - char *base = (char *)buffer; - unsigned int *cp = (unsigned int *)buffer; - int secnum; - - /* Encode all code sections. No support for data sections (yet). 
*/ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->buf; - int *endb = sec->rbuf + sec->pos; - - while (b != endb) { - dasm_ActList p = D->actionlist + *b++; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: *cp++ = *p++; break; - case DASM_REL_EXT: - n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048)); - goto patchrel; - case DASM_ALIGN: - ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000; - break; - case DASM_REL_LG: - CK(n >= 0, UNDEF_LG); - case DASM_REL_PC: - CK(n >= 0, UNDEF_PC); - n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) + 4; - patchrel: - if (!(ins & 0xf800)) { /* B, BL */ - CK((n & 3) == 0 && ((n+0x08000000) >> 28) == 0, RANGE_REL); - cp[-1] |= ((n >> 2) & 0x03ffffff); - } else if ((ins & 0x800)) { /* B.cond, CBZ, CBNZ, LDR* literal */ - CK((n & 3) == 0 && ((n+0x00100000) >> 21) == 0, RANGE_REL); - cp[-1] |= ((n << 3) & 0x00ffffe0); - } else if ((ins & 0x3000) == 0x2000) { /* ADR */ - CK(((n+0x00100000) >> 21) == 0, RANGE_REL); - cp[-1] |= ((n << 3) & 0x00ffffe0) | ((n & 3) << 29); - } else if ((ins & 0x3000) == 0x3000) { /* ADRP */ - cp[-1] |= ((n >> 9) & 0x00ffffe0) | (((n >> 12) & 3) << 29); - } else if ((ins & 0x1000)) { /* TBZ, TBNZ */ - CK((n & 3) == 0 && ((n+0x00008000) >> 16) == 0, RANGE_REL); - cp[-1] |= ((n << 3) & 0x0007ffe0); - } - break; - case DASM_LABEL_LG: - ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); - break; - case DASM_LABEL_PC: break; - case DASM_IMM: - cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31); - break; - case DASM_IMM6: - cp[-1] |= ((n&31) << 19) | ((n&32) << 26); - break; - case DASM_IMM12: - cp[-1] |= (dasm_imm12((unsigned int)n) << 10); - break; - case DASM_IMM13W: - cp[-1] |= (dasm_imm13(n, n) << 10); - break; - case DASM_IMM13X: - cp[-1] |= (dasm_imm13(n, *b++) << 10); - break; - case DASM_IMML: { - int scale = (p[-2] >> 30); - cp[-1] |= (!(n & ((1<<scale)-1)) && (unsigned int)(n>>scale) < 4096) ? - ((n << (10-scale)) | 0x01000000) : ((n & 511) << 12); - break; - } - default: *cp++ = ins; break; - } - } - stop: (void)0; - } - } - - if (base + D->codesize != (char *)cp) /* Check for phase errors. */ - return DASM_S_PHASE; - return DASM_S_OK; -} -#undef CK - -/* Get PC label offset. */ -int dasm_getpclabel(Dst_DECL, unsigned int pc) -{ - dasm_State *D = Dst_REF; - if (pc*sizeof(int) < D->pcsize) { - int pos = D->pclabels[pc]; - if (pos < 0) return *DASM_POS2PTR(D, -pos); - if (pos > 0) return -1; /* Undefined. */ - } - return -2; /* Unused or out of range. */ -} - -#ifdef DASM_CHECKS -/* Optional sanity checker to call between isolated encoding steps. 
*/ -int dasm_checkstep(Dst_DECL, int secmatch) -{ - dasm_State *D = Dst_REF; - if (D->status == DASM_S_OK) { - int i; - for (i = 1; i <= 9; i++) { - if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } - D->lglabels[i] = 0; - } - } - if (D->status == DASM_S_OK && secmatch >= 0 && - D->section != &D->sections[secmatch]) - D->status = DASM_S_MATCH_SEC|(D->section-D->sections); - return D->status; -} -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.lua deleted file mode 100644 index 9766e475b0f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_arm64.lua +++ /dev/null @@ -1,1166 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM ARM64 module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See dynasm.lua for full copyright notice. ------------------------------------------------------------------------------- - --- Module information: -local _info = { - arch = "arm", - description = "DynASM ARM64 module", - version = "1.3.0", - vernum = 10300, - release = "2014-12-03", - author = "Mike Pall", - license = "MIT", -} - --- Exported glue functions for the arch-specific module. -local _M = { _info = _info } - --- Cache library functions. -local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs -local assert, setmetatable, rawget = assert, setmetatable, rawget -local _s = string -local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char -local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub -local concat, sort, insert = table.concat, table.sort, table.insert -local bit = bit or require("bit") -local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift -local ror, tohex = bit.ror, bit.tohex - --- Inherited tables and callbacks. -local g_opt, g_arch -local wline, werror, wfatal, wwarn - --- Action name list. --- CHECK: Keep this in sync with the C code! -local action_names = { - "STOP", "SECTION", "ESC", "REL_EXT", - "ALIGN", "REL_LG", "LABEL_LG", - "REL_PC", "LABEL_PC", "IMM", "IMM6", "IMM12", "IMM13W", "IMM13X", "IMML", -} - --- Maximum number of section buffer positions for dasm_put(). --- CHECK: Keep this in sync with the C code! -local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. - --- Action name -> action number. -local map_action = {} -for n,name in ipairs(action_names) do - map_action[name] = n-1 -end - --- Action list buffer. -local actlist = {} - --- Argument list for next dasm_put(). Start with offset 0 into action list. -local actargs = { 0 } - --- Current number of section buffer positions for dasm_put(). -local secpos = 1 - ------------------------------------------------------------------------------- - --- Dump action names and numbers. -local function dumpactions(out) - out:write("DynASM encoding engine action codes:\n") - for n,name in ipairs(action_names) do - local num = map_action[name] - out:write(format(" %-10s %02X %d\n", name, num, num)) - end - out:write("\n") -end - --- Write action list buffer as a huge static C array. 
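Compared with the ARM module above, the ARM64 action list trades IMM16/IMML8/IMML12/IMMV8 for IMM6, IMM12, IMM13W/IMM13X and IMML, matching the A64 immediate formats the C engine checks. The rule behind IMM12, for instance, is the one dasm_imm12() in dasm_arm64.h above enforces: the value must fit in 12 bits, optionally shifted left by 12. A hedged restatement, not taken from the patch:

/* Mirrors the test in dasm_imm12() above: an A64 ADD/SUB immediate must fit
** in 12 bits, or be a 12-bit value shifted left by 12 (the encoder then sets
** the "LSL #12" flag). */
static int a64_addsub_imm_ok(unsigned int n)
{
  return (n >> 12) == 0 || (n & 0xff000fff) == 0;
}
/* a64_addsub_imm_ok(0xfff) and a64_addsub_imm_ok(0xabc000) hold; 0x1001 does
** not: a literal is rejected by parse_imm12() at pre-processing time, and a
** runtime value that fails the test makes dasm_put() report DASM_S_RANGE_I. */

The writeactions() emitter below is unchanged from the ARM module.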
-local function writeactions(out, name) - local nn = #actlist - if nn == 0 then nn = 1; actlist[0] = map_action.STOP end - out:write("static const unsigned int ", name, "[", nn, "] = {\n") - for i = 1,nn-1 do - assert(out:write("0x", tohex(actlist[i]), ",\n")) - end - assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) -end - ------------------------------------------------------------------------------- - --- Add word to action list. -local function wputxw(n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - actlist[#actlist+1] = n -end - --- Add action to list with optional arg. Advance buffer pos, too. -local function waction(action, val, a, num) - local w = assert(map_action[action], "bad action name `"..action.."'") - wputxw(w * 0x10000 + (val or 0)) - if a then actargs[#actargs+1] = a end - if a or num then secpos = secpos + (num or 1) end -end - --- Flush action list (intervening C code or buffer pos overflow). -local function wflush(term) - if #actlist == actargs[1] then return end -- Nothing to flush. - if not term then waction("STOP") end -- Terminate action list. - wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) - actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). - secpos = 1 -- The actionlist offset occupies a buffer position, too. -end - --- Put escaped word. -local function wputw(n) - if n <= 0x000fffff then waction("ESC") end - wputxw(n) -end - --- Reserve position for word. -local function wpos() - local pos = #actlist+1 - actlist[pos] = "" - return pos -end - --- Store word to reserved position. -local function wputpos(pos, n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - if n <= 0x000fffff then - insert(actlist, pos+1, n) - n = map_action.ESC * 0x10000 - end - actlist[pos] = n -end - ------------------------------------------------------------------------------- - --- Global label name -> global label number. With auto assignment on 1st use. -local next_global = 20 -local map_global = setmetatable({}, { __index = function(t, name) - if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end - local n = next_global - if n > 2047 then werror("too many global labels") end - next_global = n + 1 - t[name] = n - return n -end}) - --- Dump global labels. -local function dumpglobals(out, lvl) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("Global labels:\n") - for i=20,next_global-1 do - out:write(format(" %s\n", t[i])) - end - out:write("\n") -end - --- Write global label enum. -local function writeglobals(out, prefix) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("enum {\n") - for i=20,next_global-1 do - out:write(" ", prefix, t[i], ",\n") - end - out:write(" ", prefix, "_MAX\n};\n") -end - --- Write global label names. -local function writeglobalnames(out, name) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("static const char *const ", name, "[] = {\n") - for i=20,next_global-1 do - out:write(" \"", t[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Extern label name -> extern label number. With auto assignment on 1st use. -local next_extern = 0 -local map_extern_ = {} -local map_extern = setmetatable({}, { __index = function(t, name) - -- No restrictions on the name for now. 
- local n = next_extern - if n > 2047 then werror("too many extern labels") end - next_extern = n + 1 - t[name] = n - map_extern_[n] = name - return n -end}) - --- Dump extern labels. -local function dumpexterns(out, lvl) - out:write("Extern labels:\n") - for i=0,next_extern-1 do - out:write(format(" %s\n", map_extern_[i])) - end - out:write("\n") -end - --- Write extern label names. -local function writeexternnames(out, name) - out:write("static const char *const ", name, "[] = {\n") - for i=0,next_extern-1 do - out:write(" \"", map_extern_[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Arch-specific maps. - --- Ext. register name -> int. name. -local map_archdef = { xzr = "@x31", wzr = "@w31", lr = "x30", } - --- Int. register name -> ext. name. -local map_reg_rev = { ["@x31"] = "xzr", ["@w31"] = "wzr", x30 = "lr", } - -local map_type = {} -- Type name -> { ctype, reg } -local ctypenum = 0 -- Type number (for Dt... macros). - --- Reverse defines for registers. -function _M.revdef(s) - return map_reg_rev[s] or s -end - -local map_shift = { lsl = 0, lsr = 1, asr = 2, } - -local map_extend = { - uxtb = 0, uxth = 1, uxtw = 2, uxtx = 3, - sxtb = 4, sxth = 5, sxtw = 6, sxtx = 7, -} - -local map_cond = { - eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7, - hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, - hs = 2, lo = 3, -} - ------------------------------------------------------------------------------- - -local parse_reg_type - -local function parse_reg(expr) - if not expr then werror("expected register name") end - local tname, ovreg = match(expr, "^([%w_]+):(@?%l%d+)$") - local tp = map_type[tname or expr] - if tp then - local reg = ovreg or tp.reg - if not reg then - werror("type `"..(tname or expr).."' needs a register override") - end - expr = reg - end - local ok31, rt, r = match(expr, "^(@?)([xwqdshb])([123]?[0-9])$") - if r then - r = tonumber(r) - if r <= 30 or (r == 31 and ok31 ~= "" or (rt ~= "w" and rt ~= "x")) then - if not parse_reg_type then - parse_reg_type = rt - elseif parse_reg_type ~= rt then - werror("register size mismatch") - end - return r, tp - end - end - werror("bad register name `"..expr.."'") -end - -local function parse_reg_base(expr) - if expr == "sp" then return 0x3e0 end - local base, tp = parse_reg(expr) - if parse_reg_type ~= "x" then werror("bad register type") end - parse_reg_type = false - return shl(base, 5), tp -end - -local parse_ctx = {} - -local loadenv = setfenv and function(s) - local code = loadstring(s, "") - if code then setfenv(code, parse_ctx) end - return code -end or function(s) - return load(s, "", nil, parse_ctx) -end - --- Try to parse simple arithmetic, too, since some basic ops are aliases. 
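The most involved immediate format in this file is the logical (bitmask) immediate used by AND/ORR/EOR: dasm_imm13() in the C engine above and parse_imm13() a little further down both require the constant to be a power-of-two sized element (2 to 64 bits) holding a single, possibly rotated, run of ones, replicated across the whole register. A self-contained sketch of that same property (illustrative only; it classifies constants but does not derive the N:immr:imms fields the real encoders produce):

/* Standalone sketch of the AArch64 logical-immediate rule checked by
** dasm_imm13()/parse_imm13().  Returns the element size, or 0 if the value
** is not encodable. */
#include <stdint.h>

static int popcount64(uint64_t x)
{
  int c = 0;
  while (x) { c += (int)(x & 1); x >>= 1; }
  return c;
}

static int logical_imm_element(uint64_t n)
{
  unsigned int size, i;
  if (n == 0 || ~n == 0) return 0;           /* all-zeros/all-ones are rejected */
  for (size = 2; size <= 64; size <<= 1) {
    uint64_t mask = (size == 64) ? ~(uint64_t)0 : (((uint64_t)1 << size) - 1);
    uint64_t elem = n & mask, rep = elem, rot;
    for (i = size; i < 64; i <<= 1) rep |= rep << i;   /* replicate the element */
    if (rep != n || elem == 0 || elem == mask) continue;
    /* One circular run of ones <=> exactly two 0/1 transitions around the
    ** element, i.e. popcount(elem XOR rotate-left-by-1(elem)) == 2. */
    rot = ((elem << 1) | (elem >> (size - 1))) & mask;
    if (popcount64(elem ^ rot) == 2) return (int)size;
  }
  return 0;
}
/* e.g. 0x0000ffff0000ffffULL -> 32, 0x5555555555555555ULL -> 2, 0x1234 -> 0,
** so "orr x0, x0, #0x1234" needs a mov/movk sequence instead. */

parse_number() below is the arithmetic-evaluation helper the preceding comment refers to; the immediate parsers after it feed constants like these into the IMM13W/IMM13X actions.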
-local function parse_number(n) - local x = tonumber(n) - if x then return x end - local code = loadenv("return "..n) - if code then - local ok, y = pcall(code) - if ok then return y end - end - return nil -end - -local function parse_imm(imm, bits, shift, scale, signed) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = parse_number(imm) - if n then - local m = sar(n, scale) - if shl(m, scale) == n then - if signed then - local s = sar(m, bits-1) - if s == 0 then return shl(m, shift) - elseif s == -1 then return shl(m + shl(1, bits), shift) end - else - if sar(m, bits) == 0 then return shl(m, shift) end - end - end - werror("out of range immediate `"..imm.."'") - else - waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) - return 0 - end -end - -local function parse_imm12(imm) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = parse_number(imm) - if n then - if shr(n, 12) == 0 then - return shl(n, 10) - elseif band(n, 0xff000fff) == 0 then - return shr(n, 2) + 0x00400000 - end - werror("out of range immediate `"..imm.."'") - else - waction("IMM12", 0, imm) - return 0 - end -end - -local function parse_imm13(imm) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = parse_number(imm) - local r64 = parse_reg_type == "x" - if n and n % 1 == 0 and n >= 0 and n <= 0xffffffff then - local inv = false - if band(n, 1) == 1 then n = bit.bnot(n); inv = true end - local t = {} - for i=1,32 do t[i] = band(n, 1); n = shr(n, 1) end - local b = table.concat(t) - b = b..(r64 and (inv and "1" or "0"):rep(32) or b) - local p0, p1, p0a, p1a = b:match("^(0+)(1+)(0*)(1*)") - if p0 then - local w = p1a == "" and (r64 and 64 or 32) or #p1+#p0a - if band(w, w-1) == 0 and b == b:sub(1, w):rep(64/w) then - local s = band(-2*w, 0x3f) - 1 - if w == 64 then s = s + 0x1000 end - if inv then - return shl(w-#p1-#p0, 16) + shl(s+w-#p1, 10) - else - return shl(w-#p0, 16) + shl(s+#p1, 10) - end - end - end - werror("out of range immediate `"..imm.."'") - elseif r64 then - waction("IMM13X", 0, format("(unsigned int)(%s)", imm)) - actargs[#actargs+1] = format("(unsigned int)((unsigned long long)(%s)>>32)", imm) - return 0 - else - waction("IMM13W", 0, imm) - return 0 - end -end - -local function parse_imm6(imm) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = parse_number(imm) - if n then - if n >= 0 and n <= 63 then - return shl(band(n, 0x1f), 19) + (n >= 32 and 0x80000000 or 0) - end - werror("out of range immediate `"..imm.."'") - else - waction("IMM6", 0, imm) - return 0 - end -end - -local function parse_imm_load(imm, scale) - local n = parse_number(imm) - if n then - local m = sar(n, scale) - if shl(m, scale) == n and m >= 0 and m < 0x1000 then - return shl(m, 10) + 0x01000000 -- Scaled, unsigned 12 bit offset. - elseif n >= -256 and n < 256 then - return shl(band(n, 511), 12) -- Unscaled, signed 9 bit offset. 
- end - werror("out of range immediate `"..imm.."'") - else - waction("IMML", 0, imm) - return 0 - end -end - -local function parse_fpimm(imm) - imm = match(imm, "^#(.*)$") - if not imm then werror("expected immediate operand") end - local n = parse_number(imm) - if n then - local m, e = math.frexp(n) - local s, e2 = 0, band(e-2, 7) - if m < 0 then m = -m; s = 0x00100000 end - m = m*32-16 - if m % 1 == 0 and m >= 0 and m <= 15 and sar(shl(e2, 29), 29)+2 == e then - return s + shl(e2, 17) + shl(m, 13) - end - werror("out of range immediate `"..imm.."'") - else - werror("NYI fpimm action") - end -end - -local function parse_shift(expr) - local s, s2 = match(expr, "^(%S+)%s*(.*)$") - s = map_shift[s] - if not s then werror("expected shift operand") end - return parse_imm(s2, 6, 10, 0, false) + shl(s, 22) -end - -local function parse_lslx16(expr) - local n = match(expr, "^lsl%s*#(%d+)$") - n = tonumber(n) - if not n then werror("expected shift operand") end - if band(n, parse_reg_type == "x" and 0xffffffcf or 0xffffffef) ~= 0 then - werror("bad shift amount") - end - return shl(n, 17) -end - -local function parse_extend(expr) - local s, s2 = match(expr, "^(%S+)%s*(.*)$") - if s == "lsl" then - s = parse_reg_type == "x" and 3 or 2 - else - s = map_extend[s] - end - if not s then werror("expected extend operand") end - return (s2 == "" and 0 or parse_imm(s2, 3, 10, 0, false)) + shl(s, 13) -end - -local function parse_cond(expr, inv) - local c = map_cond[expr] - if not c then werror("expected condition operand") end - return shl(bit.bxor(c, inv), 12) -end - -local function parse_load(params, nparams, n, op) - if params[n+2] then werror("too many operands") end - local pn, p2 = params[n], params[n+1] - local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") - if not p1 then - if not p2 then - local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local base, tp = parse_reg_base(reg) - if tp then - waction("IMML", 0, format(tp.ctypefmt, tailr)) - return op + base - end - end - end - werror("expected address operand") - end - local scale = shr(op, 30) - if p2 then - if wb == "!" then werror("bad use of '!'") end - op = op + parse_reg_base(p1) + parse_imm(p2, 9, 12, 0, true) + 0x400 - elseif wb == "!" then - local p1a, p2a = match(p1, "^([^,%s]*)%s*,%s*(.*)$") - if not p1a then werror("bad use of '!'") end - op = op + parse_reg_base(p1a) + parse_imm(p2a, 9, 12, 0, true) + 0xc00 - else - local p1a, p2a = match(p1, "^([^,%s]*)%s*(.*)$") - op = op + parse_reg_base(p1a) - if p2a ~= "" then - local imm = match(p2a, "^,%s*#(.*)$") - if imm then - op = op + parse_imm_load(imm, scale) - else - local p2b, p3b, p3s = match(p2a, "^,%s*([^,%s]*)%s*,?%s*(%S*)%s*(.*)$") - op = op + shl(parse_reg(p2b), 16) + 0x00200800 - if parse_reg_type ~= "x" and parse_reg_type ~= "w" then - werror("bad index register type") - end - if p3b == "" then - if parse_reg_type ~= "x" then werror("bad index register type") end - op = op + 0x6000 - else - if p3s == "" or p3s == "#0" then - elseif p3s == "#"..scale then - op = op + 0x1000 - else - werror("bad scale") - end - if parse_reg_type == "x" then - if p3b == "lsl" and p3s ~= "" then op = op + 0x6000 - elseif p3b == "sxtx" then op = op + 0xe000 - else - werror("bad extend/shift specifier") - end - else - if p3b == "uxtw" then op = op + 0x4000 - elseif p3b == "sxtw" then op = op + 0xc000 - else - werror("bad extend/shift specifier") - end - end - end - end - else - if wb == "!" 
then werror("bad use of '!'") end - op = op + 0x01000000 - end - end - return op -end - -local function parse_load_pair(params, nparams, n, op) - if params[n+2] then werror("too many operands") end - local pn, p2 = params[n], params[n+1] - local scale = shr(op, 30) == 0 and 2 or 3 - local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") - if not p1 then - if not p2 then - local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local base, tp = parse_reg_base(reg) - if tp then - waction("IMM", 32768+7*32+15+scale*1024, format(tp.ctypefmt, tailr)) - return op + base + 0x01000000 - end - end - end - werror("expected address operand") - end - if p2 then - if wb == "!" then werror("bad use of '!'") end - op = op + 0x00800000 - else - local p1a, p2a = match(p1, "^([^,%s]*)%s*,%s*(.*)$") - if p1a then p1, p2 = p1a, p2a else p2 = "#0" end - op = op + (wb == "!" and 0x01800000 or 0x01000000) - end - return op + parse_reg_base(p1) + parse_imm(p2, 7, 15, scale, true) -end - -local function parse_label(label, def) - local prefix = sub(label, 1, 2) - -- =>label (pc label reference) - if prefix == "=>" then - return "PC", 0, sub(label, 3) - end - -- ->name (global label reference) - if prefix == "->" then - return "LG", map_global[sub(label, 3)] - end - if def then - -- [1-9] (local label definition) - if match(label, "^[1-9]$") then - return "LG", 10+tonumber(label) - end - else - -- [<>][1-9] (local label reference) - local dir, lnum = match(label, "^([<>])([1-9])$") - if dir then -- Fwd: 1-9, Bkwd: 11-19. - return "LG", lnum + (dir == ">" and 0 or 10) - end - -- extern label (extern label reference) - local extname = match(label, "^extern%s+(%S+)$") - if extname then - return "EXT", map_extern[extname] - end - end - werror("bad label `"..label.."'") -end - -local function branch_type(op) - if band(op, 0x7c000000) == 0x14000000 then return 0 -- B, BL - elseif shr(op, 24) == 0x54 or band(op, 0x7e000000) == 0x34000000 or - band(op, 0x3b000000) == 0x18000000 then - return 0x800 -- B.cond, CBZ, CBNZ, LDR* literal - elseif band(op, 0x7e000000) == 0x36000000 then return 0x1000 -- TBZ, TBNZ - elseif band(op, 0x9f000000) == 0x10000000 then return 0x2000 -- ADR - elseif band(op, 0x9f000000) == band(0x90000000) then return 0x3000 -- ADRP - else - assert(false, "unknown branch type") - end -end - ------------------------------------------------------------------------------- - -local map_op, op_template - -local function op_alias(opname, f) - return function(params, nparams) - if not params then return "-> "..opname:sub(1, -3) end - f(params, nparams) - op_template(params, map_op[opname], nparams) - end -end - -local function alias_bfx(p) - p[4] = "#("..p[3]:sub(2)..")+("..p[4]:sub(2)..")-1" -end - -local function alias_bfiz(p) - parse_reg(p[1]) - if parse_reg_type == "w" then - p[3] = "#-("..p[3]:sub(2)..")%32" - p[4] = "#("..p[4]:sub(2)..")-1" - else - p[3] = "#-("..p[3]:sub(2)..")%64" - p[4] = "#("..p[4]:sub(2)..")-1" - end -end - -local alias_lslimm = op_alias("ubfm_4", function(p) - parse_reg(p[1]) - local sh = p[3]:sub(2) - if parse_reg_type == "w" then - p[3] = "#-("..sh..")%32" - p[4] = "#31-("..sh..")" - else - p[3] = "#-("..sh..")%64" - p[4] = "#63-("..sh..")" - end -end) - --- Template strings for ARM instructions. -map_op = { - -- Basic data processing instructions. 
- add_3 = "0b000000DNMg|11000000pDpNIg|8b206000pDpNMx", - add_4 = "0b000000DNMSg|0b200000DNMXg|8b200000pDpNMXx|8b200000pDpNxMwX", - adds_3 = "2b000000DNMg|31000000DpNIg|ab206000DpNMx", - adds_4 = "2b000000DNMSg|2b200000DNMXg|ab200000DpNMXx|ab200000DpNxMwX", - cmn_2 = "2b00001fNMg|3100001fpNIg|ab20601fpNMx", - cmn_3 = "2b00001fNMSg|2b20001fNMXg|ab20001fpNMXx|ab20001fpNxMwX", - - sub_3 = "4b000000DNMg|51000000pDpNIg|cb206000pDpNMx", - sub_4 = "4b000000DNMSg|4b200000DNMXg|cb200000pDpNMXx|cb200000pDpNxMwX", - subs_3 = "6b000000DNMg|71000000DpNIg|eb206000DpNMx", - subs_4 = "6b000000DNMSg|6b200000DNMXg|eb200000DpNMXx|eb200000DpNxMwX", - cmp_2 = "6b00001fNMg|7100001fpNIg|eb20601fpNMx", - cmp_3 = "6b00001fNMSg|6b20001fNMXg|eb20001fpNMXx|eb20001fpNxMwX", - - neg_2 = "4b0003e0DMg", - neg_3 = "4b0003e0DMSg", - negs_2 = "6b0003e0DMg", - negs_3 = "6b0003e0DMSg", - - adc_3 = "1a000000DNMg", - adcs_3 = "3a000000DNMg", - sbc_3 = "5a000000DNMg", - sbcs_3 = "7a000000DNMg", - ngc_2 = "5a0003e0DMg", - ngcs_2 = "7a0003e0DMg", - - and_3 = "0a000000DNMg|12000000pDNig", - and_4 = "0a000000DNMSg", - orr_3 = "2a000000DNMg|32000000pDNig", - orr_4 = "2a000000DNMSg", - eor_3 = "4a000000DNMg|52000000pDNig", - eor_4 = "4a000000DNMSg", - ands_3 = "6a000000DNMg|72000000DNig", - ands_4 = "6a000000DNMSg", - tst_2 = "6a00001fNMg|7200001fNig", - tst_3 = "6a00001fNMSg", - - bic_3 = "0a200000DNMg", - bic_4 = "0a200000DNMSg", - orn_3 = "2a200000DNMg", - orn_4 = "2a200000DNMSg", - eon_3 = "4a200000DNMg", - eon_4 = "4a200000DNMSg", - bics_3 = "6a200000DNMg", - bics_4 = "6a200000DNMSg", - - movn_2 = "12800000DWg", - movn_3 = "12800000DWRg", - movz_2 = "52800000DWg", - movz_3 = "52800000DWRg", - movk_2 = "72800000DWg", - movk_3 = "72800000DWRg", - - -- TODO: this doesn't cover all valid immediates for mov reg, #imm. - mov_2 = "2a0003e0DMg|52800000DW|320003e0pDig|11000000pDpNg", - mov_3 = "2a0003e0DMSg", - mvn_2 = "2a2003e0DMg", - mvn_3 = "2a2003e0DMSg", - - adr_2 = "10000000DBx", - adrp_2 = "90000000DBx", - - csel_4 = "1a800000DNMCg", - csinc_4 = "1a800400DNMCg", - csinv_4 = "5a800000DNMCg", - csneg_4 = "5a800400DNMCg", - cset_2 = "1a9f07e0Dcg", - csetm_2 = "5a9f03e0Dcg", - cinc_3 = "1a800400DNmcg", - cinv_3 = "5a800000DNmcg", - cneg_3 = "5a800400DNmcg", - - ccmn_4 = "3a400000NMVCg|3a400800N5VCg", - ccmp_4 = "7a400000NMVCg|7a400800N5VCg", - - madd_4 = "1b000000DNMAg", - msub_4 = "1b008000DNMAg", - mul_3 = "1b007c00DNMg", - mneg_3 = "1b00fc00DNMg", - - smaddl_4 = "9b200000DxNMwAx", - smsubl_4 = "9b208000DxNMwAx", - smull_3 = "9b207c00DxNMw", - smnegl_3 = "9b20fc00DxNMw", - smulh_3 = "9b407c00DNMx", - umaddl_4 = "9ba00000DxNMwAx", - umsubl_4 = "9ba08000DxNMwAx", - umull_3 = "9ba07c00DxNMw", - umnegl_3 = "9ba0fc00DxNMw", - umulh_3 = "9bc07c00DNMx", - - udiv_3 = "1ac00800DNMg", - sdiv_3 = "1ac00c00DNMg", - - -- Bit operations. 
- sbfm_4 = "13000000DN12w|93400000DN12x", - bfm_4 = "33000000DN12w|b3400000DN12x", - ubfm_4 = "53000000DN12w|d3400000DN12x", - extr_4 = "13800000DNM2w|93c00000DNM2x", - - sxtb_2 = "13001c00DNw|93401c00DNx", - sxth_2 = "13003c00DNw|93403c00DNx", - sxtw_2 = "93407c00DxNw", - uxtb_2 = "53001c00DNw", - uxth_2 = "53003c00DNw", - - sbfx_4 = op_alias("sbfm_4", alias_bfx), - bfxil_4 = op_alias("bfm_4", alias_bfx), - ubfx_4 = op_alias("ubfm_4", alias_bfx), - sbfiz_4 = op_alias("sbfm_4", alias_bfiz), - bfi_4 = op_alias("bfm_4", alias_bfiz), - ubfiz_4 = op_alias("ubfm_4", alias_bfiz), - - lsl_3 = function(params, nparams) - if params and params[3]:byte() == 35 then - return alias_lslimm(params, nparams) - else - return op_template(params, "1ac02000DNMg", nparams) - end - end, - lsr_3 = "1ac02400DNMg|53007c00DN1w|d340fc00DN1x", - asr_3 = "1ac02800DNMg|13007c00DN1w|9340fc00DN1x", - ror_3 = "1ac02c00DNMg|13800000DNm2w|93c00000DNm2x", - - clz_2 = "5ac01000DNg", - cls_2 = "5ac01400DNg", - rbit_2 = "5ac00000DNg", - rev_2 = "5ac00800DNw|dac00c00DNx", - rev16_2 = "5ac00400DNg", - rev32_2 = "dac00800DNx", - - -- Loads and stores. - ["strb_*"] = "38000000DwL", - ["ldrb_*"] = "38400000DwL", - ["ldrsb_*"] = "38c00000DwL|38800000DxL", - ["strh_*"] = "78000000DwL", - ["ldrh_*"] = "78400000DwL", - ["ldrsh_*"] = "78c00000DwL|78800000DxL", - ["str_*"] = "b8000000DwL|f8000000DxL|bc000000DsL|fc000000DdL", - ["ldr_*"] = "18000000DwB|58000000DxB|1c000000DsB|5c000000DdB|b8400000DwL|f8400000DxL|bc400000DsL|fc400000DdL", - ["ldrsw_*"] = "98000000DxB|b8800000DxL", - -- NOTE: ldur etc. are handled by ldr et al. - - ["stp_*"] = "28000000DAwP|a8000000DAxP|2c000000DAsP|6c000000DAdP", - ["ldp_*"] = "28400000DAwP|a8400000DAxP|2c400000DAsP|6c400000DAdP", - ["ldpsw_*"] = "68400000DAxP", - - -- Branches. - b_1 = "14000000B", - bl_1 = "94000000B", - blr_1 = "d63f0000Nx", - br_1 = "d61f0000Nx", - ret_0 = "d65f03c0", - ret_1 = "d65f0000Nx", - -- b.cond is added below. - cbz_2 = "34000000DBg", - cbnz_2 = "35000000DBg", - tbz_3 = "36000000DTBw|36000000DTBx", - tbnz_3 = "37000000DTBw|37000000DTBx", - - -- Miscellaneous instructions. - -- TODO: hlt, hvc, smc, svc, eret, dcps[123], drps, mrs, msr - -- TODO: sys, sysl, ic, dc, at, tlbi - -- TODO: hint, yield, wfe, wfi, sev, sevl - -- TODO: clrex, dsb, dmb, isb - nop_0 = "d503201f", - brk_0 = "d4200000", - brk_1 = "d4200000W", - - -- Floating point instructions. - fmov_2 = "1e204000DNf|1e260000DwNs|1e270000DsNw|9e660000DxNd|9e670000DdNx|1e201000DFf", - fabs_2 = "1e20c000DNf", - fneg_2 = "1e214000DNf", - fsqrt_2 = "1e21c000DNf", - - fcvt_2 = "1e22c000DdNs|1e624000DsNd", - - -- TODO: half-precision and fixed-point conversions. 
- fcvtas_2 = "1e240000DwNs|9e240000DxNs|1e640000DwNd|9e640000DxNd", - fcvtau_2 = "1e250000DwNs|9e250000DxNs|1e650000DwNd|9e650000DxNd", - fcvtms_2 = "1e300000DwNs|9e300000DxNs|1e700000DwNd|9e700000DxNd", - fcvtmu_2 = "1e310000DwNs|9e310000DxNs|1e710000DwNd|9e710000DxNd", - fcvtns_2 = "1e200000DwNs|9e200000DxNs|1e600000DwNd|9e600000DxNd", - fcvtnu_2 = "1e210000DwNs|9e210000DxNs|1e610000DwNd|9e610000DxNd", - fcvtps_2 = "1e280000DwNs|9e280000DxNs|1e680000DwNd|9e680000DxNd", - fcvtpu_2 = "1e290000DwNs|9e290000DxNs|1e690000DwNd|9e690000DxNd", - fcvtzs_2 = "1e380000DwNs|9e380000DxNs|1e780000DwNd|9e780000DxNd", - fcvtzu_2 = "1e390000DwNs|9e390000DxNs|1e790000DwNd|9e790000DxNd", - - scvtf_2 = "1e220000DsNw|9e220000DsNx|1e620000DdNw|9e620000DdNx", - ucvtf_2 = "1e230000DsNw|9e230000DsNx|1e630000DdNw|9e630000DdNx", - - frintn_2 = "1e244000DNf", - frintp_2 = "1e24c000DNf", - frintm_2 = "1e254000DNf", - frintz_2 = "1e25c000DNf", - frinta_2 = "1e264000DNf", - frintx_2 = "1e274000DNf", - frinti_2 = "1e27c000DNf", - - fadd_3 = "1e202800DNMf", - fsub_3 = "1e203800DNMf", - fmul_3 = "1e200800DNMf", - fnmul_3 = "1e208800DNMf", - fdiv_3 = "1e201800DNMf", - - fmadd_4 = "1f000000DNMAf", - fmsub_4 = "1f008000DNMAf", - fnmadd_4 = "1f200000DNMAf", - fnmsub_4 = "1f208000DNMAf", - - fmax_3 = "1e204800DNMf", - fmaxnm_3 = "1e206800DNMf", - fmin_3 = "1e205800DNMf", - fminnm_3 = "1e207800DNMf", - - fcmp_2 = "1e202000NMf|1e202008NZf", - fcmpe_2 = "1e202010NMf|1e202018NZf", - - fccmp_4 = "1e200400NMVCf", - fccmpe_4 = "1e200410NMVCf", - - fcsel_4 = "1e200c00DNMCf", - - -- TODO: crc32*, aes*, sha*, pmull - -- TODO: SIMD instructions. -} - -for cond,c in pairs(map_cond) do - map_op["b"..cond.."_1"] = tohex(0x54000000+c).."B" -end - ------------------------------------------------------------------------------- - --- Handle opcodes defined with template strings. -local function parse_template(params, template, nparams, pos) - local op = tonumber(sub(template, 1, 8), 16) - local n = 1 - local rtt = {} - - parse_reg_type = false - - -- Process each character. 
- for p in gmatch(sub(template, 9), ".") do - local q = params[n] - if p == "D" then - op = op + parse_reg(q); n = n + 1 - elseif p == "N" then - op = op + shl(parse_reg(q), 5); n = n + 1 - elseif p == "M" then - op = op + shl(parse_reg(q), 16); n = n + 1 - elseif p == "A" then - op = op + shl(parse_reg(q), 10); n = n + 1 - elseif p == "m" then - op = op + shl(parse_reg(params[n-1]), 16) - - elseif p == "p" then - if q == "sp" then params[n] = "@x31" end - elseif p == "g" then - if parse_reg_type == "x" then - op = op + 0x80000000 - elseif parse_reg_type ~= "w" then - werror("bad register type") - end - parse_reg_type = false - elseif p == "f" then - if parse_reg_type == "d" then - op = op + 0x00400000 - elseif parse_reg_type ~= "s" then - werror("bad register type") - end - parse_reg_type = false - elseif p == "x" or p == "w" or p == "d" or p == "s" then - if parse_reg_type ~= p then - werror("register size mismatch") - end - parse_reg_type = false - - elseif p == "L" then - op = parse_load(params, nparams, n, op) - elseif p == "P" then - op = parse_load_pair(params, nparams, n, op) - - elseif p == "B" then - local mode, v, s = parse_label(q, false); n = n + 1 - local m = branch_type(op) - waction("REL_"..mode, v+m, s, 1) - - elseif p == "I" then - op = op + parse_imm12(q); n = n + 1 - elseif p == "i" then - op = op + parse_imm13(q); n = n + 1 - elseif p == "W" then - op = op + parse_imm(q, 16, 5, 0, false); n = n + 1 - elseif p == "T" then - op = op + parse_imm6(q); n = n + 1 - elseif p == "1" then - op = op + parse_imm(q, 6, 16, 0, false); n = n + 1 - elseif p == "2" then - op = op + parse_imm(q, 6, 10, 0, false); n = n + 1 - elseif p == "5" then - op = op + parse_imm(q, 5, 16, 0, false); n = n + 1 - elseif p == "V" then - op = op + parse_imm(q, 4, 0, 0, false); n = n + 1 - elseif p == "F" then - op = op + parse_fpimm(q); n = n + 1 - elseif p == "Z" then - if q ~= "#0" and q ~= "#0.0" then werror("expected zero immediate") end - n = n + 1 - - elseif p == "S" then - op = op + parse_shift(q); n = n + 1 - elseif p == "X" then - op = op + parse_extend(q); n = n + 1 - elseif p == "R" then - op = op + parse_lslx16(q); n = n + 1 - elseif p == "C" then - op = op + parse_cond(q, 0); n = n + 1 - elseif p == "c" then - op = op + parse_cond(q, 1); n = n + 1 - - else - assert(false) - end - end - wputpos(pos, op) -end - -function op_template(params, template, nparams) - if not params then return template:gsub("%x%x%x%x%x%x%x%x", "") end - - -- Limit number of section buffer positions used by a single dasm_put(). - -- A single opcode needs a maximum of 3 positions. - if secpos+3 > maxsecpos then wflush() end - local pos = wpos() - local lpos, apos, spos = #actlist, #actargs, secpos - - local ok, err - for t in gmatch(template, "[^|]+") do - ok, err = pcall(parse_template, params, t, nparams, pos) - if ok then return end - secpos = spos - actlist[lpos+1] = nil - actlist[lpos+2] = nil - actlist[lpos+3] = nil - actargs[apos+1] = nil - actargs[apos+2] = nil - actargs[apos+3] = nil - end - error(err, 0) -end - -map_op[".template__"] = op_template - ------------------------------------------------------------------------------- - --- Pseudo-opcode to mark the position where the action list is to be emitted. -map_op[".actionlist_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeactions(out, name) end) -end - --- Pseudo-opcode to mark the position where the global enum is to be emitted. 
-map_op[".globals_1"] = function(params) - if not params then return "prefix" end - local prefix = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobals(out, prefix) end) -end - --- Pseudo-opcode to mark the position where the global names are to be emitted. -map_op[".globalnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobalnames(out, name) end) -end - --- Pseudo-opcode to mark the position where the extern names are to be emitted. -map_op[".externnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeexternnames(out, name) end) -end - ------------------------------------------------------------------------------- - --- Label pseudo-opcode (converted from trailing colon form). -map_op[".label_1"] = function(params) - if not params then return "[1-9] | ->global | =>pcexpr" end - if secpos+1 > maxsecpos then wflush() end - local mode, n, s = parse_label(params[1], true) - if mode == "EXT" then werror("bad label definition") end - waction("LABEL_"..mode, n, s, 1) -end - ------------------------------------------------------------------------------- - --- Pseudo-opcodes for data storage. -map_op[".long_*"] = function(params) - if not params then return "imm..." end - for _,p in ipairs(params) do - local n = tonumber(p) - if not n then werror("bad immediate `"..p.."'") end - if n < 0 then n = n + 2^32 end - wputw(n) - if secpos+2 > maxsecpos then wflush() end - end -end - --- Alignment pseudo-opcode. -map_op[".align_1"] = function(params) - if not params then return "numpow2" end - if secpos+1 > maxsecpos then wflush() end - local align = tonumber(params[1]) - if align then - local x = align - -- Must be a power of 2 in the range (2 ... 256). - for i=1,8 do - x = x / 2 - if x == 1 then - waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. - return - end - end - end - werror("bad alignment") -end - ------------------------------------------------------------------------------- - --- Pseudo-opcode for (primitive) type definitions (map to C types). -map_op[".type_3"] = function(params, nparams) - if not params then - return nparams == 2 and "name, ctype" or "name, ctype, reg" - end - local name, ctype, reg = params[1], params[2], params[3] - if not match(name, "^[%a_][%w_]*$") then - werror("bad type name `"..name.."'") - end - local tp = map_type[name] - if tp then - werror("duplicate type `"..name.."'") - end - -- Add #type to defines. A bit unclean to put it in map_archdef. - map_archdef["#"..name] = "sizeof("..ctype..")" - -- Add new type and emit shortcut define. - local num = ctypenum + 1 - map_type[name] = { - ctype = ctype, - ctypefmt = format("Dt%X(%%s)", num), - reg = reg, - } - wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) - ctypenum = num -end -map_op[".type_2"] = map_op[".type_3"] - --- Dump type definitions. -local function dumptypes(out, lvl) - local t = {} - for name in pairs(map_type) do t[#t+1] = name end - sort(t) - out:write("Type definitions:\n") - for _,name in ipairs(t) do - local tp = map_type[name] - local reg = tp.reg or "" - out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Set the current section. 
-function _M.section(num)
-  waction("SECTION", num)
-  wflush(true) -- SECTION is a terminal action.
-end
-
-------------------------------------------------------------------------------
-
--- Dump architecture description.
-function _M.dumparch(out)
-  out:write(format("DynASM %s version %s, released %s\n\n",
-    _info.arch, _info.version, _info.release))
-  dumpactions(out)
-end
-
--- Dump all user defined elements.
-function _M.dumpdef(out, lvl)
-  dumptypes(out, lvl)
-  dumpglobals(out, lvl)
-  dumpexterns(out, lvl)
-end
-
-------------------------------------------------------------------------------
-
--- Pass callbacks from/to the DynASM core.
-function _M.passcb(wl, we, wf, ww)
-  wline, werror, wfatal, wwarn = wl, we, wf, ww
-  return wflush
-end
-
--- Setup the arch-specific module.
-function _M.setup(arch, opt)
-  g_arch, g_opt = arch, opt
-end
-
--- Merge the core maps and the arch-specific maps.
-function _M.mergemaps(map_coreop, map_def)
-  setmetatable(map_op, { __index = map_coreop })
-  setmetatable(map_def, { __index = map_archdef })
-  return map_op, map_def
-end
-
-return _M
-
-------------------------------------------------------------------------------
-
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.h
deleted file mode 100644
index 2f4c2d2220a..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
-** DynASM MIPS encoding engine.
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-** Released under the MIT license. See dynasm.lua for full copyright notice.
-*/
-
-#include <stddef.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdlib.h>
-
-#define DASM_ARCH "mips"
-
-#ifndef DASM_EXTERN
-#define DASM_EXTERN(a,b,c,d) 0
-#endif
-
-/* Action definitions. */
-enum {
-  DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
-  /* The following actions need a buffer position. */
-  DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
-  /* The following actions also have an argument. */
-  DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
-  DASM__MAX
-};
-
-/* Maximum number of section buffer positions for a single dasm_put() call. */
-#define DASM_MAXSECPOS 25
-
-/* DynASM encoder status codes. Action list offset or number are or'ed in. */
-#define DASM_S_OK 0x00000000
-#define DASM_S_NOMEM 0x01000000
-#define DASM_S_PHASE 0x02000000
-#define DASM_S_MATCH_SEC 0x03000000
-#define DASM_S_RANGE_I 0x11000000
-#define DASM_S_RANGE_SEC 0x12000000
-#define DASM_S_RANGE_LG 0x13000000
-#define DASM_S_RANGE_PC 0x14000000
-#define DASM_S_RANGE_REL 0x15000000
-#define DASM_S_UNDEF_LG 0x21000000
-#define DASM_S_UNDEF_PC 0x22000000
-
-/* Macros to convert positions (8 bit section + 24 bit index). */
-#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
-#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
-#define DASM_SEC2POS(sec) ((sec)<<24)
-#define DASM_POS2SEC(pos) ((pos)>>24)
-#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
-
-/* Action list type. */
-typedef const unsigned int *dasm_ActList;
-
-/* Per-section structure. */
-typedef struct dasm_Section {
-  int *rbuf; /* Biased buffer pointer (negative section bias). */
-  int *buf; /* True buffer pointer. */
-  size_t bsize; /* Buffer size in bytes. */
-  int pos; /* Biased buffer position. */
-  int epos; /* End of biased buffer position - max single put. */
-  int ofs; /* Byte offset into section. */
-} dasm_Section;
-
-/* Core structure holding the DynASM encoding state.
*/ -struct dasm_State { - size_t psize; /* Allocated size of this structure. */ - dasm_ActList actionlist; /* Current actionlist pointer. */ - int *lglabels; /* Local/global chain/pos ptrs. */ - size_t lgsize; - int *pclabels; /* PC label chains/pos ptrs. */ - size_t pcsize; - void **globals; /* Array of globals (bias -10). */ - dasm_Section *section; /* Pointer to active section. */ - size_t codesize; /* Total size of all code sections. */ - int maxsection; /* 0 <= sectionidx < maxsection. */ - int status; /* Status code. */ - dasm_Section sections[1]; /* All sections. Alloc-extended. */ -}; - -/* The size of the core structure depends on the max. number of sections. */ -#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) - - -/* Initialize DynASM state. */ -void dasm_init(Dst_DECL, int maxsection) -{ - dasm_State *D; - size_t psz = 0; - int i; - Dst_REF = NULL; - DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); - D = Dst_REF; - D->psize = psz; - D->lglabels = NULL; - D->lgsize = 0; - D->pclabels = NULL; - D->pcsize = 0; - D->globals = NULL; - D->maxsection = maxsection; - for (i = 0; i < maxsection; i++) { - D->sections[i].buf = NULL; /* Need this for pass3. */ - D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); - D->sections[i].bsize = 0; - D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ - } -} - -/* Free DynASM state. */ -void dasm_free(Dst_DECL) -{ - dasm_State *D = Dst_REF; - int i; - for (i = 0; i < D->maxsection; i++) - if (D->sections[i].buf) - DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); - if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); - if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); - DASM_M_FREE(Dst, D, D->psize); -} - -/* Setup global label array. Must be called before dasm_setup(). */ -void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) -{ - dasm_State *D = Dst_REF; - D->globals = gl - 10; /* Negative bias to compensate for locals. */ - DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); -} - -/* Grow PC label array. Can be called after dasm_setup(), too. */ -void dasm_growpc(Dst_DECL, unsigned int maxpc) -{ - dasm_State *D = Dst_REF; - size_t osz = D->pcsize; - DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); - memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); -} - -/* Setup encoder. */ -void dasm_setup(Dst_DECL, const void *actionlist) -{ - dasm_State *D = Dst_REF; - int i; - D->actionlist = (dasm_ActList)actionlist; - D->status = DASM_S_OK; - D->section = &D->sections[0]; - memset((void *)D->lglabels, 0, D->lgsize); - if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); - for (i = 0; i < D->maxsection; i++) { - D->sections[i].pos = DASM_SEC2POS(i); - D->sections[i].ofs = 0; - } -} - - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) { \ - D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) -#define CKPL(kind, st) \ - do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ - D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) -#else -#define CK(x, st) ((void)0) -#define CKPL(kind, st) ((void)0) -#endif - -/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ -void dasm_put(Dst_DECL, int start, ...) 
-{ - va_list ap; - dasm_State *D = Dst_REF; - dasm_ActList p = D->actionlist + start; - dasm_Section *sec = D->section; - int pos = sec->pos, ofs = sec->ofs; - int *b; - - if (pos >= sec->epos) { - DASM_M_GROW(Dst, int, sec->buf, sec->bsize, - sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); - sec->rbuf = sec->buf - DASM_POS2BIAS(pos); - sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); - } - - b = sec->rbuf; - b[pos++] = start; - - va_start(ap, start); - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16) - 0xff00; - if (action >= DASM__MAX) { - ofs += 4; - } else { - int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; - switch (action) { - case DASM_STOP: goto stop; - case DASM_SECTION: - n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); - D->section = &D->sections[n]; goto stop; - case DASM_ESC: p++; ofs += 4; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; - case DASM_REL_LG: - n = (ins & 2047) - 10; pl = D->lglabels + n; - /* Bkwd rel or global. */ - if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } - pl += 10; n = *pl; - if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ - goto linkrel; - case DASM_REL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putrel: - n = *pl; - if (n < 0) { /* Label exists. Get label pos and store it. */ - b[pos] = -n; - } else { - linkrel: - b[pos] = n; /* Else link to rel chain, anchored at label. */ - *pl = pos; - } - pos++; - break; - case DASM_LABEL_LG: - pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; - case DASM_LABEL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putlabel: - n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; - } - *pl = -pos; /* Label exists now. */ - b[pos++] = ofs; /* Store pass1 offset estimate. */ - break; - case DASM_IMM: -#ifdef DASM_CHECKS - CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); -#endif - n >>= ((ins>>10)&31); -#ifdef DASM_CHECKS - if (ins & 0x8000) - CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); - else - CK((n>>((ins>>5)&31)) == 0, RANGE_I); -#endif - b[pos++] = n; - break; - } - } - } -stop: - va_end(ap); - sec->pos = pos; - sec->ofs = ofs; -} -#undef CK - -/* Pass 2: Link sections, shrink aligns, fix label offsets. */ -int dasm_link(Dst_DECL, size_t *szp) -{ - dasm_State *D = Dst_REF; - int secnum; - int ofs = 0; - -#ifdef DASM_CHECKS - *szp = 0; - if (D->status != DASM_S_OK) return D->status; - { - int pc; - for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) - if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; - } -#endif - - { /* Handle globals not defined in this translation unit. */ - int idx; - for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { - int n = D->lglabels[idx]; - /* Undefined label: Collapse rel chain and replace with marker (< 0). */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } - } - } - - /* Combine all code sections. No support for data sections (yet). 
*/ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->rbuf; - int pos = DASM_SEC2POS(secnum); - int lastpos = sec->pos; - - while (pos != lastpos) { - dasm_ActList p = D->actionlist + b[pos++]; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16) - 0xff00; - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: p++; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; - case DASM_REL_LG: case DASM_REL_PC: pos++; break; - case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; - case DASM_IMM: pos++; break; - } - } - stop: (void)0; - } - ofs += sec->ofs; /* Next section starts right after current section. */ - } - - D->codesize = ofs; /* Total size of all code sections */ - *szp = ofs; - return DASM_S_OK; -} - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) -#else -#define CK(x, st) ((void)0) -#endif - -/* Pass 3: Encode sections. */ -int dasm_encode(Dst_DECL, void *buffer) -{ - dasm_State *D = Dst_REF; - char *base = (char *)buffer; - unsigned int *cp = (unsigned int *)buffer; - int secnum; - - /* Encode all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->buf; - int *endb = sec->rbuf + sec->pos; - - while (b != endb) { - dasm_ActList p = D->actionlist + *b++; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16) - 0xff00; - int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: *cp++ = *p++; break; - case DASM_REL_EXT: - n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1); - goto patchrel; - case DASM_ALIGN: - ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000; - break; - case DASM_REL_LG: - CK(n >= 0, UNDEF_LG); - case DASM_REL_PC: - CK(n >= 0, UNDEF_PC); - n = *DASM_POS2PTR(D, n); - if (ins & 2048) - n = n - (int)((char *)cp - base); - else - n = (n + (int)base) & 0x0fffffff; - patchrel: - CK((n & 3) == 0 && - ((n + ((ins & 2048) ? 0x00020000 : 0)) >> - ((ins & 2048) ? 18 : 28)) == 0, RANGE_REL); - cp[-1] |= ((n>>2) & ((ins & 2048) ? 0x0000ffff: 0x03ffffff)); - break; - case DASM_LABEL_LG: - ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); - break; - case DASM_LABEL_PC: break; - case DASM_IMM: - cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31); - break; - default: *cp++ = ins; break; - } - } - stop: (void)0; - } - } - - if (base + D->codesize != (char *)cp) /* Check for phase errors. */ - return DASM_S_PHASE; - return DASM_S_OK; -} -#undef CK - -/* Get PC label offset. */ -int dasm_getpclabel(Dst_DECL, unsigned int pc) -{ - dasm_State *D = Dst_REF; - if (pc*sizeof(int) < D->pcsize) { - int pos = D->pclabels[pc]; - if (pos < 0) return *DASM_POS2PTR(D, -pos); - if (pos > 0) return -1; /* Undefined. */ - } - return -2; /* Unused or out of range. */ -} - -#ifdef DASM_CHECKS -/* Optional sanity checker to call between isolated encoding steps. 
*/ -int dasm_checkstep(Dst_DECL, int secmatch) -{ - dasm_State *D = Dst_REF; - if (D->status == DASM_S_OK) { - int i; - for (i = 1; i <= 9; i++) { - if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } - D->lglabels[i] = 0; - } - } - if (D->status == DASM_S_OK && secmatch >= 0 && - D->section != &D->sections[secmatch]) - D->status = DASM_S_MATCH_SEC|(D->section-D->sections); - return D->status; -} -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.lua deleted file mode 100644 index ae0dbd7a9bc..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_mips.lua +++ /dev/null @@ -1,953 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM MIPS module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See dynasm.lua for full copyright notice. ------------------------------------------------------------------------------- - --- Module information: -local _info = { - arch = "mips", - description = "DynASM MIPS module", - version = "1.3.0", - vernum = 10300, - release = "2012-01-23", - author = "Mike Pall", - license = "MIT", -} - --- Exported glue functions for the arch-specific module. -local _M = { _info = _info } - --- Cache library functions. -local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs -local assert, setmetatable = assert, setmetatable -local _s = string -local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char -local match, gmatch = _s.match, _s.gmatch -local concat, sort = table.concat, table.sort -local bit = bit or require("bit") -local band, shl, sar, tohex = bit.band, bit.lshift, bit.arshift, bit.tohex - --- Inherited tables and callbacks. -local g_opt, g_arch -local wline, werror, wfatal, wwarn - --- Action name list. --- CHECK: Keep this in sync with the C code! -local action_names = { - "STOP", "SECTION", "ESC", "REL_EXT", - "ALIGN", "REL_LG", "LABEL_LG", - "REL_PC", "LABEL_PC", "IMM", -} - --- Maximum number of section buffer positions for dasm_put(). --- CHECK: Keep this in sync with the C code! -local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. - --- Action name -> action number. -local map_action = {} -for n,name in ipairs(action_names) do - map_action[name] = n-1 -end - --- Action list buffer. -local actlist = {} - --- Argument list for next dasm_put(). Start with offset 0 into action list. -local actargs = { 0 } - --- Current number of section buffer positions for dasm_put(). -local secpos = 1 - ------------------------------------------------------------------------------- - --- Dump action names and numbers. -local function dumpactions(out) - out:write("DynASM encoding engine action codes:\n") - for n,name in ipairs(action_names) do - local num = map_action[name] - out:write(format(" %-10s %02X %d\n", name, num, num)) - end - out:write("\n") -end - --- Write action list buffer as a huge static C array. -local function writeactions(out, name) - local nn = #actlist - if nn == 0 then nn = 1; actlist[0] = map_action.STOP end - out:write("static const unsigned int ", name, "[", nn, "] = {\n") - for i = 1,nn-1 do - assert(out:write("0x", tohex(actlist[i]), ",\n")) - end - assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) -end - ------------------------------------------------------------------------------- - --- Add word to action list. 
-local function wputxw(n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - actlist[#actlist+1] = n -end - --- Add action to list with optional arg. Advance buffer pos, too. -local function waction(action, val, a, num) - local w = assert(map_action[action], "bad action name `"..action.."'") - wputxw(0xff000000 + w * 0x10000 + (val or 0)) - if a then actargs[#actargs+1] = a end - if a or num then secpos = secpos + (num or 1) end -end - --- Flush action list (intervening C code or buffer pos overflow). -local function wflush(term) - if #actlist == actargs[1] then return end -- Nothing to flush. - if not term then waction("STOP") end -- Terminate action list. - wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) - actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). - secpos = 1 -- The actionlist offset occupies a buffer position, too. -end - --- Put escaped word. -local function wputw(n) - if n >= 0xff000000 then waction("ESC") end - wputxw(n) -end - --- Reserve position for word. -local function wpos() - local pos = #actlist+1 - actlist[pos] = "" - return pos -end - --- Store word to reserved position. -local function wputpos(pos, n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - actlist[pos] = n -end - ------------------------------------------------------------------------------- - --- Global label name -> global label number. With auto assignment on 1st use. -local next_global = 20 -local map_global = setmetatable({}, { __index = function(t, name) - if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end - local n = next_global - if n > 2047 then werror("too many global labels") end - next_global = n + 1 - t[name] = n - return n -end}) - --- Dump global labels. -local function dumpglobals(out, lvl) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("Global labels:\n") - for i=20,next_global-1 do - out:write(format(" %s\n", t[i])) - end - out:write("\n") -end - --- Write global label enum. -local function writeglobals(out, prefix) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("enum {\n") - for i=20,next_global-1 do - out:write(" ", prefix, t[i], ",\n") - end - out:write(" ", prefix, "_MAX\n};\n") -end - --- Write global label names. -local function writeglobalnames(out, name) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("static const char *const ", name, "[] = {\n") - for i=20,next_global-1 do - out:write(" \"", t[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Extern label name -> extern label number. With auto assignment on 1st use. -local next_extern = 0 -local map_extern_ = {} -local map_extern = setmetatable({}, { __index = function(t, name) - -- No restrictions on the name for now. - local n = next_extern - if n > 2047 then werror("too many extern labels") end - next_extern = n + 1 - t[name] = n - map_extern_[n] = name - return n -end}) - --- Dump extern labels. -local function dumpexterns(out, lvl) - out:write("Extern labels:\n") - for i=0,next_extern-1 do - out:write(format(" %s\n", map_extern_[i])) - end - out:write("\n") -end - --- Write extern label names. 
-local function writeexternnames(out, name) - out:write("static const char *const ", name, "[] = {\n") - for i=0,next_extern-1 do - out:write(" \"", map_extern_[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Arch-specific maps. -local map_archdef = { sp="r29", ra="r31" } -- Ext. register name -> int. name. - -local map_type = {} -- Type name -> { ctype, reg } -local ctypenum = 0 -- Type number (for Dt... macros). - --- Reverse defines for registers. -function _M.revdef(s) - if s == "r29" then return "sp" - elseif s == "r31" then return "ra" end - return s -end - ------------------------------------------------------------------------------- - --- Template strings for MIPS instructions. -local map_op = { - -- First-level opcodes. - j_1 = "08000000J", - jal_1 = "0c000000J", - b_1 = "10000000B", - beqz_2 = "10000000SB", - beq_3 = "10000000STB", - bnez_2 = "14000000SB", - bne_3 = "14000000STB", - blez_2 = "18000000SB", - bgtz_2 = "1c000000SB", - addi_3 = "20000000TSI", - li_2 = "24000000TI", - addiu_3 = "24000000TSI", - slti_3 = "28000000TSI", - sltiu_3 = "2c000000TSI", - andi_3 = "30000000TSU", - lu_2 = "34000000TU", - ori_3 = "34000000TSU", - xori_3 = "38000000TSU", - lui_2 = "3c000000TU", - beqzl_2 = "50000000SB", - beql_3 = "50000000STB", - bnezl_2 = "54000000SB", - bnel_3 = "54000000STB", - blezl_2 = "58000000SB", - bgtzl_2 = "5c000000SB", - lb_2 = "80000000TO", - lh_2 = "84000000TO", - lwl_2 = "88000000TO", - lw_2 = "8c000000TO", - lbu_2 = "90000000TO", - lhu_2 = "94000000TO", - lwr_2 = "98000000TO", - sb_2 = "a0000000TO", - sh_2 = "a4000000TO", - swl_2 = "a8000000TO", - sw_2 = "ac000000TO", - swr_2 = "b8000000TO", - cache_2 = "bc000000NO", - ll_2 = "c0000000TO", - lwc1_2 = "c4000000HO", - pref_2 = "cc000000NO", - ldc1_2 = "d4000000HO", - sc_2 = "e0000000TO", - swc1_2 = "e4000000HO", - sdc1_2 = "f4000000HO", - - -- Opcode SPECIAL. - nop_0 = "00000000", - sll_3 = "00000000DTA", - movf_2 = "00000001DS", - movf_3 = "00000001DSC", - movt_2 = "00010001DS", - movt_3 = "00010001DSC", - srl_3 = "00000002DTA", - rotr_3 = "00200002DTA", - sra_3 = "00000003DTA", - sllv_3 = "00000004DTS", - srlv_3 = "00000006DTS", - rotrv_3 = "00000046DTS", - srav_3 = "00000007DTS", - jr_1 = "00000008S", - jalr_1 = "0000f809S", - jalr_2 = "00000009DS", - movz_3 = "0000000aDST", - movn_3 = "0000000bDST", - syscall_0 = "0000000c", - syscall_1 = "0000000cY", - break_0 = "0000000d", - break_1 = "0000000dY", - sync_0 = "0000000f", - mfhi_1 = "00000010D", - mthi_1 = "00000011S", - mflo_1 = "00000012D", - mtlo_1 = "00000013S", - mult_2 = "00000018ST", - multu_2 = "00000019ST", - div_2 = "0000001aST", - divu_2 = "0000001bST", - add_3 = "00000020DST", - move_2 = "00000021DS", - addu_3 = "00000021DST", - sub_3 = "00000022DST", - negu_2 = "00000023DT", - subu_3 = "00000023DST", - and_3 = "00000024DST", - or_3 = "00000025DST", - xor_3 = "00000026DST", - not_2 = "00000027DS", - nor_3 = "00000027DST", - slt_3 = "0000002aDST", - sltu_3 = "0000002bDST", - tge_2 = "00000030ST", - tge_3 = "00000030STZ", - tgeu_2 = "00000031ST", - tgeu_3 = "00000031STZ", - tlt_2 = "00000032ST", - tlt_3 = "00000032STZ", - tltu_2 = "00000033ST", - tltu_3 = "00000033STZ", - teq_2 = "00000034ST", - teq_3 = "00000034STZ", - tne_2 = "00000036ST", - tne_3 = "00000036STZ", - - -- Opcode REGIMM. 
- bltz_2 = "04000000SB", - bgez_2 = "04010000SB", - bltzl_2 = "04020000SB", - bgezl_2 = "04030000SB", - tgei_2 = "04080000SI", - tgeiu_2 = "04090000SI", - tlti_2 = "040a0000SI", - tltiu_2 = "040b0000SI", - teqi_2 = "040c0000SI", - tnei_2 = "040e0000SI", - bltzal_2 = "04100000SB", - bal_1 = "04110000B", - bgezal_2 = "04110000SB", - bltzall_2 = "04120000SB", - bgezall_2 = "04130000SB", - synci_1 = "041f0000O", - - -- Opcode SPECIAL2. - madd_2 = "70000000ST", - maddu_2 = "70000001ST", - mul_3 = "70000002DST", - msub_2 = "70000004ST", - msubu_2 = "70000005ST", - clz_2 = "70000020DS=", - clo_2 = "70000021DS=", - sdbbp_0 = "7000003f", - sdbbp_1 = "7000003fY", - - -- Opcode SPECIAL3. - ext_4 = "7c000000TSAM", -- Note: last arg is msbd = size-1 - ins_4 = "7c000004TSAM", -- Note: last arg is msb = pos+size-1 - wsbh_2 = "7c0000a0DT", - seb_2 = "7c000420DT", - seh_2 = "7c000620DT", - rdhwr_2 = "7c00003bTD", - - -- Opcode COP0. - mfc0_2 = "40000000TD", - mfc0_3 = "40000000TDW", - mtc0_2 = "40800000TD", - mtc0_3 = "40800000TDW", - rdpgpr_2 = "41400000DT", - di_0 = "41606000", - di_1 = "41606000T", - ei_0 = "41606020", - ei_1 = "41606020T", - wrpgpr_2 = "41c00000DT", - tlbr_0 = "42000001", - tlbwi_0 = "42000002", - tlbwr_0 = "42000006", - tlbp_0 = "42000008", - eret_0 = "42000018", - deret_0 = "4200001f", - wait_0 = "42000020", - - -- Opcode COP1. - mfc1_2 = "44000000TG", - cfc1_2 = "44400000TG", - mfhc1_2 = "44600000TG", - mtc1_2 = "44800000TG", - ctc1_2 = "44c00000TG", - mthc1_2 = "44e00000TG", - - bc1f_1 = "45000000B", - bc1f_2 = "45000000CB", - bc1t_1 = "45010000B", - bc1t_2 = "45010000CB", - bc1fl_1 = "45020000B", - bc1fl_2 = "45020000CB", - bc1tl_1 = "45030000B", - bc1tl_2 = "45030000CB", - - ["add.s_3"] = "46000000FGH", - ["sub.s_3"] = "46000001FGH", - ["mul.s_3"] = "46000002FGH", - ["div.s_3"] = "46000003FGH", - ["sqrt.s_2"] = "46000004FG", - ["abs.s_2"] = "46000005FG", - ["mov.s_2"] = "46000006FG", - ["neg.s_2"] = "46000007FG", - ["round.l.s_2"] = "46000008FG", - ["trunc.l.s_2"] = "46000009FG", - ["ceil.l.s_2"] = "4600000aFG", - ["floor.l.s_2"] = "4600000bFG", - ["round.w.s_2"] = "4600000cFG", - ["trunc.w.s_2"] = "4600000dFG", - ["ceil.w.s_2"] = "4600000eFG", - ["floor.w.s_2"] = "4600000fFG", - ["movf.s_2"] = "46000011FG", - ["movf.s_3"] = "46000011FGC", - ["movt.s_2"] = "46010011FG", - ["movt.s_3"] = "46010011FGC", - ["movz.s_3"] = "46000012FGT", - ["movn.s_3"] = "46000013FGT", - ["recip.s_2"] = "46000015FG", - ["rsqrt.s_2"] = "46000016FG", - ["cvt.d.s_2"] = "46000021FG", - ["cvt.w.s_2"] = "46000024FG", - ["cvt.l.s_2"] = "46000025FG", - ["cvt.ps.s_3"] = "46000026FGH", - ["c.f.s_2"] = "46000030GH", - ["c.f.s_3"] = "46000030VGH", - ["c.un.s_2"] = "46000031GH", - ["c.un.s_3"] = "46000031VGH", - ["c.eq.s_2"] = "46000032GH", - ["c.eq.s_3"] = "46000032VGH", - ["c.ueq.s_2"] = "46000033GH", - ["c.ueq.s_3"] = "46000033VGH", - ["c.olt.s_2"] = "46000034GH", - ["c.olt.s_3"] = "46000034VGH", - ["c.ult.s_2"] = "46000035GH", - ["c.ult.s_3"] = "46000035VGH", - ["c.ole.s_2"] = "46000036GH", - ["c.ole.s_3"] = "46000036VGH", - ["c.ule.s_2"] = "46000037GH", - ["c.ule.s_3"] = "46000037VGH", - ["c.sf.s_2"] = "46000038GH", - ["c.sf.s_3"] = "46000038VGH", - ["c.ngle.s_2"] = "46000039GH", - ["c.ngle.s_3"] = "46000039VGH", - ["c.seq.s_2"] = "4600003aGH", - ["c.seq.s_3"] = "4600003aVGH", - ["c.ngl.s_2"] = "4600003bGH", - ["c.ngl.s_3"] = "4600003bVGH", - ["c.lt.s_2"] = "4600003cGH", - ["c.lt.s_3"] = "4600003cVGH", - ["c.nge.s_2"] = "4600003dGH", - ["c.nge.s_3"] = "4600003dVGH", - ["c.le.s_2"] = "4600003eGH", - 
["c.le.s_3"] = "4600003eVGH", - ["c.ngt.s_2"] = "4600003fGH", - ["c.ngt.s_3"] = "4600003fVGH", - - ["add.d_3"] = "46200000FGH", - ["sub.d_3"] = "46200001FGH", - ["mul.d_3"] = "46200002FGH", - ["div.d_3"] = "46200003FGH", - ["sqrt.d_2"] = "46200004FG", - ["abs.d_2"] = "46200005FG", - ["mov.d_2"] = "46200006FG", - ["neg.d_2"] = "46200007FG", - ["round.l.d_2"] = "46200008FG", - ["trunc.l.d_2"] = "46200009FG", - ["ceil.l.d_2"] = "4620000aFG", - ["floor.l.d_2"] = "4620000bFG", - ["round.w.d_2"] = "4620000cFG", - ["trunc.w.d_2"] = "4620000dFG", - ["ceil.w.d_2"] = "4620000eFG", - ["floor.w.d_2"] = "4620000fFG", - ["movf.d_2"] = "46200011FG", - ["movf.d_3"] = "46200011FGC", - ["movt.d_2"] = "46210011FG", - ["movt.d_3"] = "46210011FGC", - ["movz.d_3"] = "46200012FGT", - ["movn.d_3"] = "46200013FGT", - ["recip.d_2"] = "46200015FG", - ["rsqrt.d_2"] = "46200016FG", - ["cvt.s.d_2"] = "46200020FG", - ["cvt.w.d_2"] = "46200024FG", - ["cvt.l.d_2"] = "46200025FG", - ["c.f.d_2"] = "46200030GH", - ["c.f.d_3"] = "46200030VGH", - ["c.un.d_2"] = "46200031GH", - ["c.un.d_3"] = "46200031VGH", - ["c.eq.d_2"] = "46200032GH", - ["c.eq.d_3"] = "46200032VGH", - ["c.ueq.d_2"] = "46200033GH", - ["c.ueq.d_3"] = "46200033VGH", - ["c.olt.d_2"] = "46200034GH", - ["c.olt.d_3"] = "46200034VGH", - ["c.ult.d_2"] = "46200035GH", - ["c.ult.d_3"] = "46200035VGH", - ["c.ole.d_2"] = "46200036GH", - ["c.ole.d_3"] = "46200036VGH", - ["c.ule.d_2"] = "46200037GH", - ["c.ule.d_3"] = "46200037VGH", - ["c.sf.d_2"] = "46200038GH", - ["c.sf.d_3"] = "46200038VGH", - ["c.ngle.d_2"] = "46200039GH", - ["c.ngle.d_3"] = "46200039VGH", - ["c.seq.d_2"] = "4620003aGH", - ["c.seq.d_3"] = "4620003aVGH", - ["c.ngl.d_2"] = "4620003bGH", - ["c.ngl.d_3"] = "4620003bVGH", - ["c.lt.d_2"] = "4620003cGH", - ["c.lt.d_3"] = "4620003cVGH", - ["c.nge.d_2"] = "4620003dGH", - ["c.nge.d_3"] = "4620003dVGH", - ["c.le.d_2"] = "4620003eGH", - ["c.le.d_3"] = "4620003eVGH", - ["c.ngt.d_2"] = "4620003fGH", - ["c.ngt.d_3"] = "4620003fVGH", - - ["add.ps_3"] = "46c00000FGH", - ["sub.ps_3"] = "46c00001FGH", - ["mul.ps_3"] = "46c00002FGH", - ["abs.ps_2"] = "46c00005FG", - ["mov.ps_2"] = "46c00006FG", - ["neg.ps_2"] = "46c00007FG", - ["movf.ps_2"] = "46c00011FG", - ["movf.ps_3"] = "46c00011FGC", - ["movt.ps_2"] = "46c10011FG", - ["movt.ps_3"] = "46c10011FGC", - ["movz.ps_3"] = "46c00012FGT", - ["movn.ps_3"] = "46c00013FGT", - ["cvt.s.pu_2"] = "46c00020FG", - ["cvt.s.pl_2"] = "46c00028FG", - ["pll.ps_3"] = "46c0002cFGH", - ["plu.ps_3"] = "46c0002dFGH", - ["pul.ps_3"] = "46c0002eFGH", - ["puu.ps_3"] = "46c0002fFGH", - ["c.f.ps_2"] = "46c00030GH", - ["c.f.ps_3"] = "46c00030VGH", - ["c.un.ps_2"] = "46c00031GH", - ["c.un.ps_3"] = "46c00031VGH", - ["c.eq.ps_2"] = "46c00032GH", - ["c.eq.ps_3"] = "46c00032VGH", - ["c.ueq.ps_2"] = "46c00033GH", - ["c.ueq.ps_3"] = "46c00033VGH", - ["c.olt.ps_2"] = "46c00034GH", - ["c.olt.ps_3"] = "46c00034VGH", - ["c.ult.ps_2"] = "46c00035GH", - ["c.ult.ps_3"] = "46c00035VGH", - ["c.ole.ps_2"] = "46c00036GH", - ["c.ole.ps_3"] = "46c00036VGH", - ["c.ule.ps_2"] = "46c00037GH", - ["c.ule.ps_3"] = "46c00037VGH", - ["c.sf.ps_2"] = "46c00038GH", - ["c.sf.ps_3"] = "46c00038VGH", - ["c.ngle.ps_2"] = "46c00039GH", - ["c.ngle.ps_3"] = "46c00039VGH", - ["c.seq.ps_2"] = "46c0003aGH", - ["c.seq.ps_3"] = "46c0003aVGH", - ["c.ngl.ps_2"] = "46c0003bGH", - ["c.ngl.ps_3"] = "46c0003bVGH", - ["c.lt.ps_2"] = "46c0003cGH", - ["c.lt.ps_3"] = "46c0003cVGH", - ["c.nge.ps_2"] = "46c0003dGH", - ["c.nge.ps_3"] = "46c0003dVGH", - ["c.le.ps_2"] = "46c0003eGH", - ["c.le.ps_3"] = 
"46c0003eVGH", - ["c.ngt.ps_2"] = "46c0003fGH", - ["c.ngt.ps_3"] = "46c0003fVGH", - - ["cvt.s.w_2"] = "46800020FG", - ["cvt.d.w_2"] = "46800021FG", - - ["cvt.s.l_2"] = "46a00020FG", - ["cvt.d.l_2"] = "46a00021FG", - - -- Opcode COP1X. - lwxc1_2 = "4c000000FX", - ldxc1_2 = "4c000001FX", - luxc1_2 = "4c000005FX", - swxc1_2 = "4c000008FX", - sdxc1_2 = "4c000009FX", - suxc1_2 = "4c00000dFX", - prefx_2 = "4c00000fMX", - ["alnv.ps_4"] = "4c00001eFGHS", - ["madd.s_4"] = "4c000020FRGH", - ["madd.d_4"] = "4c000021FRGH", - ["madd.ps_4"] = "4c000026FRGH", - ["msub.s_4"] = "4c000028FRGH", - ["msub.d_4"] = "4c000029FRGH", - ["msub.ps_4"] = "4c00002eFRGH", - ["nmadd.s_4"] = "4c000030FRGH", - ["nmadd.d_4"] = "4c000031FRGH", - ["nmadd.ps_4"] = "4c000036FRGH", - ["nmsub.s_4"] = "4c000038FRGH", - ["nmsub.d_4"] = "4c000039FRGH", - ["nmsub.ps_4"] = "4c00003eFRGH", -} - ------------------------------------------------------------------------------- - -local function parse_gpr(expr) - local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$") - local tp = map_type[tname or expr] - if tp then - local reg = ovreg or tp.reg - if not reg then - werror("type `"..(tname or expr).."' needs a register override") - end - expr = reg - end - local r = match(expr, "^r([1-3]?[0-9])$") - if r then - r = tonumber(r) - if r <= 31 then return r, tp end - end - werror("bad register name `"..expr.."'") -end - -local function parse_fpr(expr) - local r = match(expr, "^f([1-3]?[0-9])$") - if r then - r = tonumber(r) - if r <= 31 then return r end - end - werror("bad register name `"..expr.."'") -end - -local function parse_imm(imm, bits, shift, scale, signed) - local n = tonumber(imm) - if n then - local m = sar(n, scale) - if shl(m, scale) == n then - if signed then - local s = sar(m, bits-1) - if s == 0 then return shl(m, shift) - elseif s == -1 then return shl(m + shl(1, bits), shift) end - else - if sar(m, bits) == 0 then return shl(m, shift) end - end - end - werror("out of range immediate `"..imm.."'") - elseif match(imm, "^[rf]([1-3]?[0-9])$") or - match(imm, "^([%w_]+):([rf][1-3]?[0-9])$") then - werror("expected immediate operand, got register") - else - waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) - return 0 - end -end - -local function parse_disp(disp) - local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") - if imm then - local r = shl(parse_gpr(reg), 21) - local extname = match(imm, "^extern%s+(%S+)$") - if extname then - waction("REL_EXT", map_extern[extname], nil, 1) - return r - else - return r + parse_imm(imm, 16, 0, 0, true) - end - end - local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local r, tp = parse_gpr(reg) - if tp then - waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr)) - return shl(r, 21) - end - end - werror("bad displacement `"..disp.."'") -end - -local function parse_index(idx) - local rt, rs = match(idx, "^(.*)%(([%w_:]+)%)$") - if rt then - rt = parse_gpr(rt) - rs = parse_gpr(rs) - return shl(rt, 16) + shl(rs, 21) - end - werror("bad index `"..idx.."'") -end - -local function parse_label(label, def) - local prefix = sub(label, 1, 2) - -- =>label (pc label reference) - if prefix == "=>" then - return "PC", 0, sub(label, 3) - end - -- ->name (global label reference) - if prefix == "->" then - return "LG", map_global[sub(label, 3)] - end - if def then - -- [1-9] (local label definition) - if match(label, "^[1-9]$") then - return "LG", 10+tonumber(label) - end - else - -- [<>][1-9] (local label reference) - local dir, lnum = match(label, 
"^([<>])([1-9])$") - if dir then -- Fwd: 1-9, Bkwd: 11-19. - return "LG", lnum + (dir == ">" and 0 or 10) - end - -- extern label (extern label reference) - local extname = match(label, "^extern%s+(%S+)$") - if extname then - return "EXT", map_extern[extname] - end - end - werror("bad label `"..label.."'") -end - ------------------------------------------------------------------------------- - --- Handle opcodes defined with template strings. -map_op[".template__"] = function(params, template, nparams) - if not params then return sub(template, 9) end - local op = tonumber(sub(template, 1, 8), 16) - local n = 1 - - -- Limit number of section buffer positions used by a single dasm_put(). - -- A single opcode needs a maximum of 2 positions (ins/ext). - if secpos+2 > maxsecpos then wflush() end - local pos = wpos() - - -- Process each character. - for p in gmatch(sub(template, 9), ".") do - if p == "D" then - op = op + shl(parse_gpr(params[n]), 11); n = n + 1 - elseif p == "T" then - op = op + shl(parse_gpr(params[n]), 16); n = n + 1 - elseif p == "S" then - op = op + shl(parse_gpr(params[n]), 21); n = n + 1 - elseif p == "F" then - op = op + shl(parse_fpr(params[n]), 6); n = n + 1 - elseif p == "G" then - op = op + shl(parse_fpr(params[n]), 11); n = n + 1 - elseif p == "H" then - op = op + shl(parse_fpr(params[n]), 16); n = n + 1 - elseif p == "R" then - op = op + shl(parse_fpr(params[n]), 21); n = n + 1 - elseif p == "I" then - op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1 - elseif p == "U" then - op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1 - elseif p == "O" then - op = op + parse_disp(params[n]); n = n + 1 - elseif p == "X" then - op = op + parse_index(params[n]); n = n + 1 - elseif p == "B" or p == "J" then - local mode, n, s = parse_label(params[n], false) - if p == "B" then n = n + 2048 end - waction("REL_"..mode, n, s, 1) - n = n + 1 - elseif p == "A" then - op = op + parse_imm(params[n], 5, 6, 0, false); n = n + 1 - elseif p == "M" then - op = op + parse_imm(params[n], 5, 11, 0, false); n = n + 1 - elseif p == "N" then - op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1 - elseif p == "C" then - op = op + parse_imm(params[n], 3, 18, 0, false); n = n + 1 - elseif p == "V" then - op = op + parse_imm(params[n], 3, 8, 0, false); n = n + 1 - elseif p == "W" then - op = op + parse_imm(params[n], 3, 0, 0, false); n = n + 1 - elseif p == "Y" then - op = op + parse_imm(params[n], 20, 6, 0, false); n = n + 1 - elseif p == "Z" then - op = op + parse_imm(params[n], 10, 6, 0, false); n = n + 1 - elseif p == "=" then - op = op + shl(band(op, 0xf800), 5) -- Copy D to T for clz, clo. - else - assert(false) - end - end - wputpos(pos, op) -end - ------------------------------------------------------------------------------- - --- Pseudo-opcode to mark the position where the action list is to be emitted. -map_op[".actionlist_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeactions(out, name) end) -end - --- Pseudo-opcode to mark the position where the global enum is to be emitted. -map_op[".globals_1"] = function(params) - if not params then return "prefix" end - local prefix = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobals(out, prefix) end) -end - --- Pseudo-opcode to mark the position where the global names are to be emitted. 
-map_op[".globalnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobalnames(out, name) end) -end - --- Pseudo-opcode to mark the position where the extern names are to be emitted. -map_op[".externnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeexternnames(out, name) end) -end - ------------------------------------------------------------------------------- - --- Label pseudo-opcode (converted from trailing colon form). -map_op[".label_1"] = function(params) - if not params then return "[1-9] | ->global | =>pcexpr" end - if secpos+1 > maxsecpos then wflush() end - local mode, n, s = parse_label(params[1], true) - if mode == "EXT" then werror("bad label definition") end - waction("LABEL_"..mode, n, s, 1) -end - ------------------------------------------------------------------------------- - --- Pseudo-opcodes for data storage. -map_op[".long_*"] = function(params) - if not params then return "imm..." end - for _,p in ipairs(params) do - local n = tonumber(p) - if not n then werror("bad immediate `"..p.."'") end - if n < 0 then n = n + 2^32 end - wputw(n) - if secpos+2 > maxsecpos then wflush() end - end -end - --- Alignment pseudo-opcode. -map_op[".align_1"] = function(params) - if not params then return "numpow2" end - if secpos+1 > maxsecpos then wflush() end - local align = tonumber(params[1]) - if align then - local x = align - -- Must be a power of 2 in the range (2 ... 256). - for i=1,8 do - x = x / 2 - if x == 1 then - waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. - return - end - end - end - werror("bad alignment") -end - ------------------------------------------------------------------------------- - --- Pseudo-opcode for (primitive) type definitions (map to C types). -map_op[".type_3"] = function(params, nparams) - if not params then - return nparams == 2 and "name, ctype" or "name, ctype, reg" - end - local name, ctype, reg = params[1], params[2], params[3] - if not match(name, "^[%a_][%w_]*$") then - werror("bad type name `"..name.."'") - end - local tp = map_type[name] - if tp then - werror("duplicate type `"..name.."'") - end - -- Add #type to defines. A bit unclean to put it in map_archdef. - map_archdef["#"..name] = "sizeof("..ctype..")" - -- Add new type and emit shortcut define. - local num = ctypenum + 1 - map_type[name] = { - ctype = ctype, - ctypefmt = format("Dt%X(%%s)", num), - reg = reg, - } - wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) - ctypenum = num -end -map_op[".type_2"] = map_op[".type_3"] - --- Dump type definitions. -local function dumptypes(out, lvl) - local t = {} - for name in pairs(map_type) do t[#t+1] = name end - sort(t) - out:write("Type definitions:\n") - for _,name in ipairs(t) do - local tp = map_type[name] - local reg = tp.reg or "" - out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Set the current section. -function _M.section(num) - waction("SECTION", num) - wflush(true) -- SECTION is a terminal action. -end - ------------------------------------------------------------------------------- - --- Dump architecture description. 
-function _M.dumparch(out) - out:write(format("DynASM %s version %s, released %s\n\n", - _info.arch, _info.version, _info.release)) - dumpactions(out) -end - --- Dump all user defined elements. -function _M.dumpdef(out, lvl) - dumptypes(out, lvl) - dumpglobals(out, lvl) - dumpexterns(out, lvl) -end - ------------------------------------------------------------------------------- - --- Pass callbacks from/to the DynASM core. -function _M.passcb(wl, we, wf, ww) - wline, werror, wfatal, wwarn = wl, we, wf, ww - return wflush -end - --- Setup the arch-specific module. -function _M.setup(arch, opt) - g_arch, g_opt = arch, opt -end - --- Merge the core maps and the arch-specific maps. -function _M.mergemaps(map_coreop, map_def) - setmetatable(map_op, { __index = map_coreop }) - setmetatable(map_def, { __index = map_archdef }) - return map_op, map_def -end - -return _M - ------------------------------------------------------------------------------- - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.h deleted file mode 100644 index 332c64dc68a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.h +++ /dev/null @@ -1,419 +0,0 @@ -/* -** DynASM PPC/PPC64 encoding engine. -** Copyright (C) 2005-2015 Mike Pall. All rights reserved. -** Released under the MIT license. See dynasm.lua for full copyright notice. -*/ - -#include -#include -#include -#include - -#define DASM_ARCH "ppc" - -#ifndef DASM_EXTERN -#define DASM_EXTERN(a,b,c,d) 0 -#endif - -/* Action definitions. */ -enum { - DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, - /* The following actions need a buffer position. */ - DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, - /* The following actions also have an argument. */ - DASM_REL_PC, DASM_LABEL_PC, DASM_IMM, DASM_IMMSH, - DASM__MAX -}; - -/* Maximum number of section buffer positions for a single dasm_put() call. */ -#define DASM_MAXSECPOS 25 - -/* DynASM encoder status codes. Action list offset or number are or'ed in. */ -#define DASM_S_OK 0x00000000 -#define DASM_S_NOMEM 0x01000000 -#define DASM_S_PHASE 0x02000000 -#define DASM_S_MATCH_SEC 0x03000000 -#define DASM_S_RANGE_I 0x11000000 -#define DASM_S_RANGE_SEC 0x12000000 -#define DASM_S_RANGE_LG 0x13000000 -#define DASM_S_RANGE_PC 0x14000000 -#define DASM_S_RANGE_REL 0x15000000 -#define DASM_S_UNDEF_LG 0x21000000 -#define DASM_S_UNDEF_PC 0x22000000 - -/* Macros to convert positions (8 bit section + 24 bit index). */ -#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) -#define DASM_POS2BIAS(pos) ((pos)&0xff000000) -#define DASM_SEC2POS(sec) ((sec)<<24) -#define DASM_POS2SEC(pos) ((pos)>>24) -#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) - -/* Action list type. */ -typedef const unsigned int *dasm_ActList; - -/* Per-section structure. */ -typedef struct dasm_Section { - int *rbuf; /* Biased buffer pointer (negative section bias). */ - int *buf; /* True buffer pointer. */ - size_t bsize; /* Buffer size in bytes. */ - int pos; /* Biased buffer position. */ - int epos; /* End of biased buffer position - max single put. */ - int ofs; /* Byte offset into section. */ -} dasm_Section; - -/* Core structure holding the DynASM encoding state. */ -struct dasm_State { - size_t psize; /* Allocated size of this structure. */ - dasm_ActList actionlist; /* Current actionlist pointer. */ - int *lglabels; /* Local/global chain/pos ptrs. */ - size_t lgsize; - int *pclabels; /* PC label chains/pos ptrs. 
*/ - size_t pcsize; - void **globals; /* Array of globals (bias -10). */ - dasm_Section *section; /* Pointer to active section. */ - size_t codesize; /* Total size of all code sections. */ - int maxsection; /* 0 <= sectionidx < maxsection. */ - int status; /* Status code. */ - dasm_Section sections[1]; /* All sections. Alloc-extended. */ -}; - -/* The size of the core structure depends on the max. number of sections. */ -#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) - - -/* Initialize DynASM state. */ -void dasm_init(Dst_DECL, int maxsection) -{ - dasm_State *D; - size_t psz = 0; - int i; - Dst_REF = NULL; - DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); - D = Dst_REF; - D->psize = psz; - D->lglabels = NULL; - D->lgsize = 0; - D->pclabels = NULL; - D->pcsize = 0; - D->globals = NULL; - D->maxsection = maxsection; - for (i = 0; i < maxsection; i++) { - D->sections[i].buf = NULL; /* Need this for pass3. */ - D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); - D->sections[i].bsize = 0; - D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ - } -} - -/* Free DynASM state. */ -void dasm_free(Dst_DECL) -{ - dasm_State *D = Dst_REF; - int i; - for (i = 0; i < D->maxsection; i++) - if (D->sections[i].buf) - DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); - if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); - if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); - DASM_M_FREE(Dst, D, D->psize); -} - -/* Setup global label array. Must be called before dasm_setup(). */ -void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) -{ - dasm_State *D = Dst_REF; - D->globals = gl - 10; /* Negative bias to compensate for locals. */ - DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); -} - -/* Grow PC label array. Can be called after dasm_setup(), too. */ -void dasm_growpc(Dst_DECL, unsigned int maxpc) -{ - dasm_State *D = Dst_REF; - size_t osz = D->pcsize; - DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); - memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); -} - -/* Setup encoder. */ -void dasm_setup(Dst_DECL, const void *actionlist) -{ - dasm_State *D = Dst_REF; - int i; - D->actionlist = (dasm_ActList)actionlist; - D->status = DASM_S_OK; - D->section = &D->sections[0]; - memset((void *)D->lglabels, 0, D->lgsize); - if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); - for (i = 0; i < D->maxsection; i++) { - D->sections[i].pos = DASM_SEC2POS(i); - D->sections[i].ofs = 0; - } -} - - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) { \ - D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) -#define CKPL(kind, st) \ - do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ - D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) -#else -#define CK(x, st) ((void)0) -#define CKPL(kind, st) ((void)0) -#endif - -/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ -void dasm_put(Dst_DECL, int start, ...) 
-{ - va_list ap; - dasm_State *D = Dst_REF; - dasm_ActList p = D->actionlist + start; - dasm_Section *sec = D->section; - int pos = sec->pos, ofs = sec->ofs; - int *b; - - if (pos >= sec->epos) { - DASM_M_GROW(Dst, int, sec->buf, sec->bsize, - sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); - sec->rbuf = sec->buf - DASM_POS2BIAS(pos); - sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); - } - - b = sec->rbuf; - b[pos++] = start; - - va_start(ap, start); - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - if (action >= DASM__MAX) { - ofs += 4; - } else { - int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; - switch (action) { - case DASM_STOP: goto stop; - case DASM_SECTION: - n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); - D->section = &D->sections[n]; goto stop; - case DASM_ESC: p++; ofs += 4; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; - case DASM_REL_LG: - n = (ins & 2047) - 10; pl = D->lglabels + n; - /* Bkwd rel or global. */ - if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } - pl += 10; n = *pl; - if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ - goto linkrel; - case DASM_REL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putrel: - n = *pl; - if (n < 0) { /* Label exists. Get label pos and store it. */ - b[pos] = -n; - } else { - linkrel: - b[pos] = n; /* Else link to rel chain, anchored at label. */ - *pl = pos; - } - pos++; - break; - case DASM_LABEL_LG: - pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; - case DASM_LABEL_PC: - pl = D->pclabels + n; CKPL(pc, PC); - putlabel: - n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; - } - *pl = -pos; /* Label exists now. */ - b[pos++] = ofs; /* Store pass1 offset estimate. */ - break; - case DASM_IMM: -#ifdef DASM_CHECKS - CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); -#endif - n >>= ((ins>>10)&31); -#ifdef DASM_CHECKS - if (ins & 0x8000) - CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); - else - CK((n>>((ins>>5)&31)) == 0, RANGE_I); -#endif - b[pos++] = n; - break; - case DASM_IMMSH: - CK((n >> 6) == 0, RANGE_I); - b[pos++] = n; - break; - } - } - } -stop: - va_end(ap); - sec->pos = pos; - sec->ofs = ofs; -} -#undef CK - -/* Pass 2: Link sections, shrink aligns, fix label offsets. */ -int dasm_link(Dst_DECL, size_t *szp) -{ - dasm_State *D = Dst_REF; - int secnum; - int ofs = 0; - -#ifdef DASM_CHECKS - *szp = 0; - if (D->status != DASM_S_OK) return D->status; - { - int pc; - for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) - if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; - } -#endif - - { /* Handle globals not defined in this translation unit. */ - int idx; - for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { - int n = D->lglabels[idx]; - /* Undefined label: Collapse rel chain and replace with marker (< 0). */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } - } - } - - /* Combine all code sections. No support for data sections (yet). 
*/ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->rbuf; - int pos = DASM_SEC2POS(secnum); - int lastpos = sec->pos; - - while (pos != lastpos) { - dasm_ActList p = D->actionlist + b[pos++]; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: p++; break; - case DASM_REL_EXT: break; - case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; - case DASM_REL_LG: case DASM_REL_PC: pos++; break; - case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; - case DASM_IMM: case DASM_IMMSH: pos++; break; - } - } - stop: (void)0; - } - ofs += sec->ofs; /* Next section starts right after current section. */ - } - - D->codesize = ofs; /* Total size of all code sections */ - *szp = ofs; - return DASM_S_OK; -} - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) -#else -#define CK(x, st) ((void)0) -#endif - -/* Pass 3: Encode sections. */ -int dasm_encode(Dst_DECL, void *buffer) -{ - dasm_State *D = Dst_REF; - char *base = (char *)buffer; - unsigned int *cp = (unsigned int *)buffer; - int secnum; - - /* Encode all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->buf; - int *endb = sec->rbuf + sec->pos; - - while (b != endb) { - dasm_ActList p = D->actionlist + *b++; - while (1) { - unsigned int ins = *p++; - unsigned int action = (ins >> 16); - int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; - switch (action) { - case DASM_STOP: case DASM_SECTION: goto stop; - case DASM_ESC: *cp++ = *p++; break; - case DASM_REL_EXT: - n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1) - 4; - goto patchrel; - case DASM_ALIGN: - ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000; - break; - case DASM_REL_LG: - CK(n >= 0, UNDEF_LG); - case DASM_REL_PC: - CK(n >= 0, UNDEF_PC); - n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base); - patchrel: - CK((n & 3) == 0 && - (((n+4) + ((ins & 2048) ? 0x00008000 : 0x02000000)) >> - ((ins & 2048) ? 16 : 26)) == 0, RANGE_REL); - cp[-1] |= ((n+4) & ((ins & 2048) ? 0x0000fffc: 0x03fffffc)); - break; - case DASM_LABEL_LG: - ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); - break; - case DASM_LABEL_PC: break; - case DASM_IMM: - cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31); - break; - case DASM_IMMSH: - cp[-1] |= (ins & 1) ? ((n&31)<<11)|((n&32)>>4) : ((n&31)<<6)|(n&32); - break; - default: *cp++ = ins; break; - } - } - stop: (void)0; - } - } - - if (base + D->codesize != (char *)cp) /* Check for phase errors. */ - return DASM_S_PHASE; - return DASM_S_OK; -} -#undef CK - -/* Get PC label offset. */ -int dasm_getpclabel(Dst_DECL, unsigned int pc) -{ - dasm_State *D = Dst_REF; - if (pc*sizeof(int) < D->pcsize) { - int pos = D->pclabels[pc]; - if (pos < 0) return *DASM_POS2PTR(D, -pos); - if (pos > 0) return -1; /* Undefined. */ - } - return -2; /* Unused or out of range. */ -} - -#ifdef DASM_CHECKS -/* Optional sanity checker to call between isolated encoding steps. 
*/ -int dasm_checkstep(Dst_DECL, int secmatch) -{ - dasm_State *D = Dst_REF; - if (D->status == DASM_S_OK) { - int i; - for (i = 1; i <= 9; i++) { - if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } - D->lglabels[i] = 0; - } - } - if (D->status == DASM_S_OK && secmatch >= 0 && - D->section != &D->sections[secmatch]) - D->status = DASM_S_MATCH_SEC|(D->section-D->sections); - return D->status; -} -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.lua deleted file mode 100644 index 278f09526dd..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_ppc.lua +++ /dev/null @@ -1,1919 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM PPC/PPC64 module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See dynasm.lua for full copyright notice. --- --- Support for various extensions contributed by Caio Souza Oliveira. ------------------------------------------------------------------------------- - --- Module information: -local _info = { - arch = "ppc", - description = "DynASM PPC module", - version = "1.3.0", - vernum = 10300, - release = "2015-01-14", - author = "Mike Pall", - license = "MIT", -} - --- Exported glue functions for the arch-specific module. -local _M = { _info = _info } - --- Cache library functions. -local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs -local assert, setmetatable = assert, setmetatable -local _s = string -local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char -local match, gmatch = _s.match, _s.gmatch -local concat, sort = table.concat, table.sort -local bit = bit or require("bit") -local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift -local tohex = bit.tohex - --- Inherited tables and callbacks. -local g_opt, g_arch -local wline, werror, wfatal, wwarn - --- Action name list. --- CHECK: Keep this in sync with the C code! -local action_names = { - "STOP", "SECTION", "ESC", "REL_EXT", - "ALIGN", "REL_LG", "LABEL_LG", - "REL_PC", "LABEL_PC", "IMM", "IMMSH" -} - --- Maximum number of section buffer positions for dasm_put(). --- CHECK: Keep this in sync with the C code! -local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. - --- Action name -> action number. -local map_action = {} -for n,name in ipairs(action_names) do - map_action[name] = n-1 -end - --- Action list buffer. -local actlist = {} - --- Argument list for next dasm_put(). Start with offset 0 into action list. -local actargs = { 0 } - --- Current number of section buffer positions for dasm_put(). -local secpos = 1 - ------------------------------------------------------------------------------- - --- Dump action names and numbers. -local function dumpactions(out) - out:write("DynASM encoding engine action codes:\n") - for n,name in ipairs(action_names) do - local num = map_action[name] - out:write(format(" %-10s %02X %d\n", name, num, num)) - end - out:write("\n") -end - --- Write action list buffer as a huge static C array. 
-local function writeactions(out, name) - local nn = #actlist - if nn == 0 then nn = 1; actlist[0] = map_action.STOP end - out:write("static const unsigned int ", name, "[", nn, "] = {\n") - for i = 1,nn-1 do - assert(out:write("0x", tohex(actlist[i]), ",\n")) - end - assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) -end - ------------------------------------------------------------------------------- - --- Add word to action list. -local function wputxw(n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - actlist[#actlist+1] = n -end - --- Add action to list with optional arg. Advance buffer pos, too. -local function waction(action, val, a, num) - local w = assert(map_action[action], "bad action name `"..action.."'") - wputxw(w * 0x10000 + (val or 0)) - if a then actargs[#actargs+1] = a end - if a or num then secpos = secpos + (num or 1) end -end - --- Flush action list (intervening C code or buffer pos overflow). -local function wflush(term) - if #actlist == actargs[1] then return end -- Nothing to flush. - if not term then waction("STOP") end -- Terminate action list. - wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) - actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). - secpos = 1 -- The actionlist offset occupies a buffer position, too. -end - --- Put escaped word. -local function wputw(n) - if n <= 0xffffff then waction("ESC") end - wputxw(n) -end - --- Reserve position for word. -local function wpos() - local pos = #actlist+1 - actlist[pos] = "" - return pos -end - --- Store word to reserved position. -local function wputpos(pos, n) - assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") - actlist[pos] = n -end - ------------------------------------------------------------------------------- - --- Global label name -> global label number. With auto assignment on 1st use. -local next_global = 20 -local map_global = setmetatable({}, { __index = function(t, name) - if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end - local n = next_global - if n > 2047 then werror("too many global labels") end - next_global = n + 1 - t[name] = n - return n -end}) - --- Dump global labels. -local function dumpglobals(out, lvl) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("Global labels:\n") - for i=20,next_global-1 do - out:write(format(" %s\n", t[i])) - end - out:write("\n") -end - --- Write global label enum. -local function writeglobals(out, prefix) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("enum {\n") - for i=20,next_global-1 do - out:write(" ", prefix, t[i], ",\n") - end - out:write(" ", prefix, "_MAX\n};\n") -end - --- Write global label names. -local function writeglobalnames(out, name) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("static const char *const ", name, "[] = {\n") - for i=20,next_global-1 do - out:write(" \"", t[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Extern label name -> extern label number. With auto assignment on 1st use. -local next_extern = 0 -local map_extern_ = {} -local map_extern = setmetatable({}, { __index = function(t, name) - -- No restrictions on the name for now. - local n = next_extern - if n > 2047 then werror("too many extern labels") end - next_extern = n + 1 - t[name] = n - map_extern_[n] = name - return n -end}) - --- Dump extern labels. 
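Aside, not part of the patch: `waction` above stores each action as a single 32-bit word with the action number in the upper 16 bits (`w * 0x10000 + val`), which is what the C engine shown earlier unpacks with `ins >> 16`. A small round-trip sketch of that packing, assuming LuaJIT's `bit` library; `pack_action` and `unpack_action` are invented names:

  local bit = bit or require("bit")
  local shl, shr, band = bit.lshift, bit.rshift, bit.band

  -- Same numbering as the deleted module's action_names list (n-1 indexing).
  local map_action = { STOP = 0, SECTION = 1, ESC = 2, REL_EXT = 3,
                       ALIGN = 4, REL_LG = 5, LABEL_LG = 6,
                       REL_PC = 7, LABEL_PC = 8, IMM = 9, IMMSH = 10 }

  local function pack_action(name, val)        -- what waction() appends to actlist
    return shl(map_action[name], 16) + (val or 0)
  end

  local function unpack_action(ins)            -- what dasm_put()/dasm_link() read back
    return shr(ins, 16), band(ins, 0xffff)
  end

  local a, v = unpack_action(pack_action("IMM", 32768 + 16*32))
  assert(a == map_action.IMM and v == 33280)
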
-local function dumpexterns(out, lvl) - out:write("Extern labels:\n") - for i=0,next_extern-1 do - out:write(format(" %s\n", map_extern_[i])) - end - out:write("\n") -end - --- Write extern label names. -local function writeexternnames(out, name) - out:write("static const char *const ", name, "[] = {\n") - for i=0,next_extern-1 do - out:write(" \"", map_extern_[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Arch-specific maps. -local map_archdef = { sp = "r1" } -- Ext. register name -> int. name. - -local map_type = {} -- Type name -> { ctype, reg } -local ctypenum = 0 -- Type number (for Dt... macros). - --- Reverse defines for registers. -function _M.revdef(s) - if s == "r1" then return "sp" end - return s -end - -local map_cond = { - lt = 0, gt = 1, eq = 2, so = 3, - ge = 4, le = 5, ne = 6, ns = 7, -} - ------------------------------------------------------------------------------- - -local map_op, op_template - -local function op_alias(opname, f) - return function(params, nparams) - if not params then return "-> "..opname:sub(1, -3) end - f(params, nparams) - op_template(params, map_op[opname], nparams) - end -end - --- Template strings for PPC instructions. -map_op = { - tdi_3 = "08000000ARI", - twi_3 = "0c000000ARI", - mulli_3 = "1c000000RRI", - subfic_3 = "20000000RRI", - cmplwi_3 = "28000000XRU", - cmplwi_2 = "28000000-RU", - cmpldi_3 = "28200000XRU", - cmpldi_2 = "28200000-RU", - cmpwi_3 = "2c000000XRI", - cmpwi_2 = "2c000000-RI", - cmpdi_3 = "2c200000XRI", - cmpdi_2 = "2c200000-RI", - addic_3 = "30000000RRI", - ["addic._3"] = "34000000RRI", - addi_3 = "38000000RR0I", - li_2 = "38000000RI", - la_2 = "38000000RD", - addis_3 = "3c000000RR0I", - lis_2 = "3c000000RI", - lus_2 = "3c000000RU", - bc_3 = "40000000AAK", - bcl_3 = "40000001AAK", - bdnz_1 = "42000000K", - bdz_1 = "42400000K", - sc_0 = "44000000", - b_1 = "48000000J", - bl_1 = "48000001J", - rlwimi_5 = "50000000RR~AAA.", - rlwinm_5 = "54000000RR~AAA.", - rlwnm_5 = "5c000000RR~RAA.", - ori_3 = "60000000RR~U", - nop_0 = "60000000", - oris_3 = "64000000RR~U", - xori_3 = "68000000RR~U", - xoris_3 = "6c000000RR~U", - ["andi._3"] = "70000000RR~U", - ["andis._3"] = "74000000RR~U", - lwz_2 = "80000000RD", - lwzu_2 = "84000000RD", - lbz_2 = "88000000RD", - lbzu_2 = "8c000000RD", - stw_2 = "90000000RD", - stwu_2 = "94000000RD", - stb_2 = "98000000RD", - stbu_2 = "9c000000RD", - lhz_2 = "a0000000RD", - lhzu_2 = "a4000000RD", - lha_2 = "a8000000RD", - lhau_2 = "ac000000RD", - sth_2 = "b0000000RD", - sthu_2 = "b4000000RD", - lmw_2 = "b8000000RD", - stmw_2 = "bc000000RD", - lfs_2 = "c0000000FD", - lfsu_2 = "c4000000FD", - lfd_2 = "c8000000FD", - lfdu_2 = "cc000000FD", - stfs_2 = "d0000000FD", - stfsu_2 = "d4000000FD", - stfd_2 = "d8000000FD", - stfdu_2 = "dc000000FD", - ld_2 = "e8000000RD", -- NYI: displacement must be divisible by 4. 
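Aside (illustration only): `op_alias` above defines pseudo-mnemonics by rewriting the parameter list and then falling through to another entry's template, so for example `subi ra, rb, n` is emitted as `addi ra, rb, -(n)`. A freestanding sketch of that pattern; `handlers`, `make_alias` and the toy `addi` handler are invented stand-ins for `map_op` and `op_template`:

  -- Toy version of the op_alias pattern used by the deleted PPC module.
  local handlers = {}

  handlers["addi_3"] = function(params)
    -- The real module would call op_template(params, "38000000RR0I", 3) here.
    print(("addi %s, %s, %s"):format(params[1], params[2], params[3]))
  end

  local function make_alias(target, rewrite)
    return function(params)
      rewrite(params)             -- adjust operands in place ...
      handlers[target](params)    -- ... then defer to the aliased template
    end
  end

  handlers["subi_3"] = make_alias("addi_3", function(p)
    p[3] = "-("..p[3]..")"        -- subi is addi with a negated immediate
  end)

  handlers["subi_3"]({ "r3", "r4", "8" })   --> addi r3, r4, -(8)
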
- ldu_2 = "e8000001RD", - lwa_2 = "e8000002RD", - std_2 = "f8000000RD", - stdu_2 = "f8000001RD", - - subi_3 = op_alias("addi_3", function(p) p[3] = "-("..p[3]..")" end), - subis_3 = op_alias("addis_3", function(p) p[3] = "-("..p[3]..")" end), - subic_3 = op_alias("addic_3", function(p) p[3] = "-("..p[3]..")" end), - ["subic._3"] = op_alias("addic._3", function(p) p[3] = "-("..p[3]..")" end), - - rotlwi_3 = op_alias("rlwinm_5", function(p) - p[4] = "0"; p[5] = "31" - end), - rotrwi_3 = op_alias("rlwinm_5", function(p) - p[3] = "32-("..p[3]..")"; p[4] = "0"; p[5] = "31" - end), - rotlw_3 = op_alias("rlwnm_5", function(p) - p[4] = "0"; p[5] = "31" - end), - slwi_3 = op_alias("rlwinm_5", function(p) - p[5] = "31-("..p[3]..")"; p[4] = "0" - end), - srwi_3 = op_alias("rlwinm_5", function(p) - p[4] = p[3]; p[3] = "32-("..p[3]..")"; p[5] = "31" - end), - clrlwi_3 = op_alias("rlwinm_5", function(p) - p[4] = p[3]; p[3] = "0"; p[5] = "31" - end), - clrrwi_3 = op_alias("rlwinm_5", function(p) - p[5] = "31-("..p[3]..")"; p[3] = "0"; p[4] = "0" - end), - - -- Primary opcode 4: - mulhhwu_3 = "10000010RRR.", - machhwu_3 = "10000018RRR.", - mulhhw_3 = "10000050RRR.", - nmachhw_3 = "1000005cRRR.", - machhwsu_3 = "10000098RRR.", - machhws_3 = "100000d8RRR.", - nmachhws_3 = "100000dcRRR.", - mulchwu_3 = "10000110RRR.", - macchwu_3 = "10000118RRR.", - mulchw_3 = "10000150RRR.", - macchw_3 = "10000158RRR.", - nmacchw_3 = "1000015cRRR.", - macchwsu_3 = "10000198RRR.", - macchws_3 = "100001d8RRR.", - nmacchws_3 = "100001dcRRR.", - mullhw_3 = "10000350RRR.", - maclhw_3 = "10000358RRR.", - nmaclhw_3 = "1000035cRRR.", - maclhwsu_3 = "10000398RRR.", - maclhws_3 = "100003d8RRR.", - nmaclhws_3 = "100003dcRRR.", - machhwuo_3 = "10000418RRR.", - nmachhwo_3 = "1000045cRRR.", - machhwsuo_3 = "10000498RRR.", - machhwso_3 = "100004d8RRR.", - nmachhwso_3 = "100004dcRRR.", - macchwuo_3 = "10000518RRR.", - macchwo_3 = "10000558RRR.", - nmacchwo_3 = "1000055cRRR.", - macchwsuo_3 = "10000598RRR.", - macchwso_3 = "100005d8RRR.", - nmacchwso_3 = "100005dcRRR.", - maclhwo_3 = "10000758RRR.", - nmaclhwo_3 = "1000075cRRR.", - maclhwsuo_3 = "10000798RRR.", - maclhwso_3 = "100007d8RRR.", - nmaclhwso_3 = "100007dcRRR.", - - vaddubm_3 = "10000000VVV", - vmaxub_3 = "10000002VVV", - vrlb_3 = "10000004VVV", - vcmpequb_3 = "10000006VVV", - vmuloub_3 = "10000008VVV", - vaddfp_3 = "1000000aVVV", - vmrghb_3 = "1000000cVVV", - vpkuhum_3 = "1000000eVVV", - vmhaddshs_4 = "10000020VVVV", - vmhraddshs_4 = "10000021VVVV", - vmladduhm_4 = "10000022VVVV", - vmsumubm_4 = "10000024VVVV", - vmsummbm_4 = "10000025VVVV", - vmsumuhm_4 = "10000026VVVV", - vmsumuhs_4 = "10000027VVVV", - vmsumshm_4 = "10000028VVVV", - vmsumshs_4 = "10000029VVVV", - vsel_4 = "1000002aVVVV", - vperm_4 = "1000002bVVVV", - vsldoi_4 = "1000002cVVVP", - vpermxor_4 = "1000002dVVVV", - vmaddfp_4 = "1000002eVVVV~", - vnmsubfp_4 = "1000002fVVVV~", - vaddeuqm_4 = "1000003cVVVV", - vaddecuq_4 = "1000003dVVVV", - vsubeuqm_4 = "1000003eVVVV", - vsubecuq_4 = "1000003fVVVV", - vadduhm_3 = "10000040VVV", - vmaxuh_3 = "10000042VVV", - vrlh_3 = "10000044VVV", - vcmpequh_3 = "10000046VVV", - vmulouh_3 = "10000048VVV", - vsubfp_3 = "1000004aVVV", - vmrghh_3 = "1000004cVVV", - vpkuwum_3 = "1000004eVVV", - vadduwm_3 = "10000080VVV", - vmaxuw_3 = "10000082VVV", - vrlw_3 = "10000084VVV", - vcmpequw_3 = "10000086VVV", - vmulouw_3 = "10000088VVV", - vmuluwm_3 = "10000089VVV", - vmrghw_3 = "1000008cVVV", - vpkuhus_3 = "1000008eVVV", - vaddudm_3 = "100000c0VVV", - vmaxud_3 = "100000c2VVV", - vrld_3 = 
"100000c4VVV", - vcmpeqfp_3 = "100000c6VVV", - vcmpequd_3 = "100000c7VVV", - vpkuwus_3 = "100000ceVVV", - vadduqm_3 = "10000100VVV", - vmaxsb_3 = "10000102VVV", - vslb_3 = "10000104VVV", - vmulosb_3 = "10000108VVV", - vrefp_2 = "1000010aV-V", - vmrglb_3 = "1000010cVVV", - vpkshus_3 = "1000010eVVV", - vaddcuq_3 = "10000140VVV", - vmaxsh_3 = "10000142VVV", - vslh_3 = "10000144VVV", - vmulosh_3 = "10000148VVV", - vrsqrtefp_2 = "1000014aV-V", - vmrglh_3 = "1000014cVVV", - vpkswus_3 = "1000014eVVV", - vaddcuw_3 = "10000180VVV", - vmaxsw_3 = "10000182VVV", - vslw_3 = "10000184VVV", - vmulosw_3 = "10000188VVV", - vexptefp_2 = "1000018aV-V", - vmrglw_3 = "1000018cVVV", - vpkshss_3 = "1000018eVVV", - vmaxsd_3 = "100001c2VVV", - vsl_3 = "100001c4VVV", - vcmpgefp_3 = "100001c6VVV", - vlogefp_2 = "100001caV-V", - vpkswss_3 = "100001ceVVV", - vadduhs_3 = "10000240VVV", - vminuh_3 = "10000242VVV", - vsrh_3 = "10000244VVV", - vcmpgtuh_3 = "10000246VVV", - vmuleuh_3 = "10000248VVV", - vrfiz_2 = "1000024aV-V", - vsplth_3 = "1000024cVV3", - vupkhsh_2 = "1000024eV-V", - vminuw_3 = "10000282VVV", - vminud_3 = "100002c2VVV", - vcmpgtud_3 = "100002c7VVV", - vrfim_2 = "100002caV-V", - vcmpgtsb_3 = "10000306VVV", - vcfux_3 = "1000030aVVA~", - vaddshs_3 = "10000340VVV", - vminsh_3 = "10000342VVV", - vsrah_3 = "10000344VVV", - vcmpgtsh_3 = "10000346VVV", - vmulesh_3 = "10000348VVV", - vcfsx_3 = "1000034aVVA~", - vspltish_2 = "1000034cVS", - vupkhpx_2 = "1000034eV-V", - vaddsws_3 = "10000380VVV", - vminsw_3 = "10000382VVV", - vsraw_3 = "10000384VVV", - vcmpgtsw_3 = "10000386VVV", - vmulesw_3 = "10000388VVV", - vctuxs_3 = "1000038aVVA~", - vspltisw_2 = "1000038cVS", - vminsd_3 = "100003c2VVV", - vsrad_3 = "100003c4VVV", - vcmpbfp_3 = "100003c6VVV", - vcmpgtsd_3 = "100003c7VVV", - vctsxs_3 = "100003caVVA~", - vupklpx_2 = "100003ceV-V", - vsububm_3 = "10000400VVV", - ["bcdadd._4"] = "10000401VVVy.", - vavgub_3 = "10000402VVV", - vand_3 = "10000404VVV", - ["vcmpequb._3"] = "10000406VVV", - vmaxfp_3 = "1000040aVVV", - vsubuhm_3 = "10000440VVV", - ["bcdsub._4"] = "10000441VVVy.", - vavguh_3 = "10000442VVV", - vandc_3 = "10000444VVV", - ["vcmpequh._3"] = "10000446VVV", - vminfp_3 = "1000044aVVV", - vpkudum_3 = "1000044eVVV", - vsubuwm_3 = "10000480VVV", - vavguw_3 = "10000482VVV", - vor_3 = "10000484VVV", - ["vcmpequw._3"] = "10000486VVV", - vpmsumw_3 = "10000488VVV", - ["vcmpeqfp._3"] = "100004c6VVV", - ["vcmpequd._3"] = "100004c7VVV", - vpkudus_3 = "100004ceVVV", - vavgsb_3 = "10000502VVV", - vavgsh_3 = "10000542VVV", - vorc_3 = "10000544VVV", - vbpermq_3 = "1000054cVVV", - vpksdus_3 = "1000054eVVV", - vavgsw_3 = "10000582VVV", - vsld_3 = "100005c4VVV", - ["vcmpgefp._3"] = "100005c6VVV", - vpksdss_3 = "100005ceVVV", - vsububs_3 = "10000600VVV", - mfvscr_1 = "10000604V--", - vsum4ubs_3 = "10000608VVV", - vsubuhs_3 = "10000640VVV", - mtvscr_1 = "10000644--V", - ["vcmpgtuh._3"] = "10000646VVV", - vsum4shs_3 = "10000648VVV", - vupkhsw_2 = "1000064eV-V", - vsubuws_3 = "10000680VVV", - vshasigmaw_4 = "10000682VVYp", - veqv_3 = "10000684VVV", - vsum2sws_3 = "10000688VVV", - vmrgow_3 = "1000068cVVV", - vshasigmad_4 = "100006c2VVYp", - vsrd_3 = "100006c4VVV", - ["vcmpgtud._3"] = "100006c7VVV", - vupklsw_2 = "100006ceV-V", - vupkslw_2 = "100006ceV-V", - vsubsbs_3 = "10000700VVV", - vclzb_2 = "10000702V-V", - vpopcntb_2 = "10000703V-V", - ["vcmpgtsb._3"] = "10000706VVV", - vsum4sbs_3 = "10000708VVV", - vsubshs_3 = "10000740VVV", - vclzh_2 = "10000742V-V", - vpopcnth_2 = "10000743V-V", - ["vcmpgtsh._3"] = "10000746VVV", - vsubsws_3 
= "10000780VVV", - vclzw_2 = "10000782V-V", - vpopcntw_2 = "10000783V-V", - ["vcmpgtsw._3"] = "10000786VVV", - vsumsws_3 = "10000788VVV", - vmrgew_3 = "1000078cVVV", - vclzd_2 = "100007c2V-V", - vpopcntd_2 = "100007c3V-V", - ["vcmpbfp._3"] = "100007c6VVV", - ["vcmpgtsd._3"] = "100007c7VVV", - - -- Primary opcode 19: - mcrf_2 = "4c000000XX", - isync_0 = "4c00012c", - crnor_3 = "4c000042CCC", - crnot_2 = "4c000042CC=", - crandc_3 = "4c000102CCC", - crxor_3 = "4c000182CCC", - crclr_1 = "4c000182C==", - crnand_3 = "4c0001c2CCC", - crand_3 = "4c000202CCC", - creqv_3 = "4c000242CCC", - crset_1 = "4c000242C==", - crorc_3 = "4c000342CCC", - cror_3 = "4c000382CCC", - crmove_2 = "4c000382CC=", - bclr_2 = "4c000020AA", - bclrl_2 = "4c000021AA", - bcctr_2 = "4c000420AA", - bcctrl_2 = "4c000421AA", - bctar_2 = "4c000460AA", - bctarl_2 = "4c000461AA", - blr_0 = "4e800020", - blrl_0 = "4e800021", - bctr_0 = "4e800420", - bctrl_0 = "4e800421", - - -- Primary opcode 31: - cmpw_3 = "7c000000XRR", - cmpw_2 = "7c000000-RR", - cmpd_3 = "7c200000XRR", - cmpd_2 = "7c200000-RR", - tw_3 = "7c000008ARR", - lvsl_3 = "7c00000cVRR", - subfc_3 = "7c000010RRR.", - subc_3 = "7c000010RRR~.", - mulhdu_3 = "7c000012RRR.", - addc_3 = "7c000014RRR.", - mulhwu_3 = "7c000016RRR.", - isel_4 = "7c00001eRRRC", - isellt_3 = "7c00001eRRR", - iselgt_3 = "7c00005eRRR", - iseleq_3 = "7c00009eRRR", - mfcr_1 = "7c000026R", - mfocrf_2 = "7c100026RG", - mtcrf_2 = "7c000120GR", - mtocrf_2 = "7c100120GR", - lwarx_3 = "7c000028RR0R", - ldx_3 = "7c00002aRR0R", - lwzx_3 = "7c00002eRR0R", - slw_3 = "7c000030RR~R.", - cntlzw_2 = "7c000034RR~", - sld_3 = "7c000036RR~R.", - and_3 = "7c000038RR~R.", - cmplw_3 = "7c000040XRR", - cmplw_2 = "7c000040-RR", - cmpld_3 = "7c200040XRR", - cmpld_2 = "7c200040-RR", - lvsr_3 = "7c00004cVRR", - subf_3 = "7c000050RRR.", - sub_3 = "7c000050RRR~.", - lbarx_3 = "7c000068RR0R", - ldux_3 = "7c00006aRR0R", - dcbst_2 = "7c00006c-RR", - lwzux_3 = "7c00006eRR0R", - cntlzd_2 = "7c000074RR~", - andc_3 = "7c000078RR~R.", - td_3 = "7c000088ARR", - lvewx_3 = "7c00008eVRR", - mulhd_3 = "7c000092RRR.", - addg6s_3 = "7c000094RRR", - mulhw_3 = "7c000096RRR.", - dlmzb_3 = "7c00009cRR~R.", - ldarx_3 = "7c0000a8RR0R", - dcbf_2 = "7c0000ac-RR", - lbzx_3 = "7c0000aeRR0R", - lvx_3 = "7c0000ceVRR", - neg_2 = "7c0000d0RR.", - lharx_3 = "7c0000e8RR0R", - lbzux_3 = "7c0000eeRR0R", - popcntb_2 = "7c0000f4RR~", - not_2 = "7c0000f8RR~%.", - nor_3 = "7c0000f8RR~R.", - stvebx_3 = "7c00010eVRR", - subfe_3 = "7c000110RRR.", - sube_3 = "7c000110RRR~.", - adde_3 = "7c000114RRR.", - stdx_3 = "7c00012aRR0R", - ["stwcx._3"] = "7c00012dRR0R.", - stwx_3 = "7c00012eRR0R", - prtyw_2 = "7c000134RR~", - stvehx_3 = "7c00014eVRR", - stdux_3 = "7c00016aRR0R", - ["stqcx._3"] = "7c00016dR:R0R.", - stwux_3 = "7c00016eRR0R", - prtyd_2 = "7c000174RR~", - stvewx_3 = "7c00018eVRR", - subfze_2 = "7c000190RR.", - addze_2 = "7c000194RR.", - ["stdcx._3"] = "7c0001adRR0R.", - stbx_3 = "7c0001aeRR0R", - stvx_3 = "7c0001ceVRR", - subfme_2 = "7c0001d0RR.", - mulld_3 = "7c0001d2RRR.", - addme_2 = "7c0001d4RR.", - mullw_3 = "7c0001d6RRR.", - dcbtst_2 = "7c0001ec-RR", - stbux_3 = "7c0001eeRR0R", - bpermd_3 = "7c0001f8RR~R", - lvepxl_3 = "7c00020eVRR", - add_3 = "7c000214RRR.", - lqarx_3 = "7c000228R:R0R", - dcbt_2 = "7c00022c-RR", - lhzx_3 = "7c00022eRR0R", - cdtbcd_2 = "7c000234RR~", - eqv_3 = "7c000238RR~R.", - lvepx_3 = "7c00024eVRR", - eciwx_3 = "7c00026cRR0R", - lhzux_3 = "7c00026eRR0R", - cbcdtd_2 = "7c000274RR~", - xor_3 = "7c000278RR~R.", - mfspefscr_1 = "7c0082a6R", - 
mfxer_1 = "7c0102a6R", - mflr_1 = "7c0802a6R", - mfctr_1 = "7c0902a6R", - lwax_3 = "7c0002aaRR0R", - lhax_3 = "7c0002aeRR0R", - mftb_1 = "7c0c42e6R", - mftbu_1 = "7c0d42e6R", - lvxl_3 = "7c0002ceVRR", - lwaux_3 = "7c0002eaRR0R", - lhaux_3 = "7c0002eeRR0R", - popcntw_2 = "7c0002f4RR~", - divdeu_3 = "7c000312RRR.", - divweu_3 = "7c000316RRR.", - sthx_3 = "7c00032eRR0R", - orc_3 = "7c000338RR~R.", - ecowx_3 = "7c00036cRR0R", - sthux_3 = "7c00036eRR0R", - or_3 = "7c000378RR~R.", - mr_2 = "7c000378RR~%.", - divdu_3 = "7c000392RRR.", - divwu_3 = "7c000396RRR.", - mtspefscr_1 = "7c0083a6R", - mtxer_1 = "7c0103a6R", - mtlr_1 = "7c0803a6R", - mtctr_1 = "7c0903a6R", - dcbi_2 = "7c0003ac-RR", - nand_3 = "7c0003b8RR~R.", - dsn_2 = "7c0003c6-RR", - stvxl_3 = "7c0003ceVRR", - divd_3 = "7c0003d2RRR.", - divw_3 = "7c0003d6RRR.", - popcntd_2 = "7c0003f4RR~", - cmpb_3 = "7c0003f8RR~R.", - mcrxr_1 = "7c000400X", - lbdx_3 = "7c000406RRR", - subfco_3 = "7c000410RRR.", - subco_3 = "7c000410RRR~.", - addco_3 = "7c000414RRR.", - ldbrx_3 = "7c000428RR0R", - lswx_3 = "7c00042aRR0R", - lwbrx_3 = "7c00042cRR0R", - lfsx_3 = "7c00042eFR0R", - srw_3 = "7c000430RR~R.", - srd_3 = "7c000436RR~R.", - lhdx_3 = "7c000446RRR", - subfo_3 = "7c000450RRR.", - subo_3 = "7c000450RRR~.", - lfsux_3 = "7c00046eFR0R", - lwdx_3 = "7c000486RRR", - lswi_3 = "7c0004aaRR0A", - sync_0 = "7c0004ac", - lwsync_0 = "7c2004ac", - ptesync_0 = "7c4004ac", - lfdx_3 = "7c0004aeFR0R", - lddx_3 = "7c0004c6RRR", - nego_2 = "7c0004d0RR.", - lfdux_3 = "7c0004eeFR0R", - stbdx_3 = "7c000506RRR", - subfeo_3 = "7c000510RRR.", - subeo_3 = "7c000510RRR~.", - addeo_3 = "7c000514RRR.", - stdbrx_3 = "7c000528RR0R", - stswx_3 = "7c00052aRR0R", - stwbrx_3 = "7c00052cRR0R", - stfsx_3 = "7c00052eFR0R", - sthdx_3 = "7c000546RRR", - ["stbcx._3"] = "7c00056dRRR", - stfsux_3 = "7c00056eFR0R", - stwdx_3 = "7c000586RRR", - subfzeo_2 = "7c000590RR.", - addzeo_2 = "7c000594RR.", - stswi_3 = "7c0005aaRR0A", - ["sthcx._3"] = "7c0005adRRR", - stfdx_3 = "7c0005aeFR0R", - stddx_3 = "7c0005c6RRR", - subfmeo_2 = "7c0005d0RR.", - mulldo_3 = "7c0005d2RRR.", - addmeo_2 = "7c0005d4RR.", - mullwo_3 = "7c0005d6RRR.", - dcba_2 = "7c0005ec-RR", - stfdux_3 = "7c0005eeFR0R", - stvepxl_3 = "7c00060eVRR", - addo_3 = "7c000614RRR.", - lhbrx_3 = "7c00062cRR0R", - lfdpx_3 = "7c00062eF:RR", - sraw_3 = "7c000630RR~R.", - srad_3 = "7c000634RR~R.", - lfddx_3 = "7c000646FRR", - stvepx_3 = "7c00064eVRR", - srawi_3 = "7c000670RR~A.", - sradi_3 = "7c000674RR~H.", - eieio_0 = "7c0006ac", - lfiwax_3 = "7c0006aeFR0R", - divdeuo_3 = "7c000712RRR.", - divweuo_3 = "7c000716RRR.", - sthbrx_3 = "7c00072cRR0R", - stfdpx_3 = "7c00072eF:RR", - extsh_2 = "7c000734RR~.", - stfddx_3 = "7c000746FRR", - divdeo_3 = "7c000752RRR.", - divweo_3 = "7c000756RRR.", - extsb_2 = "7c000774RR~.", - divduo_3 = "7c000792RRR.", - divwou_3 = "7c000796RRR.", - icbi_2 = "7c0007ac-RR", - stfiwx_3 = "7c0007aeFR0R", - extsw_2 = "7c0007b4RR~.", - divdo_3 = "7c0007d2RRR.", - divwo_3 = "7c0007d6RRR.", - dcbz_2 = "7c0007ec-RR", - - ["tbegin._1"] = "7c00051d1", - ["tbegin._0"] = "7c00051d", - ["tend._1"] = "7c00055dY", - ["tend._0"] = "7c00055d", - ["tendall._0"] = "7e00055d", - tcheck_1 = "7c00059cX", - ["tsr._1"] = "7c0005dd1", - ["tsuspend._0"] = "7c0005dd", - ["tresume._0"] = "7c2005dd", - ["tabortwc._3"] = "7c00061dARR", - ["tabortdc._3"] = "7c00065dARR", - ["tabortwci._3"] = "7c00069dARS", - ["tabortdci._3"] = "7c0006ddARS", - ["tabort._1"] = "7c00071d-R-", - ["treclaim._1"] = "7c00075d-R", - ["trechkpt._0"] = "7c0007dd", - - lxsiwzx_3 = 
"7c000018QRR", - lxsiwax_3 = "7c000098QRR", - mfvsrd_2 = "7c000066-Rq", - mfvsrwz_2 = "7c0000e6-Rq", - stxsiwx_3 = "7c000118QRR", - mtvsrd_2 = "7c000166QR", - mtvsrwa_2 = "7c0001a6QR", - lxvdsx_3 = "7c000298QRR", - lxsspx_3 = "7c000418QRR", - lxsdx_3 = "7c000498QRR", - stxsspx_3 = "7c000518QRR", - stxsdx_3 = "7c000598QRR", - lxvw4x_3 = "7c000618QRR", - lxvd2x_3 = "7c000698QRR", - stxvw4x_3 = "7c000718QRR", - stxvd2x_3 = "7c000798QRR", - - -- Primary opcode 30: - rldicl_4 = "78000000RR~HM.", - rldicr_4 = "78000004RR~HM.", - rldic_4 = "78000008RR~HM.", - rldimi_4 = "7800000cRR~HM.", - rldcl_4 = "78000010RR~RM.", - rldcr_4 = "78000012RR~RM.", - - rotldi_3 = op_alias("rldicl_4", function(p) - p[4] = "0" - end), - rotrdi_3 = op_alias("rldicl_4", function(p) - p[3] = "64-("..p[3]..")"; p[4] = "0" - end), - rotld_3 = op_alias("rldcl_4", function(p) - p[4] = "0" - end), - sldi_3 = op_alias("rldicr_4", function(p) - p[4] = "63-("..p[3]..")" - end), - srdi_3 = op_alias("rldicl_4", function(p) - p[4] = p[3]; p[3] = "64-("..p[3]..")" - end), - clrldi_3 = op_alias("rldicl_4", function(p) - p[4] = p[3]; p[3] = "0" - end), - clrrdi_3 = op_alias("rldicr_4", function(p) - p[4] = "63-("..p[3]..")"; p[3] = "0" - end), - - -- Primary opcode 56: - lq_2 = "e0000000R:D", -- NYI: displacement must be divisible by 8. - - -- Primary opcode 57: - lfdp_2 = "e4000000F:D", -- NYI: displacement must be divisible by 4. - - -- Primary opcode 59: - fdivs_3 = "ec000024FFF.", - fsubs_3 = "ec000028FFF.", - fadds_3 = "ec00002aFFF.", - fsqrts_2 = "ec00002cF-F.", - fres_2 = "ec000030F-F.", - fmuls_3 = "ec000032FF-F.", - frsqrtes_2 = "ec000034F-F.", - fmsubs_4 = "ec000038FFFF~.", - fmadds_4 = "ec00003aFFFF~.", - fnmsubs_4 = "ec00003cFFFF~.", - fnmadds_4 = "ec00003eFFFF~.", - fcfids_2 = "ec00069cF-F.", - fcfidus_2 = "ec00079cF-F.", - - dadd_3 = "ec000004FFF.", - dqua_4 = "ec000006FFFZ.", - dmul_3 = "ec000044FFF.", - drrnd_4 = "ec000046FFFZ.", - dscli_3 = "ec000084FF6.", - dquai_4 = "ec000086SF~FZ.", - dscri_3 = "ec0000c4FF6.", - drintx_4 = "ec0000c61F~FZ.", - dcmpo_3 = "ec000104XFF", - dtstex_3 = "ec000144XFF", - dtstdc_3 = "ec000184XF6", - dtstdg_3 = "ec0001c4XF6", - drintn_4 = "ec0001c61F~FZ.", - dctdp_2 = "ec000204F-F.", - dctfix_2 = "ec000244F-F.", - ddedpd_3 = "ec000284ZF~F.", - dxex_2 = "ec0002c4F-F.", - dsub_3 = "ec000404FFF.", - ddiv_3 = "ec000444FFF.", - dcmpu_3 = "ec000504XFF", - dtstsf_3 = "ec000544XFF", - drsp_2 = "ec000604F-F.", - dcffix_2 = "ec000644F-F.", - denbcd_3 = "ec000684YF~F.", - diex_3 = "ec0006c4FFF.", - - -- Primary opcode 60: - xsaddsp_3 = "f0000000QQQ", - xsmaddasp_3 = "f0000008QQQ", - xxsldwi_4 = "f0000010QQQz", - xsrsqrtesp_2 = "f0000028Q-Q", - xssqrtsp_2 = "f000002cQ-Q", - xxsel_4 = "f0000030QQQQ", - xssubsp_3 = "f0000040QQQ", - xsmaddmsp_3 = "f0000048QQQ", - xxpermdi_4 = "f0000050QQQz", - xsresp_2 = "f0000068Q-Q", - xsmulsp_3 = "f0000080QQQ", - xsmsubasp_3 = "f0000088QQQ", - xxmrghw_3 = "f0000090QQQ", - xsdivsp_3 = "f00000c0QQQ", - xsmsubmsp_3 = "f00000c8QQQ", - xsadddp_3 = "f0000100QQQ", - xsmaddadp_3 = "f0000108QQQ", - xscmpudp_3 = "f0000118XQQ", - xscvdpuxws_2 = "f0000120Q-Q", - xsrdpi_2 = "f0000124Q-Q", - xsrsqrtedp_2 = "f0000128Q-Q", - xssqrtdp_2 = "f000012cQ-Q", - xssubdp_3 = "f0000140QQQ", - xsmaddmdp_3 = "f0000148QQQ", - xscmpodp_3 = "f0000158XQQ", - xscvdpsxws_2 = "f0000160Q-Q", - xsrdpiz_2 = "f0000164Q-Q", - xsredp_2 = "f0000168Q-Q", - xsmuldp_3 = "f0000180QQQ", - xsmsubadp_3 = "f0000188QQQ", - xxmrglw_3 = "f0000190QQQ", - xsrdpip_2 = "f00001a4Q-Q", - xstsqrtdp_2 = "f00001a8X-Q", - 
xsrdpic_2 = "f00001acQ-Q", - xsdivdp_3 = "f00001c0QQQ", - xsmsubmdp_3 = "f00001c8QQQ", - xsrdpim_2 = "f00001e4Q-Q", - xstdivdp_3 = "f00001e8XQQ", - xvaddsp_3 = "f0000200QQQ", - xvmaddasp_3 = "f0000208QQQ", - xvcmpeqsp_3 = "f0000218QQQ", - xvcvspuxws_2 = "f0000220Q-Q", - xvrspi_2 = "f0000224Q-Q", - xvrsqrtesp_2 = "f0000228Q-Q", - xvsqrtsp_2 = "f000022cQ-Q", - xvsubsp_3 = "f0000240QQQ", - xvmaddmsp_3 = "f0000248QQQ", - xvcmpgtsp_3 = "f0000258QQQ", - xvcvspsxws_2 = "f0000260Q-Q", - xvrspiz_2 = "f0000264Q-Q", - xvresp_2 = "f0000268Q-Q", - xvmulsp_3 = "f0000280QQQ", - xvmsubasp_3 = "f0000288QQQ", - xxspltw_3 = "f0000290QQg~", - xvcmpgesp_3 = "f0000298QQQ", - xvcvuxwsp_2 = "f00002a0Q-Q", - xvrspip_2 = "f00002a4Q-Q", - xvtsqrtsp_2 = "f00002a8X-Q", - xvrspic_2 = "f00002acQ-Q", - xvdivsp_3 = "f00002c0QQQ", - xvmsubmsp_3 = "f00002c8QQQ", - xvcvsxwsp_2 = "f00002e0Q-Q", - xvrspim_2 = "f00002e4Q-Q", - xvtdivsp_3 = "f00002e8XQQ", - xvadddp_3 = "f0000300QQQ", - xvmaddadp_3 = "f0000308QQQ", - xvcmpeqdp_3 = "f0000318QQQ", - xvcvdpuxws_2 = "f0000320Q-Q", - xvrdpi_2 = "f0000324Q-Q", - xvrsqrtedp_2 = "f0000328Q-Q", - xvsqrtdp_2 = "f000032cQ-Q", - xvsubdp_3 = "f0000340QQQ", - xvmaddmdp_3 = "f0000348QQQ", - xvcmpgtdp_3 = "f0000358QQQ", - xvcvdpsxws_2 = "f0000360Q-Q", - xvrdpiz_2 = "f0000364Q-Q", - xvredp_2 = "f0000368Q-Q", - xvmuldp_3 = "f0000380QQQ", - xvmsubadp_3 = "f0000388QQQ", - xvcmpgedp_3 = "f0000398QQQ", - xvcvuxwdp_2 = "f00003a0Q-Q", - xvrdpip_2 = "f00003a4Q-Q", - xvtsqrtdp_2 = "f00003a8X-Q", - xvrdpic_2 = "f00003acQ-Q", - xvdivdp_3 = "f00003c0QQQ", - xvmsubmdp_3 = "f00003c8QQQ", - xvcvsxwdp_2 = "f00003e0Q-Q", - xvrdpim_2 = "f00003e4Q-Q", - xvtdivdp_3 = "f00003e8XQQ", - xsnmaddasp_3 = "f0000408QQQ", - xxland_3 = "f0000410QQQ", - xscvdpsp_2 = "f0000424Q-Q", - xscvdpspn_2 = "f000042cQ-Q", - xsnmaddmsp_3 = "f0000448QQQ", - xxlandc_3 = "f0000450QQQ", - xsrsp_2 = "f0000464Q-Q", - xsnmsubasp_3 = "f0000488QQQ", - xxlor_3 = "f0000490QQQ", - xscvuxdsp_2 = "f00004a0Q-Q", - xsnmsubmsp_3 = "f00004c8QQQ", - xxlxor_3 = "f00004d0QQQ", - xscvsxdsp_2 = "f00004e0Q-Q", - xsmaxdp_3 = "f0000500QQQ", - xsnmaddadp_3 = "f0000508QQQ", - xxlnor_3 = "f0000510QQQ", - xscvdpuxds_2 = "f0000520Q-Q", - xscvspdp_2 = "f0000524Q-Q", - xscvspdpn_2 = "f000052cQ-Q", - xsmindp_3 = "f0000540QQQ", - xsnmaddmdp_3 = "f0000548QQQ", - xxlorc_3 = "f0000550QQQ", - xscvdpsxds_2 = "f0000560Q-Q", - xsabsdp_2 = "f0000564Q-Q", - xscpsgndp_3 = "f0000580QQQ", - xsnmsubadp_3 = "f0000588QQQ", - xxlnand_3 = "f0000590QQQ", - xscvuxddp_2 = "f00005a0Q-Q", - xsnabsdp_2 = "f00005a4Q-Q", - xsnmsubmdp_3 = "f00005c8QQQ", - xxleqv_3 = "f00005d0QQQ", - xscvsxddp_2 = "f00005e0Q-Q", - xsnegdp_2 = "f00005e4Q-Q", - xvmaxsp_3 = "f0000600QQQ", - xvnmaddasp_3 = "f0000608QQQ", - ["xvcmpeqsp._3"] = "f0000618QQQ", - xvcvspuxds_2 = "f0000620Q-Q", - xvcvdpsp_2 = "f0000624Q-Q", - xvminsp_3 = "f0000640QQQ", - xvnmaddmsp_3 = "f0000648QQQ", - ["xvcmpgtsp._3"] = "f0000658QQQ", - xvcvspsxds_2 = "f0000660Q-Q", - xvabssp_2 = "f0000664Q-Q", - xvcpsgnsp_3 = "f0000680QQQ", - xvnmsubasp_3 = "f0000688QQQ", - ["xvcmpgesp._3"] = "f0000698QQQ", - xvcvuxdsp_2 = "f00006a0Q-Q", - xvnabssp_2 = "f00006a4Q-Q", - xvnmsubmsp_3 = "f00006c8QQQ", - xvcvsxdsp_2 = "f00006e0Q-Q", - xvnegsp_2 = "f00006e4Q-Q", - xvmaxdp_3 = "f0000700QQQ", - xvnmaddadp_3 = "f0000708QQQ", - ["xvcmpeqdp._3"] = "f0000718QQQ", - xvcvdpuxds_2 = "f0000720Q-Q", - xvcvspdp_2 = "f0000724Q-Q", - xvmindp_3 = "f0000740QQQ", - xvnmaddmdp_3 = "f0000748QQQ", - ["xvcmpgtdp._3"] = "f0000758QQQ", - xvcvdpsxds_2 = "f0000760Q-Q", - xvabsdp_2 = 
"f0000764Q-Q", - xvcpsgndp_3 = "f0000780QQQ", - xvnmsubadp_3 = "f0000788QQQ", - ["xvcmpgedp._3"] = "f0000798QQQ", - xvcvuxddp_2 = "f00007a0Q-Q", - xvnabsdp_2 = "f00007a4Q-Q", - xvnmsubmdp_3 = "f00007c8QQQ", - xvcvsxddp_2 = "f00007e0Q-Q", - xvnegdp_2 = "f00007e4Q-Q", - - -- Primary opcode 61: - stfdp_2 = "f4000000F:D", -- NYI: displacement must be divisible by 4. - - -- Primary opcode 62: - stq_2 = "f8000002R:D", -- NYI: displacement must be divisible by 8. - - -- Primary opcode 63: - fdiv_3 = "fc000024FFF.", - fsub_3 = "fc000028FFF.", - fadd_3 = "fc00002aFFF.", - fsqrt_2 = "fc00002cF-F.", - fsel_4 = "fc00002eFFFF~.", - fre_2 = "fc000030F-F.", - fmul_3 = "fc000032FF-F.", - frsqrte_2 = "fc000034F-F.", - fmsub_4 = "fc000038FFFF~.", - fmadd_4 = "fc00003aFFFF~.", - fnmsub_4 = "fc00003cFFFF~.", - fnmadd_4 = "fc00003eFFFF~.", - fcmpu_3 = "fc000000XFF", - fcpsgn_3 = "fc000010FFF.", - fcmpo_3 = "fc000040XFF", - mtfsb1_1 = "fc00004cA", - fneg_2 = "fc000050F-F.", - mcrfs_2 = "fc000080XX", - mtfsb0_1 = "fc00008cA", - fmr_2 = "fc000090F-F.", - frsp_2 = "fc000018F-F.", - fctiw_2 = "fc00001cF-F.", - fctiwz_2 = "fc00001eF-F.", - ftdiv_2 = "fc000100X-F.", - fctiwu_2 = "fc00011cF-F.", - fctiwuz_2 = "fc00011eF-F.", - mtfsfi_2 = "fc00010cAA", -- NYI: upshift. - fnabs_2 = "fc000110F-F.", - ftsqrt_2 = "fc000140X-F.", - fabs_2 = "fc000210F-F.", - frin_2 = "fc000310F-F.", - friz_2 = "fc000350F-F.", - frip_2 = "fc000390F-F.", - frim_2 = "fc0003d0F-F.", - mffs_1 = "fc00048eF.", - -- NYI: mtfsf, mtfsb0, mtfsb1. - fctid_2 = "fc00065cF-F.", - fctidz_2 = "fc00065eF-F.", - fmrgow_3 = "fc00068cFFF", - fcfid_2 = "fc00069cF-F.", - fctidu_2 = "fc00075cF-F.", - fctiduz_2 = "fc00075eF-F.", - fmrgew_3 = "fc00078cFFF", - fcfidu_2 = "fc00079cF-F.", - - daddq_3 = "fc000004F:F:F:.", - dquaq_4 = "fc000006F:F:F:Z.", - dmulq_3 = "fc000044F:F:F:.", - drrndq_4 = "fc000046F:F:F:Z.", - dscliq_3 = "fc000084F:F:6.", - dquaiq_4 = "fc000086SF:~F:Z.", - dscriq_3 = "fc0000c4F:F:6.", - drintxq_4 = "fc0000c61F:~F:Z.", - dcmpoq_3 = "fc000104XF:F:", - dtstexq_3 = "fc000144XF:F:", - dtstdcq_3 = "fc000184XF:6", - dtstdgq_3 = "fc0001c4XF:6", - drintnq_4 = "fc0001c61F:~F:Z.", - dctqpq_2 = "fc000204F:-F:.", - dctfixq_2 = "fc000244F:-F:.", - ddedpdq_3 = "fc000284ZF:~F:.", - dxexq_2 = "fc0002c4F:-F:.", - dsubq_3 = "fc000404F:F:F:.", - ddivq_3 = "fc000444F:F:F:.", - dcmpuq_3 = "fc000504XF:F:", - dtstsfq_3 = "fc000544XF:F:", - drdpq_2 = "fc000604F:-F:.", - dcffixq_2 = "fc000644F:-F:.", - denbcdq_3 = "fc000684YF:~F:.", - diexq_3 = "fc0006c4F:FF:.", - - -- Primary opcode 4, SPE APU extension: - evaddw_3 = "10000200RRR", - evaddiw_3 = "10000202RAR~", - evsubw_3 = "10000204RRR~", - evsubiw_3 = "10000206RAR~", - evabs_2 = "10000208RR", - evneg_2 = "10000209RR", - evextsb_2 = "1000020aRR", - evextsh_2 = "1000020bRR", - evrndw_2 = "1000020cRR", - evcntlzw_2 = "1000020dRR", - evcntlsw_2 = "1000020eRR", - brinc_3 = "1000020fRRR", - evand_3 = "10000211RRR", - evandc_3 = "10000212RRR", - evxor_3 = "10000216RRR", - evor_3 = "10000217RRR", - evmr_2 = "10000217RR=", - evnor_3 = "10000218RRR", - evnot_2 = "10000218RR=", - eveqv_3 = "10000219RRR", - evorc_3 = "1000021bRRR", - evnand_3 = "1000021eRRR", - evsrwu_3 = "10000220RRR", - evsrws_3 = "10000221RRR", - evsrwiu_3 = "10000222RRA", - evsrwis_3 = "10000223RRA", - evslw_3 = "10000224RRR", - evslwi_3 = "10000226RRA", - evrlw_3 = "10000228RRR", - evsplati_2 = "10000229RS", - evrlwi_3 = "1000022aRRA", - evsplatfi_2 = "1000022bRS", - evmergehi_3 = "1000022cRRR", - evmergelo_3 = "1000022dRRR", - evcmpgtu_3 = "10000230XRR", - 
evcmpgtu_2 = "10000230-RR", - evcmpgts_3 = "10000231XRR", - evcmpgts_2 = "10000231-RR", - evcmpltu_3 = "10000232XRR", - evcmpltu_2 = "10000232-RR", - evcmplts_3 = "10000233XRR", - evcmplts_2 = "10000233-RR", - evcmpeq_3 = "10000234XRR", - evcmpeq_2 = "10000234-RR", - evsel_4 = "10000278RRRW", - evsel_3 = "10000278RRR", - evfsadd_3 = "10000280RRR", - evfssub_3 = "10000281RRR", - evfsabs_2 = "10000284RR", - evfsnabs_2 = "10000285RR", - evfsneg_2 = "10000286RR", - evfsmul_3 = "10000288RRR", - evfsdiv_3 = "10000289RRR", - evfscmpgt_3 = "1000028cXRR", - evfscmpgt_2 = "1000028c-RR", - evfscmplt_3 = "1000028dXRR", - evfscmplt_2 = "1000028d-RR", - evfscmpeq_3 = "1000028eXRR", - evfscmpeq_2 = "1000028e-RR", - evfscfui_2 = "10000290R-R", - evfscfsi_2 = "10000291R-R", - evfscfuf_2 = "10000292R-R", - evfscfsf_2 = "10000293R-R", - evfsctui_2 = "10000294R-R", - evfsctsi_2 = "10000295R-R", - evfsctuf_2 = "10000296R-R", - evfsctsf_2 = "10000297R-R", - evfsctuiz_2 = "10000298R-R", - evfsctsiz_2 = "1000029aR-R", - evfststgt_3 = "1000029cXRR", - evfststgt_2 = "1000029c-RR", - evfststlt_3 = "1000029dXRR", - evfststlt_2 = "1000029d-RR", - evfststeq_3 = "1000029eXRR", - evfststeq_2 = "1000029e-RR", - efsadd_3 = "100002c0RRR", - efssub_3 = "100002c1RRR", - efsabs_2 = "100002c4RR", - efsnabs_2 = "100002c5RR", - efsneg_2 = "100002c6RR", - efsmul_3 = "100002c8RRR", - efsdiv_3 = "100002c9RRR", - efscmpgt_3 = "100002ccXRR", - efscmpgt_2 = "100002cc-RR", - efscmplt_3 = "100002cdXRR", - efscmplt_2 = "100002cd-RR", - efscmpeq_3 = "100002ceXRR", - efscmpeq_2 = "100002ce-RR", - efscfd_2 = "100002cfR-R", - efscfui_2 = "100002d0R-R", - efscfsi_2 = "100002d1R-R", - efscfuf_2 = "100002d2R-R", - efscfsf_2 = "100002d3R-R", - efsctui_2 = "100002d4R-R", - efsctsi_2 = "100002d5R-R", - efsctuf_2 = "100002d6R-R", - efsctsf_2 = "100002d7R-R", - efsctuiz_2 = "100002d8R-R", - efsctsiz_2 = "100002daR-R", - efststgt_3 = "100002dcXRR", - efststgt_2 = "100002dc-RR", - efststlt_3 = "100002ddXRR", - efststlt_2 = "100002dd-RR", - efststeq_3 = "100002deXRR", - efststeq_2 = "100002de-RR", - efdadd_3 = "100002e0RRR", - efdsub_3 = "100002e1RRR", - efdcfuid_2 = "100002e2R-R", - efdcfsid_2 = "100002e3R-R", - efdabs_2 = "100002e4RR", - efdnabs_2 = "100002e5RR", - efdneg_2 = "100002e6RR", - efdmul_3 = "100002e8RRR", - efddiv_3 = "100002e9RRR", - efdctuidz_2 = "100002eaR-R", - efdctsidz_2 = "100002ebR-R", - efdcmpgt_3 = "100002ecXRR", - efdcmpgt_2 = "100002ec-RR", - efdcmplt_3 = "100002edXRR", - efdcmplt_2 = "100002ed-RR", - efdcmpeq_3 = "100002eeXRR", - efdcmpeq_2 = "100002ee-RR", - efdcfs_2 = "100002efR-R", - efdcfui_2 = "100002f0R-R", - efdcfsi_2 = "100002f1R-R", - efdcfuf_2 = "100002f2R-R", - efdcfsf_2 = "100002f3R-R", - efdctui_2 = "100002f4R-R", - efdctsi_2 = "100002f5R-R", - efdctuf_2 = "100002f6R-R", - efdctsf_2 = "100002f7R-R", - efdctuiz_2 = "100002f8R-R", - efdctsiz_2 = "100002faR-R", - efdtstgt_3 = "100002fcXRR", - efdtstgt_2 = "100002fc-RR", - efdtstlt_3 = "100002fdXRR", - efdtstlt_2 = "100002fd-RR", - efdtsteq_3 = "100002feXRR", - efdtsteq_2 = "100002fe-RR", - evlddx_3 = "10000300RR0R", - evldd_2 = "10000301R8", - evldwx_3 = "10000302RR0R", - evldw_2 = "10000303R8", - evldhx_3 = "10000304RR0R", - evldh_2 = "10000305R8", - evlwhex_3 = "10000310RR0R", - evlwhe_2 = "10000311R4", - evlwhoux_3 = "10000314RR0R", - evlwhou_2 = "10000315R4", - evlwhosx_3 = "10000316RR0R", - evlwhos_2 = "10000317R4", - evstddx_3 = "10000320RR0R", - evstdd_2 = "10000321R8", - evstdwx_3 = "10000322RR0R", - evstdw_2 = "10000323R8", - evstdhx_3 = "10000324RR0R", - 
evstdh_2 = "10000325R8", - evstwhex_3 = "10000330RR0R", - evstwhe_2 = "10000331R4", - evstwhox_3 = "10000334RR0R", - evstwho_2 = "10000335R4", - evstwwex_3 = "10000338RR0R", - evstwwe_2 = "10000339R4", - evstwwox_3 = "1000033cRR0R", - evstwwo_2 = "1000033dR4", - evmhessf_3 = "10000403RRR", - evmhossf_3 = "10000407RRR", - evmheumi_3 = "10000408RRR", - evmhesmi_3 = "10000409RRR", - evmhesmf_3 = "1000040bRRR", - evmhoumi_3 = "1000040cRRR", - evmhosmi_3 = "1000040dRRR", - evmhosmf_3 = "1000040fRRR", - evmhessfa_3 = "10000423RRR", - evmhossfa_3 = "10000427RRR", - evmheumia_3 = "10000428RRR", - evmhesmia_3 = "10000429RRR", - evmhesmfa_3 = "1000042bRRR", - evmhoumia_3 = "1000042cRRR", - evmhosmia_3 = "1000042dRRR", - evmhosmfa_3 = "1000042fRRR", - evmwhssf_3 = "10000447RRR", - evmwlumi_3 = "10000448RRR", - evmwhumi_3 = "1000044cRRR", - evmwhsmi_3 = "1000044dRRR", - evmwhsmf_3 = "1000044fRRR", - evmwssf_3 = "10000453RRR", - evmwumi_3 = "10000458RRR", - evmwsmi_3 = "10000459RRR", - evmwsmf_3 = "1000045bRRR", - evmwhssfa_3 = "10000467RRR", - evmwlumia_3 = "10000468RRR", - evmwhumia_3 = "1000046cRRR", - evmwhsmia_3 = "1000046dRRR", - evmwhsmfa_3 = "1000046fRRR", - evmwssfa_3 = "10000473RRR", - evmwumia_3 = "10000478RRR", - evmwsmia_3 = "10000479RRR", - evmwsmfa_3 = "1000047bRRR", - evmra_2 = "100004c4RR", - evdivws_3 = "100004c6RRR", - evdivwu_3 = "100004c7RRR", - evmwssfaa_3 = "10000553RRR", - evmwumiaa_3 = "10000558RRR", - evmwsmiaa_3 = "10000559RRR", - evmwsmfaa_3 = "1000055bRRR", - evmwssfan_3 = "100005d3RRR", - evmwumian_3 = "100005d8RRR", - evmwsmian_3 = "100005d9RRR", - evmwsmfan_3 = "100005dbRRR", - evmergehilo_3 = "1000022eRRR", - evmergelohi_3 = "1000022fRRR", - evlhhesplatx_3 = "10000308RR0R", - evlhhesplat_2 = "10000309R2", - evlhhousplatx_3 = "1000030cRR0R", - evlhhousplat_2 = "1000030dR2", - evlhhossplatx_3 = "1000030eRR0R", - evlhhossplat_2 = "1000030fR2", - evlwwsplatx_3 = "10000318RR0R", - evlwwsplat_2 = "10000319R4", - evlwhsplatx_3 = "1000031cRR0R", - evlwhsplat_2 = "1000031dR4", - evaddusiaaw_2 = "100004c0RR", - evaddssiaaw_2 = "100004c1RR", - evsubfusiaaw_2 = "100004c2RR", - evsubfssiaaw_2 = "100004c3RR", - evaddumiaaw_2 = "100004c8RR", - evaddsmiaaw_2 = "100004c9RR", - evsubfumiaaw_2 = "100004caRR", - evsubfsmiaaw_2 = "100004cbRR", - evmheusiaaw_3 = "10000500RRR", - evmhessiaaw_3 = "10000501RRR", - evmhessfaaw_3 = "10000503RRR", - evmhousiaaw_3 = "10000504RRR", - evmhossiaaw_3 = "10000505RRR", - evmhossfaaw_3 = "10000507RRR", - evmheumiaaw_3 = "10000508RRR", - evmhesmiaaw_3 = "10000509RRR", - evmhesmfaaw_3 = "1000050bRRR", - evmhoumiaaw_3 = "1000050cRRR", - evmhosmiaaw_3 = "1000050dRRR", - evmhosmfaaw_3 = "1000050fRRR", - evmhegumiaa_3 = "10000528RRR", - evmhegsmiaa_3 = "10000529RRR", - evmhegsmfaa_3 = "1000052bRRR", - evmhogumiaa_3 = "1000052cRRR", - evmhogsmiaa_3 = "1000052dRRR", - evmhogsmfaa_3 = "1000052fRRR", - evmwlusiaaw_3 = "10000540RRR", - evmwlssiaaw_3 = "10000541RRR", - evmwlumiaaw_3 = "10000548RRR", - evmwlsmiaaw_3 = "10000549RRR", - evmheusianw_3 = "10000580RRR", - evmhessianw_3 = "10000581RRR", - evmhessfanw_3 = "10000583RRR", - evmhousianw_3 = "10000584RRR", - evmhossianw_3 = "10000585RRR", - evmhossfanw_3 = "10000587RRR", - evmheumianw_3 = "10000588RRR", - evmhesmianw_3 = "10000589RRR", - evmhesmfanw_3 = "1000058bRRR", - evmhoumianw_3 = "1000058cRRR", - evmhosmianw_3 = "1000058dRRR", - evmhosmfanw_3 = "1000058fRRR", - evmhegumian_3 = "100005a8RRR", - evmhegsmian_3 = "100005a9RRR", - evmhegsmfan_3 = "100005abRRR", - evmhogumian_3 = "100005acRRR", - evmhogsmian_3 = 
"100005adRRR", - evmhogsmfan_3 = "100005afRRR", - evmwlusianw_3 = "100005c0RRR", - evmwlssianw_3 = "100005c1RRR", - evmwlumianw_3 = "100005c8RRR", - evmwlsmianw_3 = "100005c9RRR", - - -- NYI: Book E instructions. -} - --- Add mnemonics for "." variants. -do - local t = {} - for k,v in pairs(map_op) do - if type(v) == "string" and sub(v, -1) == "." then - local v2 = sub(v, 1, 7)..char(byte(v, 8)+1)..sub(v, 9, -2) - t[sub(k, 1, -3).."."..sub(k, -2)] = v2 - end - end - for k,v in pairs(t) do - map_op[k] = v - end -end - --- Add more branch mnemonics. -for cond,c in pairs(map_cond) do - local b1 = "b"..cond - local c1 = shl(band(c, 3), 16) + (c < 4 and 0x01000000 or 0) - -- bX[l] - map_op[b1.."_1"] = tohex(0x40800000 + c1).."K" - map_op[b1.."y_1"] = tohex(0x40a00000 + c1).."K" - map_op[b1.."l_1"] = tohex(0x40800001 + c1).."K" - map_op[b1.."_2"] = tohex(0x40800000 + c1).."-XK" - map_op[b1.."y_2"] = tohex(0x40a00000 + c1).."-XK" - map_op[b1.."l_2"] = tohex(0x40800001 + c1).."-XK" - -- bXlr[l] - map_op[b1.."lr_0"] = tohex(0x4c800020 + c1) - map_op[b1.."lrl_0"] = tohex(0x4c800021 + c1) - map_op[b1.."ctr_0"] = tohex(0x4c800420 + c1) - map_op[b1.."ctrl_0"] = tohex(0x4c800421 + c1) - -- bXctr[l] - map_op[b1.."lr_1"] = tohex(0x4c800020 + c1).."-X" - map_op[b1.."lrl_1"] = tohex(0x4c800021 + c1).."-X" - map_op[b1.."ctr_1"] = tohex(0x4c800420 + c1).."-X" - map_op[b1.."ctrl_1"] = tohex(0x4c800421 + c1).."-X" -end - ------------------------------------------------------------------------------- - -local function parse_gpr(expr) - local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$") - local tp = map_type[tname or expr] - if tp then - local reg = ovreg or tp.reg - if not reg then - werror("type `"..(tname or expr).."' needs a register override") - end - expr = reg - end - local r = match(expr, "^r([1-3]?[0-9])$") - if r then - r = tonumber(r) - if r <= 31 then return r, tp end - end - werror("bad register name `"..expr.."'") -end - -local function parse_fpr(expr) - local r = match(expr, "^f([1-3]?[0-9])$") - if r then - r = tonumber(r) - if r <= 31 then return r end - end - werror("bad register name `"..expr.."'") -end - -local function parse_vr(expr) - local r = match(expr, "^v([1-3]?[0-9])$") - if r then - r = tonumber(r) - if r <= 31 then return r end - end - werror("bad register name `"..expr.."'") -end - -local function parse_vs(expr) - local r = match(expr, "^vs([1-6]?[0-9])$") - if r then - r = tonumber(r) - if r <= 63 then return r end - end - werror("bad register name `"..expr.."'") -end - -local function parse_cr(expr) - local r = match(expr, "^cr([0-7])$") - if r then return tonumber(r) end - werror("bad condition register name `"..expr.."'") -end - -local function parse_cond(expr) - local r, cond = match(expr, "^4%*cr([0-7])%+(%w%w)$") - if r then - r = tonumber(r) - local c = map_cond[cond] - if c and c < 4 then return r*4+c end - end - werror("bad condition bit name `"..expr.."'") -end - -local parse_ctx = {} - -local loadenv = setfenv and function(s) - local code = loadstring(s, "") - if code then setfenv(code, parse_ctx) end - return code -end or function(s) - return load(s, "", nil, parse_ctx) -end - --- Try to parse simple arithmetic, too, since some basic ops are aliases. 
-local function parse_number(n) - local x = tonumber(n) - if x then return x end - local code = loadenv("return "..n) - if code then - local ok, y = pcall(code) - if ok then return y end - end - return nil -end - -local function parse_imm(imm, bits, shift, scale, signed) - local n = parse_number(imm) - if n then - local m = sar(n, scale) - if shl(m, scale) == n then - if signed then - local s = sar(m, bits-1) - if s == 0 then return shl(m, shift) - elseif s == -1 then return shl(m + shl(1, bits), shift) end - else - if sar(m, bits) == 0 then return shl(m, shift) end - end - end - werror("out of range immediate `"..imm.."'") - elseif match(imm, "^[rfv]([1-3]?[0-9])$") or - match(imm, "^vs([1-6]?[0-9])$") or - match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then - werror("expected immediate operand, got register") - else - waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) - return 0 - end -end - -local function parse_shiftmask(imm, isshift) - local n = parse_number(imm) - if n then - if shr(n, 6) == 0 then - local lsb = band(n, 31) - local msb = n - lsb - return isshift and (shl(lsb, 11)+shr(msb, 4)) or (shl(lsb, 6)+msb) - end - werror("out of range immediate `"..imm.."'") - elseif match(imm, "^r([1-3]?[0-9])$") or - match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then - werror("expected immediate operand, got register") - else - waction("IMMSH", isshift and 1 or 0, imm) - return 0; - end -end - -local function parse_disp(disp) - local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") - if imm then - local r = parse_gpr(reg) - if r == 0 then werror("cannot use r0 in displacement") end - return shl(r, 16) + parse_imm(imm, 16, 0, 0, true) - end - local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local r, tp = parse_gpr(reg) - if r == 0 then werror("cannot use r0 in displacement") end - if tp then - waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr)) - return shl(r, 16) - end - end - werror("bad displacement `"..disp.."'") -end - -local function parse_u5disp(disp, scale) - local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") - if imm then - local r = parse_gpr(reg) - if r == 0 then werror("cannot use r0 in displacement") end - return shl(r, 16) + parse_imm(imm, 5, 11, scale, false) - end - local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") - if reg and tailr ~= "" then - local r, tp = parse_gpr(reg) - if r == 0 then werror("cannot use r0 in displacement") end - if tp then - waction("IMM", scale*1024+5*32+11, format(tp.ctypefmt, tailr)) - return shl(r, 16) - end - end - werror("bad displacement `"..disp.."'") -end - -local function parse_label(label, def) - local prefix = sub(label, 1, 2) - -- =>label (pc label reference) - if prefix == "=>" then - return "PC", 0, sub(label, 3) - end - -- ->name (global label reference) - if prefix == "->" then - return "LG", map_global[sub(label, 3)] - end - if def then - -- [1-9] (local label definition) - if match(label, "^[1-9]$") then - return "LG", 10+tonumber(label) - end - else - -- [<>][1-9] (local label reference) - local dir, lnum = match(label, "^([<>])([1-9])$") - if dir then -- Fwd: 1-9, Bkwd: 11-19. - return "LG", lnum + (dir == ">" and 0 or 10) - end - -- extern label (extern label reference) - local extname = match(label, "^extern%s+(%S+)$") - if extname then - return "EXT", map_extern[extname] - end - end - werror("bad label `"..label.."'") -end - ------------------------------------------------------------------------------- - --- Handle opcodes defined with template strings. 
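Before the real op_template handler below, a reduced sketch of the encoding idea for the register-only case: the first eight hex characters of a template are the base opcode word and each "R" consumes the next 5-bit field, starting at bit 21. The worked example uses the standard PowerPC add encoding (base word 0x7c000214) and is illustrative only, not taken from the diff; it needs the same bit library the module itself requires.

-- Illustrative sketch only (register fields only; the real op_template below
-- also handles immediates, displacements, labels and the other pattern chars).
local bit = bit or require("bit")
local shl = bit.lshift

local function encode_rrr(template, ...)
  local op = tonumber(string.sub(template, 1, 8), 16)  -- base opcode word
  local rs = 26
  for _, r in ipairs({...}) do   -- each "R" takes the next 5-bit slot
    rs = rs - 5
    op = op + shl(r, rs)
  end
  return op
end

-- "7c000214RRR" with registers 3, 4, 5 (i.e. add r3,r4,r5) -> 7c642a14
print(string.format("%08x", encode_rrr("7c000214RRR", 3, 4, 5)))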
-op_template = function(params, template, nparams) - if not params then return sub(template, 9) end - local op = tonumber(sub(template, 1, 8), 16) - local n, rs = 1, 26 - - -- Limit number of section buffer positions used by a single dasm_put(). - -- A single opcode needs a maximum of 3 positions (rlwinm). - if secpos+3 > maxsecpos then wflush() end - local pos = wpos() - - -- Process each character. - for p in gmatch(sub(template, 9), ".") do - if p == "R" then - rs = rs - 5; op = op + shl(parse_gpr(params[n]), rs); n = n + 1 - elseif p == "F" then - rs = rs - 5; op = op + shl(parse_fpr(params[n]), rs); n = n + 1 - elseif p == "V" then - rs = rs - 5; op = op + shl(parse_vr(params[n]), rs); n = n + 1 - elseif p == "Q" then - local vs = parse_vs(params[n]); n = n + 1; rs = rs - 5 - local sh = rs == 6 and 2 or 3 + band(shr(rs, 1), 3) - op = op + shl(band(vs, 31), rs) + shr(band(vs, 32), sh) - elseif p == "q" then - local vs = parse_vs(params[n]); n = n + 1 - op = op + shl(band(vs, 31), 21) + shr(band(vs, 32), 5) - elseif p == "A" then - rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, false); n = n + 1 - elseif p == "S" then - rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, true); n = n + 1 - elseif p == "I" then - op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1 - elseif p == "U" then - op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1 - elseif p == "D" then - op = op + parse_disp(params[n]); n = n + 1 - elseif p == "2" then - op = op + parse_u5disp(params[n], 1); n = n + 1 - elseif p == "4" then - op = op + parse_u5disp(params[n], 2); n = n + 1 - elseif p == "8" then - op = op + parse_u5disp(params[n], 3); n = n + 1 - elseif p == "C" then - rs = rs - 5; op = op + shl(parse_cond(params[n]), rs); n = n + 1 - elseif p == "X" then - rs = rs - 5; op = op + shl(parse_cr(params[n]), rs+2); n = n + 1 - elseif p == "1" then - rs = rs - 5; op = op + parse_imm(params[n], 1, rs, 0, false); n = n + 1 - elseif p == "g" then - rs = rs - 5; op = op + parse_imm(params[n], 2, rs, 0, false); n = n + 1 - elseif p == "3" then - rs = rs - 5; op = op + parse_imm(params[n], 3, rs, 0, false); n = n + 1 - elseif p == "P" then - rs = rs - 5; op = op + parse_imm(params[n], 4, rs, 0, false); n = n + 1 - elseif p == "p" then - op = op + parse_imm(params[n], 4, rs, 0, false); n = n + 1 - elseif p == "6" then - rs = rs - 6; op = op + parse_imm(params[n], 6, rs, 0, false); n = n + 1 - elseif p == "Y" then - rs = rs - 5; op = op + parse_imm(params[n], 1, rs+4, 0, false); n = n + 1 - elseif p == "y" then - rs = rs - 5; op = op + parse_imm(params[n], 1, rs+3, 0, false); n = n + 1 - elseif p == "Z" then - rs = rs - 5; op = op + parse_imm(params[n], 2, rs+3, 0, false); n = n + 1 - elseif p == "z" then - rs = rs - 5; op = op + parse_imm(params[n], 2, rs+2, 0, false); n = n + 1 - elseif p == "W" then - op = op + parse_cr(params[n]); n = n + 1 - elseif p == "G" then - op = op + parse_imm(params[n], 8, 12, 0, false); n = n + 1 - elseif p == "H" then - op = op + parse_shiftmask(params[n], true); n = n + 1 - elseif p == "M" then - op = op + parse_shiftmask(params[n], false); n = n + 1 - elseif p == "J" or p == "K" then - local mode, n, s = parse_label(params[n], false) - if p == "K" then n = n + 2048 end - waction("REL_"..mode, n, s, 1) - n = n + 1 - elseif p == "0" then - if band(shr(op, rs), 31) == 0 then werror("cannot use r0") end - elseif p == "=" or p == "%" then - local t = band(shr(op, p == "%" and rs+5 or rs), 31) - rs = rs - 5 - op = op + shl(t, rs) - elseif p == "~" then - local mm = shl(31, 
rs) - local lo = band(op, mm) - local hi = band(op, shl(mm, 5)) - op = op - lo - hi + shl(lo, 5) + shr(hi, 5) - elseif p == ":" then - if band(shr(op, rs), 1) ~= 0 then werror("register pair expected") end - elseif p == "-" then - rs = rs - 5 - elseif p == "." then - -- Ignored. - else - assert(false) - end - end - wputpos(pos, op) -end - -map_op[".template__"] = op_template - ------------------------------------------------------------------------------- - --- Pseudo-opcode to mark the position where the action list is to be emitted. -map_op[".actionlist_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeactions(out, name) end) -end - --- Pseudo-opcode to mark the position where the global enum is to be emitted. -map_op[".globals_1"] = function(params) - if not params then return "prefix" end - local prefix = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobals(out, prefix) end) -end - --- Pseudo-opcode to mark the position where the global names are to be emitted. -map_op[".globalnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobalnames(out, name) end) -end - --- Pseudo-opcode to mark the position where the extern names are to be emitted. -map_op[".externnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeexternnames(out, name) end) -end - ------------------------------------------------------------------------------- - --- Label pseudo-opcode (converted from trailing colon form). -map_op[".label_1"] = function(params) - if not params then return "[1-9] | ->global | =>pcexpr" end - if secpos+1 > maxsecpos then wflush() end - local mode, n, s = parse_label(params[1], true) - if mode == "EXT" then werror("bad label definition") end - waction("LABEL_"..mode, n, s, 1) -end - ------------------------------------------------------------------------------- - --- Pseudo-opcodes for data storage. -map_op[".long_*"] = function(params) - if not params then return "imm..." end - for _,p in ipairs(params) do - local n = tonumber(p) - if not n then werror("bad immediate `"..p.."'") end - if n < 0 then n = n + 2^32 end - wputw(n) - if secpos+2 > maxsecpos then wflush() end - end -end - --- Alignment pseudo-opcode. -map_op[".align_1"] = function(params) - if not params then return "numpow2" end - if secpos+1 > maxsecpos then wflush() end - local align = tonumber(params[1]) - if align then - local x = align - -- Must be a power of 2 in the range (2 ... 256). - for i=1,8 do - x = x / 2 - if x == 1 then - waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. - return - end - end - end - werror("bad alignment") -end - ------------------------------------------------------------------------------- - --- Pseudo-opcode for (primitive) type definitions (map to C types). -map_op[".type_3"] = function(params, nparams) - if not params then - return nparams == 2 and "name, ctype" or "name, ctype, reg" - end - local name, ctype, reg = params[1], params[2], params[3] - if not match(name, "^[%a_][%w_]*$") then - werror("bad type name `"..name.."'") - end - local tp = map_type[name] - if tp then - werror("duplicate type `"..name.."'") - end - -- Add #type to defines. A bit unclean to put it in map_archdef. 
- map_archdef["#"..name] = "sizeof("..ctype..")" - -- Add new type and emit shortcut define. - local num = ctypenum + 1 - map_type[name] = { - ctype = ctype, - ctypefmt = format("Dt%X(%%s)", num), - reg = reg, - } - wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) - ctypenum = num -end -map_op[".type_2"] = map_op[".type_3"] - --- Dump type definitions. -local function dumptypes(out, lvl) - local t = {} - for name in pairs(map_type) do t[#t+1] = name end - sort(t) - out:write("Type definitions:\n") - for _,name in ipairs(t) do - local tp = map_type[name] - local reg = tp.reg or "" - out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Set the current section. -function _M.section(num) - waction("SECTION", num) - wflush(true) -- SECTION is a terminal action. -end - ------------------------------------------------------------------------------- - --- Dump architecture description. -function _M.dumparch(out) - out:write(format("DynASM %s version %s, released %s\n\n", - _info.arch, _info.version, _info.release)) - dumpactions(out) -end - --- Dump all user defined elements. -function _M.dumpdef(out, lvl) - dumptypes(out, lvl) - dumpglobals(out, lvl) - dumpexterns(out, lvl) -end - ------------------------------------------------------------------------------- - --- Pass callbacks from/to the DynASM core. -function _M.passcb(wl, we, wf, ww) - wline, werror, wfatal, wwarn = wl, we, wf, ww - return wflush -end - --- Setup the arch-specific module. -function _M.setup(arch, opt) - g_arch, g_opt = arch, opt -end - --- Merge the core maps and the arch-specific maps. -function _M.mergemaps(map_coreop, map_def) - setmetatable(map_op, { __index = map_coreop }) - setmetatable(map_def, { __index = map_archdef }) - return map_op, map_def -end - -return _M - ------------------------------------------------------------------------------- - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_proto.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_proto.h deleted file mode 100644 index a8bc6fd285d..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_proto.h +++ /dev/null @@ -1,83 +0,0 @@ -/* -** DynASM encoding engine prototypes. -** Copyright (C) 2005-2015 Mike Pall. All rights reserved. -** Released under the MIT license. See dynasm.lua for full copyright notice. -*/ - -#ifndef _DASM_PROTO_H -#define _DASM_PROTO_H - -#include -#include - -#define DASM_IDENT "DynASM 1.3.0" -#define DASM_VERSION 10300 /* 1.3.0 */ - -#ifndef Dst_DECL -#define Dst_DECL dasm_State **Dst -#endif - -#ifndef Dst_REF -#define Dst_REF (*Dst) -#endif - -#ifndef DASM_FDEF -#define DASM_FDEF extern -#endif - -#ifndef DASM_M_GROW -#define DASM_M_GROW(ctx, t, p, sz, need) \ - do { \ - size_t _sz = (sz), _need = (need); \ - if (_sz < _need) { \ - if (_sz < 16) _sz = 16; \ - while (_sz < _need) _sz += _sz; \ - (p) = (t *)realloc((p), _sz); \ - if ((p) == NULL) exit(1); \ - (sz) = _sz; \ - } \ - } while(0) -#endif - -#ifndef DASM_M_FREE -#define DASM_M_FREE(ctx, p, sz) free(p) -#endif - -/* Internal DynASM encoder state. */ -typedef struct dasm_State dasm_State; - - -/* Initialize and free DynASM state. */ -DASM_FDEF void dasm_init(Dst_DECL, int maxsection); -DASM_FDEF void dasm_free(Dst_DECL); - -/* Setup global array. Must be called before dasm_setup(). 
*/ -DASM_FDEF void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl); - -/* Grow PC label array. Can be called after dasm_setup(), too. */ -DASM_FDEF void dasm_growpc(Dst_DECL, unsigned int maxpc); - -/* Setup encoder. */ -DASM_FDEF void dasm_setup(Dst_DECL, const void *actionlist); - -/* Feed encoder with actions. Calls are generated by pre-processor. */ -DASM_FDEF void dasm_put(Dst_DECL, int start, ...); - -/* Link sections and return the resulting size. */ -DASM_FDEF int dasm_link(Dst_DECL, size_t *szp); - -/* Encode sections into buffer. */ -DASM_FDEF int dasm_encode(Dst_DECL, void *buffer); - -/* Get PC label offset. */ -DASM_FDEF int dasm_getpclabel(Dst_DECL, unsigned int pc); - -#ifdef DASM_CHECKS -/* Optional sanity checker to call between isolated encoding steps. */ -DASM_FDEF int dasm_checkstep(Dst_DECL, int secmatch); -#else -#define dasm_checkstep(a, b) 0 -#endif - - -#endif /* _DASM_PROTO_H */ diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x64.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x64.lua deleted file mode 100644 index b1b62022f3c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x64.lua +++ /dev/null @@ -1,12 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM x64 module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See dynasm.lua for full copyright notice. ------------------------------------------------------------------------------- --- This module just sets 64 bit mode for the combined x86/x64 module. --- All the interesting stuff is there. ------------------------------------------------------------------------------- - -x64 = true -- Using a global is an ugly, but effective solution. -return require("dasm_x86") diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.h deleted file mode 100644 index 652e8c99b08..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.h +++ /dev/null @@ -1,471 +0,0 @@ -/* -** DynASM x86 encoding engine. -** Copyright (C) 2005-2015 Mike Pall. All rights reserved. -** Released under the MIT license. See dynasm.lua for full copyright notice. -*/ - -#include -#include -#include -#include - -#define DASM_ARCH "x86" - -#ifndef DASM_EXTERN -#define DASM_EXTERN(a,b,c,d) 0 -#endif - -/* Action definitions. DASM_STOP must be 255. */ -enum { - DASM_DISP = 233, - DASM_IMM_S, DASM_IMM_B, DASM_IMM_W, DASM_IMM_D, DASM_IMM_WB, DASM_IMM_DB, - DASM_VREG, DASM_SPACE, DASM_SETLABEL, DASM_REL_A, DASM_REL_LG, DASM_REL_PC, - DASM_IMM_LG, DASM_IMM_PC, DASM_LABEL_LG, DASM_LABEL_PC, DASM_ALIGN, - DASM_EXTERN, DASM_ESC, DASM_MARK, DASM_SECTION, DASM_STOP -}; - -/* Maximum number of section buffer positions for a single dasm_put() call. */ -#define DASM_MAXSECPOS 25 - -/* DynASM encoder status codes. Action list offset or number are or'ed in. */ -#define DASM_S_OK 0x00000000 -#define DASM_S_NOMEM 0x01000000 -#define DASM_S_PHASE 0x02000000 -#define DASM_S_MATCH_SEC 0x03000000 -#define DASM_S_RANGE_I 0x11000000 -#define DASM_S_RANGE_SEC 0x12000000 -#define DASM_S_RANGE_LG 0x13000000 -#define DASM_S_RANGE_PC 0x14000000 -#define DASM_S_RANGE_VREG 0x15000000 -#define DASM_S_UNDEF_L 0x21000000 -#define DASM_S_UNDEF_PC 0x22000000 - -/* Macros to convert positions (8 bit section + 24 bit index). 
*/ -#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) -#define DASM_POS2BIAS(pos) ((pos)&0xff000000) -#define DASM_SEC2POS(sec) ((sec)<<24) -#define DASM_POS2SEC(pos) ((pos)>>24) -#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) - -/* Action list type. */ -typedef const unsigned char *dasm_ActList; - -/* Per-section structure. */ -typedef struct dasm_Section { - int *rbuf; /* Biased buffer pointer (negative section bias). */ - int *buf; /* True buffer pointer. */ - size_t bsize; /* Buffer size in bytes. */ - int pos; /* Biased buffer position. */ - int epos; /* End of biased buffer position - max single put. */ - int ofs; /* Byte offset into section. */ -} dasm_Section; - -/* Core structure holding the DynASM encoding state. */ -struct dasm_State { - size_t psize; /* Allocated size of this structure. */ - dasm_ActList actionlist; /* Current actionlist pointer. */ - int *lglabels; /* Local/global chain/pos ptrs. */ - size_t lgsize; - int *pclabels; /* PC label chains/pos ptrs. */ - size_t pcsize; - void **globals; /* Array of globals (bias -10). */ - dasm_Section *section; /* Pointer to active section. */ - size_t codesize; /* Total size of all code sections. */ - int maxsection; /* 0 <= sectionidx < maxsection. */ - int status; /* Status code. */ - dasm_Section sections[1]; /* All sections. Alloc-extended. */ -}; - -/* The size of the core structure depends on the max. number of sections. */ -#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) - - -/* Initialize DynASM state. */ -void dasm_init(Dst_DECL, int maxsection) -{ - dasm_State *D; - size_t psz = 0; - int i; - Dst_REF = NULL; - DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); - D = Dst_REF; - D->psize = psz; - D->lglabels = NULL; - D->lgsize = 0; - D->pclabels = NULL; - D->pcsize = 0; - D->globals = NULL; - D->maxsection = maxsection; - for (i = 0; i < maxsection; i++) { - D->sections[i].buf = NULL; /* Need this for pass3. */ - D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); - D->sections[i].bsize = 0; - D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ - } -} - -/* Free DynASM state. */ -void dasm_free(Dst_DECL) -{ - dasm_State *D = Dst_REF; - int i; - for (i = 0; i < D->maxsection; i++) - if (D->sections[i].buf) - DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); - if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); - if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); - DASM_M_FREE(Dst, D, D->psize); -} - -/* Setup global label array. Must be called before dasm_setup(). */ -void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) -{ - dasm_State *D = Dst_REF; - D->globals = gl - 10; /* Negative bias to compensate for locals. */ - DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); -} - -/* Grow PC label array. Can be called after dasm_setup(), too. */ -void dasm_growpc(Dst_DECL, unsigned int maxpc) -{ - dasm_State *D = Dst_REF; - size_t osz = D->pcsize; - DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); - memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); -} - -/* Setup encoder. 
*/ -void dasm_setup(Dst_DECL, const void *actionlist) -{ - dasm_State *D = Dst_REF; - int i; - D->actionlist = (dasm_ActList)actionlist; - D->status = DASM_S_OK; - D->section = &D->sections[0]; - memset((void *)D->lglabels, 0, D->lgsize); - if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); - for (i = 0; i < D->maxsection; i++) { - D->sections[i].pos = DASM_SEC2POS(i); - D->sections[i].ofs = 0; - } -} - - -#ifdef DASM_CHECKS -#define CK(x, st) \ - do { if (!(x)) { \ - D->status = DASM_S_##st|(int)(p-D->actionlist-1); return; } } while (0) -#define CKPL(kind, st) \ - do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ - D->status=DASM_S_RANGE_##st|(int)(p-D->actionlist-1); return; } } while (0) -#else -#define CK(x, st) ((void)0) -#define CKPL(kind, st) ((void)0) -#endif - -/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ -void dasm_put(Dst_DECL, int start, ...) -{ - va_list ap; - dasm_State *D = Dst_REF; - dasm_ActList p = D->actionlist + start; - dasm_Section *sec = D->section; - int pos = sec->pos, ofs = sec->ofs, mrm = 4; - int *b; - - if (pos >= sec->epos) { - DASM_M_GROW(Dst, int, sec->buf, sec->bsize, - sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); - sec->rbuf = sec->buf - DASM_POS2BIAS(pos); - sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); - } - - b = sec->rbuf; - b[pos++] = start; - - va_start(ap, start); - while (1) { - int action = *p++; - if (action < DASM_DISP) { - ofs++; - } else if (action <= DASM_REL_A) { - int n = va_arg(ap, int); - b[pos++] = n; - switch (action) { - case DASM_DISP: - if (n == 0) { if ((mrm&7) == 4) mrm = p[-2]; if ((mrm&7) != 5) break; } - case DASM_IMM_DB: if (((n+128)&-256) == 0) goto ob; - case DASM_REL_A: /* Assumes ptrdiff_t is int. !x64 */ - case DASM_IMM_D: ofs += 4; break; - case DASM_IMM_S: CK(((n+128)&-256) == 0, RANGE_I); goto ob; - case DASM_IMM_B: CK((n&-256) == 0, RANGE_I); ob: ofs++; break; - case DASM_IMM_WB: if (((n+128)&-256) == 0) goto ob; - case DASM_IMM_W: CK((n&-65536) == 0, RANGE_I); ofs += 2; break; - case DASM_SPACE: p++; ofs += n; break; - case DASM_SETLABEL: b[pos-2] = -0x40000000; break; /* Neg. label ofs. */ - case DASM_VREG: CK((n&-8) == 0 && (n != 4 || (*p&1) == 0), RANGE_VREG); - if (*p++ == 1 && *p == DASM_DISP) mrm = n; continue; - } - mrm = 4; - } else { - int *pl, n; - switch (action) { - case DASM_REL_LG: - case DASM_IMM_LG: - n = *p++; pl = D->lglabels + n; - /* Bkwd rel or global. */ - if (n <= 246) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } - pl -= 246; n = *pl; - if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ - goto linkrel; - case DASM_REL_PC: - case DASM_IMM_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC); - putrel: - n = *pl; - if (n < 0) { /* Label exists. Get label pos and store it. */ - b[pos] = -n; - } else { - linkrel: - b[pos] = n; /* Else link to rel chain, anchored at label. */ - *pl = pos; - } - pos++; - ofs += 4; /* Maximum offset needed. */ - if (action == DASM_REL_LG || action == DASM_REL_PC) - b[pos++] = ofs; /* Store pass1 offset estimate. */ - break; - case DASM_LABEL_LG: pl = D->lglabels + *p++; CKPL(lg, LG); goto putlabel; - case DASM_LABEL_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC); - putlabel: - n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; } - *pl = -pos; /* Label exists now. */ - b[pos++] = ofs; /* Store pass1 offset estimate. 
*/ - break; - case DASM_ALIGN: - ofs += *p++; /* Maximum alignment needed (arg is 2**n-1). */ - b[pos++] = ofs; /* Store pass1 offset estimate. */ - break; - case DASM_EXTERN: p += 2; ofs += 4; break; - case DASM_ESC: p++; ofs++; break; - case DASM_MARK: mrm = p[-2]; break; - case DASM_SECTION: - n = *p; CK(n < D->maxsection, RANGE_SEC); D->section = &D->sections[n]; - case DASM_STOP: goto stop; - } - } - } -stop: - va_end(ap); - sec->pos = pos; - sec->ofs = ofs; -} -#undef CK - -/* Pass 2: Link sections, shrink branches/aligns, fix label offsets. */ -int dasm_link(Dst_DECL, size_t *szp) -{ - dasm_State *D = Dst_REF; - int secnum; - int ofs = 0; - -#ifdef DASM_CHECKS - *szp = 0; - if (D->status != DASM_S_OK) return D->status; - { - int pc; - for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) - if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; - } -#endif - - { /* Handle globals not defined in this translation unit. */ - int idx; - for (idx = 10; idx*sizeof(int) < D->lgsize; idx++) { - int n = D->lglabels[idx]; - /* Undefined label: Collapse rel chain and replace with marker (< 0). */ - while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } - } - } - - /* Combine all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->rbuf; - int pos = DASM_SEC2POS(secnum); - int lastpos = sec->pos; - - while (pos != lastpos) { - dasm_ActList p = D->actionlist + b[pos++]; - while (1) { - int op, action = *p++; - switch (action) { - case DASM_REL_LG: p++; op = p[-3]; goto rel_pc; - case DASM_REL_PC: op = p[-2]; rel_pc: { - int shrink = op == 0xe9 ? 3 : ((op&0xf0) == 0x80 ? 4 : 0); - if (shrink) { /* Shrinkable branch opcode? */ - int lofs, lpos = b[pos]; - if (lpos < 0) goto noshrink; /* Ext global? */ - lofs = *DASM_POS2PTR(D, lpos); - if (lpos > pos) { /* Fwd label: add cumulative section offsets. */ - int i; - for (i = secnum; i < DASM_POS2SEC(lpos); i++) - lofs += D->sections[i].ofs; - } else { - lofs -= ofs; /* Bkwd label: unfix offset. */ - } - lofs -= b[pos+1]; /* Short branch ok? */ - if (lofs >= -128-shrink && lofs <= 127) ofs -= shrink; /* Yes. */ - else { noshrink: shrink = 0; } /* No, cannot shrink op. */ - } - b[pos+1] = shrink; - pos += 2; - break; - } - case DASM_SPACE: case DASM_IMM_LG: case DASM_VREG: p++; - case DASM_DISP: case DASM_IMM_S: case DASM_IMM_B: case DASM_IMM_W: - case DASM_IMM_D: case DASM_IMM_WB: case DASM_IMM_DB: - case DASM_SETLABEL: case DASM_REL_A: case DASM_IMM_PC: pos++; break; - case DASM_LABEL_LG: p++; - case DASM_LABEL_PC: b[pos++] += ofs; break; /* Fix label offset. */ - case DASM_ALIGN: ofs -= (b[pos++]+ofs)&*p++; break; /* Adjust ofs. */ - case DASM_EXTERN: p += 2; break; - case DASM_ESC: p++; break; - case DASM_MARK: break; - case DASM_SECTION: case DASM_STOP: goto stop; - } - } - stop: (void)0; - } - ofs += sec->ofs; /* Next section starts right after current section. */ - } - - D->codesize = ofs; /* Total size of all code sections */ - *szp = ofs; - return DASM_S_OK; -} - -#define dasmb(x) *cp++ = (unsigned char)(x) -#ifndef DASM_ALIGNED_WRITES -#define dasmw(x) \ - do { *((unsigned short *)cp) = (unsigned short)(x); cp+=2; } while (0) -#define dasmd(x) \ - do { *((unsigned int *)cp) = (unsigned int)(x); cp+=4; } while (0) -#else -#define dasmw(x) do { dasmb(x); dasmb((x)>>8); } while (0) -#define dasmd(x) do { dasmw(x); dasmw((x)>>16); } while (0) -#endif - -/* Pass 3: Encode sections. 
*/ -int dasm_encode(Dst_DECL, void *buffer) -{ - dasm_State *D = Dst_REF; - unsigned char *base = (unsigned char *)buffer; - unsigned char *cp = base; - int secnum; - - /* Encode all code sections. No support for data sections (yet). */ - for (secnum = 0; secnum < D->maxsection; secnum++) { - dasm_Section *sec = D->sections + secnum; - int *b = sec->buf; - int *endb = sec->rbuf + sec->pos; - - while (b != endb) { - dasm_ActList p = D->actionlist + *b++; - unsigned char *mark = NULL; - while (1) { - int action = *p++; - int n = (action >= DASM_DISP && action <= DASM_ALIGN) ? *b++ : 0; - switch (action) { - case DASM_DISP: if (!mark) mark = cp; { - unsigned char *mm = mark; - if (*p != DASM_IMM_DB && *p != DASM_IMM_WB) mark = NULL; - if (n == 0) { int mrm = mm[-1]&7; if (mrm == 4) mrm = mm[0]&7; - if (mrm != 5) { mm[-1] -= 0x80; break; } } - if (((n+128) & -256) != 0) goto wd; else mm[-1] -= 0x40; - } - case DASM_IMM_S: case DASM_IMM_B: wb: dasmb(n); break; - case DASM_IMM_DB: if (((n+128)&-256) == 0) { - db: if (!mark) mark = cp; mark[-2] += 2; mark = NULL; goto wb; - } else mark = NULL; - case DASM_IMM_D: wd: dasmd(n); break; - case DASM_IMM_WB: if (((n+128)&-256) == 0) goto db; else mark = NULL; - case DASM_IMM_W: dasmw(n); break; - case DASM_VREG: { int t = *p++; if (t >= 2) n<<=3; cp[-1] |= n; break; } - case DASM_REL_LG: p++; if (n >= 0) goto rel_pc; - b++; n = (int)(ptrdiff_t)D->globals[-n]; - case DASM_REL_A: rel_a: n -= (int)(ptrdiff_t)(cp+4); goto wd; /* !x64 */ - case DASM_REL_PC: rel_pc: { - int shrink = *b++; - int *pb = DASM_POS2PTR(D, n); if (*pb < 0) { n = pb[1]; goto rel_a; } - n = *pb - ((int)(cp-base) + 4-shrink); - if (shrink == 0) goto wd; - if (shrink == 4) { cp--; cp[-1] = *cp-0x10; } else cp[-1] = 0xeb; - goto wb; - } - case DASM_IMM_LG: - p++; if (n < 0) { n = (int)(ptrdiff_t)D->globals[-n]; goto wd; } - case DASM_IMM_PC: { - int *pb = DASM_POS2PTR(D, n); - n = *pb < 0 ? pb[1] : (*pb + (int)(ptrdiff_t)base); - goto wd; - } - case DASM_LABEL_LG: { - int idx = *p++; - if (idx >= 10) - D->globals[idx] = (void *)(base + (*p == DASM_SETLABEL ? *b : n)); - break; - } - case DASM_LABEL_PC: case DASM_SETLABEL: break; - case DASM_SPACE: { int fill = *p++; while (n--) *cp++ = fill; break; } - case DASM_ALIGN: - n = *p++; - while (((cp-base) & n)) *cp++ = 0x90; /* nop */ - break; - case DASM_EXTERN: n = DASM_EXTERN(Dst, cp, p[1], *p); p += 2; goto wd; - case DASM_MARK: mark = cp; break; - case DASM_ESC: action = *p++; - default: *cp++ = action; break; - case DASM_SECTION: case DASM_STOP: goto stop; - } - } - stop: (void)0; - } - } - - if (base + D->codesize != cp) /* Check for phase errors. */ - return DASM_S_PHASE; - return DASM_S_OK; -} - -/* Get PC label offset. */ -int dasm_getpclabel(Dst_DECL, unsigned int pc) -{ - dasm_State *D = Dst_REF; - if (pc*sizeof(int) < D->pcsize) { - int pos = D->pclabels[pc]; - if (pos < 0) return *DASM_POS2PTR(D, -pos); - if (pos > 0) return -1; /* Undefined. */ - } - return -2; /* Unused or out of range. */ -} - -#ifdef DASM_CHECKS -/* Optional sanity checker to call between isolated encoding steps. 
*/ -int dasm_checkstep(Dst_DECL, int secmatch) -{ - dasm_State *D = Dst_REF; - if (D->status == DASM_S_OK) { - int i; - for (i = 1; i <= 9; i++) { - if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_L|i; break; } - D->lglabels[i] = 0; - } - } - if (D->status == DASM_S_OK && secmatch >= 0 && - D->section != &D->sections[secmatch]) - D->status = DASM_S_MATCH_SEC|(int)(D->section-D->sections); - return D->status; -} -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.lua deleted file mode 100644 index 7ca061d22f6..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dasm_x86.lua +++ /dev/null @@ -1,1945 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM x86/x64 module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See dynasm.lua for full copyright notice. ------------------------------------------------------------------------------- - -local x64 = x64 - --- Module information: -local _info = { - arch = x64 and "x64" or "x86", - description = "DynASM x86/x64 module", - version = "1.3.0", - vernum = 10300, - release = "2011-05-05", - author = "Mike Pall", - license = "MIT", -} - --- Exported glue functions for the arch-specific module. -local _M = { _info = _info } - --- Cache library functions. -local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs -local assert, unpack, setmetatable = assert, unpack or table.unpack, setmetatable -local _s = string -local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char -local find, match, gmatch, gsub = _s.find, _s.match, _s.gmatch, _s.gsub -local concat, sort = table.concat, table.sort -local bit = bit or require("bit") -local band, shl, shr = bit.band, bit.lshift, bit.rshift - --- Inherited tables and callbacks. -local g_opt, g_arch -local wline, werror, wfatal, wwarn - --- Action name list. --- CHECK: Keep this in sync with the C code! -local action_names = { - -- int arg, 1 buffer pos: - "DISP", "IMM_S", "IMM_B", "IMM_W", "IMM_D", "IMM_WB", "IMM_DB", - -- action arg (1 byte), int arg, 1 buffer pos (reg/num): - "VREG", "SPACE", -- !x64: VREG support NYI. - -- ptrdiff_t arg, 1 buffer pos (address): !x64 - "SETLABEL", "REL_A", - -- action arg (1 byte) or int arg, 2 buffer pos (link, offset): - "REL_LG", "REL_PC", - -- action arg (1 byte) or int arg, 1 buffer pos (link): - "IMM_LG", "IMM_PC", - -- action arg (1 byte) or int arg, 1 buffer pos (offset): - "LABEL_LG", "LABEL_PC", - -- action arg (1 byte), 1 buffer pos (offset): - "ALIGN", - -- action args (2 bytes), no buffer pos. - "EXTERN", - -- action arg (1 byte), no buffer pos. - "ESC", - -- no action arg, no buffer pos. - "MARK", - -- action arg (1 byte), no buffer pos, terminal action: - "SECTION", - -- no args, no buffer pos, terminal action: - "STOP" -} - --- Maximum number of section buffer positions for dasm_put(). --- CHECK: Keep this in sync with the C code! -local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. - --- Action name -> action number (dynamically generated below). -local map_action = {} --- First action number. Everything below does not need to be escaped. -local actfirst = 256-#action_names - --- Action list buffer and string (only used to remove dupes). -local actlist = {} -local actstr = "" - --- Argument list for next dasm_put(). Start with offset 0 into action list. -local actargs = { 0 } - --- Current number of section buffer positions for dasm_put(). 
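The action numbers are chosen so that the escape-free range ends exactly at 255: with 23 action names, actfirst is 256 - 23 = 233, which lines up with DASM_DISP = 233 and the "DASM_STOP must be 255" requirement in dasm_x86.h above. A small self-contained check of that arithmetic (illustrative only; it duplicates the list above and computes the same numbering as the loop that follows):

-- Illustrative sketch only: recompute the action numbering and cross-check it
-- against the values hard-coded in the C engine (dasm_x86.h) in this patch.
local action_names = {
  "DISP", "IMM_S", "IMM_B", "IMM_W", "IMM_D", "IMM_WB", "IMM_DB",
  "VREG", "SPACE", "SETLABEL", "REL_A", "REL_LG", "REL_PC",
  "IMM_LG", "IMM_PC", "LABEL_LG", "LABEL_PC", "ALIGN", "EXTERN",
  "ESC", "MARK", "SECTION", "STOP",
}
local actfirst = 256 - #action_names        -- 233
local map_action = {}
for n, name in ipairs(action_names) do map_action[name] = actfirst + n - 1 end
assert(map_action.DISP == 233 and map_action.STOP == 255)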
-local secpos = 1 - ------------------------------------------------------------------------------- - --- Compute action numbers for action names. -for n,name in ipairs(action_names) do - local num = actfirst + n - 1 - map_action[name] = num -end - --- Dump action names and numbers. -local function dumpactions(out) - out:write("DynASM encoding engine action codes:\n") - for n,name in ipairs(action_names) do - local num = map_action[name] - out:write(format(" %-10s %02X %d\n", name, num, num)) - end - out:write("\n") -end - --- Write action list buffer as a huge static C array. -local function writeactions(out, name) - local nn = #actlist - local last = actlist[nn] or 255 - actlist[nn] = nil -- Remove last byte. - if nn == 0 then nn = 1 end - out:write("static const unsigned char ", name, "[", nn, "] = {\n") - local s = " " - for n,b in ipairs(actlist) do - s = s..b.."," - if #s >= 75 then - assert(out:write(s, "\n")) - s = " " - end - end - out:write(s, last, "\n};\n\n") -- Add last byte back. -end - ------------------------------------------------------------------------------- - --- Add byte to action list. -local function wputxb(n) - assert(n >= 0 and n <= 255 and n % 1 == 0, "byte out of range") - actlist[#actlist+1] = n -end - --- Add action to list with optional arg. Advance buffer pos, too. -local function waction(action, a, num) - wputxb(assert(map_action[action], "bad action name `"..action.."'")) - if a then actargs[#actargs+1] = a end - if a or num then secpos = secpos + (num or 1) end -end - --- Add call to embedded DynASM C code. -local function wcall(func, args) - wline(format("dasm_%s(Dst, %s);", func, concat(args, ", ")), true) -end - --- Delete duplicate action list chunks. A tad slow, but so what. -local function dedupechunk(offset) - local al, as = actlist, actstr - local chunk = char(unpack(al, offset+1, #al)) - local orig = find(as, chunk, 1, true) - if orig then - actargs[1] = orig-1 -- Replace with original offset. - for i=offset+1,#al do al[i] = nil end -- Kill dupe. - else - actstr = as..chunk - end -end - --- Flush action list (intervening C code or buffer pos overflow). -local function wflush(term) - local offset = actargs[1] - if #actlist == offset then return end -- Nothing to flush. - if not term then waction("STOP") end -- Terminate action list. - dedupechunk(offset) - wcall("put", actargs) -- Add call to dasm_put(). - actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). - secpos = 1 -- The actionlist offset occupies a buffer position, too. -end - --- Put escaped byte. -local function wputb(n) - if n >= actfirst then waction("ESC") end -- Need to escape byte. - wputxb(n) -end - ------------------------------------------------------------------------------- - --- Global label name -> global label number. With auto assignment on 1st use. -local next_global = 10 -local map_global = setmetatable({}, { __index = function(t, name) - if not match(name, "^[%a_][%w_@]*$") then werror("bad global label") end - local n = next_global - if n > 246 then werror("too many global labels") end - next_global = n + 1 - t[name] = n - return n -end}) - --- Dump global labels. -local function dumpglobals(out, lvl) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("Global labels:\n") - for i=10,next_global-1 do - out:write(format(" %s\n", t[i])) - end - out:write("\n") -end - --- Write global label enum. 
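map_global above hands out global-label numbers on first use via the __index metamethod, starting at 10 (1-9 are used for local labels, see immexpr further down) and refusing anything past 246. A stripped-down sketch of just that auto-assignment, with hypothetical label names (illustrative only; the validity checks from the original are omitted):

-- Illustrative sketch only: first access assigns the next number and caches it.
local next_global = 10
local map_global = setmetatable({}, { __index = function(t, name)
  local n = next_global
  next_global = n + 1
  t[name] = n          -- cached in the table, so __index fires only once
  return n
end })
print(map_global.vm_entry, map_global.vm_exit, map_global.vm_entry)  --> 10  11  10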
-local function writeglobals(out, prefix) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("enum {\n") - for i=10,next_global-1 do - out:write(" ", prefix, gsub(t[i], "@.*", ""), ",\n") - end - out:write(" ", prefix, "_MAX\n};\n") -end - --- Write global label names. -local function writeglobalnames(out, name) - local t = {} - for name, n in pairs(map_global) do t[n] = name end - out:write("static const char *const ", name, "[] = {\n") - for i=10,next_global-1 do - out:write(" \"", t[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Extern label name -> extern label number. With auto assignment on 1st use. -local next_extern = -1 -local map_extern = setmetatable({}, { __index = function(t, name) - -- No restrictions on the name for now. - local n = next_extern - if n < -256 then werror("too many extern labels") end - next_extern = n - 1 - t[name] = n - return n -end}) - --- Dump extern labels. -local function dumpexterns(out, lvl) - local t = {} - for name, n in pairs(map_extern) do t[-n] = name end - out:write("Extern labels:\n") - for i=1,-next_extern-1 do - out:write(format(" %s\n", t[i])) - end - out:write("\n") -end - --- Write extern label names. -local function writeexternnames(out, name) - local t = {} - for name, n in pairs(map_extern) do t[-n] = name end - out:write("static const char *const ", name, "[] = {\n") - for i=1,-next_extern-1 do - out:write(" \"", t[i], "\",\n") - end - out:write(" (const char *)0\n};\n") -end - ------------------------------------------------------------------------------- - --- Arch-specific maps. -local map_archdef = {} -- Ext. register name -> int. name. -local map_reg_rev = {} -- Int. register name -> ext. name. -local map_reg_num = {} -- Int. register name -> register number. -local map_reg_opsize = {} -- Int. register name -> operand size. -local map_reg_valid_base = {} -- Int. register name -> valid base register? -local map_reg_valid_index = {} -- Int. register name -> valid index register? -local map_reg_needrex = {} -- Int. register name -> need rex vs. no rex. -local reg_list = {} -- Canonical list of int. register names. - -local map_type = {} -- Type name -> { ctype, reg } -local ctypenum = 0 -- Type number (for _PTx macros). - -local addrsize = x64 and "q" or "d" -- Size for address operands. - --- Helper functions to fill register maps. 
-local function mkrmap(sz, cl, names) - local cname = format("@%s", sz) - reg_list[#reg_list+1] = cname - map_archdef[cl] = cname - map_reg_rev[cname] = cl - map_reg_num[cname] = -1 - map_reg_opsize[cname] = sz - if sz == addrsize or sz == "d" then - map_reg_valid_base[cname] = true - map_reg_valid_index[cname] = true - end - if names then - for n,name in ipairs(names) do - local iname = format("@%s%x", sz, n-1) - reg_list[#reg_list+1] = iname - map_archdef[name] = iname - map_reg_rev[iname] = name - map_reg_num[iname] = n-1 - map_reg_opsize[iname] = sz - if sz == "b" and n > 4 then map_reg_needrex[iname] = false end - if sz == addrsize or sz == "d" then - map_reg_valid_base[iname] = true - map_reg_valid_index[iname] = true - end - end - end - for i=0,(x64 and sz ~= "f") and 15 or 7 do - local needrex = sz == "b" and i > 3 - local iname = format("@%s%x%s", sz, i, needrex and "R" or "") - if needrex then map_reg_needrex[iname] = true end - local name - if sz == "o" then name = format("xmm%d", i) - elseif sz == "f" then name = format("st%d", i) - else name = format("r%d%s", i, sz == addrsize and "" or sz) end - map_archdef[name] = iname - if not map_reg_rev[iname] then - reg_list[#reg_list+1] = iname - map_reg_rev[iname] = name - map_reg_num[iname] = i - map_reg_opsize[iname] = sz - if sz == addrsize or sz == "d" then - map_reg_valid_base[iname] = true - map_reg_valid_index[iname] = true - end - end - end - reg_list[#reg_list+1] = "" -end - --- Integer registers (qword, dword, word and byte sized). -if x64 then - mkrmap("q", "Rq", {"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi"}) -end -mkrmap("d", "Rd", {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"}) -mkrmap("w", "Rw", {"ax", "cx", "dx", "bx", "sp", "bp", "si", "di"}) -mkrmap("b", "Rb", {"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"}) -map_reg_valid_index[map_archdef.esp] = false -if x64 then map_reg_valid_index[map_archdef.rsp] = false end -map_archdef["Ra"] = "@"..addrsize - --- FP registers (internally tword sized, but use "f" as operand size). -mkrmap("f", "Rf") - --- SSE registers (oword sized, but qword and dword accessible). -mkrmap("o", "xmm") - --- Operand size prefixes to codes. -local map_opsize = { - byte = "b", word = "w", dword = "d", qword = "q", oword = "o", tword = "t", - aword = addrsize, -} - --- Operand size code to number. -local map_opsizenum = { - b = 1, w = 2, d = 4, q = 8, o = 16, t = 10, -} - --- Operand size code to name. -local map_opsizename = { - b = "byte", w = "word", d = "dword", q = "qword", o = "oword", t = "tword", - f = "fpword", -} - --- Valid index register scale factors. -local map_xsc = { - ["1"] = 0, ["2"] = 1, ["4"] = 2, ["8"] = 3, -} - --- Condition codes. -local map_cc = { - o = 0, no = 1, b = 2, nb = 3, e = 4, ne = 5, be = 6, nbe = 7, - s = 8, ns = 9, p = 10, np = 11, l = 12, nl = 13, le = 14, nle = 15, - c = 2, nae = 2, nc = 3, ae = 3, z = 4, nz = 5, na = 6, a = 7, - pe = 10, po = 11, nge = 12, ge = 13, ng = 14, g = 15, -} - - --- Reverse defines for registers. 
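mkrmap above registers every user-visible register name under a compact internal name of the form "@" plus size letter plus hex number (so "eax" becomes "@d0" and "esp" becomes "@d4"), and _M.revdef, defined next, translates those back for diagnostics. A tiny sketch with a hand-built two-entry reverse map (illustrative only, not part of the deleted file):

-- Illustrative sketch only: reverse-mapping internal register names.
local map_reg_rev = { ["@d0"] = "eax", ["@d4"] = "esp" }
local function revdef(s) return string.gsub(s, "@%w+", map_reg_rev) end
print((revdef("mov @d0, [@d4+8]")))   --> mov eax, [esp+8]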
-function _M.revdef(s) - return gsub(s, "@%w+", map_reg_rev) -end - --- Dump register names and numbers -local function dumpregs(out) - out:write("Register names, sizes and internal numbers:\n") - for _,reg in ipairs(reg_list) do - if reg == "" then - out:write("\n") - else - local name = map_reg_rev[reg] - local num = map_reg_num[reg] - local opsize = map_opsizename[map_reg_opsize[reg]] - out:write(format(" %-5s %-8s %s\n", name, opsize, - num < 0 and "(variable)" or num)) - end - end -end - ------------------------------------------------------------------------------- - --- Put action for label arg (IMM_LG, IMM_PC, REL_LG, REL_PC). -local function wputlabel(aprefix, imm, num) - if type(imm) == "number" then - if imm < 0 then - waction("EXTERN") - wputxb(aprefix == "IMM_" and 0 or 1) - imm = -imm-1 - else - waction(aprefix.."LG", nil, num); - end - wputxb(imm) - else - waction(aprefix.."PC", imm, num) - end -end - --- Put signed byte or arg. -local function wputsbarg(n) - if type(n) == "number" then - if n < -128 or n > 127 then - werror("signed immediate byte out of range") - end - if n < 0 then n = n + 256 end - wputb(n) - else waction("IMM_S", n) end -end - --- Put unsigned byte or arg. -local function wputbarg(n) - if type(n) == "number" then - if n < 0 or n > 255 then - werror("unsigned immediate byte out of range") - end - wputb(n) - else waction("IMM_B", n) end -end - --- Put unsigned word or arg. -local function wputwarg(n) - if type(n) == "number" then - if shr(n, 16) ~= 0 then - werror("unsigned immediate word out of range") - end - wputb(band(n, 255)); wputb(shr(n, 8)); - else waction("IMM_W", n) end -end - --- Put signed or unsigned dword or arg. -local function wputdarg(n) - local tn = type(n) - if tn == "number" then - wputb(band(n, 255)) - wputb(band(shr(n, 8), 255)) - wputb(band(shr(n, 16), 255)) - wputb(shr(n, 24)) - elseif tn == "table" then - wputlabel("IMM_", n[1], 1) - else - waction("IMM_D", n) - end -end - --- Put operand-size dependent number or arg (defaults to dword). -local function wputszarg(sz, n) - if not sz or sz == "d" or sz == "q" then wputdarg(n) - elseif sz == "w" then wputwarg(n) - elseif sz == "b" then wputbarg(n) - elseif sz == "s" then wputsbarg(n) - else werror("bad operand size") end -end - --- Put multi-byte opcode with operand-size dependent modifications. -local function wputop(sz, op, rex) - local r - if rex ~= 0 and not x64 then werror("bad operand size") end - if sz == "w" then wputb(102) end - -- Needs >32 bit numbers, but only for crc32 eax, word [ebx] - if op >= 4294967296 then r = op%4294967296 wputb((op-r)/4294967296) op = r end - if op >= 16777216 then wputb(shr(op, 24)); op = band(op, 0xffffff) end - if op >= 65536 then - if rex ~= 0 then - local opc3 = band(op, 0xffff00) - if opc3 == 0x0f3a00 or opc3 == 0x0f3800 then - wputb(64 + band(rex, 15)); rex = 0 - end - end - wputb(shr(op, 16)); op = band(op, 0xffff) - end - if op >= 256 then - local b = shr(op, 8) - if b == 15 and rex ~= 0 then wputb(64 + band(rex, 15)); rex = 0 end - wputb(b) - op = band(op, 255) - end - if rex ~= 0 then wputb(64 + band(rex, 15)) end - if sz == "b" then op = op - 1 end - wputb(op) -end - --- Put ModRM or SIB formatted byte. -local function wputmodrm(m, s, rm, vs, vrm) - assert(m < 4 and s < 16 and rm < 16, "bad modrm operands") - wputb(shl(m, 6) + shl(band(s, 7), 3) + band(rm, 7)) -end - --- Put ModRM/SIB plus optional displacement. 
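wputmodrm above packs the standard x86 ModRM layout: two mode bits, three reg/opcode-extension bits, three r/m bits. A one-line check of that packing (illustrative only): mod=3 (register-direct), reg=0 (eax) and r/m=1 (ecx) gives C1, which is consistent with mov ecx, eax assembling to 89 C1 via the "mr:89Rm" mov template in the table below.

-- Illustrative sketch only: the ModRM byte produced by wputmodrm's formula.
local bit = bit or require("bit")
local band, shl = bit.band, bit.lshift
local function modrm(m, s, rm)
  return shl(m, 6) + shl(band(s, 7), 3) + band(rm, 7)
end
print(string.format("%02X", modrm(3, 0, 1)))   --> C1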
-local function wputmrmsib(t, imark, s, vsreg) - local vreg, vxreg - local reg, xreg = t.reg, t.xreg - if reg and reg < 0 then reg = 0; vreg = t.vreg end - if xreg and xreg < 0 then xreg = 0; vxreg = t.vxreg end - if s < 0 then s = 0 end - - -- Register mode. - if sub(t.mode, 1, 1) == "r" then - wputmodrm(3, s, reg) - if vsreg then waction("VREG", vsreg); wputxb(2) end - if vreg then waction("VREG", vreg); wputxb(0) end - return - end - - local disp = t.disp - local tdisp = type(disp) - -- No base register? - if not reg then - local riprel = false - if xreg then - -- Indexed mode with index register only. - -- [xreg*xsc+disp] -> (0, s, esp) (xsc, xreg, ebp) - wputmodrm(0, s, 4) - if imark == "I" then waction("MARK") end - if vsreg then waction("VREG", vsreg); wputxb(2) end - wputmodrm(t.xsc, xreg, 5) - if vxreg then waction("VREG", vxreg); wputxb(3) end - else - -- Pure 32 bit displacement. - if x64 and tdisp ~= "table" then - wputmodrm(0, s, 4) -- [disp] -> (0, s, esp) (0, esp, ebp) - if imark == "I" then waction("MARK") end - wputmodrm(0, 4, 5) - else - riprel = x64 - wputmodrm(0, s, 5) -- [disp|rip-label] -> (0, s, ebp) - if imark == "I" then waction("MARK") end - end - if vsreg then waction("VREG", vsreg); wputxb(2) end - end - if riprel then -- Emit rip-relative displacement. - if match("UWSiI", imark) then - werror("NYI: rip-relative displacement followed by immediate") - end - -- The previous byte in the action buffer cannot be 0xe9 or 0x80-0x8f. - wputlabel("REL_", disp[1], 2) - else - wputdarg(disp) - end - return - end - - local m - if tdisp == "number" then -- Check displacement size at assembly time. - if disp == 0 and band(reg, 7) ~= 5 then -- [ebp] -> [ebp+0] (in SIB, too) - if not vreg then m = 0 end -- Force DISP to allow [Rd(5)] -> [ebp+0] - elseif disp >= -128 and disp <= 127 then m = 1 - else m = 2 end - elseif tdisp == "table" then - m = 2 - end - - -- Index register present or esp as base register: need SIB encoding. - if xreg or band(reg, 7) == 4 then - wputmodrm(m or 2, s, 4) -- ModRM. - if m == nil or imark == "I" then waction("MARK") end - if vsreg then waction("VREG", vsreg); wputxb(2) end - wputmodrm(t.xsc or 0, xreg or 4, reg) -- SIB. - if vxreg then waction("VREG", vxreg); wputxb(3) end - if vreg then waction("VREG", vreg); wputxb(1) end - else - wputmodrm(m or 2, s, reg) -- ModRM. - if (imark == "I" and (m == 1 or m == 2)) or - (m == nil and (vsreg or vreg)) then waction("MARK") end - if vsreg then waction("VREG", vsreg); wputxb(2) end - if vreg then waction("VREG", vreg); wputxb(1) end - end - - -- Put displacement. - if m == 1 then wputsbarg(disp) - elseif m == 2 then wputdarg(disp) - elseif m == nil then waction("DISP", disp) end -end - ------------------------------------------------------------------------------- - --- Return human-readable operand mode string. -local function opmodestr(op, args) - local m = {} - for i=1,#args do - local a = args[i] - m[#m+1] = sub(a.mode, 1, 1)..(a.opsize or "?") - end - return op.." "..concat(m, ",") -end - --- Convert number to valid integer or nil. -local function toint(expr) - local n = tonumber(expr) - if n then - if n % 1 ~= 0 or n < -2147483648 or n > 4294967295 then - werror("bad integer number `"..expr.."'") - end - return n - end -end - --- Parse immediate expression. 
-local function immexpr(expr) - -- &expr (pointer) - if sub(expr, 1, 1) == "&" then - return "iPJ", format("(ptrdiff_t)(%s)", sub(expr,2)) - end - - local prefix = sub(expr, 1, 2) - -- =>expr (pc label reference) - if prefix == "=>" then - return "iJ", sub(expr, 3) - end - -- ->name (global label reference) - if prefix == "->" then - return "iJ", map_global[sub(expr, 3)] - end - - -- [<>][1-9] (local label reference) - local dir, lnum = match(expr, "^([<>])([1-9])$") - if dir then -- Fwd: 247-255, Bkwd: 1-9. - return "iJ", lnum + (dir == ">" and 246 or 0) - end - - local extname = match(expr, "^extern%s+(%S+)$") - if extname then - return "iJ", map_extern[extname] - end - - -- expr (interpreted as immediate) - return "iI", expr -end - --- Parse displacement expression: +-num, +-expr, +-opsize*num -local function dispexpr(expr) - local disp = expr == "" and 0 or toint(expr) - if disp then return disp end - local c, dispt = match(expr, "^([+-])%s*(.+)$") - if c == "+" then - expr = dispt - elseif not c then - werror("bad displacement expression `"..expr.."'") - end - local opsize, tailops = match(dispt, "^(%w+)%s*%*%s*(.+)$") - local ops, imm = map_opsize[opsize], toint(tailops) - if ops and imm then - if c == "-" then imm = -imm end - return imm*map_opsizenum[ops] - end - local mode, iexpr = immexpr(dispt) - if mode == "iJ" then - if c == "-" then werror("cannot invert label reference") end - return { iexpr } - end - return expr -- Need to return original signed expression. -end - --- Parse register or type expression. -local function rtexpr(expr) - if not expr then return end - local tname, ovreg = match(expr, "^([%w_]+):(@[%w_]+)$") - local tp = map_type[tname or expr] - if tp then - local reg = ovreg or tp.reg - local rnum = map_reg_num[reg] - if not rnum then - werror("type `"..(tname or expr).."' needs a register override") - end - if not map_reg_valid_base[reg] then - werror("bad base register override `"..(map_reg_rev[reg] or reg).."'") - end - return reg, rnum, tp - end - return expr, map_reg_num[expr] -end - --- Parse operand and return { mode, opsize, reg, xreg, xsc, disp, imm }. -local function parseoperand(param) - local t = {} - - local expr = param - local opsize, tailops = match(param, "^(%w+)%s*(.+)$") - if opsize then - t.opsize = map_opsize[opsize] - if t.opsize then expr = tailops end - end - - local br = match(expr, "^%[%s*(.-)%s*%]$") - repeat - if br then - t.mode = "xm" - - -- [disp] - t.disp = toint(br) - if t.disp then - t.mode = x64 and "xm" or "xmO" - break - end - - -- [reg...] - local tp - local reg, tailr = match(br, "^([@%w_:]+)%s*(.*)$") - reg, t.reg, tp = rtexpr(reg) - if not t.reg then - -- [expr] - t.mode = x64 and "xm" or "xmO" - t.disp = dispexpr("+"..br) - break - end - - if t.reg == -1 then - t.vreg, tailr = match(tailr, "^(%b())(.*)$") - if not t.vreg then werror("bad variable register expression") end - end - - -- [xreg*xsc] or [xreg*xsc+-disp] or [xreg*xsc+-expr] - local xsc, tailsc = match(tailr, "^%*%s*([1248])%s*(.*)$") - if xsc then - if not map_reg_valid_index[reg] then - werror("bad index register `"..map_reg_rev[reg].."'") - end - t.xsc = map_xsc[xsc] - t.xreg = t.reg - t.vxreg = t.vreg - t.reg = nil - t.vreg = nil - t.disp = dispexpr(tailsc) - break - end - if not map_reg_valid_base[reg] then - werror("bad base register `"..map_reg_rev[reg].."'") - end - - -- [reg] or [reg+-disp] - t.disp = toint(tailr) or (tailr == "" and 0) - if t.disp then break end - - -- [reg+xreg...] 
- local xreg, tailx = match(tailr, "^+%s*([@%w_:]+)%s*(.*)$") - xreg, t.xreg, tp = rtexpr(xreg) - if not t.xreg then - -- [reg+-expr] - t.disp = dispexpr(tailr) - break - end - if not map_reg_valid_index[xreg] then - werror("bad index register `"..map_reg_rev[xreg].."'") - end - - if t.xreg == -1 then - t.vxreg, tailx = match(tailx, "^(%b())(.*)$") - if not t.vxreg then werror("bad variable register expression") end - end - - -- [reg+xreg*xsc...] - local xsc, tailsc = match(tailx, "^%*%s*([1248])%s*(.*)$") - if xsc then - t.xsc = map_xsc[xsc] - tailx = tailsc - end - - -- [...] or [...+-disp] or [...+-expr] - t.disp = dispexpr(tailx) - else - -- imm or opsize*imm - local imm = toint(expr) - if not imm and sub(expr, 1, 1) == "*" and t.opsize then - imm = toint(sub(expr, 2)) - if imm then - imm = imm * map_opsizenum[t.opsize] - t.opsize = nil - end - end - if imm then - if t.opsize then werror("bad operand size override") end - local m = "i" - if imm == 1 then m = m.."1" end - if imm >= 4294967168 and imm <= 4294967295 then imm = imm-4294967296 end - if imm >= -128 and imm <= 127 then m = m.."S" end - t.imm = imm - t.mode = m - break - end - - local tp - local reg, tailr = match(expr, "^([@%w_:]+)%s*(.*)$") - reg, t.reg, tp = rtexpr(reg) - if t.reg then - if t.reg == -1 then - t.vreg, tailr = match(tailr, "^(%b())(.*)$") - if not t.vreg then werror("bad variable register expression") end - end - -- reg - if tailr == "" then - if t.opsize then werror("bad operand size override") end - t.opsize = map_reg_opsize[reg] - if t.opsize == "f" then - t.mode = t.reg == 0 and "fF" or "f" - else - if reg == "@w4" or (x64 and reg == "@d4") then - wwarn("bad idea, try again with `"..(x64 and "rsp'" or "esp'")) - end - t.mode = t.reg == 0 and "rmR" or (reg == "@b1" and "rmC" or "rm") - end - t.needrex = map_reg_needrex[reg] - break - end - - -- type[idx], type[idx].field, type->field -> [reg+offset_expr] - if not tp then werror("bad operand `"..param.."'") end - t.mode = "xm" - t.disp = format(tp.ctypefmt, tailr) - else - t.mode, t.imm = immexpr(expr) - if sub(t.mode, -1) == "J" then - if t.opsize and t.opsize ~= addrsize then - werror("bad operand size override") - end - t.opsize = addrsize - end - end - end - until true - return t -end - ------------------------------------------------------------------------------- --- x86 Template String Description --- =============================== --- --- Each template string is a list of [match:]pattern pairs, --- separated by "|". The first match wins. No match means a --- bad or unsupported combination of operand modes or sizes. --- --- The match part and the ":" is omitted if the operation has --- no operands. Otherwise the first N characters are matched --- against the mode strings of each of the N operands. --- --- The mode string for each operand type is (see parseoperand()): --- Integer register: "rm", +"R" for eax, ax, al, +"C" for cl --- FP register: "f", +"F" for st0 --- Index operand: "xm", +"O" for [disp] (pure offset) --- Immediate: "i", +"S" for signed 8 bit, +"1" for 1, --- +"I" for arg, +"P" for pointer --- Any: +"J" for valid jump targets --- --- So a match character "m" (mixed) matches both an integer register --- and an index operand (to be encoded with the ModRM/SIB scheme). --- But "r" matches only a register and "x" only an index operand --- (e.g. for FP memory access operations). --- --- The operand size match string starts right after the mode match --- characters and ends before the ":". "dwb" or "qdwb" is assumed, if empty. 
--- The effective data size of the operation is matched against this list. --- --- If only the regular "b", "w", "d", "q", "t" operand sizes are --- present, then all operands must be the same size. Unspecified sizes --- are ignored, but at least one operand must have a size or the pattern --- won't match (use the "byte", "word", "dword", "qword", "tword" --- operand size overrides. E.g.: mov dword [eax], 1). --- --- If the list has a "1" or "2" prefix, the operand size is taken --- from the respective operand and any other operand sizes are ignored. --- If the list contains only ".", all operand sizes are ignored. --- If the list has a "/" prefix, the concatenated (mixed) operand sizes --- are compared to the match. --- --- E.g. "rrdw" matches for either two dword registers or two word --- registers. "Fx2dq" matches an st0 operand plus an index operand --- pointing to a dword (float) or qword (double). --- --- Every character after the ":" is part of the pattern string: --- Hex chars are accumulated to form the opcode (left to right). --- "n" disables the standard opcode mods --- (otherwise: -1 for "b", o16 prefix for "w", rex.w for "q") --- "X" Force REX.W. --- "r"/"R" adds the reg. number from the 1st/2nd operand to the opcode. --- "m"/"M" generates ModRM/SIB from the 1st/2nd operand. --- The spare 3 bits are either filled with the last hex digit or --- the result from a previous "r"/"R". The opcode is restored. --- --- All of the following characters force a flush of the opcode: --- "o"/"O" stores a pure 32 bit disp (offset) from the 1st/2nd operand. --- "S" stores a signed 8 bit immediate from the last operand. --- "U" stores an unsigned 8 bit immediate from the last operand. --- "W" stores an unsigned 16 bit immediate from the last operand. --- "i" stores an operand sized immediate from the last operand. --- "I" dito, but generates an action code to optionally modify --- the opcode (+2) for a signed 8 bit immediate. --- "J" generates one of the REL action codes from the last operand. --- ------------------------------------------------------------------------------- - --- Template strings for x86 instructions. Ordered by first opcode byte. --- Unimplemented opcodes (deliberate omissions) are marked with *. -local map_op = { - -- 00-05: add... - -- 06: *push es - -- 07: *pop es - -- 08-0D: or... - -- 0E: *push cs - -- 0F: two byte opcode prefix - -- 10-15: adc... - -- 16: *push ss - -- 17: *pop ss - -- 18-1D: sbb... - -- 1E: *push ds - -- 1F: *pop ds - -- 20-25: and... - es_0 = "26", - -- 27: *daa - -- 28-2D: sub... - cs_0 = "2E", - -- 2F: *das - -- 30-35: xor... - ss_0 = "36", - -- 37: *aaa - -- 38-3D: cmp... - ds_0 = "3E", - -- 3F: *aas - inc_1 = x64 and "m:FF0m" or "rdw:40r|m:FF0m", - dec_1 = x64 and "m:FF1m" or "rdw:48r|m:FF1m", - push_1 = (x64 and "rq:n50r|rw:50r|mq:nFF6m|mw:FF6m" or - "rdw:50r|mdw:FF6m").."|S.:6AS|ib:n6Ai|i.:68i", - pop_1 = x64 and "rq:n58r|rw:58r|mq:n8F0m|mw:8F0m" or "rdw:58r|mdw:8F0m", - -- 60: *pusha, *pushad, *pushaw - -- 61: *popa, *popad, *popaw - -- 62: *bound rdw,x - -- 63: x86: *arpl mw,rw - movsxd_2 = x64 and "rm/qd:63rM", - fs_0 = "64", - gs_0 = "65", - o16_0 = "66", - a16_0 = not x64 and "67" or nil, - a32_0 = x64 and "67", - -- 68: push idw - -- 69: imul rdw,mdw,idw - -- 6A: push ib - -- 6B: imul rdw,mdw,S - -- 6C: *insb - -- 6D: *insd, *insw - -- 6E: *outsb - -- 6F: *outsd, *outsw - -- 70-7F: jcc lb - -- 80: add... mb,i - -- 81: add... mdw,i - -- 82: *undefined - -- 83: add... 
mdw,S - test_2 = "mr:85Rm|rm:85rM|Ri:A9ri|mi:F70mi", - -- 86: xchg rb,mb - -- 87: xchg rdw,mdw - -- 88: mov mb,r - -- 89: mov mdw,r - -- 8A: mov r,mb - -- 8B: mov r,mdw - -- 8C: *mov mdw,seg - lea_2 = "rx1dq:8DrM", - -- 8E: *mov seg,mdw - -- 8F: pop mdw - nop_0 = "90", - xchg_2 = "Rrqdw:90R|rRqdw:90r|rm:87rM|mr:87Rm", - cbw_0 = "6698", - cwde_0 = "98", - cdqe_0 = "4898", - cwd_0 = "6699", - cdq_0 = "99", - cqo_0 = "4899", - -- 9A: *call iw:idw - wait_0 = "9B", - fwait_0 = "9B", - pushf_0 = "9C", - pushfd_0 = not x64 and "9C", - pushfq_0 = x64 and "9C", - popf_0 = "9D", - popfd_0 = not x64 and "9D", - popfq_0 = x64 and "9D", - sahf_0 = "9E", - lahf_0 = "9F", - mov_2 = "OR:A3o|RO:A1O|mr:89Rm|rm:8BrM|rib:nB0ri|ridw:B8ri|mi:C70mi", - movsb_0 = "A4", - movsw_0 = "66A5", - movsd_0 = "A5", - cmpsb_0 = "A6", - cmpsw_0 = "66A7", - cmpsd_0 = "A7", - -- A8: test Rb,i - -- A9: test Rdw,i - stosb_0 = "AA", - stosw_0 = "66AB", - stosd_0 = "AB", - lodsb_0 = "AC", - lodsw_0 = "66AD", - lodsd_0 = "AD", - scasb_0 = "AE", - scasw_0 = "66AF", - scasd_0 = "AF", - -- B0-B7: mov rb,i - -- B8-BF: mov rdw,i - -- C0: rol... mb,i - -- C1: rol... mdw,i - ret_1 = "i.:nC2W", - ret_0 = "C3", - -- C4: *les rdw,mq - -- C5: *lds rdw,mq - -- C6: mov mb,i - -- C7: mov mdw,i - -- C8: *enter iw,ib - leave_0 = "C9", - -- CA: *retf iw - -- CB: *retf - int3_0 = "CC", - int_1 = "i.:nCDU", - into_0 = "CE", - -- CF: *iret - -- D0: rol... mb,1 - -- D1: rol... mdw,1 - -- D2: rol... mb,cl - -- D3: rol... mb,cl - -- D4: *aam ib - -- D5: *aad ib - -- D6: *salc - -- D7: *xlat - -- D8-DF: floating point ops - -- E0: *loopne - -- E1: *loope - -- E2: *loop - -- E3: *jcxz, *jecxz - -- E4: *in Rb,ib - -- E5: *in Rdw,ib - -- E6: *out ib,Rb - -- E7: *out ib,Rdw - call_1 = x64 and "mq:nFF2m|J.:E8nJ" or "md:FF2m|J.:E8J", - jmp_1 = x64 and "mq:nFF4m|J.:E9nJ" or "md:FF4m|J.:E9J", -- short: EB - -- EA: *jmp iw:idw - -- EB: jmp ib - -- EC: *in Rb,dx - -- ED: *in Rdw,dx - -- EE: *out dx,Rb - -- EF: *out dx,Rdw - lock_0 = "F0", - int1_0 = "F1", - repne_0 = "F2", - repnz_0 = "F2", - rep_0 = "F3", - repe_0 = "F3", - repz_0 = "F3", - -- F4: *hlt - cmc_0 = "F5", - -- F6: test... mb,i; div... mb - -- F7: test... mdw,i; div... mdw - clc_0 = "F8", - stc_0 = "F9", - -- FA: *cli - cld_0 = "FC", - std_0 = "FD", - -- FE: inc... mb - -- FF: inc... mdw - - -- misc ops - not_1 = "m:F72m", - neg_1 = "m:F73m", - mul_1 = "m:F74m", - imul_1 = "m:F75m", - div_1 = "m:F76m", - idiv_1 = "m:F77m", - - imul_2 = "rmqdw:0FAFrM|rIqdw:69rmI|rSqdw:6BrmS|riqdw:69rmi", - imul_3 = "rmIqdw:69rMI|rmSqdw:6BrMS|rmiqdw:69rMi", - - movzx_2 = "rm/db:0FB6rM|rm/qb:|rm/wb:0FB6rM|rm/dw:0FB7rM|rm/qw:", - movsx_2 = "rm/db:0FBErM|rm/qb:|rm/wb:0FBErM|rm/dw:0FBFrM|rm/qw:", - - bswap_1 = "rqd:0FC8r", - bsf_2 = "rmqdw:0FBCrM", - bsr_2 = "rmqdw:0FBDrM", - bt_2 = "mrqdw:0FA3Rm|miqdw:0FBA4mU", - btc_2 = "mrqdw:0FBBRm|miqdw:0FBA7mU", - btr_2 = "mrqdw:0FB3Rm|miqdw:0FBA6mU", - bts_2 = "mrqdw:0FABRm|miqdw:0FBA5mU", - - shld_3 = "mriqdw:0FA4RmU|mrCqdw:0FA5Rm", - shrd_3 = "mriqdw:0FACRmU|mrCqdw:0FADRm", - - rdtsc_0 = "0F31", -- P1+ - cpuid_0 = "0FA2", -- P1+ - - -- floating point ops - fst_1 = "ff:DDD0r|xd:D92m|xq:nDD2m", - fstp_1 = "ff:DDD8r|xd:D93m|xq:nDD3m|xt:DB7m", - fld_1 = "ff:D9C0r|xd:D90m|xq:nDD0m|xt:DB5m", - - fpop_0 = "DDD8", -- Alias for fstp st0. 
- - fist_1 = "xw:nDF2m|xd:DB2m", - fistp_1 = "xw:nDF3m|xd:DB3m|xq:nDF7m", - fild_1 = "xw:nDF0m|xd:DB0m|xq:nDF5m", - - fxch_0 = "D9C9", - fxch_1 = "ff:D9C8r", - fxch_2 = "fFf:D9C8r|Fff:D9C8R", - - fucom_1 = "ff:DDE0r", - fucom_2 = "Fff:DDE0R", - fucomp_1 = "ff:DDE8r", - fucomp_2 = "Fff:DDE8R", - fucomi_1 = "ff:DBE8r", -- P6+ - fucomi_2 = "Fff:DBE8R", -- P6+ - fucomip_1 = "ff:DFE8r", -- P6+ - fucomip_2 = "Fff:DFE8R", -- P6+ - fcomi_1 = "ff:DBF0r", -- P6+ - fcomi_2 = "Fff:DBF0R", -- P6+ - fcomip_1 = "ff:DFF0r", -- P6+ - fcomip_2 = "Fff:DFF0R", -- P6+ - fucompp_0 = "DAE9", - fcompp_0 = "DED9", - - fldenv_1 = "x.:D94m", - fnstenv_1 = "x.:D96m", - fstenv_1 = "x.:9BD96m", - fldcw_1 = "xw:nD95m", - fstcw_1 = "xw:n9BD97m", - fnstcw_1 = "xw:nD97m", - fstsw_1 = "Rw:n9BDFE0|xw:n9BDD7m", - fnstsw_1 = "Rw:nDFE0|xw:nDD7m", - fclex_0 = "9BDBE2", - fnclex_0 = "DBE2", - - fnop_0 = "D9D0", - -- D9D1-D9DF: unassigned - - fchs_0 = "D9E0", - fabs_0 = "D9E1", - -- D9E2: unassigned - -- D9E3: unassigned - ftst_0 = "D9E4", - fxam_0 = "D9E5", - -- D9E6: unassigned - -- D9E7: unassigned - fld1_0 = "D9E8", - fldl2t_0 = "D9E9", - fldl2e_0 = "D9EA", - fldpi_0 = "D9EB", - fldlg2_0 = "D9EC", - fldln2_0 = "D9ED", - fldz_0 = "D9EE", - -- D9EF: unassigned - - f2xm1_0 = "D9F0", - fyl2x_0 = "D9F1", - fptan_0 = "D9F2", - fpatan_0 = "D9F3", - fxtract_0 = "D9F4", - fprem1_0 = "D9F5", - fdecstp_0 = "D9F6", - fincstp_0 = "D9F7", - fprem_0 = "D9F8", - fyl2xp1_0 = "D9F9", - fsqrt_0 = "D9FA", - fsincos_0 = "D9FB", - frndint_0 = "D9FC", - fscale_0 = "D9FD", - fsin_0 = "D9FE", - fcos_0 = "D9FF", - - -- SSE, SSE2 - andnpd_2 = "rmo:660F55rM", - andnps_2 = "rmo:0F55rM", - andpd_2 = "rmo:660F54rM", - andps_2 = "rmo:0F54rM", - clflush_1 = "x.:0FAE7m", - cmppd_3 = "rmio:660FC2rMU", - cmpps_3 = "rmio:0FC2rMU", - cmpsd_3 = "rrio:F20FC2rMU|rxi/oq:", - cmpss_3 = "rrio:F30FC2rMU|rxi/od:", - comisd_2 = "rro:660F2FrM|rx/oq:", - comiss_2 = "rro:0F2FrM|rx/od:", - cvtdq2pd_2 = "rro:F30FE6rM|rx/oq:", - cvtdq2ps_2 = "rmo:0F5BrM", - cvtpd2dq_2 = "rmo:F20FE6rM", - cvtpd2ps_2 = "rmo:660F5ArM", - cvtpi2pd_2 = "rx/oq:660F2ArM", - cvtpi2ps_2 = "rx/oq:0F2ArM", - cvtps2dq_2 = "rmo:660F5BrM", - cvtps2pd_2 = "rro:0F5ArM|rx/oq:", - cvtsd2si_2 = "rr/do:F20F2DrM|rr/qo:|rx/dq:|rxq:", - cvtsd2ss_2 = "rro:F20F5ArM|rx/oq:", - cvtsi2sd_2 = "rm/od:F20F2ArM|rm/oq:F20F2ArXM", - cvtsi2ss_2 = "rm/od:F30F2ArM|rm/oq:F30F2ArXM", - cvtss2sd_2 = "rro:F30F5ArM|rx/od:", - cvtss2si_2 = "rr/do:F30F2DrM|rr/qo:|rxd:|rx/qd:", - cvttpd2dq_2 = "rmo:660FE6rM", - cvttps2dq_2 = "rmo:F30F5BrM", - cvttsd2si_2 = "rr/do:F20F2CrM|rr/qo:|rx/dq:|rxq:", - cvttss2si_2 = "rr/do:F30F2CrM|rr/qo:|rxd:|rx/qd:", - fxsave_1 = "x.:0FAE0m", - fxrstor_1 = "x.:0FAE1m", - ldmxcsr_1 = "xd:0FAE2m", - lfence_0 = "0FAEE8", - maskmovdqu_2 = "rro:660FF7rM", - mfence_0 = "0FAEF0", - movapd_2 = "rmo:660F28rM|mro:660F29Rm", - movaps_2 = "rmo:0F28rM|mro:0F29Rm", - movd_2 = "rm/od:660F6ErM|rm/oq:660F6ErXM|mr/do:660F7ERm|mr/qo:", - movdqa_2 = "rmo:660F6FrM|mro:660F7FRm", - movdqu_2 = "rmo:F30F6FrM|mro:F30F7FRm", - movhlps_2 = "rro:0F12rM", - movhpd_2 = "rx/oq:660F16rM|xr/qo:n660F17Rm", - movhps_2 = "rx/oq:0F16rM|xr/qo:n0F17Rm", - movlhps_2 = "rro:0F16rM", - movlpd_2 = "rx/oq:660F12rM|xr/qo:n660F13Rm", - movlps_2 = "rx/oq:0F12rM|xr/qo:n0F13Rm", - movmskpd_2 = "rr/do:660F50rM", - movmskps_2 = "rr/do:0F50rM", - movntdq_2 = "xro:660FE7Rm", - movnti_2 = "xrqd:0FC3Rm", - movntpd_2 = "xro:660F2BRm", - movntps_2 = "xro:0F2BRm", - movq_2 = "rro:F30F7ErM|rx/oq:|xr/qo:n660FD6Rm", - movsd_2 = "rro:F20F10rM|rx/oq:|xr/qo:nF20F11Rm", - 
movss_2 = "rro:F30F10rM|rx/od:|xr/do:F30F11Rm", - movupd_2 = "rmo:660F10rM|mro:660F11Rm", - movups_2 = "rmo:0F10rM|mro:0F11Rm", - orpd_2 = "rmo:660F56rM", - orps_2 = "rmo:0F56rM", - packssdw_2 = "rmo:660F6BrM", - packsswb_2 = "rmo:660F63rM", - packuswb_2 = "rmo:660F67rM", - paddb_2 = "rmo:660FFCrM", - paddd_2 = "rmo:660FFErM", - paddq_2 = "rmo:660FD4rM", - paddsb_2 = "rmo:660FECrM", - paddsw_2 = "rmo:660FEDrM", - paddusb_2 = "rmo:660FDCrM", - paddusw_2 = "rmo:660FDDrM", - paddw_2 = "rmo:660FFDrM", - pand_2 = "rmo:660FDBrM", - pandn_2 = "rmo:660FDFrM", - pause_0 = "F390", - pavgb_2 = "rmo:660FE0rM", - pavgw_2 = "rmo:660FE3rM", - pcmpeqb_2 = "rmo:660F74rM", - pcmpeqd_2 = "rmo:660F76rM", - pcmpeqw_2 = "rmo:660F75rM", - pcmpgtb_2 = "rmo:660F64rM", - pcmpgtd_2 = "rmo:660F66rM", - pcmpgtw_2 = "rmo:660F65rM", - pextrw_3 = "rri/do:660FC5rMU|xri/wo:660F3A15nrMU", -- Mem op: SSE4.1 only. - pinsrw_3 = "rri/od:660FC4rMU|rxi/ow:", - pmaddwd_2 = "rmo:660FF5rM", - pmaxsw_2 = "rmo:660FEErM", - pmaxub_2 = "rmo:660FDErM", - pminsw_2 = "rmo:660FEArM", - pminub_2 = "rmo:660FDArM", - pmovmskb_2 = "rr/do:660FD7rM", - pmulhuw_2 = "rmo:660FE4rM", - pmulhw_2 = "rmo:660FE5rM", - pmullw_2 = "rmo:660FD5rM", - pmuludq_2 = "rmo:660FF4rM", - por_2 = "rmo:660FEBrM", - prefetchnta_1 = "xb:n0F180m", - prefetcht0_1 = "xb:n0F181m", - prefetcht1_1 = "xb:n0F182m", - prefetcht2_1 = "xb:n0F183m", - psadbw_2 = "rmo:660FF6rM", - pshufd_3 = "rmio:660F70rMU", - pshufhw_3 = "rmio:F30F70rMU", - pshuflw_3 = "rmio:F20F70rMU", - pslld_2 = "rmo:660FF2rM|rio:660F726mU", - pslldq_2 = "rio:660F737mU", - psllq_2 = "rmo:660FF3rM|rio:660F736mU", - psllw_2 = "rmo:660FF1rM|rio:660F716mU", - psrad_2 = "rmo:660FE2rM|rio:660F724mU", - psraw_2 = "rmo:660FE1rM|rio:660F714mU", - psrld_2 = "rmo:660FD2rM|rio:660F722mU", - psrldq_2 = "rio:660F733mU", - psrlq_2 = "rmo:660FD3rM|rio:660F732mU", - psrlw_2 = "rmo:660FD1rM|rio:660F712mU", - psubb_2 = "rmo:660FF8rM", - psubd_2 = "rmo:660FFArM", - psubq_2 = "rmo:660FFBrM", - psubsb_2 = "rmo:660FE8rM", - psubsw_2 = "rmo:660FE9rM", - psubusb_2 = "rmo:660FD8rM", - psubusw_2 = "rmo:660FD9rM", - psubw_2 = "rmo:660FF9rM", - punpckhbw_2 = "rmo:660F68rM", - punpckhdq_2 = "rmo:660F6ArM", - punpckhqdq_2 = "rmo:660F6DrM", - punpckhwd_2 = "rmo:660F69rM", - punpcklbw_2 = "rmo:660F60rM", - punpckldq_2 = "rmo:660F62rM", - punpcklqdq_2 = "rmo:660F6CrM", - punpcklwd_2 = "rmo:660F61rM", - pxor_2 = "rmo:660FEFrM", - rcpps_2 = "rmo:0F53rM", - rcpss_2 = "rro:F30F53rM|rx/od:", - rsqrtps_2 = "rmo:0F52rM", - rsqrtss_2 = "rmo:F30F52rM", - sfence_0 = "0FAEF8", - shufpd_3 = "rmio:660FC6rMU", - shufps_3 = "rmio:0FC6rMU", - stmxcsr_1 = "xd:0FAE3m", - ucomisd_2 = "rro:660F2ErM|rx/oq:", - ucomiss_2 = "rro:0F2ErM|rx/od:", - unpckhpd_2 = "rmo:660F15rM", - unpckhps_2 = "rmo:0F15rM", - unpcklpd_2 = "rmo:660F14rM", - unpcklps_2 = "rmo:0F14rM", - xorpd_2 = "rmo:660F57rM", - xorps_2 = "rmo:0F57rM", - - -- SSE3 ops - fisttp_1 = "xw:nDF1m|xd:DB1m|xq:nDD1m", - addsubpd_2 = "rmo:660FD0rM", - addsubps_2 = "rmo:F20FD0rM", - haddpd_2 = "rmo:660F7CrM", - haddps_2 = "rmo:F20F7CrM", - hsubpd_2 = "rmo:660F7DrM", - hsubps_2 = "rmo:F20F7DrM", - lddqu_2 = "rxo:F20FF0rM", - movddup_2 = "rmo:F20F12rM", - movshdup_2 = "rmo:F30F16rM", - movsldup_2 = "rmo:F30F12rM", - - -- SSSE3 ops - pabsb_2 = "rmo:660F381CrM", - pabsd_2 = "rmo:660F381ErM", - pabsw_2 = "rmo:660F381DrM", - palignr_3 = "rmio:660F3A0FrMU", - phaddd_2 = "rmo:660F3802rM", - phaddsw_2 = "rmo:660F3803rM", - phaddw_2 = "rmo:660F3801rM", - phsubd_2 = "rmo:660F3806rM", - phsubsw_2 = "rmo:660F3807rM", - phsubw_2 
= "rmo:660F3805rM", - pmaddubsw_2 = "rmo:660F3804rM", - pmulhrsw_2 = "rmo:660F380BrM", - pshufb_2 = "rmo:660F3800rM", - psignb_2 = "rmo:660F3808rM", - psignd_2 = "rmo:660F380ArM", - psignw_2 = "rmo:660F3809rM", - - -- SSE4.1 ops - blendpd_3 = "rmio:660F3A0DrMU", - blendps_3 = "rmio:660F3A0CrMU", - blendvpd_3 = "rmRo:660F3815rM", - blendvps_3 = "rmRo:660F3814rM", - dppd_3 = "rmio:660F3A41rMU", - dpps_3 = "rmio:660F3A40rMU", - extractps_3 = "mri/do:660F3A17RmU|rri/qo:660F3A17RXmU", - insertps_3 = "rrio:660F3A21rMU|rxi/od:", - movntdqa_2 = "rmo:660F382ArM", - mpsadbw_3 = "rmio:660F3A42rMU", - packusdw_2 = "rmo:660F382BrM", - pblendvb_3 = "rmRo:660F3810rM", - pblendw_3 = "rmio:660F3A0ErMU", - pcmpeqq_2 = "rmo:660F3829rM", - pextrb_3 = "rri/do:660F3A14nRmU|rri/qo:|xri/bo:", - pextrd_3 = "mri/do:660F3A16RmU", - pextrq_3 = "mri/qo:660F3A16RmU", - -- pextrw is SSE2, mem operand is SSE4.1 only - phminposuw_2 = "rmo:660F3841rM", - pinsrb_3 = "rri/od:660F3A20nrMU|rxi/ob:", - pinsrd_3 = "rmi/od:660F3A22rMU", - pinsrq_3 = "rmi/oq:660F3A22rXMU", - pmaxsb_2 = "rmo:660F383CrM", - pmaxsd_2 = "rmo:660F383DrM", - pmaxud_2 = "rmo:660F383FrM", - pmaxuw_2 = "rmo:660F383ErM", - pminsb_2 = "rmo:660F3838rM", - pminsd_2 = "rmo:660F3839rM", - pminud_2 = "rmo:660F383BrM", - pminuw_2 = "rmo:660F383ArM", - pmovsxbd_2 = "rro:660F3821rM|rx/od:", - pmovsxbq_2 = "rro:660F3822rM|rx/ow:", - pmovsxbw_2 = "rro:660F3820rM|rx/oq:", - pmovsxdq_2 = "rro:660F3825rM|rx/oq:", - pmovsxwd_2 = "rro:660F3823rM|rx/oq:", - pmovsxwq_2 = "rro:660F3824rM|rx/od:", - pmovzxbd_2 = "rro:660F3831rM|rx/od:", - pmovzxbq_2 = "rro:660F3832rM|rx/ow:", - pmovzxbw_2 = "rro:660F3830rM|rx/oq:", - pmovzxdq_2 = "rro:660F3835rM|rx/oq:", - pmovzxwd_2 = "rro:660F3833rM|rx/oq:", - pmovzxwq_2 = "rro:660F3834rM|rx/od:", - pmuldq_2 = "rmo:660F3828rM", - pmulld_2 = "rmo:660F3840rM", - ptest_2 = "rmo:660F3817rM", - roundpd_3 = "rmio:660F3A09rMU", - roundps_3 = "rmio:660F3A08rMU", - roundsd_3 = "rrio:660F3A0BrMU|rxi/oq:", - roundss_3 = "rrio:660F3A0ArMU|rxi/od:", - - -- SSE4.2 ops - crc32_2 = "rmqd:F20F38F1rM|rm/dw:66F20F38F1rM|rm/db:F20F38F0rM|rm/qb:", - pcmpestri_3 = "rmio:660F3A61rMU", - pcmpestrm_3 = "rmio:660F3A60rMU", - pcmpgtq_2 = "rmo:660F3837rM", - pcmpistri_3 = "rmio:660F3A63rMU", - pcmpistrm_3 = "rmio:660F3A62rMU", - popcnt_2 = "rmqdw:F30FB8rM", - - -- SSE4a - extrq_2 = "rro:660F79rM", - extrq_3 = "riio:660F780mUU", - insertq_2 = "rro:F20F79rM", - insertq_4 = "rriio:F20F78rMUU", - lzcnt_2 = "rmqdw:F30FBDrM", - movntsd_2 = "xr/qo:nF20F2BRm", - movntss_2 = "xr/do:F30F2BRm", - -- popcnt is also in SSE4.2 -} - ------------------------------------------------------------------------------- - --- Arithmetic ops. -for name,n in pairs{ add = 0, ["or"] = 1, adc = 2, sbb = 3, - ["and"] = 4, sub = 5, xor = 6, cmp = 7 } do - local n8 = shl(n, 3) - map_op[name.."_2"] = format( - "mr:%02XRm|rm:%02XrM|mI1qdw:81%XmI|mS1qdw:83%XmS|Ri1qdwb:%02Xri|mi1qdwb:81%Xmi", - 1+n8, 3+n8, n, n, 5+n8, n) -end - --- Shift ops. -for name,n in pairs{ rol = 0, ror = 1, rcl = 2, rcr = 3, - shl = 4, shr = 5, sar = 7, sal = 4 } do - map_op[name.."_2"] = format("m1:D1%Xm|mC1qdwb:D3%Xm|mi:C1%XmU", n, n, n) -end - --- Conditional ops. -for cc,n in pairs(map_cc) do - map_op["j"..cc.."_1"] = format("J.:n0F8%XJ", n) -- short: 7%X - map_op["set"..cc.."_1"] = format("mb:n0F9%X2m", n) - map_op["cmov"..cc.."_2"] = format("rmqdw:0F4%XrM", n) -- P6+ -end - --- FP arithmetic ops. 
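A worked illustration of the template notation documented earlier in this file, assuming the surrounding definitions: how one hand-written map_op entry is read, and what the integer arithmetic-op generator above stores for a single mnemonic.

  -- Reading mov_2's last alternative "mi:C70mi":
  --   match "mi"      operand 1 is a register-or-memory operand, operand 2 an immediate
  --   size match ""   defaults to "dwb" (or "qdwb" on x64)
  --   pattern "C70mi" the hex digits accumulate the opcode 0xC70; "m" then takes the
  --                   trailing digit as the ModRM spare field (/0) and restores 0xC7;
  --                   "i" emits an operand-sized immediate.
  -- So "mov dword [eax], 1" assembles to C7 /0 id.

  -- The arithmetic-op loop above, for name = "add" (n = 0, n8 = 0), stores:
  assert(string.format(
    "mr:%02XRm|rm:%02XrM|mI1qdw:81%XmI|mS1qdw:83%XmS|Ri1qdwb:%02Xri|mi1qdwb:81%Xmi",
    1, 3, 0, 0, 5, 0) ==
    "mr:01Rm|rm:03rM|mI1qdw:810mI|mS1qdw:830mS|Ri1qdwb:05ri|mi1qdwb:810mi")
  -- 01/03/05 are ADD's r/m,r, r,r/m and eAX,imm opcodes; /0 is ADD's group-1 slot.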
-for name,n in pairs{ add = 0, mul = 1, com = 2, comp = 3, - sub = 4, subr = 5, div = 6, divr = 7 } do - local nc = 0xc0 + shl(n, 3) - local nr = nc + (n < 4 and 0 or (n % 2 == 0 and 8 or -8)) - local fn = "f"..name - map_op[fn.."_1"] = format("ff:D8%02Xr|xd:D8%Xm|xq:nDC%Xm", nc, n, n) - if n == 2 or n == 3 then - map_op[fn.."_2"] = format("Fff:D8%02XR|Fx2d:D8%XM|Fx2q:nDC%XM", nc, n, n) - else - map_op[fn.."_2"] = format("Fff:D8%02XR|fFf:DC%02Xr|Fx2d:D8%XM|Fx2q:nDC%XM", nc, nr, n, n) - map_op[fn.."p_1"] = format("ff:DE%02Xr", nr) - map_op[fn.."p_2"] = format("fFf:DE%02Xr", nr) - end - map_op["fi"..name.."_1"] = format("xd:DA%Xm|xw:nDE%Xm", n, n) -end - --- FP conditional moves. -for cc,n in pairs{ b=0, e=1, be=2, u=3, nb=4, ne=5, nbe=6, nu=7 } do - local nc = 0xdac0 + shl(band(n, 3), 3) + shl(band(n, 4), 6) - map_op["fcmov"..cc.."_1"] = format("ff:%04Xr", nc) -- P6+ - map_op["fcmov"..cc.."_2"] = format("Fff:%04XR", nc) -- P6+ -end - --- SSE FP arithmetic ops. -for name,n in pairs{ sqrt = 1, add = 8, mul = 9, - sub = 12, min = 13, div = 14, max = 15 } do - map_op[name.."ps_2"] = format("rmo:0F5%XrM", n) - map_op[name.."ss_2"] = format("rro:F30F5%XrM|rx/od:", n) - map_op[name.."pd_2"] = format("rmo:660F5%XrM", n) - map_op[name.."sd_2"] = format("rro:F20F5%XrM|rx/oq:", n) -end - ------------------------------------------------------------------------------- - --- Process pattern string. -local function dopattern(pat, args, sz, op, needrex) - local digit, addin - local opcode = 0 - local szov = sz - local narg = 1 - local rex = 0 - - -- Limit number of section buffer positions used by a single dasm_put(). - -- A single opcode needs a maximum of 5 positions. - if secpos+5 > maxsecpos then wflush() end - - -- Process each character. - for c in gmatch(pat.."|", ".") do - if match(c, "%x") then -- Hex digit. - digit = byte(c) - 48 - if digit > 48 then digit = digit - 39 - elseif digit > 16 then digit = digit - 7 end - opcode = opcode*16 + digit - addin = nil - elseif c == "n" then -- Disable operand size mods for opcode. - szov = nil - elseif c == "X" then -- Force REX.W. - rex = 8 - elseif c == "r" then -- Merge 1st operand regno. into opcode. - addin = args[1]; opcode = opcode + (addin.reg % 8) - if narg < 2 then narg = 2 end - elseif c == "R" then -- Merge 2nd operand regno. into opcode. - addin = args[2]; opcode = opcode + (addin.reg % 8) - narg = 3 - elseif c == "m" or c == "M" then -- Encode ModRM/SIB. - local s - if addin then - s = addin.reg - opcode = opcode - band(s, 7) -- Undo regno opcode merge. - else - s = band(opcode, 15) -- Undo last digit. - opcode = shr(opcode, 4) - end - local nn = c == "m" and 1 or 2 - local t = args[nn] - if narg <= nn then narg = nn + 1 end - if szov == "q" and rex == 0 then rex = rex + 8 end - if t.reg and t.reg > 7 then rex = rex + 1 end - if t.xreg and t.xreg > 7 then rex = rex + 2 end - if s > 7 then rex = rex + 4 end - if needrex then rex = rex + 16 end - wputop(szov, opcode, rex); opcode = nil - local imark = sub(pat, -1) -- Force a mark (ugly). - -- Put ModRM/SIB with regno/last digit as spare. - wputmrmsib(t, imark, s, addin and addin.vreg) - addin = nil - else - if opcode then -- Flush opcode. 
- if szov == "q" and rex == 0 then rex = rex + 8 end - if needrex then rex = rex + 16 end - if addin and addin.reg == -1 then - wputop(szov, opcode - 7, rex) - waction("VREG", addin.vreg); wputxb(0) - else - if addin and addin.reg > 7 then rex = rex + 1 end - wputop(szov, opcode, rex) - end - opcode = nil - end - if c == "|" then break end - if c == "o" then -- Offset (pure 32 bit displacement). - wputdarg(args[1].disp); if narg < 2 then narg = 2 end - elseif c == "O" then - wputdarg(args[2].disp); narg = 3 - else - -- Anything else is an immediate operand. - local a = args[narg] - narg = narg + 1 - local mode, imm = a.mode, a.imm - if mode == "iJ" and not match("iIJ", c) then - werror("bad operand size for label") - end - if c == "S" then - wputsbarg(imm) - elseif c == "U" then - wputbarg(imm) - elseif c == "W" then - wputwarg(imm) - elseif c == "i" or c == "I" then - if mode == "iJ" then - wputlabel("IMM_", imm, 1) - elseif mode == "iI" and c == "I" then - waction(sz == "w" and "IMM_WB" or "IMM_DB", imm) - else - wputszarg(sz, imm) - end - elseif c == "J" then - if mode == "iPJ" then - waction("REL_A", imm) -- !x64 (secpos) - else - wputlabel("REL_", imm, 2) - end - else - werror("bad char `"..c.."' in pattern `"..pat.."' for `"..op.."'") - end - end - end - end -end - ------------------------------------------------------------------------------- - --- Mapping of operand modes to short names. Suppress output with '#'. -local map_modename = { - r = "reg", R = "eax", C = "cl", x = "mem", m = "mrm", i = "imm", - f = "stx", F = "st0", J = "lbl", ["1"] = "1", - I = "#", S = "#", O = "#", -} - --- Return a table/string showing all possible operand modes. -local function templatehelp(template, nparams) - if nparams == 0 then return "" end - local t = {} - for tm in gmatch(template, "[^%|]+") do - local s = map_modename[sub(tm, 1, 1)] - s = s..gsub(sub(tm, 2, nparams), ".", function(c) - return ", "..map_modename[c] - end) - if not match(s, "#") then t[#t+1] = s end - end - return t -end - --- Match operand modes against mode match part of template. -local function matchtm(tm, args) - for i=1,#args do - if not match(args[i].mode, sub(tm, i, i)) then return end - end - return true -end - --- Handle opcodes defined with template strings. -map_op[".template__"] = function(params, template, nparams) - if not params then return templatehelp(template, nparams) end - local args = {} - - -- Zero-operand opcodes have no match part. - if #params == 0 then - dopattern(template, args, "d", params.op, nil) - return - end - - -- Determine common operand size (coerce undefined size) or flag as mixed. - local sz, szmix, needrex - for i,p in ipairs(params) do - args[i] = parseoperand(p) - local nsz = args[i].opsize - if nsz then - if sz and sz ~= nsz then szmix = true else sz = nsz end - end - local nrex = args[i].needrex - if nrex ~= nil then - if needrex == nil then - needrex = nrex - elseif needrex ~= nrex then - werror("bad mix of byte-addressable registers") - end - end - end - - -- Try all match:pattern pairs (separated by '|'). - local gotmatch, lastpat - for tm in gmatch(template, "[^%|]+") do - -- Split off size match (starts after mode match) and pattern string. - local szm, pat = match(tm, "^(.-):(.*)$", #args+1) - if pat == "" then pat = lastpat else lastpat = pat end - if matchtm(tm, args) then - local prefix = sub(szm, 1, 1) - if prefix == "/" then -- Match both operand sizes. 
- if args[1].opsize == sub(szm, 2, 2) and - args[2].opsize == sub(szm, 3, 3) then - dopattern(pat, args, sz, params.op, needrex) -- Process pattern. - return - end - else -- Match common operand size. - local szp = sz - if szm == "" then szm = x64 and "qdwb" or "dwb" end -- Default sizes. - if prefix == "1" then szp = args[1].opsize; szmix = nil - elseif prefix == "2" then szp = args[2].opsize; szmix = nil end - if not szmix and (prefix == "." or match(szm, szp or "#")) then - dopattern(pat, args, szp, params.op, needrex) -- Process pattern. - return - end - end - gotmatch = true - end - end - - local msg = "bad operand mode" - if gotmatch then - if szmix then - msg = "mixed operand size" - else - msg = sz and "bad operand size" or "missing operand size" - end - end - - werror(msg.." in `"..opmodestr(params.op, args).."'") -end - ------------------------------------------------------------------------------- - --- x64-specific opcode for 64 bit immediates and displacements. -if x64 then - function map_op.mov64_2(params) - if not params then return { "reg, imm", "reg, [disp]", "[disp], reg" } end - if secpos+2 > maxsecpos then wflush() end - local opcode, op64, sz, rex, vreg - local op64 = match(params[1], "^%[%s*(.-)%s*%]$") - if op64 then - local a = parseoperand(params[2]) - if a.mode ~= "rmR" then werror("bad operand mode") end - sz = a.opsize - rex = sz == "q" and 8 or 0 - opcode = 0xa3 - else - op64 = match(params[2], "^%[%s*(.-)%s*%]$") - local a = parseoperand(params[1]) - if op64 then - if a.mode ~= "rmR" then werror("bad operand mode") end - sz = a.opsize - rex = sz == "q" and 8 or 0 - opcode = 0xa1 - else - if sub(a.mode, 1, 1) ~= "r" or a.opsize ~= "q" then - werror("bad operand mode") - end - op64 = params[2] - if a.reg == -1 then - vreg = a.vreg - opcode = 0xb8 - else - opcode = 0xb8 + band(a.reg, 7) - end - rex = a.reg > 7 and 9 or 8 - end - end - wputop(sz, opcode, rex) - if vreg then waction("VREG", vreg); wputxb(0) end - waction("IMM_D", format("(unsigned int)(%s)", op64)) - waction("IMM_D", format("(unsigned int)((%s)>>32)", op64)) - end -end - ------------------------------------------------------------------------------- - --- Pseudo-opcodes for data storage. -local function op_data(params) - if not params then return "imm..." end - local sz = sub(params.op, 2, 2) - if sz == "a" then sz = addrsize end - for _,p in ipairs(params) do - local a = parseoperand(p) - if sub(a.mode, 1, 1) ~= "i" or (a.opsize and a.opsize ~= sz) then - werror("bad mode or size in `"..p.."'") - end - if a.mode == "iJ" then - wputlabel("IMM_", a.imm, 1) - else - wputszarg(sz, a.imm) - end - if secpos+2 > maxsecpos then wflush() end - end -end - -map_op[".byte_*"] = op_data -map_op[".sbyte_*"] = op_data -map_op[".word_*"] = op_data -map_op[".dword_*"] = op_data -map_op[".aword_*"] = op_data - ------------------------------------------------------------------------------- - --- Pseudo-opcode to mark the position where the action list is to be emitted. -map_op[".actionlist_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeactions(out, name) end) -end - --- Pseudo-opcode to mark the position where the global enum is to be emitted. -map_op[".globals_1"] = function(params) - if not params then return "prefix" end - local prefix = params[1] -- No syntax check. You get to keep the pieces. 
- wline(function(out) writeglobals(out, prefix) end) -end - --- Pseudo-opcode to mark the position where the global names are to be emitted. -map_op[".globalnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeglobalnames(out, name) end) -end - --- Pseudo-opcode to mark the position where the extern names are to be emitted. -map_op[".externnames_1"] = function(params) - if not params then return "cvar" end - local name = params[1] -- No syntax check. You get to keep the pieces. - wline(function(out) writeexternnames(out, name) end) -end - ------------------------------------------------------------------------------- - --- Label pseudo-opcode (converted from trailing colon form). -map_op[".label_2"] = function(params) - if not params then return "[1-9] | ->global | =>pcexpr [, addr]" end - if secpos+2 > maxsecpos then wflush() end - local a = parseoperand(params[1]) - local mode, imm = a.mode, a.imm - if type(imm) == "number" and (mode == "iJ" or (imm >= 1 and imm <= 9)) then - -- Local label (1: ... 9:) or global label (->global:). - waction("LABEL_LG", nil, 1) - wputxb(imm) - elseif mode == "iJ" then - -- PC label (=>pcexpr:). - waction("LABEL_PC", imm) - else - werror("bad label definition") - end - -- SETLABEL must immediately follow LABEL_LG/LABEL_PC. - local addr = params[2] - if addr then - local a = parseoperand(addr) - if a.mode == "iPJ" then - waction("SETLABEL", a.imm) - else - werror("bad label assignment") - end - end -end -map_op[".label_1"] = map_op[".label_2"] - ------------------------------------------------------------------------------- - --- Alignment pseudo-opcode. -map_op[".align_1"] = function(params) - if not params then return "numpow2" end - if secpos+1 > maxsecpos then wflush() end - local align = tonumber(params[1]) or map_opsizenum[map_opsize[params[1]]] - if align then - local x = align - -- Must be a power of 2 in the range (2 ... 256). - for i=1,8 do - x = x / 2 - if x == 1 then - waction("ALIGN", nil, 1) - wputxb(align-1) -- Action byte is 2**n-1. - return - end - end - end - werror("bad alignment") -end - --- Spacing pseudo-opcode. -map_op[".space_2"] = function(params) - if not params then return "num [, filler]" end - if secpos+1 > maxsecpos then wflush() end - waction("SPACE", params[1]) - local fill = params[2] - if fill then - fill = tonumber(fill) - if not fill or fill < 0 or fill > 255 then werror("bad filler") end - end - wputxb(fill or 0) -end -map_op[".space_1"] = map_op[".space_2"] - ------------------------------------------------------------------------------- - --- Pseudo-opcode for (primitive) type definitions (map to C types). -map_op[".type_3"] = function(params, nparams) - if not params then - return nparams == 2 and "name, ctype" or "name, ctype, reg" - end - local name, ctype, reg = params[1], params[2], params[3] - if not match(name, "^[%a_][%w_]*$") then - werror("bad type name `"..name.."'") - end - local tp = map_type[name] - if tp then - werror("duplicate type `"..name.."'") - end - if reg and not map_reg_valid_base[reg] then - werror("bad base register `"..(map_reg_rev[reg] or reg).."'") - end - -- Add #type to defines. A bit unclean to put it in map_archdef. - map_archdef["#"..name] = "sizeof("..ctype..")" - -- Add new type and emit shortcut define. 
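A hypothetical usage sketch of the .type pseudo-opcode handled here; the struct and field names are invented, and the .dasc lines are quoted as Lua comments since no .dasc input is part of this patch:

  -- |.type FRAME, MyFrame, ebp        // declare: name, C type, default base register
  -- | mov eax, FRAME->count           // memory operand at offsetof(MyFrame, count)
  -- | mov FRAME[1].flags, edx         // array indexing scales by sizeof(MyFrame)
  -- | add esp, #FRAME                 // "#name" expands to sizeof(MyFrame)
  --
  -- For such a declaration the handler emits a shortcut define of the form:
  -- #define Dt1(_V) (int)(ptrdiff_t)&(((MyFrame *)0)_V)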
- local num = ctypenum + 1 - map_type[name] = { - ctype = ctype, - ctypefmt = format("Dt%X(%%s)", num), - reg = reg, - } - wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) - ctypenum = num -end -map_op[".type_2"] = map_op[".type_3"] - --- Dump type definitions. -local function dumptypes(out, lvl) - local t = {} - for name in pairs(map_type) do t[#t+1] = name end - sort(t) - out:write("Type definitions:\n") - for _,name in ipairs(t) do - local tp = map_type[name] - local reg = tp.reg and map_reg_rev[tp.reg] or "" - out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Set the current section. -function _M.section(num) - waction("SECTION") - wputxb(num) - wflush(true) -- SECTION is a terminal action. -end - ------------------------------------------------------------------------------- - --- Dump architecture description. -function _M.dumparch(out) - out:write(format("DynASM %s version %s, released %s\n\n", - _info.arch, _info.version, _info.release)) - dumpregs(out) - dumpactions(out) -end - --- Dump all user defined elements. -function _M.dumpdef(out, lvl) - dumptypes(out, lvl) - dumpglobals(out, lvl) - dumpexterns(out, lvl) -end - ------------------------------------------------------------------------------- - --- Pass callbacks from/to the DynASM core. -function _M.passcb(wl, we, wf, ww) - wline, werror, wfatal, wwarn = wl, we, wf, ww - return wflush -end - --- Setup the arch-specific module. -function _M.setup(arch, opt) - g_arch, g_opt = arch, opt -end - --- Merge the core maps and the arch-specific maps. -function _M.mergemaps(map_coreop, map_def) - setmetatable(map_op, { __index = map_coreop }) - setmetatable(map_def, { __index = map_archdef }) - return map_op, map_def -end - -return _M - ------------------------------------------------------------------------------- - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dynasm.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dynasm.lua deleted file mode 100644 index fffda7513c8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/dynasm/dynasm.lua +++ /dev/null @@ -1,1094 +0,0 @@ ------------------------------------------------------------------------------- --- DynASM. A dynamic assembler for code generation engines. --- Originally designed and implemented for LuaJIT. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- See below for full copyright notice. ------------------------------------------------------------------------------- - --- Application information. -local _info = { - name = "DynASM", - description = "A dynamic assembler for code generation engines", - version = "1.3.0", - vernum = 10300, - release = "2011-05-05", - author = "Mike Pall", - url = "http://luajit.org/dynasm.html", - license = "MIT", - copyright = [[ -Copyright (C) 2005-2015 Mike Pall. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -[ MIT license: http://www.opensource.org/licenses/mit-license.php ] -]], -} - --- Cache library functions. -local type, pairs, ipairs = type, pairs, ipairs -local pcall, error, assert = pcall, error, assert -local _s = string -local sub, match, gmatch, gsub = _s.sub, _s.match, _s.gmatch, _s.gsub -local format, rep, upper = _s.format, _s.rep, _s.upper -local _t = table -local insert, remove, concat, sort = _t.insert, _t.remove, _t.concat, _t.sort -local exit = os.exit -local io = io -local stdin, stdout, stderr = io.stdin, io.stdout, io.stderr - ------------------------------------------------------------------------------- - --- Program options. -local g_opt = {} - --- Global state for current file. -local g_fname, g_curline, g_indent, g_lineno, g_synclineno, g_arch -local g_errcount = 0 - --- Write buffer for output file. -local g_wbuffer, g_capbuffer - ------------------------------------------------------------------------------- - --- Write an output line (or callback function) to the buffer. -local function wline(line, needindent) - local buf = g_capbuffer or g_wbuffer - buf[#buf+1] = needindent and g_indent..line or line - g_synclineno = g_synclineno + 1 -end - --- Write assembler line as a comment, if requestd. -local function wcomment(aline) - if g_opt.comment then - wline(g_opt.comment..aline..g_opt.endcomment, true) - end -end - --- Resync CPP line numbers. -local function wsync() - if g_synclineno ~= g_lineno and g_opt.cpp then - wline("#line "..g_lineno..' "'..g_fname..'"') - g_synclineno = g_lineno - end -end - --- Dummy action flush function. Replaced with arch-specific function later. -local function wflush(term) -end - --- Dump all buffered output lines. -local function wdumplines(out, buf) - for _,line in ipairs(buf) do - if type(line) == "string" then - assert(out:write(line, "\n")) - else - -- Special callback to dynamically insert lines after end of processing. - line(out) - end - end -end - ------------------------------------------------------------------------------- - --- Emit an error. Processing continues with next statement. -local function werror(msg) - error(format("%s:%s: error: %s:\n%s", g_fname, g_lineno, msg, g_curline), 0) -end - --- Emit a fatal error. Processing stops. -local function wfatal(msg) - g_errcount = "fatal" - werror(msg) -end - --- Print a warning. Processing continues. 
-local function wwarn(msg) - stderr:write(format("%s:%s: warning: %s:\n%s\n", - g_fname, g_lineno, msg, g_curline)) -end - --- Print caught error message. But suppress excessive errors. -local function wprinterr(...) - if type(g_errcount) == "number" then - -- Regular error. - g_errcount = g_errcount + 1 - if g_errcount < 21 then -- Seems to be a reasonable limit. - stderr:write(...) - elseif g_errcount == 21 then - stderr:write(g_fname, - ":*: warning: too many errors (suppressed further messages).\n") - end - else - -- Fatal error. - stderr:write(...) - return true -- Stop processing. - end -end - ------------------------------------------------------------------------------- - --- Map holding all option handlers. -local opt_map = {} -local opt_current - --- Print error and exit with error status. -local function opterror(...) - stderr:write("dynasm.lua: ERROR: ", ...) - stderr:write("\n") - exit(1) -end - --- Get option parameter. -local function optparam(args) - local argn = args.argn - local p = args[argn] - if not p then - opterror("missing parameter for option `", opt_current, "'.") - end - args.argn = argn + 1 - return p -end - ------------------------------------------------------------------------------- - --- Core pseudo-opcodes. -local map_coreop = {} --- Dummy opcode map. Replaced by arch-specific map. -local map_op = {} - --- Forward declarations. -local dostmt -local readfile - ------------------------------------------------------------------------------- - --- Map for defines (initially empty, chains to arch-specific map). -local map_def = {} - --- Pseudo-opcode to define a substitution. -map_coreop[".define_2"] = function(params, nparams) - if not params then return nparams == 1 and "name" or "name, subst" end - local name, def = params[1], params[2] or "1" - if not match(name, "^[%a_][%w_]*$") then werror("bad or duplicate define") end - map_def[name] = def -end -map_coreop[".define_1"] = map_coreop[".define_2"] - --- Define a substitution on the command line. -function opt_map.D(args) - local namesubst = optparam(args) - local name, subst = match(namesubst, "^([%a_][%w_]*)=(.*)$") - if name then - map_def[name] = subst - elseif match(namesubst, "^[%a_][%w_]*$") then - map_def[namesubst] = "1" - else - opterror("bad define") - end -end - --- Undefine a substitution on the command line. -function opt_map.U(args) - local name = optparam(args) - if match(name, "^[%a_][%w_]*$") then - map_def[name] = nil - else - opterror("bad define") - end -end - --- Helper for definesubst. -local gotsubst - -local function definesubst_one(word) - local subst = map_def[word] - if subst then gotsubst = word; return subst else return word end -end - --- Iteratively substitute defines. -local function definesubst(stmt) - -- Limit number of iterations. - for i=1,100 do - gotsubst = false - stmt = gsub(stmt, "#?[%w_]+", definesubst_one) - if not gotsubst then break end - end - if gotsubst then wfatal("recursive define involving `"..gotsubst.."'") end - return stmt -end - --- Dump all defines. -local function dumpdefines(out, lvl) - local t = {} - for name in pairs(map_def) do - t[#t+1] = name - end - sort(t) - out:write("Defines:\n") - for _,name in ipairs(t) do - local subst = map_def[name] - if g_arch then subst = g_arch.revdef(subst) end - out:write(format(" %-20s %s\n", name, subst)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Support variables for conditional assembly. 
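Before the conditional-assembly implementation that follows, a hypothetical sketch of how .define and .if/.elif/.else/.endif look on the .dasc side; the symbols are invented and the lines are quoted as Lua comments:

  -- |.define BIGFRAME, 1              // or pass -D BIGFRAME to dynasm.lua
  -- |.if BIGFRAME
  -- |  sub esp, 256
  -- |.elif TINYFRAME
  -- |  sub esp, 16
  -- |.else
  -- |  sub esp, 64
  -- |.endif
  --
  -- The condition text is evaluated as a Lua expression with an empty environment,
  -- so undefined names read as nil (false) and a result of 0 also counts as false.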
-local condlevel = 0 -local condstack = {} - --- Evaluate condition with a Lua expression. Substitutions already performed. -local function cond_eval(cond) - local func, err - if setfenv then - func, err = loadstring("return "..cond, "=expr") - else - -- No globals. All unknown identifiers evaluate to nil. - func, err = load("return "..cond, "=expr", "t", {}) - end - if func then - if setfenv then - setfenv(func, {}) -- No globals. All unknown identifiers evaluate to nil. - end - local ok, res = pcall(func) - if ok then - if res == 0 then return false end -- Oh well. - return not not res - end - err = res - end - wfatal("bad condition: "..err) -end - --- Skip statements until next conditional pseudo-opcode at the same level. -local function stmtskip() - local dostmt_save = dostmt - local lvl = 0 - dostmt = function(stmt) - local op = match(stmt, "^%s*(%S+)") - if op == ".if" then - lvl = lvl + 1 - elseif lvl ~= 0 then - if op == ".endif" then lvl = lvl - 1 end - elseif op == ".elif" or op == ".else" or op == ".endif" then - dostmt = dostmt_save - dostmt(stmt) - end - end -end - --- Pseudo-opcodes for conditional assembly. -map_coreop[".if_1"] = function(params) - if not params then return "condition" end - local lvl = condlevel + 1 - local res = cond_eval(params[1]) - condlevel = lvl - condstack[lvl] = res - if not res then stmtskip() end -end - -map_coreop[".elif_1"] = function(params) - if not params then return "condition" end - if condlevel == 0 then wfatal(".elif without .if") end - local lvl = condlevel - local res = condstack[lvl] - if res then - if res == "else" then wfatal(".elif after .else") end - else - res = cond_eval(params[1]) - if res then - condstack[lvl] = res - return - end - end - stmtskip() -end - -map_coreop[".else_0"] = function(params) - if condlevel == 0 then wfatal(".else without .if") end - local lvl = condlevel - local res = condstack[lvl] - condstack[lvl] = "else" - if res then - if res == "else" then wfatal(".else after .else") end - stmtskip() - end -end - -map_coreop[".endif_0"] = function(params) - local lvl = condlevel - if lvl == 0 then wfatal(".endif without .if") end - condlevel = lvl - 1 -end - --- Check for unfinished conditionals. -local function checkconds() - if g_errcount ~= "fatal" and condlevel ~= 0 then - wprinterr(g_fname, ":*: error: unbalanced conditional\n") - end -end - ------------------------------------------------------------------------------- - --- Search for a file in the given path and open it for reading. -local function pathopen(path, name) - local dirsep = package and match(package.path, "\\") and "\\" or "/" - for _,p in ipairs(path) do - local fullname = p == "" and name or p..dirsep..name - local fin = io.open(fullname, "r") - if fin then - g_fname = fullname - return fin - end - end -end - --- Include a file. -map_coreop[".include_1"] = function(params) - if not params then return "filename" end - local name = params[1] - -- Save state. Ugly, I know. but upvalues are fast. - local gf, gl, gcl, gi = g_fname, g_lineno, g_curline, g_indent - -- Read the included file. - local fatal = readfile(pathopen(g_opt.include, name) or - wfatal("include file `"..name.."' not found")) - -- Restore state. - g_synclineno = -1 - g_fname, g_lineno, g_curline, g_indent = gf, gl, gcl, gi - if fatal then wfatal("in include file") end -end - --- Make .include and conditionals initially available, too. 
-map_op[".include_1"] = map_coreop[".include_1"] -map_op[".if_1"] = map_coreop[".if_1"] -map_op[".elif_1"] = map_coreop[".elif_1"] -map_op[".else_0"] = map_coreop[".else_0"] -map_op[".endif_0"] = map_coreop[".endif_0"] - ------------------------------------------------------------------------------- - --- Support variables for macros. -local mac_capture, mac_lineno, mac_name -local mac_active = {} -local mac_list = {} - --- Pseudo-opcode to define a macro. -map_coreop[".macro_*"] = function(mparams) - if not mparams then return "name [, params...]" end - -- Split off and validate macro name. - local name = remove(mparams, 1) - if not name then werror("missing macro name") end - if not (match(name, "^[%a_][%w_%.]*$") or match(name, "^%.[%w_%.]*$")) then - wfatal("bad macro name `"..name.."'") - end - -- Validate macro parameter names. - local mdup = {} - for _,mp in ipairs(mparams) do - if not match(mp, "^[%a_][%w_]*$") then - wfatal("bad macro parameter name `"..mp.."'") - end - if mdup[mp] then wfatal("duplicate macro parameter name `"..mp.."'") end - mdup[mp] = true - end - -- Check for duplicate or recursive macro definitions. - local opname = name.."_"..#mparams - if map_op[opname] or map_op[name.."_*"] then - wfatal("duplicate macro `"..name.."' ("..#mparams.." parameters)") - end - if mac_capture then wfatal("recursive macro definition") end - - -- Enable statement capture. - local lines = {} - mac_lineno = g_lineno - mac_name = name - mac_capture = function(stmt) -- Statement capture function. - -- Stop macro definition with .endmacro pseudo-opcode. - if not match(stmt, "^%s*.endmacro%s*$") then - lines[#lines+1] = stmt - return - end - mac_capture = nil - mac_lineno = nil - mac_name = nil - mac_list[#mac_list+1] = opname - -- Add macro-op definition. - map_op[opname] = function(params) - if not params then return mparams, lines end - -- Protect against recursive macro invocation. - if mac_active[opname] then wfatal("recursive macro invocation") end - mac_active[opname] = true - -- Setup substitution map. - local subst = {} - for i,mp in ipairs(mparams) do subst[mp] = params[i] end - local mcom - if g_opt.maccomment and g_opt.comment then - mcom = " MACRO "..name.." ("..#mparams..")" - wcomment("{"..mcom) - end - -- Loop through all captured statements - for _,stmt in ipairs(lines) do - -- Substitute macro parameters. - local st = gsub(stmt, "[%w_]+", subst) - st = definesubst(st) - st = gsub(st, "%s*%.%.%s*", "") -- Token paste a..b. - if mcom and sub(st, 1, 1) ~= "|" then wcomment(st) end - -- Emit statement. Use a protected call for better diagnostics. - local ok, err = pcall(dostmt, st) - if not ok then - -- Add the captured statement to the error. - wprinterr(err, "\n", g_indent, "| ", stmt, - "\t[MACRO ", name, " (", #mparams, ")]\n") - end - end - if mcom then wcomment("}"..mcom) end - mac_active[opname] = nil - end - end -end - --- An .endmacro pseudo-opcode outside of a macro definition is an error. -map_coreop[".endmacro_0"] = function(params) - wfatal(".endmacro without .macro") -end - --- Dump all macros and their contents (with -PP only). 
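A hypothetical illustration of the .macro capture and parameter substitution implemented above; the macro name, registers and offset are invented, and the lines are quoted as Lua comments:

  -- |.macro load_field, dst, src, ofs
  -- | mov dst, [src+ofs]
  -- |.endmacro
  --
  -- | load_field eax, ebx, 8          // expands to: mov eax, [ebx+8]
  --
  -- Parameters are replaced textually (whole [%w_]+ words), defines are substituted
  -- afterwards, and any "a .. b" token paste is collapsed, as in the capture loop above.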
-local function dumpmacros(out, lvl) - sort(mac_list) - out:write("Macros:\n") - for _,opname in ipairs(mac_list) do - local name = sub(opname, 1, -3) - local params, lines = map_op[opname]() - out:write(format(" %-20s %s\n", name, concat(params, ", "))) - if lvl > 1 then - for _,line in ipairs(lines) do - out:write(" |", line, "\n") - end - out:write("\n") - end - end - out:write("\n") -end - --- Check for unfinished macro definitions. -local function checkmacros() - if mac_capture then - wprinterr(g_fname, ":", mac_lineno, - ": error: unfinished .macro `", mac_name ,"'\n") - end -end - ------------------------------------------------------------------------------- - --- Support variables for captures. -local cap_lineno, cap_name -local cap_buffers = {} -local cap_used = {} - --- Start a capture. -map_coreop[".capture_1"] = function(params) - if not params then return "name" end - wflush() - local name = params[1] - if not match(name, "^[%a_][%w_]*$") then - wfatal("bad capture name `"..name.."'") - end - if cap_name then - wfatal("already capturing to `"..cap_name.."' since line "..cap_lineno) - end - cap_name = name - cap_lineno = g_lineno - -- Create or continue a capture buffer and start the output line capture. - local buf = cap_buffers[name] - if not buf then buf = {}; cap_buffers[name] = buf end - g_capbuffer = buf - g_synclineno = 0 -end - --- Stop a capture. -map_coreop[".endcapture_0"] = function(params) - wflush() - if not cap_name then wfatal(".endcapture without a valid .capture") end - cap_name = nil - cap_lineno = nil - g_capbuffer = nil - g_synclineno = 0 -end - --- Dump a capture buffer. -map_coreop[".dumpcapture_1"] = function(params) - if not params then return "name" end - wflush() - local name = params[1] - if not match(name, "^[%a_][%w_]*$") then - wfatal("bad capture name `"..name.."'") - end - cap_used[name] = true - wline(function(out) - local buf = cap_buffers[name] - if buf then wdumplines(out, buf) end - end) - g_synclineno = 0 -end - --- Dump all captures and their buffers (with -PP only). -local function dumpcaptures(out, lvl) - out:write("Captures:\n") - for name,buf in pairs(cap_buffers) do - out:write(format(" %-20s %4s)\n", name, "("..#buf)) - if lvl > 1 then - local bar = rep("=", 76) - out:write(" ", bar, "\n") - for _,line in ipairs(buf) do - out:write(" ", line, "\n") - end - out:write(" ", bar, "\n\n") - end - end - out:write("\n") -end - --- Check for unfinished or unused captures. -local function checkcaptures() - if cap_name then - wprinterr(g_fname, ":", cap_lineno, - ": error: unfinished .capture `", cap_name,"'\n") - return - end - for name in pairs(cap_buffers) do - if not cap_used[name] then - wprinterr(g_fname, ":*: error: missing .dumpcapture ", name ,"\n") - end - end -end - ------------------------------------------------------------------------------- - --- Sections names. -local map_sections = {} - --- Pseudo-opcode to define code sections. --- TODO: Data sections, BSS sections. Needs extra C code and API. -map_coreop[".section_*"] = function(params) - if not params then return "name..." 
end - if #map_sections > 0 then werror("duplicate section definition") end - wflush() - for sn,name in ipairs(params) do - local opname = "."..name.."_0" - if not match(name, "^[%a][%w_]*$") or - map_op[opname] or map_op["."..name.."_*"] then - werror("bad section name `"..name.."'") - end - map_sections[#map_sections+1] = name - wline(format("#define DASM_SECTION_%s\t%d", upper(name), sn-1)) - map_op[opname] = function(params) g_arch.section(sn-1) end - end - wline(format("#define DASM_MAXSECTION\t\t%d", #map_sections)) -end - --- Dump all sections. -local function dumpsections(out, lvl) - out:write("Sections:\n") - for _,name in ipairs(map_sections) do - out:write(format(" %s\n", name)) - end - out:write("\n") -end - ------------------------------------------------------------------------------- - --- Replacement for customized Lua, which lacks the package library. -local prefix = "" -if not require then - function require(name) - local fp = assert(io.open(prefix..name..".lua")) - local s = fp:read("*a") - assert(fp:close()) - return assert(loadstring(s, "@"..name..".lua"))() - end -end - --- Load architecture-specific module. -local function loadarch(arch) - if not match(arch, "^[%w_]+$") then return "bad arch name" end - local ok, m_arch = pcall(require, "dasm_"..arch) - if not ok then return "cannot load module: "..m_arch end - g_arch = m_arch - wflush = m_arch.passcb(wline, werror, wfatal, wwarn) - m_arch.setup(arch, g_opt) - map_op, map_def = m_arch.mergemaps(map_coreop, map_def) -end - --- Dump architecture description. -function opt_map.dumparch(args) - local name = optparam(args) - if not g_arch then - local err = loadarch(name) - if err then opterror(err) end - end - - local t = {} - for name in pairs(map_coreop) do t[#t+1] = name end - for name in pairs(map_op) do t[#t+1] = name end - sort(t) - - local out = stdout - local _arch = g_arch._info - out:write(format("%s version %s, released %s, %s\n", - _info.name, _info.version, _info.release, _info.url)) - g_arch.dumparch(out) - - local pseudo = true - out:write("Pseudo-Opcodes:\n") - for _,sname in ipairs(t) do - local name, nparam = match(sname, "^(.+)_([0-9%*])$") - if name then - if pseudo and sub(name, 1, 1) ~= "." then - out:write("\nOpcodes:\n") - pseudo = false - end - local f = map_op[sname] - local s - if nparam ~= "*" then nparam = nparam + 0 end - if nparam == 0 then - s = "" - elseif type(f) == "string" then - s = map_op[".template__"](nil, f, nparam) - else - s = f(nil, nparam) - end - if type(s) == "table" then - for _,s2 in ipairs(s) do - out:write(format(" %-12s %s\n", name, s2)) - end - else - out:write(format(" %-12s %s\n", name, s)) - end - end - end - out:write("\n") - exit(0) -end - --- Pseudo-opcode to set the architecture. --- Only initially available (map_op is replaced when called). -map_op[".arch_1"] = function(params) - if not params then return "name" end - local err = loadarch(params[1]) - if err then wfatal(err) end - wline(format("#if DASM_VERSION != %d", _info.vernum)) - wline('#error "Version mismatch between DynASM and included encoding engine"') - wline("#endif") -end - --- Dummy .arch pseudo-opcode to improve the error report. -map_coreop[".arch_1"] = function(params) - if not params then return "name" end - wfatal("duplicate .arch statement") -end - ------------------------------------------------------------------------------- - --- Dummy pseudo-opcode. Don't confuse '.nop' with 'nop'. 
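To tie the pseudo-opcodes together, a hypothetical .dasc prologue as this frontend would see it; the variable name and label prefix are invented, and the lines are quoted as Lua comments:

  -- |.arch x86
  -- |.section code                    // also emits #define DASM_SECTION_CODE / DASM_MAXSECTION
  -- |.actionlist my_actionlist        // C variable that receives the action list
  -- |.globals MYGLOB_                 // prefix for the global-label enum
  --
  -- With the version recorded above, the .arch handler also emits:
  -- #if DASM_VERSION != 10300
  -- #error "Version mismatch between DynASM and included encoding engine"
  -- #endif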
-map_coreop[".nop_*"] = function(params) - if not params then return "[ignored...]" end -end - --- Pseudo-opcodes to raise errors. -map_coreop[".error_1"] = function(params) - if not params then return "message" end - werror(params[1]) -end - -map_coreop[".fatal_1"] = function(params) - if not params then return "message" end - wfatal(params[1]) -end - --- Dump all user defined elements. -local function dumpdef(out) - local lvl = g_opt.dumpdef - if lvl == 0 then return end - dumpsections(out, lvl) - dumpdefines(out, lvl) - if g_arch then g_arch.dumpdef(out, lvl) end - dumpmacros(out, lvl) - dumpcaptures(out, lvl) -end - ------------------------------------------------------------------------------- - --- Helper for splitstmt. -local splitlvl - -local function splitstmt_one(c) - if c == "(" then - splitlvl = ")"..splitlvl - elseif c == "[" then - splitlvl = "]"..splitlvl - elseif c == "{" then - splitlvl = "}"..splitlvl - elseif c == ")" or c == "]" or c == "}" then - if sub(splitlvl, 1, 1) ~= c then werror("unbalanced (), [] or {}") end - splitlvl = sub(splitlvl, 2) - elseif splitlvl == "" then - return " \0 " - end - return c -end - --- Split statement into (pseudo-)opcode and params. -local function splitstmt(stmt) - -- Convert label with trailing-colon into .label statement. - local label = match(stmt, "^%s*(.+):%s*$") - if label then return ".label", {label} end - - -- Split at commas and equal signs, but obey parentheses and brackets. - splitlvl = "" - stmt = gsub(stmt, "[,%(%)%[%]{}]", splitstmt_one) - if splitlvl ~= "" then werror("unbalanced () or []") end - - -- Split off opcode. - local op, other = match(stmt, "^%s*([^%s%z]+)%s*(.*)$") - if not op then werror("bad statement syntax") end - - -- Split parameters. - local params = {} - for p in gmatch(other, "%s*(%Z+)%z?") do - params[#params+1] = gsub(p, "%s+$", "") - end - if #params > 16 then werror("too many parameters") end - - params.op = op - return op, params -end - --- Process a single statement. -dostmt = function(stmt) - -- Ignore empty statements. - if match(stmt, "^%s*$") then return end - - -- Capture macro defs before substitution. - if mac_capture then return mac_capture(stmt) end - stmt = definesubst(stmt) - - -- Emit C code without parsing the line. - if sub(stmt, 1, 1) == "|" then - local tail = sub(stmt, 2) - wflush() - if sub(tail, 1, 2) == "//" then wcomment(tail) else wline(tail, true) end - return - end - - -- Split into (pseudo-)opcode and params. - local op, params = splitstmt(stmt) - - -- Get opcode handler (matching # of parameters or generic handler). - local f = map_op[op.."_"..#params] or map_op[op.."_*"] - if not f then - if not g_arch then wfatal("first statement must be .arch") end - -- Improve error report. - for i=0,9 do - if map_op[op.."_"..i] then - werror("wrong number of parameters for `"..op.."'") - end - end - werror("unknown statement `"..op.."'") - end - - -- Call opcode handler or special handler for template strings. - if type(f) == "string" then - map_op[".template__"](params, f) - else - f(params) - end -end - --- Process a single line. -local function doline(line) - if g_opt.flushline then wflush() end - - -- Assembler line? - local indent, aline = match(line, "^(%s*)%|(.*)$") - if not aline then - -- No, plain C code line, need to flush first. - wflush() - wsync() - wline(line, false) - return - end - - g_indent = indent -- Remember current line indentation. - - -- Emit C code (even from macros). Avoids echo and line parsing. 
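A minimal sketch (not from the original file) of how the statement splitter above carves up an assembler line; the instruction text is arbitrary:

  -- doline() strips the leading '|', optionally echoes the line as a comment,
  -- drops trailing '//' comments, then splits at ';' into statements:
  -- | mov eax, [ebx+4]; ret
  --
  -- splitstmt("mov eax, [ebx+4]") yields
  --   op     = "mov"
  --   params = { "eax", "[ebx+4]", op = "mov" }
  -- while a trailing-colon statement such as "->entry:" is rewritten to the
  -- .label pseudo-opcode handled by the architecture module.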
- if sub(aline, 1, 1) == "|" then - if not mac_capture then - wsync() - elseif g_opt.comment then - wsync() - wcomment(aline) - end - dostmt(aline) - return - end - - -- Echo assembler line as a comment. - if g_opt.comment then - wsync() - wcomment(aline) - end - - -- Strip assembler comments. - aline = gsub(aline, "//.*$", "") - - -- Split line into statements at semicolons. - if match(aline, ";") then - for stmt in gmatch(aline, "[^;]+") do dostmt(stmt) end - else - dostmt(aline) - end -end - ------------------------------------------------------------------------------- - --- Write DynASM header. -local function dasmhead(out) - out:write(format([[ -/* -** This file has been pre-processed with DynASM. -** %s -** DynASM version %s, DynASM %s version %s -** DO NOT EDIT! The original file is in "%s". -*/ - -]], _info.url, - _info.version, g_arch._info.arch, g_arch._info.version, - g_fname)) -end - --- Read input file. -readfile = function(fin) - g_indent = "" - g_lineno = 0 - g_synclineno = -1 - - -- Process all lines. - for line in fin:lines() do - g_lineno = g_lineno + 1 - g_curline = line - local ok, err = pcall(doline, line) - if not ok and wprinterr(err, "\n") then return true end - end - wflush() - - -- Close input file. - assert(fin == stdin or fin:close()) -end - --- Write output file. -local function writefile(outfile) - local fout - - -- Open output file. - if outfile == nil or outfile == "-" then - fout = stdout - else - fout = assert(io.open(outfile, "w")) - end - - -- Write all buffered lines - wdumplines(fout, g_wbuffer) - - -- Close output file. - assert(fout == stdout or fout:close()) - - -- Optionally dump definitions. - dumpdef(fout == stdout and stderr or stdout) -end - --- Translate an input file to an output file. -local function translate(infile, outfile) - g_wbuffer = {} - g_indent = "" - g_lineno = 0 - g_synclineno = -1 - - -- Put header. - wline(dasmhead) - - -- Read input file. - local fin - if infile == "-" then - g_fname = "(stdin)" - fin = stdin - else - g_fname = infile - fin = assert(io.open(infile, "r")) - end - readfile(fin) - - -- Check for errors. - if not g_arch then - wprinterr(g_fname, ":*: error: missing .arch directive\n") - end - checkconds() - checkmacros() - checkcaptures() - - if g_errcount ~= 0 then - stderr:write(g_fname, ":*: info: ", g_errcount, " error", - (type(g_errcount) == "number" and g_errcount > 1) and "s" or "", - " in input file -- no output file generated.\n") - dumpdef(stderr) - exit(1) - end - - -- Write output file. - writefile(outfile) -end - ------------------------------------------------------------------------------- - --- Print help text. -function opt_map.help() - stdout:write("DynASM -- ", _info.description, ".\n") - stdout:write("DynASM ", _info.version, " ", _info.release, " ", _info.url, "\n") - stdout:write[[ - -Usage: dynasm [OPTION]... INFILE.dasc|- - - -h, --help Display this help text. - -V, --version Display version and copyright information. - - -o, --outfile FILE Output file name (default is stdout). - -I, --include DIR Add directory to the include search path. - - -c, --ccomment Use /* */ comments for assembler lines. - -C, --cppcomment Use // comments for assembler lines (default). - -N, --nocomment Suppress assembler lines in output. - -M, --maccomment Show macro expansions as comments (default off). - - -L, --nolineno Suppress CPP line number information in output. - -F, --flushline Flush action list for every line. - - -D NAME[=SUBST] Define a substitution. - -U NAME Undefine a substitution. 
- - -P, --dumpdef Dump defines, macros, etc. Repeat for more output. - -A, --dumparch ARCH Load architecture ARCH and dump description. -]] - exit(0) -end - --- Print version information. -function opt_map.version() - stdout:write(format("%s version %s, released %s\n%s\n\n%s", - _info.name, _info.version, _info.release, _info.url, _info.copyright)) - exit(0) -end - --- Misc. options. -function opt_map.outfile(args) g_opt.outfile = optparam(args) end -function opt_map.include(args) insert(g_opt.include, 1, optparam(args)) end -function opt_map.ccomment() g_opt.comment = "/*|"; g_opt.endcomment = " */" end -function opt_map.cppcomment() g_opt.comment = "//|"; g_opt.endcomment = "" end -function opt_map.nocomment() g_opt.comment = false end -function opt_map.maccomment() g_opt.maccomment = true end -function opt_map.nolineno() g_opt.cpp = false end -function opt_map.flushline() g_opt.flushline = true end -function opt_map.dumpdef() g_opt.dumpdef = g_opt.dumpdef + 1 end - ------------------------------------------------------------------------------- - --- Short aliases for long options. -local opt_alias = { - h = "help", ["?"] = "help", V = "version", - o = "outfile", I = "include", - c = "ccomment", C = "cppcomment", N = "nocomment", M = "maccomment", - L = "nolineno", F = "flushline", - P = "dumpdef", A = "dumparch", -} - --- Parse single option. -local function parseopt(opt, args) - opt_current = #opt == 1 and "-"..opt or "--"..opt - local f = opt_map[opt] or opt_map[opt_alias[opt]] - if not f then - opterror("unrecognized option `", opt_current, "'. Try `--help'.\n") - end - f(args) -end - --- Parse arguments. -local function parseargs(args) - -- Default options. - g_opt.comment = "//|" - g_opt.endcomment = "" - g_opt.cpp = true - g_opt.dumpdef = 0 - g_opt.include = { "" } - - -- Process all option arguments. - args.argn = 1 - repeat - local a = args[args.argn] - if not a then break end - local lopt, opt = match(a, "^%-(%-?)(.+)") - if not opt then break end - args.argn = args.argn + 1 - if lopt == "" then - -- Loop through short options. - for o in gmatch(opt, ".") do parseopt(o, args) end - else - -- Long option. - parseopt(opt, args) - end - until false - - -- Check for proper number of arguments. - local nargs = #args - args.argn + 1 - if nargs ~= 1 then - if nargs == 0 then - if g_opt.dumpdef > 0 then return dumpdef(stdout) end - end - opt_map.help() - end - - -- Translate a single input file to a single output file - -- TODO: Handle multiple files? - translate(args[args.argn], g_opt.outfile) -end - ------------------------------------------------------------------------------- - --- Add the directory dynasm.lua resides in to the Lua module search path. -local arg = arg -if arg and arg[0] then - prefix = match(arg[0], "^(.*[/\\])") - if package and prefix then package.path = prefix.."?.lua;"..package.path end -end - --- Start DynASM. -parseargs{...} - ------------------------------------------------------------------------------- - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.1 b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.1 deleted file mode 100644 index fd38b0a92c7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.1 +++ /dev/null @@ -1,88 +0,0 @@ -.TH luajit 1 "" "" "LuaJIT documentation" -.SH NAME -luajit \- Just-In-Time Compiler for the Lua Language -\fB -.SH SYNOPSIS -.B luajit -[\fIoptions\fR]... [\fIscript\fR [\fIargs\fR]...] 
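The closing lines of dynasm.lua above extend package.path with the directory taken from arg[0], so the architecture modules that sit next to the script (dasm_x86.lua, dasm_arm.lua, ...) can be required no matter where the tool is started from. A minimal standalone sketch of that bootstrap idiom, using a made-up module name, looks like this:

-- Derive the script's own directory from arg[0] and put it in front of
-- package.path, so sibling modules resolve regardless of the current
-- working directory. "dasm_demo" is a hypothetical module name used only
-- for illustration; with no such file present the fallback branch prints.
local dir = arg and arg[0] and arg[0]:match("^(.*[/\\])")
if dir and package then
  package.path = dir .. "?.lua;" .. package.path
end
local ok = pcall(require, "dasm_demo")
print(ok and "sibling module loaded" or "no dasm_demo next to this script")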
-.SH "WEB SITE" -.IR http://luajit.org -.SH DESCRIPTION -.PP -This is the command-line program to run Lua programs with \fBLuaJIT\fR. -.PP -\fBLuaJIT\fR is a just-in-time (JIT) compiler for the Lua language. -The virtual machine (VM) is based on a fast interpreter combined with -a trace compiler. It can significantly improve the performance of Lua programs. -.PP -\fBLuaJIT\fR is API\- and ABI-compatible with the VM of the standard -Lua\ 5.1 interpreter. When embedding the VM into an application, -the built library can be used as a drop-in replacement. -.SH OPTIONS -.TP -.BI "\-e " chunk -Run the given chunk of Lua code. -.TP -.BI "\-l " library -Load the named library, just like \fBrequire("\fR\fIlibrary\fR\fB")\fR. -.TP -.BI "\-b " ... -Save or list bytecode. Run without arguments to get help on options. -.TP -.BI "\-j " command -Perform LuaJIT control command (optional space after \fB\-j\fR). -.TP -.BI "\-O" [opt] -Control LuaJIT optimizations. -.TP -.B "\-i" -Run in interactive mode. -.TP -.B "\-v" -Show \fBLuaJIT\fR version. -.TP -.B "\-E" -Ignore environment variables. -.TP -.B "\-\-" -Stop processing options. -.TP -.B "\-" -Read script from stdin instead. -.PP -After all options are processed, the given \fIscript\fR is run. -The arguments are passed in the global \fIarg\fR table. -.PP -Interactive mode is only entered, if no \fIscript\fR and no \fB\-e\fR -option is given. Interactive mode can be left with EOF (\fICtrl\-Z\fB). -.SH EXAMPLES -.TP -luajit hello.lua world - -Prints "Hello world", assuming \fIhello.lua\fR contains: -.br - print("Hello", arg[1]) -.TP -luajit \-e "local x=0; for i=1,1e9 do x=x+i end; print(x)" - -Calculates the sum of the numbers from 1 to 1000000000. -.br -And finishes in a reasonable amount of time, too. -.TP -luajit \-jv \-e "for i=1,10 do for j=1,10 do for k=1,100 do end end end" - -Runs some nested loops and shows the resulting traces. -.SH COPYRIGHT -.PP -\fBLuaJIT\fR is Copyright \(co 2005-2015 Mike Pall. -.br -\fBLuaJIT\fR is open source software, released under the MIT license. -.SH SEE ALSO -.PP -More details in the provided HTML docs or at: -.IR http://luajit.org -.br -More about the Lua language can be found at: -.IR http://lua.org/docs.html -.PP -lua(1) diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.pc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.pc deleted file mode 100644 index c99057f40c3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/etc/luajit.pc +++ /dev/null @@ -1,25 +0,0 @@ -# Package information for LuaJIT to be used by pkg-config. -majver=2 -minver=1 -relver=0 -version=${majver}.${minver}.${relver}-beta1 -abiver=5.1 - -prefix=/usr/local -multilib=lib -exec_prefix=${prefix} -libdir=${exec_prefix}/${multilib} -libname=luajit-${abiver} -includedir=${prefix}/include/luajit-${majver}.${minver} - -INSTALL_LMOD=${prefix}/share/lua/${abiver} -INSTALL_CMOD=${prefix}/${multilib}/lua/${abiver} - -Name: LuaJIT -Description: Just-in-time compiler for Lua -URL: http://luajit.org -Version: ${version} -Requires: -Libs: -L${libdir} -l${libname} -Libs.private: -Wl,-E -lm -ldl -Cflags: -I${includedir} diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile deleted file mode 100644 index 7f86e98ec03..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile +++ /dev/null @@ -1,697 +0,0 @@ -############################################################################## -# LuaJIT Makefile. Requires GNU Make. 
-# -# Please read doc/install.html before changing any variables! -# -# Suitable for POSIX platforms (Linux, *BSD, OSX etc.). -# Also works with MinGW and Cygwin on Windows. -# Please check msvcbuild.bat for building with MSVC on Windows. -# -# Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -############################################################################## - -MAJVER= 2 -MINVER= 1 -RELVER= 0 -ABIVER= 5.1 -NODOTABIVER= 51 - -############################################################################## -############################# COMPILER OPTIONS ############################# -############################################################################## -# These options mainly affect the speed of the JIT compiler itself, not the -# speed of the JIT-compiled code. Turn any of the optional settings on by -# removing the '#' in front of them. Make sure you force a full recompile -# with "make clean", followed by "make" if you change any options. -# -# LuaJIT builds as a native 32 or 64 bit binary by default. -CC= gcc -# -# Use this if you want to force a 32 bit build on a 64 bit multilib OS. -#CC= gcc -m32 -# -# Since the assembler part does NOT maintain a frame pointer, it's pointless -# to slow down the C part by not omitting it. Debugging, tracebacks and -# unwinding are not affected -- the assembler part has frame unwind -# information and GCC emits it where needed (x64) or with -g (see CCDEBUG). -CCOPT= -O2 -fomit-frame-pointer -# Use this if you want to generate a smaller binary (but it's slower): -#CCOPT= -Os -fomit-frame-pointer -# Note: it's no longer recommended to use -O3 with GCC 4.x. -# The I-Cache bloat usually outweighs the benefits from aggressive inlining. -# -# Target-specific compiler options: -# -# x86/x64 only: For GCC 4.2 or higher and if you don't intend to distribute -# the binaries to a different machine you could also use: -march=native -# -CCOPT_x86= -march=i686 -msse -msse2 -mfpmath=sse -CCOPT_x64= -CCOPT_arm= -CCOPT_arm64= -CCOPT_ppc= -CCOPT_mips= -# -CCDEBUG= -# Uncomment the next line to generate debug information: -#CCDEBUG= -g -# -CCWARN= -Wall -# Uncomment the next line to enable more warnings: -#CCWARN+= -Wextra -Wdeclaration-after-statement -Wredundant-decls -Wshadow -Wpointer-arith -# -############################################################################## - -############################################################################## -################################ BUILD MODE ################################ -############################################################################## -# The default build mode is mixed mode on POSIX. On Windows this is the same -# as dynamic mode. -# -# Mixed mode creates a static + dynamic library and a statically linked luajit. -BUILDMODE= mixed -# -# Static mode creates a static library and a statically linked luajit. -#BUILDMODE= static -# -# Dynamic mode creates a dynamic library and a dynamically linked luajit. -# Note: this executable will only run when the library is installed! -#BUILDMODE= dynamic -# -############################################################################## - -############################################################################## -################################# FEATURES ################################# -############################################################################## -# Enable/disable these features as needed, but make sure you force a full -# recompile with "make clean", followed by "make". 
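The feature switches listed just below only exist at compile time, so it can be handy to confirm from a finished binary whether, say, the FFI and the JIT compiler actually went in. A minimal probe in Lua, assuming nothing beyond the stock jit module and require() (the script name is made up):

-- featurecheck.lua (hypothetical name); run with:  luajit featurecheck.lua
print("version:", jit and jit.version or "not running under LuaJIT")
print("arch:   ", jit and jit.arch or "?")
-- The first return value of jit.status() is false when the JIT compiler is
-- off, e.g. in a -DLUAJIT_DISABLE_JIT build or after jit.off().
if jit then print("JIT on: ", (jit.status())) end
-- require("ffi") raises an error when LuaJIT was built with
-- -DLUAJIT_DISABLE_FFI, so pcall() turns that into a boolean.
local ok = pcall(require, "ffi")
print("FFI:    ", ok)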
-XCFLAGS= -# -# Permanently disable the FFI extension to reduce the size of the LuaJIT -# executable. But please consider that the FFI library is compiled-in, -# but NOT loaded by default. It only allocates any memory, if you actually -# make use of it. -#XCFLAGS+= -DLUAJIT_DISABLE_FFI -# -# Features from Lua 5.2 that are unlikely to break existing code are -# enabled by default. Some other features that *might* break some existing -# code (e.g. __pairs or os.execute() return values) can be enabled here. -# Note: this does not provide full compatibility with Lua 5.2 at this time. -XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT -# -# Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. -#XCFLAGS+= -DLUAJIT_DISABLE_JIT -# -# Some architectures (e.g. PPC) can use either single-number (1) or -# dual-number (2) mode. Uncomment one of these lines to override the -# default mode. Please see LJ_ARCH_NUMMODE in lj_arch.h for details. -#XCFLAGS+= -DLUAJIT_NUMMODE=1 -#XCFLAGS+= -DLUAJIT_NUMMODE=2 -# -############################################################################## - -############################################################################## -############################ DEBUGGING SUPPORT ############################# -############################################################################## -# Enable these options as needed, but make sure you force a full recompile -# with "make clean", followed by "make". -# Note that most of these are NOT suitable for benchmarking or release mode! -# -# Use the system provided memory allocator (realloc) instead of the -# bundled memory allocator. This is slower, but sometimes helpful for -# debugging. This option cannot be enabled on x64, since realloc usually -# doesn't return addresses in the right address range. -# OTOH this option is mandatory for Valgrind's memcheck tool on x64 and -# the only way to get useful results from it for all other architectures. -#XCFLAGS+= -DLUAJIT_USE_SYSMALLOC -# -# This define is required to run LuaJIT under Valgrind. The Valgrind -# header files must be installed. You should enable debug information, too. -# Use --suppressions=lj.supp to avoid some false positives. -#XCFLAGS+= -DLUAJIT_USE_VALGRIND -# -# This is the client for the GDB JIT API. GDB 7.0 or higher is required -# to make use of it. See lj_gdbjit.c for details. Enabling this causes -# a non-negligible overhead, even when not running under GDB. -#XCFLAGS+= -DLUAJIT_USE_GDBJIT -# -# Turn on assertions for the Lua/C API to debug problems with lua_* calls. -# This is rather slow -- use only while developing C libraries/embeddings. -#XCFLAGS+= -DLUA_USE_APICHECK -# -# Turn on assertions for the whole LuaJIT VM. This significantly slows down -# everything. Use only if you suspect a problem with LuaJIT itself. -#XCFLAGS+= -DLUA_USE_ASSERT -# -############################################################################## -# You probably don't need to change anything below this line! -############################################################################## - -############################################################################## -# Flags and options for host and target. 
-############################################################################## - -# You can override the following variables at the make command line: -# CC HOST_CC STATIC_CC DYNAMIC_CC -# CFLAGS HOST_CFLAGS TARGET_CFLAGS -# LDFLAGS HOST_LDFLAGS TARGET_LDFLAGS TARGET_SHLDFLAGS -# LIBS HOST_LIBS TARGET_LIBS -# CROSS HOST_SYS TARGET_SYS TARGET_FLAGS -# -# Cross-compilation examples: -# make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows -# make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu- - -ASOPTIONS= $(CCOPT) $(CCWARN) $(XCFLAGS) $(CFLAGS) -CCOPTIONS= $(CCDEBUG) $(ASOPTIONS) -LDOPTIONS= $(CCDEBUG) $(LDFLAGS) - -HOST_CC= $(CC) -HOST_RM= rm -f -# If left blank, minilua is built and used. You can supply an installed -# copy of (plain) Lua 5.1 or 5.2, plus Lua BitOp. E.g. with: HOST_LUA=lua -HOST_LUA= - -HOST_XCFLAGS= -I. -HOST_XLDFLAGS= -HOST_XLIBS= -HOST_ACFLAGS= $(CCOPTIONS) $(HOST_XCFLAGS) $(TARGET_ARCH) $(HOST_CFLAGS) -HOST_ALDFLAGS= $(LDOPTIONS) $(HOST_XLDFLAGS) $(HOST_LDFLAGS) -HOST_ALIBS= $(HOST_XLIBS) $(LIBS) $(HOST_LIBS) - -STATIC_CC = $(CROSS)$(CC) -DYNAMIC_CC = $(CROSS)$(CC) -fPIC -TARGET_CC= $(STATIC_CC) -TARGET_STCC= $(STATIC_CC) -TARGET_DYNCC= $(DYNAMIC_CC) -TARGET_LD= $(CROSS)$(CC) -TARGET_AR= $(CROSS)ar rcus -TARGET_STRIP= $(CROSS)strip - -TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib) -TARGET_SONAME= libluajit-$(ABIVER).so.$(MAJVER) -TARGET_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).dylib -TARGET_DYLIBPATH= $(TARGET_LIBPATH)/$(TARGET_DYLIBNAME) -TARGET_DLLNAME= lua$(NODOTABIVER).dll -TARGET_XSHLDFLAGS= -shared -fPIC -Wl,-soname,$(TARGET_SONAME) -TARGET_DYNXLDOPTS= - -TARGET_LFSFLAGS= -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -TARGET_XCFLAGS= $(TARGET_LFSFLAGS) -U_FORTIFY_SOURCE -TARGET_XLDFLAGS= -TARGET_XLIBS= -lm -TARGET_TCFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) -TARGET_ACFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) -TARGET_ASFLAGS= $(ASOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) -TARGET_ALDFLAGS= $(LDOPTIONS) $(TARGET_XLDFLAGS) $(TARGET_FLAGS) $(TARGET_LDFLAGS) -TARGET_ASHLDFLAGS= $(LDOPTIONS) $(TARGET_XSHLDFLAGS) $(TARGET_FLAGS) $(TARGET_SHLDFLAGS) -TARGET_ALIBS= $(TARGET_XLIBS) $(LIBS) $(TARGET_LIBS) - -TARGET_TESTARCH=$(shell $(TARGET_CC) $(TARGET_TCFLAGS) -E lj_arch.h -dM) -ifneq (,$(findstring LJ_TARGET_X64 ,$(TARGET_TESTARCH))) - TARGET_LJARCH= x64 -else -ifneq (,$(findstring LJ_TARGET_X86 ,$(TARGET_TESTARCH))) - TARGET_LJARCH= x86 -else -ifneq (,$(findstring LJ_TARGET_ARM ,$(TARGET_TESTARCH))) - TARGET_LJARCH= arm -else -ifneq (,$(findstring LJ_TARGET_ARM64 ,$(TARGET_TESTARCH))) - TARGET_LJARCH= arm64 -else -ifneq (,$(findstring LJ_TARGET_PPC ,$(TARGET_TESTARCH))) - ifneq (,$(findstring LJ_LE 1,$(TARGET_TESTARCH))) - TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_LE - else - TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_BE - endif - TARGET_LJARCH= ppc -else -ifneq (,$(findstring LJ_TARGET_MIPS ,$(TARGET_TESTARCH))) - ifneq (,$(findstring MIPSEL ,$(TARGET_TESTARCH))) - TARGET_ARCH= -D__MIPSEL__=1 - endif - TARGET_LJARCH= mips -else - $(error Unsupported target architecture) -endif -endif -endif -endif -endif -endif - -ifneq (,$(findstring LJ_TARGET_PS3 1,$(TARGET_TESTARCH))) - TARGET_SYS= PS3 - TARGET_ARCH+= -D__CELLOS_LV2__ - TARGET_XCFLAGS+= -DLUAJIT_USE_SYSMALLOC - TARGET_XLIBS+= -lpthread -endif - -TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH)) -TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH)) - -ifneq (,$(PREFIX)) -ifneq (/usr/local,$(PREFIX)) - 
TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\" - ifneq (/usr,$(PREFIX)) - TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) - endif -endif -endif -ifneq (,$(MULTILIB)) - TARGET_XCFLAGS+= -DLUA_MULTILIB=\"$(MULTILIB)\" -endif -ifneq (,$(LMULTILIB)) - TARGET_XCFLAGS+= -DLUA_LMULTILIB=\"$(LMULTILIB)\" -endif - -############################################################################## -# System detection. -############################################################################## - -ifeq (Windows,$(findstring Windows,$(OS))$(MSYSTEM)$(TERM)) - HOST_SYS= Windows - HOST_RM= del -else - HOST_SYS:= $(shell uname -s) - ifneq (,$(findstring MINGW,$(HOST_SYS))) - HOST_SYS= Windows - HOST_MSYS= mingw - endif - ifneq (,$(findstring CYGWIN,$(HOST_SYS))) - HOST_SYS= Windows - HOST_MSYS= cygwin - endif -endif - -TARGET_SYS?= $(HOST_SYS) -ifeq (Windows,$(TARGET_SYS)) - TARGET_STRIP+= --strip-unneeded - TARGET_XSHLDFLAGS= -shared - TARGET_DYNXLDOPTS= -else -ifeq (,$(shell $(TARGET_CC) -o /dev/null -c -x c /dev/null -fno-stack-protector 2>/dev/null || echo 1)) - TARGET_XCFLAGS+= -fno-stack-protector -endif -ifeq (Darwin,$(TARGET_SYS)) - ifeq (,$(MACOSX_DEPLOYMENT_TARGET)) - export MACOSX_DEPLOYMENT_TARGET=10.4 - endif - TARGET_STRIP+= -x - TARGET_AR+= 2>/dev/null - TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC - TARGET_DYNXLDOPTS= - TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER) - ifeq (x64,$(TARGET_LJARCH)) - TARGET_XLDFLAGS+= -pagezero_size 10000 -image_base 100000000 - TARGET_XSHLDFLAGS+= -image_base 7fff04c4a000 - endif -else -ifeq (iOS,$(TARGET_SYS)) - TARGET_STRIP+= -x - TARGET_AR+= 2>/dev/null - TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC - TARGET_DYNXLDOPTS= - TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER) - ifeq (arm64,$(TARGET_LJARCH)) - TARGET_XCFLAGS+= -fno-omit-frame-pointer - endif -else - ifneq (SunOS,$(TARGET_SYS)) - ifneq (PS3,$(TARGET_SYS)) - TARGET_XLDFLAGS+= -Wl,-E - endif - endif - ifeq (Linux,$(TARGET_SYS)) - TARGET_XLIBS+= -ldl - endif - ifeq (GNU/kFreeBSD,$(TARGET_SYS)) - TARGET_XLIBS+= -ldl - endif -endif -endif -endif - -ifneq ($(HOST_SYS),$(TARGET_SYS)) - ifeq (Windows,$(TARGET_SYS)) - HOST_XCFLAGS+= -malign-double -DLUAJIT_OS=LUAJIT_OS_WINDOWS - else - ifeq (Linux,$(TARGET_SYS)) - HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_LINUX - else - ifeq (Darwin,$(TARGET_SYS)) - HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX - else - ifeq (iOS,$(TARGET_SYS)) - HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX - else - HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OTHER - endif - endif - endif - endif -endif - -ifneq (,$(CCDEBUG)) - TARGET_STRIP= @: -endif - -############################################################################## -# Files and pathnames. 
-############################################################################## - -MINILUA_O= host/minilua.o -MINILUA_LIBS= -lm -MINILUA_T= host/minilua -MINILUA_X= $(MINILUA_T) - -ifeq (,$(HOST_LUA)) - HOST_LUA= $(MINILUA_X) - DASM_DEP= $(MINILUA_T) -endif - -DASM_DIR= ../dynasm -DASM= $(HOST_LUA) $(DASM_DIR)/dynasm.lua -DASM_XFLAGS= -DASM_AFLAGS= -DASM_ARCH= $(TARGET_LJARCH) - -ifneq (,$(findstring LJ_ARCH_BITS 64,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D P64 -endif -ifneq (,$(findstring LJ_HASJIT 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D JIT -endif -ifneq (,$(findstring LJ_HASFFI 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D FFI -endif -ifneq (,$(findstring LJ_DUALNUM 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D DUALNUM -endif -ifneq (,$(findstring LJ_ARCH_HASFPU 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D FPU - TARGET_ARCH+= -DLJ_ARCH_HASFPU=1 -else - TARGET_ARCH+= -DLJ_ARCH_HASFPU=0 -endif -ifeq (,$(findstring LJ_ABI_SOFTFP 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D HFABI - TARGET_ARCH+= -DLJ_ABI_SOFTFP=0 -else - TARGET_ARCH+= -DLJ_ABI_SOFTFP=1 -endif -ifneq (,$(findstring LJ_NO_UNWIND 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D NO_UNWIND - TARGET_ARCH+= -DLUAJIT_NO_UNWIND -endif -DASM_AFLAGS+= -D VER=$(subst LJ_ARCH_VERSION_,,$(filter LJ_ARCH_VERSION_%,$(subst LJ_ARCH_VERSION ,LJ_ARCH_VERSION_,$(TARGET_TESTARCH)))) -ifeq (Windows,$(TARGET_SYS)) - DASM_AFLAGS+= -D WIN -endif -ifeq (x64,$(TARGET_LJARCH)) - ifeq (,$(findstring LJ_FR2 1,$(TARGET_TESTARCH))) - DASM_ARCH= x86 - endif -else -ifeq (arm,$(TARGET_LJARCH)) - ifeq (iOS,$(TARGET_SYS)) - DASM_AFLAGS+= -D IOS - endif -else -ifeq (ppc,$(TARGET_LJARCH)) - ifneq (,$(findstring LJ_ARCH_SQRT 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D SQRT - endif - ifneq (,$(findstring LJ_ARCH_ROUND 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D ROUND - endif - ifneq (,$(findstring LJ_ARCH_PPC32ON64 1,$(TARGET_TESTARCH))) - DASM_AFLAGS+= -D GPR64 - endif - ifeq (PS3,$(TARGET_SYS)) - DASM_AFLAGS+= -D PPE -D TOC - endif - ifneq (,$(findstring LJ_ARCH_PPC64 ,$(TARGET_TESTARCH))) - DASM_ARCH= ppc64 - endif -endif -endif -endif - -DASM_FLAGS= $(DASM_XFLAGS) $(DASM_AFLAGS) -DASM_DASC= vm_$(DASM_ARCH).dasc - -BUILDVM_O= host/buildvm.o host/buildvm_asm.o host/buildvm_peobj.o \ - host/buildvm_lib.o host/buildvm_fold.o -BUILDVM_T= host/buildvm -BUILDVM_X= $(BUILDVM_T) - -HOST_O= $(MINILUA_O) $(BUILDVM_O) -HOST_T= $(MINILUA_T) $(BUILDVM_T) - -LJVM_S= lj_vm.S -LJVM_O= lj_vm.o -LJVM_BOUT= $(LJVM_S) -LJVM_MODE= elfasm - -LJLIB_O= lib_base.o lib_math.o lbitlib.o lib_bit.o lib_string.o lib_table.o \ - lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o -LJLIB_C= $(LJLIB_O:.o=.c) - -LJCORE_O= lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o lj_buf.o \ - lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \ - lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o lj_strscan.o \ - lj_strfmt.o lj_api.o lj_profile.o \ - lj_lex.o lj_parse.o lj_bcread.o lj_bcwrite.o lj_load.o \ - lj_ir.o lj_opt_mem.o lj_opt_fold.o lj_opt_narrow.o \ - lj_opt_dce.o lj_opt_loop.o lj_opt_split.o lj_opt_sink.o \ - lj_mcode.o lj_snap.o lj_record.o lj_crecord.o lj_ffrecord.o \ - lj_asm.o lj_trace.o lj_gdbjit.o \ - lj_ctype.o lj_cdata.o lj_cconv.o lj_ccall.o lj_ccallback.o \ - lj_carith.o lj_clib.o lj_cparse.o \ - lj_lib.o lj_alloc.o lib_aux.o \ - $(LJLIB_O) lib_init.o - -LJVMCORE_O= $(LJVM_O) $(LJCORE_O) -LJVMCORE_DYNO= $(LJVMCORE_O:.o=_dyn.o) - -LIB_VMDEF= jit/vmdef.lua -LIB_VMDEFP= $(LIB_VMDEF) - -LUAJIT_O= luajit.o -LUAJIT_A= libluajit.a -LUAJIT_SO= libluajit.so -LUAJIT_T= luajit - -ALL_T= $(LUAJIT_T) 
$(LUAJIT_A) $(LUAJIT_SO) $(HOST_T) -ALL_HDRGEN= lj_bcdef.h lj_ffdef.h lj_libdef.h lj_recdef.h lj_folddef.h \ - host/buildvm_arch.h -ALL_GEN= $(LJVM_S) $(ALL_HDRGEN) $(LIB_VMDEFP) -WIN_RM= *.obj *.lib *.exp *.dll *.exe *.manifest *.pdb *.ilk -ALL_RM= $(ALL_T) $(ALL_GEN) *.o host/*.o $(WIN_RM) - -############################################################################## -# Build mode handling. -############################################################################## - -# Mixed mode defaults. -TARGET_O= $(LUAJIT_A) -TARGET_T= $(LUAJIT_T) $(LUAJIT_SO) -TARGET_DEP= $(LIB_VMDEF) $(LUAJIT_SO) - -ifeq (Windows,$(TARGET_SYS)) - TARGET_DYNCC= $(STATIC_CC) - LJVM_MODE= peobj - LJVM_BOUT= $(LJVM_O) - LUAJIT_T= luajit.exe - ifeq (cygwin,$(HOST_MSYS)) - LUAJIT_SO= cyg$(TARGET_DLLNAME) - else - LUAJIT_SO= $(TARGET_DLLNAME) - endif - # Mixed mode is not supported on Windows. And static mode doesn't work well. - # C modules cannot be loaded, because they bind to lua51.dll. - ifneq (static,$(BUILDMODE)) - BUILDMODE= dynamic - TARGET_XCFLAGS+= -DLUA_BUILD_AS_DLL - endif -endif -ifeq (Darwin,$(TARGET_SYS)) - LJVM_MODE= machasm -endif -ifeq (iOS,$(TARGET_SYS)) - LJVM_MODE= machasm -endif -ifeq (SunOS,$(TARGET_SYS)) - BUILDMODE= static -endif -ifeq (PS3,$(TARGET_SYS)) - BUILDMODE= static -endif - -ifeq (Windows,$(HOST_SYS)) - MINILUA_T= host/minilua.exe - BUILDVM_T= host/buildvm.exe - ifeq (,$(HOST_MSYS)) - MINILUA_X= host\minilua - BUILDVM_X= host\buildvm - ALL_RM:= $(subst /,\,$(ALL_RM)) - endif -endif - -ifeq (static,$(BUILDMODE)) - TARGET_DYNCC= @: - TARGET_T= $(LUAJIT_T) - TARGET_DEP= $(LIB_VMDEF) -else -ifeq (dynamic,$(BUILDMODE)) - ifneq (Windows,$(TARGET_SYS)) - TARGET_CC= $(DYNAMIC_CC) - endif - TARGET_DYNCC= @: - LJVMCORE_DYNO= $(LJVMCORE_O) - TARGET_O= $(LUAJIT_SO) - TARGET_XLDFLAGS+= $(TARGET_DYNXLDOPTS) -else -ifeq (Darwin,$(TARGET_SYS)) - TARGET_DYNCC= @: - LJVMCORE_DYNO= $(LJVMCORE_O) -endif -ifeq (iOS,$(TARGET_SYS)) - TARGET_DYNCC= @: - LJVMCORE_DYNO= $(LJVMCORE_O) -endif -endif -endif - -Q= @ -E= @echo -#Q= -#E= @: - -############################################################################## -# Make targets. -############################################################################## - -default all: $(TARGET_T) - -amalg: - @grep "^[+|]" ljamalg.c - $(MAKE) all "LJCORE_O=ljamalg.o" - -clean: - $(HOST_RM) $(ALL_RM) - -libbc: - ./$(LUAJIT_T) host/genlibbc.lua -o host/buildvm_libbc.h $(LJLIB_C) - $(MAKE) all - -depend: - @for file in $(ALL_HDRGEN); do \ - test -f $$file || touch $$file; \ - done - @$(HOST_CC) $(HOST_ACFLAGS) -MM *.c host/*.c | \ - sed -e "s| [^ ]*/dasm_\S*\.h||g" \ - -e "s|^\([^l ]\)|host/\1|" \ - -e "s| lj_target_\S*\.h| lj_target_*.h|g" \ - -e "s| lj_emit_\S*\.h| lj_emit_*.h|g" \ - -e "s| lj_asm_\S*\.h| lj_asm_*.h|g" >Makefile.dep - @for file in $(ALL_HDRGEN); do \ - test -s $$file || $(HOST_RM) $$file; \ - done - -.PHONY: default all amalg clean libbc depend - -############################################################################## -# Rules for generated files. 
-############################################################################## - -$(MINILUA_T): $(MINILUA_O) - $(E) "HOSTLINK $@" - $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(MINILUA_O) $(MINILUA_LIBS) $(HOST_ALIBS) - -host/buildvm_arch.h: $(DASM_DASC) $(DASM_DEP) - $(E) "DYNASM $@" - $(Q)$(DASM) $(DASM_FLAGS) -o $@ $(DASM_DASC) - -host/buildvm.o: $(DASM_DIR)/dasm_*.h - -$(BUILDVM_T): $(BUILDVM_O) - $(E) "HOSTLINK $@" - $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(BUILDVM_O) $(HOST_ALIBS) - -$(LJVM_BOUT): $(BUILDVM_T) - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m $(LJVM_MODE) -o $@ - -lj_bcdef.h: $(BUILDVM_T) $(LJLIB_C) - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m bcdef -o $@ $(LJLIB_C) - -lj_ffdef.h: $(BUILDVM_T) $(LJLIB_C) - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m ffdef -o $@ $(LJLIB_C) - -lj_libdef.h: $(BUILDVM_T) $(LJLIB_C) - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m libdef -o $@ $(LJLIB_C) - -lj_recdef.h: $(BUILDVM_T) $(LJLIB_C) - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m recdef -o $@ $(LJLIB_C) - -$(LIB_VMDEF): $(BUILDVM_T) $(LJLIB_C) - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m vmdef -o $(LIB_VMDEFP) $(LJLIB_C) - -lj_folddef.h: $(BUILDVM_T) lj_opt_fold.c - $(E) "BUILDVM $@" - $(Q)$(BUILDVM_X) -m folddef -o $@ lj_opt_fold.c - -############################################################################## -# Object file rules. -############################################################################## - -%.o: %.c - $(E) "CC $@" - $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $< - $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $< - -%.o: %.S - $(E) "ASM $@" - $(Q)$(TARGET_DYNCC) $(TARGET_ASFLAGS) -c -o $(@:.o=_dyn.o) $< - $(Q)$(TARGET_CC) $(TARGET_ASFLAGS) -c -o $@ $< - -$(LUAJIT_O): - $(E) "CC $@" - $(Q)$(TARGET_STCC) $(TARGET_ACFLAGS) -c -o $@ $< - -$(HOST_O): %.o: %.c - $(E) "HOSTCC $@" - $(Q)$(HOST_CC) $(HOST_ACFLAGS) -c -o $@ $< - -include Makefile.dep - -############################################################################## -# Target file rules. -############################################################################## - -$(LUAJIT_A): $(LJVMCORE_O) - $(E) "AR $@" - $(Q)$(TARGET_AR) $@ $(LJVMCORE_O) - -# The dependency on _O, but linking with _DYNO is intentional. 
-$(LUAJIT_SO): $(LJVMCORE_O) - $(E) "DYNLINK $@" - $(Q)$(TARGET_LD) $(TARGET_ASHLDFLAGS) -o $@ $(LJVMCORE_DYNO) $(TARGET_ALIBS) - $(Q)$(TARGET_STRIP) $@ - -$(LUAJIT_T): $(TARGET_O) $(LUAJIT_O) $(TARGET_DEP) - $(E) "LINK $@" - $(Q)$(TARGET_LD) $(TARGET_ALDFLAGS) -o $@ $(LUAJIT_O) $(TARGET_O) $(TARGET_ALIBS) - $(Q)$(TARGET_STRIP) $@ - $(E) "OK Successfully built LuaJIT" - -############################################################################## diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile.dep b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile.dep deleted file mode 100644 index ccad82c7137..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/Makefile.dep +++ /dev/null @@ -1,245 +0,0 @@ -lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \ - lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \ - lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h lj_alloc.h -lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ - lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \ - lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \ - lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \ - lj_strfmt.h lj_lib.h lj_libdef.h -lbitlib.o: lbitlib.c lua.h luaconf.h lauxlib.h lualib.h -lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ - lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_strscan.h \ - lj_strfmt.h lj_ctype.h lj_cdata.h lj_cconv.h lj_carith.h lj_ff.h \ - lj_ffdef.h lj_lib.h lj_libdef.h -lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ - lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \ - lj_libdef.h -lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \ - lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \ - lj_ccallback.h lj_clib.h lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h \ - lj_libdef.h -lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h -lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_state.h \ - lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h -lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \ - lj_state.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \ - lj_target.h lj_target_*.h lj_trace.h lj_dispatch.h lj_traceerr.h \ - lj_vm.h lj_vmevent.h lj_lib.h luajit.h lj_libdef.h -lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ - lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_libdef.h -lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_lib.h \ - lj_libdef.h -lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ - lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h -lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ - lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \ - lj_tab.h lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h \ - lj_char.h lj_strfmt.h lj_lib.h lj_libdef.h -lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ - lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \ - lj_tab.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h -lj_alloc.o: lj_alloc.c lj_def.h lua.h 
luaconf.h lj_arch.h lj_alloc.h -lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \ - lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \ - lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h lj_strfmt.h -lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \ - lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \ - lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \ - lj_asm_*.h -lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \ - lj_bcdef.h -lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_bc.h \ - lj_ctype.h lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h \ - lj_strfmt.h -lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_buf.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h \ - lj_ir.h lj_strfmt.h lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h -lj_buf.o: lj_buf.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_strfmt.h -lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ir.h lj_ctype.h \ - lj_cconv.h lj_cdata.h lj_carith.h lj_strscan.h -lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h \ - lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \ - lj_traceerr.h -lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \ - lj_bc.h lj_ctype.h lj_cconv.h lj_ccall.h lj_ccallback.h lj_target.h \ - lj_target_*.h lj_mcode.h lj_jit.h lj_ir.h lj_trace.h lj_dispatch.h \ - lj_traceerr.h lj_vm.h -lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \ - lj_ccallback.h -lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h -lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h -lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \ - lj_cdata.h lj_clib.h lj_strfmt.h -lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_ctype.h lj_cparse.h \ - lj_frame.h lj_bc.h lj_vm.h lj_char.h lj_strscan.h lj_strfmt.h -lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_gc.h \ - lj_cdata.h lj_cparse.h lj_cconv.h lj_carith.h lj_clib.h lj_ccall.h \ - lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ - lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \ - lj_crecord.h lj_strfmt.h -lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_strfmt.h lj_ctype.h \ - lj_ccallback.h -lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_gc.h lj_str.h lj_tab.h \ - lj_state.h lj_frame.h lj_bc.h lj_strfmt.h lj_jit.h lj_ir.h -lj_dispatch.o: lj_dispatch.c lj_obj.h 
lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_func.h lj_tab.h \ - lj_meta.h lj_debug.h lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h \ - lj_strfmt.h lj_jit.h lj_ir.h lj_ccallback.h lj_ctype.h lj_trace.h \ - lj_dispatch.h lj_traceerr.h lj_profile.h lj_vm.h luajit.h -lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \ - lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \ - lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \ - lj_traceerr.h lj_vm.h lj_strfmt.h -lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \ - lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ - lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \ - lj_vm.h lj_strscan.h lj_strfmt.h lj_recdef.h -lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \ - lj_traceerr.h lj_vm.h -lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h lj_udata.h \ - lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h \ - lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h -lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_buf.h \ - lj_str.h lj_strfmt.h lj_jit.h lj_ir.h lj_dispatch.h -lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_buf.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \ - lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h \ - lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lib.h -lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h \ - lualib.h lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h \ - lj_strfmt.h -lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \ - lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lex.h \ - lj_bcdump.h lj_lib.h -lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_func.h \ - lj_frame.h lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h -lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h \ - lj_dispatch.h lj_bc.h lj_traceerr.h lj_vm.h -lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_meta.h lj_frame.h \ - lj_bc.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lib.h -lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h -lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_ir.h lj_jit.h lj_iropt.h -lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_buf.h lj_gc.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h \ - lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h \ - lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_folddef.h -lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h lj_jit.h \ - lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h \ - lj_vm.h -lj_opt_mem.o: 
lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_ircall.h -lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \ - lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \ - lj_traceerr.h lj_vm.h lj_strscan.h -lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h -lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \ - lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h \ - lj_jit.h lj_ircall.h lj_iropt.h lj_vm.h -lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_str.h lj_tab.h \ - lj_func.h lj_state.h lj_bc.h lj_ctype.h lj_strfmt.h lj_lex.h lj_parse.h \ - lj_vm.h lj_vmevent.h -lj_profile.o: lj_profile.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_buf.h lj_gc.h lj_str.h lj_frame.h lj_bc.h lj_debug.h lj_dispatch.h \ - lj_jit.h lj_ir.h lj_trace.h lj_traceerr.h lj_profile.h luajit.h -lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \ - lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_debug.h lj_ir.h lj_jit.h \ - lj_ircall.h lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h \ - lj_record.h lj_ffrecord.h lj_snap.h lj_vm.h -lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \ - lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \ - lj_target_*.h lj_ctype.h lj_cdata.h -lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h \ - lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h \ - lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_alloc.h luajit.h -lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_str.h lj_char.h -lj_strfmt.o: lj_strfmt.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_buf.h lj_gc.h lj_str.h lj_state.h lj_char.h lj_strfmt.h -lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_char.h lj_strscan.h -lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ - lj_err.h lj_errmsg.h lj_tab.h -lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \ - lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \ - lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \ - lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h -lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_gc.h lj_udata.h -lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_str.h lj_tab.h lj_state.h lj_dispatch.h lj_bc.h lj_jit.h lj_ir.h \ - lj_vm.h lj_vmevent.h -lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ - lj_ir.h lj_vm.h -ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \ - lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h \ - lj_func.h lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h \ - lj_cdata.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h \ - lj_vm.h lj_err.c lj_debug.h lj_ff.h lj_ffdef.h lj_strfmt.h lj_char.c \ - lj_char.h lj_bc.c lj_bcdef.h lj_obj.c lj_buf.c lj_str.c lj_tab.c \ - lj_func.c lj_udata.c lj_meta.c lj_strscan.h lj_lib.h 
lj_debug.c \ - lj_state.c lj_lex.h lj_alloc.h luajit.h lj_dispatch.c lj_ccallback.h \ - lj_profile.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c \ - lj_strfmt.c lj_api.c lj_profile.c lj_lex.c lualib.h lj_parse.h \ - lj_parse.c lj_bcread.c lj_bcdump.h lj_bcwrite.c lj_load.c lj_ctype.c \ - lj_cdata.c lj_cconv.h lj_cconv.c lj_ccall.c lj_ccall.h lj_ccallback.c \ - lj_target.h lj_target_*.h lj_mcode.h lj_carith.c lj_carith.h lj_clib.c \ - lj_clib.h lj_cparse.c lj_cparse.h lj_lib.c lj_ir.c lj_ircall.h \ - lj_iropt.h lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c \ - lj_opt_dce.c lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c \ - lj_mcode.c lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \ - lj_crecord.h lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h lj_emit_*.h \ - lj_asm_*.h lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c \ - lib_base.c lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c \ - lib_os.c lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c \ - lib_init.c -luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h -host/buildvm.o: host/buildvm.c host/buildvm.h lj_def.h lua.h luaconf.h \ - lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_gc.h lj_obj.h lj_bc.h lj_ir.h \ - lj_ircall.h lj_ir.h lj_jit.h lj_frame.h lj_bc.h lj_dispatch.h lj_ctype.h \ - lj_gc.h lj_ccall.h lj_ctype.h luajit.h \ - host/buildvm_arch.h lj_traceerr.h -host/buildvm_asm.o: host/buildvm_asm.c host/buildvm.h lj_def.h lua.h luaconf.h \ - lj_arch.h lj_bc.h lj_def.h lj_arch.h -host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \ - luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h -host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \ - lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_bc.h lj_lib.h lj_obj.h \ - host/buildvm_libbc.h -host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \ - luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h -host/minilua.o: host/minilua.c diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/README b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/README deleted file mode 100644 index abfcdaa76e9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/README +++ /dev/null @@ -1,4 +0,0 @@ -The files in this directory are only used during the build process of LuaJIT. -For cross-compilation, they must be executed on the host, not on the target. - -These files should NOT be installed! diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.c deleted file mode 100644 index 8584e614d09..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.c +++ /dev/null @@ -1,518 +0,0 @@ -/* -** LuaJIT VM builder. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** This is a tool to build the hand-tuned assembler code required for -** LuaJIT's bytecode interpreter. It supports a variety of output formats -** to feed different toolchains (see usage() below). -** -** This tool is not particularly optimized because it's only used while -** _building_ LuaJIT. There's no point in distributing or installing it. -** Only the object code generated by this tool is linked into LuaJIT. -** -** Caveat: some memory is not free'd, error handling is lazy. -** It's a one-shot tool -- any effort fixing this would be wasted. 
-*/ - -#include "buildvm.h" -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_bc.h" -#include "lj_ir.h" -#include "lj_ircall.h" -#include "lj_frame.h" -#include "lj_dispatch.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_ccall.h" -#endif -#include "luajit.h" - -#if defined(_WIN32) -#include -#include -#endif - -/* ------------------------------------------------------------------------ */ - -/* DynASM glue definitions. */ -#define Dst ctx -#define Dst_DECL BuildCtx *ctx -#define Dst_REF (ctx->D) -#define DASM_CHECKS 1 - -#include "../dynasm/dasm_proto.h" - -/* Glue macros for DynASM. */ -static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type); - -#define DASM_EXTERN(ctx, addr, idx, type) \ - collect_reloc(ctx, addr, idx, type) - -/* ------------------------------------------------------------------------ */ - -/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */ -#define DASM_ALIGNED_WRITES 1 - -/* Embed architecture-specific DynASM encoder. */ -#if LJ_TARGET_X86ORX64 -#include "../dynasm/dasm_x86.h" -#elif LJ_TARGET_ARM -#include "../dynasm/dasm_arm.h" -#elif LJ_TARGET_ARM64 -#include "../dynasm/dasm_arm64.h" -#elif LJ_TARGET_PPC -#include "../dynasm/dasm_ppc.h" -#elif LJ_TARGET_MIPS -#include "../dynasm/dasm_mips.h" -#else -#error "No support for this architecture (yet)" -#endif - -/* Embed generated architecture-specific backend. */ -#include "buildvm_arch.h" - -/* ------------------------------------------------------------------------ */ - -void owrite(BuildCtx *ctx, const void *ptr, size_t sz) -{ - if (fwrite(ptr, 1, sz, ctx->fp) != sz) { - fprintf(stderr, "Error: cannot write to output file: %s\n", - strerror(errno)); - exit(1); - } -} - -/* ------------------------------------------------------------------------ */ - -/* Emit code as raw bytes. Only used for DynASM debugging. */ -static void emit_raw(BuildCtx *ctx) -{ - owrite(ctx, ctx->code, ctx->codesz); -} - -/* -- Build machine code -------------------------------------------------- */ - -static const char *sym_decorate(BuildCtx *ctx, - const char *prefix, const char *suffix) -{ - char name[256]; - char *p; -#if LJ_64 - const char *symprefix = ctx->mode == BUILD_machasm ? "_" : ""; -#elif LJ_TARGET_XBOX360 - const char *symprefix = ""; -#else - const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : ""; -#endif - sprintf(name, "%s%s%s", symprefix, prefix, suffix); - p = strchr(name, '@'); - if (p) { -#if LJ_TARGET_X86ORX64 - if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj)) - name[0] = '@'; - else - *p = '\0'; -#elif LJ_TARGET_PPC && !LJ_TARGET_OSX && !LJ_TARGET_CONSOLE - /* Keep @plt etc. */ -#else - *p = '\0'; -#endif - } - p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */ - strcpy(p, name); - return p; -} - -#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1) - -static int relocmap[NRELOCSYM]; - -/* Collect external relocations. 
*/ -static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type) -{ - if (ctx->nreloc >= BUILD_MAX_RELOC) { - fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n"); - exit(1); - } - if (relocmap[idx] < 0) { - relocmap[idx] = ctx->nrelocsym; - ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]); - ctx->nrelocsym++; - } - ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code); - ctx->reloc[ctx->nreloc].sym = relocmap[idx]; - ctx->reloc[ctx->nreloc].type = type; - ctx->nreloc++; -#if LJ_TARGET_XBOX360 - return (int)(ctx->code - addr) + 4; /* Encode symbol offset of .text. */ -#else - return 0; /* Encode symbol offset of 0. */ -#endif -} - -/* Naive insertion sort. Performance doesn't matter here. */ -static void sym_insert(BuildCtx *ctx, int32_t ofs, - const char *prefix, const char *suffix) -{ - ptrdiff_t i = ctx->nsym++; - while (i > 0) { - if (ctx->sym[i-1].ofs <= ofs) - break; - ctx->sym[i] = ctx->sym[i-1]; - i--; - } - ctx->sym[i].ofs = ofs; - ctx->sym[i].name = sym_decorate(ctx, prefix, suffix); -} - -/* Build the machine code. */ -static int build_code(BuildCtx *ctx) -{ - int status; - int i; - - /* Initialize DynASM structures. */ - ctx->nglob = GLOB__MAX; - ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *)); - memset(ctx->glob, 0, ctx->nglob*sizeof(void *)); - ctx->nreloc = 0; - - ctx->globnames = globnames; - ctx->extnames = extnames; - ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *)); - ctx->nrelocsym = 0; - for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1; - - ctx->dasm_ident = DASM_IDENT; - ctx->dasm_arch = DASM_ARCH; - - dasm_init(Dst, DASM_MAXSECTION); - dasm_setupglobal(Dst, ctx->glob, ctx->nglob); - dasm_setup(Dst, build_actionlist); - - /* Call arch-specific backend to emit the code. */ - ctx->npc = build_backend(ctx); - - /* Finalize the code. */ - (void)dasm_checkstep(Dst, -1); - if ((status = dasm_link(Dst, &ctx->codesz))) return status; - ctx->code = (uint8_t *)malloc(ctx->codesz); - if ((status = dasm_encode(Dst, (void *)ctx->code))) return status; - - /* Allocate symbol table and bytecode offsets. */ - ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin"); - ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym)); - ctx->nsym = 0; - ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t)); - - /* Collect the opcodes (PC labels). */ - for (i = 0; i < ctx->npc; i++) { - int32_t ofs = dasm_getpclabel(Dst, i); - if (ofs < 0) return 0x22000000|i; - ctx->bc_ofs[i] = ofs; - if ((LJ_HASJIT || - !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP || - i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) && - (LJ_HASFFI || i != BC_KCDATA)) - sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]); - } - - /* Collect the globals (named labels). */ - for (i = 0; i < ctx->nglob; i++) { - const char *gl = globnames[i]; - int len = (int)strlen(gl); - if (!ctx->glob[i]) { - fprintf(stderr, "Error: undefined global %s\n", gl); - exit(2); - } - /* Skip the _Z symbols. */ - if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z')) - sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code), - LABEL_PREFIX, globnames[i]); - } - - /* Close the address range. 
*/ - sym_insert(ctx, (int32_t)ctx->codesz, "", ""); - ctx->nsym--; - - dasm_free(Dst); - - return 0; -} - -/* -- Generate VM enums --------------------------------------------------- */ - -const char *const bc_names[] = { -#define BCNAME(name, ma, mb, mc, mt) #name, -BCDEF(BCNAME) -#undef BCNAME - NULL -}; - -const char *const ir_names[] = { -#define IRNAME(name, m, m1, m2) #name, -IRDEF(IRNAME) -#undef IRNAME - NULL -}; - -const char *const irt_names[] = { -#define IRTNAME(name, size) #name, -IRTDEF(IRTNAME) -#undef IRTNAME - NULL -}; - -const char *const irfpm_names[] = { -#define FPMNAME(name) #name, -IRFPMDEF(FPMNAME) -#undef FPMNAME - NULL -}; - -const char *const irfield_names[] = { -#define FLNAME(name, ofs) #name, -IRFLDEF(FLNAME) -#undef FLNAME - NULL -}; - -const char *const ircall_names[] = { -#define IRCALLNAME(cond, name, nargs, kind, type, flags) #name, -IRCALLDEF(IRCALLNAME) -#undef IRCALLNAME - NULL -}; - -static const char *const trace_errors[] = { -#define TREDEF(name, msg) msg, -#include "lj_traceerr.h" - NULL -}; - -static const char *lower(char *buf, const char *s) -{ - char *p = buf; - while (*s) { - *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s; - s++; - } - *p = '\0'; - return buf; -} - -/* Emit C source code for bytecode-related definitions. */ -static void emit_bcdef(BuildCtx *ctx) -{ - int i; - fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); - fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n"); - for (i = 0; i < ctx->npc; i++) { - if (i != 0) - fprintf(ctx->fp, ",\n"); - fprintf(ctx->fp, "%d", ctx->bc_ofs[i]); - } -} - -/* Emit VM definitions as Lua code for debug modules. */ -static void emit_vmdef(BuildCtx *ctx) -{ - char buf[80]; - int i; - fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n"); - fprintf(ctx->fp, "return {\n\n"); - - fprintf(ctx->fp, "bcnames = \""); - for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]); - fprintf(ctx->fp, "\",\n\n"); - - fprintf(ctx->fp, "irnames = \""); - for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]); - fprintf(ctx->fp, "\",\n\n"); - - fprintf(ctx->fp, "irfpm = { [0]="); - for (i = 0; irfpm_names[i]; i++) - fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i])); - fprintf(ctx->fp, "},\n\n"); - - fprintf(ctx->fp, "irfield = { [0]="); - for (i = 0; irfield_names[i]; i++) { - char *p; - lower(buf, irfield_names[i]); - p = strchr(buf, '_'); - if (p) *p = '.'; - fprintf(ctx->fp, "\"%s\", ", buf); - } - fprintf(ctx->fp, "},\n\n"); - - fprintf(ctx->fp, "ircall = {\n[0]="); - for (i = 0; ircall_names[i]; i++) - fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]); - fprintf(ctx->fp, "},\n\n"); - - fprintf(ctx->fp, "traceerr = {\n[0]="); - for (i = 0; trace_errors[i]; i++) - fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]); - fprintf(ctx->fp, "},\n\n"); -} - -/* -- Argument parsing ---------------------------------------------------- */ - -/* Build mode names. */ -static const char *const modenames[] = { -#define BUILDNAME(name) #name, -BUILDDEF(BUILDNAME) -#undef BUILDNAME - NULL -}; - -/* Print usage information and exit. 
*/ -static void usage(void) -{ - int i; - fprintf(stderr, LUAJIT_VERSION " VM builder.\n"); - fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n"); - fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n"); - fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n"); - fprintf(stderr, "Available modes:\n"); - for (i = 0; i < BUILD__MAX; i++) - fprintf(stderr, " %s\n", modenames[i]); - exit(1); -} - -/* Parse the output mode name. */ -static BuildMode parsemode(const char *mode) -{ - int i; - for (i = 0; modenames[i]; i++) - if (!strcmp(mode, modenames[i])) - return (BuildMode)i; - usage(); - return (BuildMode)-1; -} - -/* Parse arguments. */ -static void parseargs(BuildCtx *ctx, char **argv) -{ - const char *a; - int i; - ctx->mode = (BuildMode)-1; - ctx->outname = "-"; - for (i = 1; (a = argv[i]) != NULL; i++) { - if (a[0] != '-') - break; - switch (a[1]) { - case '-': - if (a[2]) goto err; - i++; - goto ok; - case '\0': - goto ok; - case 'm': - i++; - if (a[2] || argv[i] == NULL) goto err; - ctx->mode = parsemode(argv[i]); - break; - case 'o': - i++; - if (a[2] || argv[i] == NULL) goto err; - ctx->outname = argv[i]; - break; - default: err: - usage(); - break; - } - } -ok: - ctx->args = argv+i; - if (ctx->mode == (BuildMode)-1) goto err; -} - -int main(int argc, char **argv) -{ - BuildCtx ctx_; - BuildCtx *ctx = &ctx_; - int status, binmode; - - if (sizeof(void *) != 4*LJ_32+8*LJ_64) { - fprintf(stderr,"Error: pointer size mismatch in cross-build.\n"); - fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=...\n\n"); - return 1; - } - - UNUSED(argc); - parseargs(ctx, argv); - - if ((status = build_code(ctx))) { - fprintf(stderr,"Error: DASM error %08x\n", status); - return 1; - } - - switch (ctx->mode) { - case BUILD_peobj: - case BUILD_raw: - binmode = 1; - break; - default: - binmode = 0; - break; - } - - if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') { - ctx->fp = stdout; -#if defined(_WIN32) - if (binmode) - _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */ -#endif - } else if (!(ctx->fp = fopen(ctx->outname, binmode ? "wb" : "w"))) { - fprintf(stderr, "Error: cannot open output file '%s': %s\n", - ctx->outname, strerror(errno)); - exit(1); - } - - switch (ctx->mode) { - case BUILD_elfasm: - case BUILD_coffasm: - case BUILD_machasm: - emit_asm(ctx); - emit_asm_debug(ctx); - break; - case BUILD_peobj: - emit_peobj(ctx); - break; - case BUILD_raw: - emit_raw(ctx); - break; - case BUILD_bcdef: - emit_bcdef(ctx); - emit_lib(ctx); - break; - case BUILD_vmdef: - emit_vmdef(ctx); - emit_lib(ctx); - fprintf(ctx->fp, "}\n\n"); - break; - case BUILD_ffdef: - case BUILD_libdef: - case BUILD_recdef: - emit_lib(ctx); - break; - case BUILD_folddef: - emit_fold(ctx); - break; - default: - break; - } - - fflush(ctx->fp); - if (ferror(ctx->fp)) { - fprintf(stderr, "Error: cannot write to output file: %s\n", - strerror(errno)); - exit(1); - } - fclose(ctx->fp); - - return 0; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.h deleted file mode 100644 index 5588555307e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm.h +++ /dev/null @@ -1,105 +0,0 @@ -/* -** LuaJIT VM builder. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _BUILDVM_H -#define _BUILDVM_H - -#include <sys/types.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <stdarg.h> - -#include "lj_def.h" -#include "lj_arch.h" - -/* Hardcoded limits. Increase as needed. 
*/ -#define BUILD_MAX_RELOC 200 /* Max. number of relocations. */ -#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */ - -/* Prefix for scanned library definitions. */ -#define LIBDEF_PREFIX "LJLIB_" - -/* Prefix for scanned fold definitions. */ -#define FOLDDEF_PREFIX "LJFOLD" - -/* Prefixes for generated labels. */ -#define LABEL_PREFIX "lj_" -#define LABEL_PREFIX_BC LABEL_PREFIX "BC_" -#define LABEL_PREFIX_FF LABEL_PREFIX "ff_" -#define LABEL_PREFIX_CF LABEL_PREFIX "cf_" -#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_" -#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_" -#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_" - -/* Forward declaration. */ -struct dasm_State; - -/* Build modes. */ -#define BUILDDEF(_) \ - _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \ - _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \ - _(folddef) - -typedef enum { -#define BUILDENUM(name) BUILD_##name, -BUILDDEF(BUILDENUM) -#undef BUILDENUM - BUILD__MAX -} BuildMode; - -/* Code relocation. */ -typedef struct BuildReloc { - int32_t ofs; - int sym; - int type; -} BuildReloc; - -typedef struct BuildSym { - const char *name; - int32_t ofs; -} BuildSym; - -/* Build context structure. */ -typedef struct BuildCtx { - /* DynASM state pointer. Should be first member. */ - struct dasm_State *D; - /* Parsed command line. */ - BuildMode mode; - FILE *fp; - const char *outname; - char **args; - /* Code and symbols generated by DynASM. */ - uint8_t *code; - size_t codesz; - int npc, nglob, nsym, nreloc, nrelocsym; - void **glob; - BuildSym *sym; - const char **relocsym; - int32_t *bc_ofs; - const char *beginsym; - /* Strings generated by DynASM. */ - const char *const *globnames; - const char *const *extnames; - const char *dasm_ident; - const char *dasm_arch; - /* Relocations. */ - BuildReloc reloc[BUILD_MAX_RELOC]; -} BuildCtx; - -extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz); -extern void emit_asm(BuildCtx *ctx); -extern void emit_peobj(BuildCtx *ctx); -extern void emit_lib(BuildCtx *ctx); -extern void emit_fold(BuildCtx *ctx); - -extern const char *const bc_names[]; -extern const char *const ir_names[]; -extern const char *const irt_names[]; -extern const char *const irfpm_names[]; -extern const char *const irfield_names[]; -extern const char *const ircall_names[]; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_asm.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_asm.c deleted file mode 100644 index 9b7ae53a26a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_asm.c +++ /dev/null @@ -1,345 +0,0 @@ -/* -** LuaJIT VM builder: Assembler source code emitter. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "buildvm.h" -#include "lj_bc.h" - -/* ------------------------------------------------------------------------ */ - -#if LJ_TARGET_X86ORX64 -/* Emit bytes piecewise as assembler text. 
*/ -static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n) -{ - int i; - for (i = 0; i < n; i++) { - if ((i & 15) == 0) - fprintf(ctx->fp, "\t.byte %d", p[i]); - else - fprintf(ctx->fp, ",%d", p[i]); - if ((i & 15) == 15) putc('\n', ctx->fp); - } - if ((n & 15) != 0) putc('\n', ctx->fp); -} - -/* Emit relocation */ -static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym) -{ - switch (ctx->mode) { - case BUILD_elfasm: - if (type) - fprintf(ctx->fp, "\t.long %s-.-4\n", sym); - else - fprintf(ctx->fp, "\t.long %s\n", sym); - break; - case BUILD_coffasm: - fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym); - if (type) - fprintf(ctx->fp, "\t.long %s-.-4\n", sym); - else - fprintf(ctx->fp, "\t.long %s\n", sym); - break; - default: /* BUILD_machasm for relative relocations handled below. */ - fprintf(ctx->fp, "\t.long %s\n", sym); - break; - } -} - -static const char *const jccnames[] = { - "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja", - "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg" -}; - -/* Emit x86/x64 text relocations. */ -static void emit_asm_reloc_text(BuildCtx *ctx, uint8_t *cp, int n, - const char *sym) -{ - const char *opname = NULL; - if (--n < 0) goto err; - if (cp[n] == 0xe8) { - opname = "call"; - } else if (cp[n] == 0xe9) { - opname = "jmp"; - } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) { - opname = jccnames[cp[n]-0x80]; - n--; - } else { -err: - fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n", - sym); - exit(1); - } - emit_asm_bytes(ctx, cp, n); - if (strncmp(sym+(*sym == '_'), LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) { - /* Various fixups for external symbols outside of our binary. */ - if (ctx->mode == BUILD_elfasm) { - if (LJ_32) - fprintf(ctx->fp, "#if __PIC__\n\t%s lj_wrap_%s\n#else\n", opname, sym); - fprintf(ctx->fp, "\t%s %s@PLT\n", opname, sym); - if (LJ_32) - fprintf(ctx->fp, "#endif\n"); - return; - } else if (LJ_32 && ctx->mode == BUILD_machasm) { - fprintf(ctx->fp, "\t%s L%s$stub\n", opname, sym); - return; - } - } - fprintf(ctx->fp, "\t%s %s\n", opname, sym); -} -#else -/* Emit words piecewise as assembler text. */ -static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n) -{ - int i; - for (i = 0; i < n; i += 4) { - if ((i & 15) == 0) - fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i)); - else - fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i)); - if ((i & 15) == 12) putc('\n', ctx->fp); - } - if ((n & 15) != 0) putc('\n', ctx->fp); -} - -/* Emit relocation as part of an instruction. */ -static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n, - const char *sym) -{ - uint32_t ins; - emit_asm_words(ctx, p, n-4); - ins = *(uint32_t *)(p+n-4); -#if LJ_TARGET_ARM - if ((ins & 0xff000000u) == 0xfa000000u) { - fprintf(ctx->fp, "\tblx %s\n", sym); - } else if ((ins & 0x0e000000u) == 0x0a000000u) { - fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b", - &"eqnecsccmiplvsvchilsgeltgtle"[2*(ins >> 28)], sym); - } else { - fprintf(stderr, - "Error: unsupported opcode %08x for %s symbol relocation.\n", - ins, sym); - exit(1); - } -#elif LJ_TARGET_ARM64 - if ((ins >> 26) == 0x25u) { - fprintf(ctx->fp, "\tbl %s\n", sym); - } else { - fprintf(stderr, - "Error: unsupported opcode %08x for %s symbol relocation.\n", - ins, sym); - exit(1); - } -#elif LJ_TARGET_PPC -#if LJ_TARGET_PS3 -#define TOCPREFIX "." -#else -#define TOCPREFIX "" -#endif - if ((ins >> 26) == 16) { - fprintf(ctx->fp, "\t%s %d, %d, " TOCPREFIX "%s\n", - (ins & 1) ? 
"bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym); - } else if ((ins >> 26) == 18) { -#if LJ_ARCH_PPC64 - const char *suffix = strchr(sym, '@'); - if (suffix && suffix[1] == 'h') { - fprintf(ctx->fp, "\taddis 11, 2, %s\n", sym); - } else if (suffix && suffix[1] == 'l') { - fprintf(ctx->fp, "\tld 12, %s\n", sym); - } else -#endif - fprintf(ctx->fp, "\t%s " TOCPREFIX "%s\n", (ins & 1) ? "bl" : "b", sym); - } else { - fprintf(stderr, - "Error: unsupported opcode %08x for %s symbol relocation.\n", - ins, sym); - exit(1); - } -#elif LJ_TARGET_MIPS - fprintf(stderr, - "Error: unsupported opcode %08x for %s symbol relocation.\n", - ins, sym); - exit(1); -#else -#error "missing relocation support for this architecture" -#endif -} -#endif - -#if LJ_TARGET_ARM -#define ELFASM_PX "%%" -#else -#define ELFASM_PX "@" -#endif - -/* Emit an assembler label. */ -static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc) -{ - switch (ctx->mode) { - case BUILD_elfasm: -#if LJ_TARGET_PS3 - if (!strncmp(name, "lj_vm_", 6) && - strcmp(name, ctx->beginsym) && - !strstr(name, "hook")) { - fprintf(ctx->fp, - "\n\t.globl %s\n" - "\t.section \".opd\",\"aw\"\n" - "%s:\n" - "\t.long .%s,.TOC.@tocbase32\n" - "\t.size %s,8\n" - "\t.previous\n" - "\t.globl .%s\n" - "\t.hidden .%s\n" - "\t.type .%s, " ELFASM_PX "function\n" - "\t.size .%s, %d\n" - ".%s:\n", - name, name, name, name, name, name, name, name, size, name); - break; - } -#endif - fprintf(ctx->fp, - "\n\t.globl %s\n" - "\t.hidden %s\n" - "\t.type %s, " ELFASM_PX "%s\n" - "\t.size %s, %d\n" - "%s:\n", - name, name, name, isfunc ? "function" : "object", name, size, name); - break; - case BUILD_coffasm: - fprintf(ctx->fp, "\n\t.globl %s\n", name); - if (isfunc) - fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name); - fprintf(ctx->fp, "%s:\n", name); - break; - case BUILD_machasm: - fprintf(ctx->fp, - "\n\t.private_extern %s\n" - "%s:\n", name, name); - break; - default: - break; - } -} - -/* Emit alignment. */ -static void emit_asm_align(BuildCtx *ctx, int bits) -{ - switch (ctx->mode) { - case BUILD_elfasm: - case BUILD_coffasm: - fprintf(ctx->fp, "\t.p2align %d\n", bits); - break; - case BUILD_machasm: - fprintf(ctx->fp, "\t.align %d\n", bits); - break; - default: - break; - } -} - -/* ------------------------------------------------------------------------ */ - -/* Emit assembler source code. */ -void emit_asm(BuildCtx *ctx) -{ - int i, rel; - - fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch); -#if LJ_ARCH_PPC64 - fprintf(ctx->fp, "\t.abiversion 2\n"); -#endif - fprintf(ctx->fp, "\t.text\n"); - emit_asm_align(ctx, 4); - -#if LJ_TARGET_PS3 - emit_asm_label(ctx, ctx->beginsym, ctx->codesz, 0); -#else - emit_asm_label(ctx, ctx->beginsym, 0, 0); -#endif - if (ctx->mode != BUILD_machasm) - fprintf(ctx->fp, ".Lbegin:\n"); - -#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND - /* This should really be moved into buildvm_arm.dasc. 
*/ - fprintf(ctx->fp, - ".fnstart\n" - ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n" - ".pad #28\n"); -#endif -#if LJ_TARGET_MIPS - fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n"); -#endif - - for (i = rel = 0; i < ctx->nsym; i++) { - int32_t ofs = ctx->sym[i].ofs; - int32_t next = ctx->sym[i+1].ofs; -#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND && LJ_HASFFI - if (!strcmp(ctx->sym[i].name, "lj_vm_ffi_call")) - fprintf(ctx->fp, - ".globl lj_err_unwind_arm\n" - ".personality lj_err_unwind_arm\n" - ".fnend\n" - ".fnstart\n" - ".save {r4, r5, r11, lr}\n" - ".setfp r11, sp\n"); -#endif - emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1); - while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) { - BuildReloc *r = &ctx->reloc[rel]; - int n = r->ofs - ofs; -#if LJ_TARGET_X86ORX64 - if (r->type != 0 && - (ctx->mode == BUILD_elfasm || ctx->mode == BUILD_machasm)) { - emit_asm_reloc_text(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); - } else { - emit_asm_bytes(ctx, ctx->code+ofs, n); - emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]); - } - ofs += n+4; -#else - emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); - ofs += n; -#endif - rel++; - } -#if LJ_TARGET_X86ORX64 - emit_asm_bytes(ctx, ctx->code+ofs, next-ofs); -#else - emit_asm_words(ctx, ctx->code+ofs, next-ofs); -#endif - } - -#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND - fprintf(ctx->fp, -#if !LJ_HASFFI - ".globl lj_err_unwind_arm\n" - ".personality lj_err_unwind_arm\n" -#endif - ".fnend\n"); -#endif - - fprintf(ctx->fp, "\n"); - switch (ctx->mode) { - case BUILD_elfasm: -#if !(LJ_TARGET_PS3 || LJ_TARGET_PSVITA) - fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n"); -#endif -#if LJ_TARGET_PPC && !LJ_TARGET_PS3 - /* Hard-float ABI. */ - fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n"); -#endif - /* fallthrough */ - case BUILD_coffasm: - fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident); - break; - case BUILD_machasm: - fprintf(ctx->fp, - "\t.cstring\n" - "\t.ascii \"%s\\0\"\n", ctx->dasm_ident); - break; - default: - break; - } - fprintf(ctx->fp, "\n"); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_fold.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_fold.c deleted file mode 100644 index daed7eccb22..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_fold.c +++ /dev/null @@ -1,229 +0,0 @@ -/* -** LuaJIT VM builder: IR folding hash table generator. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "buildvm.h" -#include "lj_obj.h" -#include "lj_ir.h" - -/* Context for the folding hash table generator. */ -static int lineno; -static int funcidx; -static uint32_t foldkeys[BUILD_MAX_FOLD]; -static uint32_t nkeys; - -/* Try to fill the hash table with keys using the hash parameters. */ -static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol) -{ - uint32_t i; - if (dorol && ((r & 31) == 0 || (r>>5) == 0)) - return 0; /* Avoid zero rotates. */ - memset(htab, 0xff, (sz+1)*sizeof(uint32_t)); - for (i = 0; i < nkeys; i++) { - uint32_t key = foldkeys[i]; - uint32_t k = key & 0xffffff; - uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) : - (((k << (r>>5)) - k) << (r&31))) % sz; - if (htab[h] != 0xffffffff) { /* Collision on primary slot. */ - if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */ - /* Try to move the colliding key, if possible. 
*/ - if (h < sz-1 && htab[h+2] == 0xffffffff) { - uint32_t k2 = htab[h+1] & 0xffffff; - uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) : - (((k2 << (r>>5)) - k2) << (r&31))) % sz; - if (h2 != h+1) return 0; /* Cannot resolve collision. */ - htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */ - } else { - return 0; /* Collision. */ - } - } - htab[h+1] = key; - } else { - htab[h] = key; - } - } - return 1; /* Success, all keys could be stored. */ -} - -/* Print the generated hash table. */ -static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz) -{ - uint32_t i; - fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x", - sz+1, htab[0]); - for (i = 1; i < sz+1; i++) - fprintf(ctx->fp, ",\n0x%08x", htab[i]); - fprintf(ctx->fp, "\n};\n\n"); -} - -/* Exhaustive search for the shortest semi-perfect hash table. */ -static void makehash(BuildCtx *ctx) -{ - uint32_t htab[BUILD_MAX_FOLD*2+1]; - uint32_t sz, r; - /* Search for the smallest hash table with an odd size. */ - for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) { - /* First try all shift hash combinations. */ - for (r = 0; r < 32*32; r++) { - if (tryhash(htab, sz, r, 0)) { - printhash(ctx, htab, sz); - fprintf(ctx->fp, - "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n", - r>>5, r&31, sz); - return; - } - } - /* Then try all rotate hash combinations. */ - for (r = 0; r < 32*32; r++) { - if (tryhash(htab, sz, r, 1)) { - printhash(ctx, htab, sz); - fprintf(ctx->fp, - "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n", - r>>5, r&31, sz); - return; - } - } - } - fprintf(stderr, "Error: search for perfect hash failed\n"); - exit(1); -} - -/* Parse one token of a fold rule. */ -static uint32_t nexttoken(char **pp, int allowlit, int allowany) -{ - char *p = *pp; - if (p) { - uint32_t i; - char *q = strchr(p, ' '); - if (q) *q++ = '\0'; - *pp = q; - if (allowlit && !strncmp(p, "IRFPM_", 6)) { - for (i = 0; irfpm_names[i]; i++) - if (!strcmp(irfpm_names[i], p+6)) - return i; - } else if (allowlit && !strncmp(p, "IRFL_", 5)) { - for (i = 0; irfield_names[i]; i++) - if (!strcmp(irfield_names[i], p+5)) - return i; - } else if (allowlit && !strncmp(p, "IRCALL_", 7)) { - for (i = 0; ircall_names[i]; i++) - if (!strcmp(ircall_names[i], p+7)) - return i; - } else if (allowlit && !strncmp(p, "IRCONV_", 7)) { - for (i = 0; irt_names[i]; i++) { - const char *r = strchr(p+7, '_'); - if (r && !strncmp(irt_names[i], p+7, r-(p+7))) { - uint32_t j; - for (j = 0; irt_names[j]; j++) - if (!strcmp(irt_names[j], r+1)) - return (i << 5) + j; - } - } - } else if (allowlit && *p >= '0' && *p <= '9') { - for (i = 0; *p >= '0' && *p <= '9'; p++) - i = i*10 + (*p - '0'); - if (*p == '\0') - return i; - } else if (allowany && !strcmp("any", p)) { - return allowany; - } else { - for (i = 0; ir_names[i]; i++) - if (!strcmp(ir_names[i], p)) - return i; - } - fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno); - exit(1); - } - return 0; -} - -/* Parse a fold rule. */ -static void foldrule(char *p) -{ - uint32_t op = nexttoken(&p, 0, 0); - uint32_t left = nexttoken(&p, 0, 0x7f); - uint32_t right = nexttoken(&p, 1, 0x3ff); - uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right; - uint32_t i; - if (nkeys >= BUILD_MAX_FOLD) { - fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n"); - exit(1); - } - /* Simple insertion sort to detect duplicates. 
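The table written by printhash() is meant to be probed at two slots: tryhash() stores each key either at its primary position h or in the spill slot h+1, and the extra array element (sz+1 entries) keeps that second probe in bounds. A minimal sketch of a matching lookup, assuming the generated lj_folddef.h (fold_hash[], fold_func[] and the fold_hashkey() macro) has been included; fold_lookup and key24 are illustrative names, not the actual lj_opt_fold.c code, which additionally deals with the "any" wildcard keys accepted by nexttoken() above:

/* key24 = (op << 17) | (left << 10) | right, as packed by foldrule() above. */
static int fold_lookup(uint32_t key24)
{
  uint32_t h = fold_hashkey(key24);   /* Macro emitted by makehash(). */
  uint32_t fh = fold_hash[h];
  if ((fh & 0xffffffu) != key24)
    fh = fold_hash[h+1];              /* Secondary slot, see tryhash(). */
  if ((fh & 0xffffffu) != key24)
    return -1;                        /* No fold rule for this key. */
  return (int)(fh >> 24);             /* High byte: index into fold_func[]. */
}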
*/ - for (i = nkeys; i > 0; i--) { - if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff)) - break; - if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) { - fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno); - exit(1); - } - foldkeys[i] = foldkeys[i-1]; - } - foldkeys[i] = key; - nkeys++; -} - -/* Emit C source code for IR folding hash table. */ -void emit_fold(BuildCtx *ctx) -{ - char buf[256]; /* We don't care about analyzing lines longer than that. */ - const char *fname = ctx->args[0]; - FILE *fp; - - if (fname == NULL) { - fprintf(stderr, "Error: missing input filename\n"); - exit(1); - } - - if (fname[0] == '-' && fname[1] == '\0') { - fp = stdin; - } else { - fp = fopen(fname, "r"); - if (!fp) { - fprintf(stderr, "Error: cannot open input file '%s': %s\n", - fname, strerror(errno)); - exit(1); - } - } - - fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); - fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n"); - - lineno = 0; - funcidx = 0; - nkeys = 0; - while (fgets(buf, sizeof(buf), fp) != NULL) { - lineno++; - /* The prefix must be at the start of a line, otherwise it's ignored. */ - if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) { - char *p = buf+sizeof(FOLDDEF_PREFIX)-1; - char *q = strchr(p, ')'); - if (p[0] == '(' && q) { - p++; - *q = '\0'; - foldrule(p); - } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) { - p += 2; - *q = '\0'; - if (funcidx) - fprintf(ctx->fp, ",\n"); - if (p[-2] == 'X') - fprintf(ctx->fp, " %s", p); - else - fprintf(ctx->fp, " fold_%s", p); - funcidx++; - } else { - buf[strlen(buf)-1] = '\0'; - fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n", - FOLDDEF_PREFIX, p, lineno); - exit(1); - } - } - } - fclose(fp); - fprintf(ctx->fp, "\n};\n\n"); - - makehash(ctx); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_lib.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_lib.c deleted file mode 100644 index e928673d4e0..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_lib.c +++ /dev/null @@ -1,457 +0,0 @@ -/* -** LuaJIT VM builder: library definition compiler. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "buildvm.h" -#include "lj_obj.h" -#include "lj_bc.h" -#include "lj_lib.h" -#include "buildvm_libbc.h" - -/* Context for library definitions. */ -static uint8_t obuf[8192]; -static uint8_t *optr; -static char modname[80]; -static size_t modnamelen; -static char funcname[80]; -static int modstate, regfunc; -static int ffid, recffid, ffasmfunc; - -enum { - REGFUNC_OK, - REGFUNC_NOREG, - REGFUNC_NOREGUV -}; - -static void libdef_name(const char *p, int kind) -{ - size_t n = strlen(p); - if (kind != LIBINIT_STRING) { - if (n > modnamelen && p[modnamelen] == '_' && - !strncmp(p, modname, modnamelen)) { - p += modnamelen+1; - n -= modnamelen+1; - } - } - if (n > LIBINIT_MAXSTR) { - fprintf(stderr, "Error: string too long: '%s'\n", p); - exit(1); - } - if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. 
*/ - fprintf(stderr, "Error: output buffer overflow\n"); - exit(1); - } - *optr++ = (uint8_t)(n | kind); - memcpy(optr, p, n); - optr += n; -} - -static void libdef_endmodule(BuildCtx *ctx) -{ - if (modstate != 0) { - char line[80]; - const uint8_t *p; - int n; - if (modstate == 1) - fprintf(ctx->fp, " (lua_CFunction)0"); - fprintf(ctx->fp, "\n};\n"); - fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n", - LABEL_PREFIX_LIBINIT, modname); - line[0] = '\0'; - for (n = 0, p = obuf; p < optr; p++) { - n += sprintf(line+n, "%d,", *p); - if (n >= 75) { - fprintf(ctx->fp, "%s\n", line); - n = 0; - line[0] = '\0'; - } - } - fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END); - } -} - -static void libdef_module(BuildCtx *ctx, char *p, int arg) -{ - UNUSED(arg); - if (ctx->mode == BUILD_libdef) { - libdef_endmodule(ctx); - optr = obuf; - *optr++ = (uint8_t)ffid; - *optr++ = (uint8_t)ffasmfunc; - *optr++ = 0; /* Hash table size. */ - modstate = 1; - fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p); - fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p); - fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n", - LABEL_PREFIX_LIBCF, p); - } - modnamelen = strlen(p); - if (modnamelen > sizeof(modname)-1) { - fprintf(stderr, "Error: module name too long: '%s'\n", p); - exit(1); - } - strcpy(modname, p); -} - -static int find_ffofs(BuildCtx *ctx, const char *name) -{ - int i; - for (i = 0; i < ctx->nglob; i++) { - const char *gl = ctx->globnames[i]; - if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) { - return (int)((uint8_t *)ctx->glob[i] - ctx->code); - } - } - fprintf(stderr, "Error: undefined fast function %s%s\n", - LABEL_PREFIX_FF, name); - exit(1); -} - -static void libdef_func(BuildCtx *ctx, char *p, int arg) -{ - if (arg != LIBINIT_CF) - ffasmfunc++; - if (ctx->mode == BUILD_libdef) { - if (modstate == 0) { - fprintf(stderr, "Error: no module for function definition %s\n", p); - exit(1); - } - if (regfunc == REGFUNC_NOREG) { - if (optr+1 > obuf+sizeof(obuf)) { - fprintf(stderr, "Error: output buffer overflow\n"); - exit(1); - } - *optr++ = LIBINIT_FFID; - } else { - if (arg != LIBINIT_ASM_) { - if (modstate != 1) fprintf(ctx->fp, ",\n"); - modstate = 2; - fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p); - } - if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */ - libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg); - } - } else if (ctx->mode == BUILD_ffdef) { - fprintf(ctx->fp, "FFDEF(%s)\n", p); - } else if (ctx->mode == BUILD_recdef) { - if (strlen(p) > sizeof(funcname)-1) { - fprintf(stderr, "Error: function name too long: '%s'\n", p); - exit(1); - } - strcpy(funcname, p); - } else if (ctx->mode == BUILD_vmdef) { - int i; - for (i = 1; p[i] && modname[i-1]; i++) - if (p[i] == '_') p[i] = '.'; - fprintf(ctx->fp, "\"%s\",\n", p); - } else if (ctx->mode == BUILD_bcdef) { - if (arg != LIBINIT_CF) - fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p)); - } - ffid++; - regfunc = REGFUNC_OK; -} - -static uint8_t *libdef_uleb128(uint8_t *p, uint32_t *vv) -{ - uint32_t v = *p++; - if (v >= 0x80) { - int sh = 0; v &= 0x7f; - do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80); - } - *vv = v; - return p; -} - -static void libdef_fixupbc(uint8_t *p) -{ - uint32_t i, sizebc; - p += 4; - p = libdef_uleb128(p, &sizebc); - p = libdef_uleb128(p, &sizebc); - p = libdef_uleb128(p, &sizebc); - for (i = 0; i < sizebc; i++, p += 4) { - uint8_t op = p[libbc_endian ? 3 : 0]; - uint8_t ra = p[libbc_endian ? 
2 : 1]; - uint8_t rc = p[libbc_endian ? 1 : 2]; - uint8_t rb = p[libbc_endian ? 0 : 3]; - if (!LJ_DUALNUM && op == BC_ISTYPE && rc == ~LJ_TNUMX+1) { - op = BC_ISNUM; rc++; - } - p[LJ_ENDIAN_SELECT(0, 3)] = op; - p[LJ_ENDIAN_SELECT(1, 2)] = ra; - p[LJ_ENDIAN_SELECT(2, 1)] = rc; - p[LJ_ENDIAN_SELECT(3, 0)] = rb; - } -} - -static void libdef_lua(BuildCtx *ctx, char *p, int arg) -{ - UNUSED(arg); - if (ctx->mode == BUILD_libdef) { - int i; - for (i = 0; libbc_map[i].name != NULL; i++) { - if (!strcmp(libbc_map[i].name, p)) { - int ofs = libbc_map[i].ofs; - int len = libbc_map[i+1].ofs - ofs; - obuf[2]++; /* Bump hash table size. */ - *optr++ = LIBINIT_LUA; - libdef_name(p, 0); - memcpy(optr, libbc_code + ofs, len); - libdef_fixupbc(optr); - optr += len; - return; - } - } - fprintf(stderr, "Error: missing libbc definition for %s\n", p); - exit(1); - } -} - -static uint32_t find_rec(char *name) -{ - char *p = (char *)obuf; - uint32_t n; - for (n = 2; *p; n++) { - if (strcmp(p, name) == 0) - return n; - p += strlen(p)+1; - } - if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) { - fprintf(stderr, "Error: output buffer overflow\n"); - exit(1); - } - strcpy(p, name); - return n; -} - -static void libdef_rec(BuildCtx *ctx, char *p, int arg) -{ - UNUSED(arg); - if (ctx->mode == BUILD_recdef) { - char *q; - uint32_t n; - for (; recffid+1 < ffid; recffid++) - fprintf(ctx->fp, ",\n0"); - recffid = ffid; - if (*p == '.') p = funcname; - q = strchr(p, ' '); - if (q) *q++ = '\0'; - n = find_rec(p); - if (q) - fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q); - else - fprintf(ctx->fp, ",\n0x%02x00", n); - } -} - -static void memcpy_endian(void *dst, void *src, size_t n) -{ - union { uint8_t b; uint32_t u; } host_endian; - host_endian.u = 1; - if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) { - memcpy(dst, src, n); - } else { - size_t i; - for (i = 0; i < n; i++) - ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1]; - } -} - -static void libdef_push(BuildCtx *ctx, char *p, int arg) -{ - UNUSED(arg); - if (ctx->mode == BUILD_libdef) { - int len = (int)strlen(p); - if (*p == '"') { - if (len > 1 && p[len-1] == '"') { - p[len-1] = '\0'; - libdef_name(p+1, LIBINIT_STRING); - return; - } - } else if (*p >= '0' && *p <= '9') { - char *ep; - double d = strtod(p, &ep); - if (*ep == '\0') { - if (optr+1+sizeof(double) > obuf+sizeof(obuf)) { - fprintf(stderr, "Error: output buffer overflow\n"); - exit(1); - } - *optr++ = LIBINIT_NUMBER; - memcpy_endian(optr, &d, sizeof(double)); - optr += sizeof(double); - return; - } - } else if (!strcmp(p, "lastcl")) { - if (optr+1 > obuf+sizeof(obuf)) { - fprintf(stderr, "Error: output buffer overflow\n"); - exit(1); - } - *optr++ = LIBINIT_LASTCL; - return; - } else if (len > 4 && !strncmp(p, "top-", 4)) { - if (optr+2 > obuf+sizeof(obuf)) { - fprintf(stderr, "Error: output buffer overflow\n"); - exit(1); - } - *optr++ = LIBINIT_COPY; - *optr++ = (uint8_t)atoi(p+4); - return; - } - fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p); - exit(1); - } -} - -static void libdef_set(BuildCtx *ctx, char *p, int arg) -{ - UNUSED(arg); - if (ctx->mode == BUILD_libdef) { - if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */ - libdef_name(p, LIBINIT_STRING); - *optr++ = LIBINIT_SET; - obuf[2]++; /* Bump hash table size. 
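For orientation, libdef_module() and libdef_endmodule() above bracket each library module in the generated lj_libdef.h with a function-pointer array and a byte-coded init stream. A schematic example with a hypothetical module name and placeholder entries (the real generator emits the stream as plain decimal numbers, not symbolic comments):

#ifdef LJLIB_MODULE_example
#undef LJLIB_MODULE_example
static const lua_CFunction lj_lib_cf_example[] = {
  lj_cf_example_f1,   /* Handler of an LJLIB_CF function. */
  lj_ffh_example_f2   /* Fallback handler of an LJLIB_ASM function. */
};
static const uint8_t lj_lib_init_example[] = {
  /* ffid, ffasmfunc and hash-size header bytes (from libdef_module()),
  ** then the (length | kind) tagged names and values collected in obuf[],
  ** terminated by LIBINIT_END. */
  0 /* placeholder */
};
#endif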
*/ - } -} - -static void libdef_regfunc(BuildCtx *ctx, char *p, int arg) -{ - UNUSED(ctx); UNUSED(p); - regfunc = arg; -} - -typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg); - -typedef struct LibDefHandler { - const char *suffix; - const char *stop; - const LibDefFunc func; - const int arg; -} LibDefHandler; - -static const LibDefHandler libdef_handlers[] = { - { "MODULE_", " \t\r\n", libdef_module, 0 }, - { "CF(", ")", libdef_func, LIBINIT_CF }, - { "ASM(", ")", libdef_func, LIBINIT_ASM }, - { "ASM_(", ")", libdef_func, LIBINIT_ASM_ }, - { "LUA(", ")", libdef_lua, 0 }, - { "REC(", ")", libdef_rec, 0 }, - { "PUSH(", ")", libdef_push, 0 }, - { "SET(", ")", libdef_set, 0 }, - { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV }, - { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG }, - { NULL, NULL, (LibDefFunc)0, 0 } -}; - -/* Emit C source code for library function definitions. */ -void emit_lib(BuildCtx *ctx) -{ - const char *fname; - - if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef || - ctx->mode == BUILD_recdef) - fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); - else if (ctx->mode == BUILD_vmdef) - fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n"); - if (ctx->mode == BUILD_recdef) - fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100"); - recffid = ffid = FF_C+1; - ffasmfunc = 0; - - while ((fname = *ctx->args++)) { - char buf[256]; /* We don't care about analyzing lines longer than that. */ - FILE *fp; - if (fname[0] == '-' && fname[1] == '\0') { - fp = stdin; - } else { - fp = fopen(fname, "r"); - if (!fp) { - fprintf(stderr, "Error: cannot open input file '%s': %s\n", - fname, strerror(errno)); - exit(1); - } - } - modstate = 0; - regfunc = REGFUNC_OK; - while (fgets(buf, sizeof(buf), fp) != NULL) { - char *p; - /* Simplistic pre-processor. Only handles top-level #if/#endif. */ - if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') { - int ok = 1; - if (!strcmp(buf, "#if LJ_52\n")) - ok = LJ_52; - else if (!strcmp(buf, "#if LJ_HASJIT\n")) - ok = LJ_HASJIT; - else if (!strcmp(buf, "#if LJ_HASFFI\n")) - ok = LJ_HASFFI; - if (!ok) { - int lvl = 1; - while (fgets(buf, sizeof(buf), fp) != NULL) { - if (buf[0] == '#' && buf[1] == 'e' && buf[2] == 'n') { - if (--lvl == 0) break; - } else if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') { - lvl++; - } - } - continue; - } - } - for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) { - const LibDefHandler *ldh; - p += sizeof(LIBDEF_PREFIX)-1; - for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) { - size_t n, len = strlen(ldh->suffix); - if (!strncmp(p, ldh->suffix, len)) { - p += len; - n = ldh->stop ? 
strcspn(p, ldh->stop) : 0; - if (!p[n]) break; - p[n] = '\0'; - ldh->func(ctx, p, ldh->arg); - p += n+1; - break; - } - } - if (ldh->suffix == NULL) { - buf[strlen(buf)-1] = '\0'; - fprintf(stderr, "Error: unknown library definition tag %s%s\n", - LIBDEF_PREFIX, p); - exit(1); - } - } - } - fclose(fp); - if (ctx->mode == BUILD_libdef) { - libdef_endmodule(ctx); - } - } - - if (ctx->mode == BUILD_ffdef) { - fprintf(ctx->fp, "\n#undef FFDEF\n\n"); - fprintf(ctx->fp, - "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n", - ffasmfunc); - } else if (ctx->mode == BUILD_vmdef) { - fprintf(ctx->fp, "},\n\n"); - } else if (ctx->mode == BUILD_bcdef) { - int i; - fprintf(ctx->fp, "\n};\n\n"); - fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n"); - fprintf(ctx->fp, "BCDEF(BCMODE)\n"); - for (i = ffasmfunc-1; i > 0; i--) - fprintf(ctx->fp, "BCMODE_FF,\n"); - fprintf(ctx->fp, "BCMODE_FF\n};\n\n"); - } else if (ctx->mode == BUILD_recdef) { - char *p = (char *)obuf; - fprintf(ctx->fp, "\n};\n\n"); - fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n" - "recff_nyi,\n" - "recff_c"); - while (*p) { - fprintf(ctx->fp, ",\nrecff_%s", p); - p += strlen(p)+1; - } - fprintf(ctx->fp, "\n};\n\n"); - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_libbc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_libbc.h deleted file mode 100644 index 45f8f8cbdf2..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_libbc.h +++ /dev/null @@ -1,45 +0,0 @@ -/* This is a generated file. DO NOT EDIT! */ - -static const int libbc_endian = 0; - -static const uint8_t libbc_code[] = { -#if LJ_FR2 -0,1,2,0,0,1,2,24,1,0,0,76,1,2,0,241,135,158,166,3,220,203,178,130,4,0,1,2,0, -0,1,2,24,1,0,0,76,1,2,0,243,244,148,165,20,198,190,199,252,3,0,1,2,0,0,0,3, -16,0,5,0,21,1,0,0,76,1,2,0,0,2,10,0,0,0,15,16,0,12,0,16,1,9,0,41,2,1,0,21,3, -0,0,41,4,1,0,77,2,8,128,18,6,1,0,18,8,5,0,59,9,5,0,66,6,3,2,10,6,0,0,88,7,1, -128,76,6,2,0,79,2,248,127,75,0,1,0,0,2,11,0,0,0,16,16,0,12,0,16,1,9,0,43,2, -0,0,18,3,0,0,41,4,0,0,88,5,7,128,18,7,1,0,18,9,5,0,18,10,6,0,66,7,3,2,10,7, -0,0,88,8,1,128,76,7,2,0,70,5,3,3,82,5,247,127,75,0,1,0,0,1,2,0,0,0,3,16,0,12, -0,21,1,0,0,76,1,2,0,0,2,10,0,0,2,30,16,0,12,0,21,2,0,0,11,1,0,0,88,3,7,128, -8,2,0,0,88,3,23,128,59,3,2,0,43,4,0,0,64,4,2,0,76,3,2,0,88,3,18,128,16,1,14, -0,41,3,1,0,3,3,1,0,88,3,14,128,3,1,2,0,88,3,12,128,59,3,1,0,22,4,1,1,18,5,2, -0,41,6,1,0,77,4,4,128,23,8,1,7,59,9,7,0,64,9,8,0,79,4,252,127,43,4,0,0,64,4, -2,0,76,3,2,0,75,0,1,0,0,2,0 -#else -0,1,2,0,0,1,2,24,1,0,0,76,1,2,0,241,135,158,166,3,220,203,178,130,4,0,1,2,0, -0,1,2,24,1,0,0,76,1,2,0,243,244,148,165,20,198,190,199,252,3,0,1,2,0,0,0,3, -16,0,5,0,21,1,0,0,76,1,2,0,0,2,9,0,0,0,15,16,0,12,0,16,1,9,0,41,2,1,0,21,3, -0,0,41,4,1,0,77,2,8,128,18,6,1,0,18,7,5,0,59,8,5,0,66,6,3,2,10,6,0,0,88,7,1, -128,76,6,2,0,79,2,248,127,75,0,1,0,0,2,10,0,0,0,16,16,0,12,0,16,1,9,0,43,2, -0,0,18,3,0,0,41,4,0,0,88,5,7,128,18,7,1,0,18,8,5,0,18,9,6,0,66,7,3,2,10,7,0, -0,88,8,1,128,76,7,2,0,70,5,3,3,82,5,247,127,75,0,1,0,0,1,2,0,0,0,3,16,0,12, -0,21,1,0,0,76,1,2,0,0,2,10,0,0,2,30,16,0,12,0,21,2,0,0,11,1,0,0,88,3,7,128, -8,2,0,0,88,3,23,128,59,3,2,0,43,4,0,0,64,4,2,0,76,3,2,0,88,3,18,128,16,1,14, -0,41,3,1,0,3,3,1,0,88,3,14,128,3,1,2,0,88,3,12,128,59,3,1,0,22,4,1,1,18,5,2, -0,41,6,1,0,77,4,4,128,23,8,1,7,59,9,7,0,64,9,8,0,79,4,252,127,43,4,0,0,64,4, -2,0,76,3,2,0,75,0,1,0,0,2,0 -#endif -}; - -static const struct { const char *name; int ofs; } libbc_map[] = { 
-{"math_deg",0}, -{"math_rad",25}, -{"string_len",50}, -{"table_foreachi",69}, -{"table_foreach",136}, -{"table_getn",207}, -{"table_remove",226}, -{NULL,355} -}; - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_peobj.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_peobj.c deleted file mode 100644 index 4279f50050b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/buildvm_peobj.c +++ /dev/null @@ -1,368 +0,0 @@ -/* -** LuaJIT VM builder: PE object emitter. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Only used for building on Windows, since we cannot assume the presence -** of a suitable assembler. The host and target byte order must match. -*/ - -#include "buildvm.h" -#include "lj_bc.h" - -#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC - -/* Context for PE object emitter. */ -static char *strtab; -static size_t strtabofs; - -/* -- PE object definitions ----------------------------------------------- */ - -/* PE header. */ -typedef struct PEheader { - uint16_t arch; - uint16_t nsects; - uint32_t time; - uint32_t symtabofs; - uint32_t nsyms; - uint16_t opthdrsz; - uint16_t flags; -} PEheader; - -/* PE section. */ -typedef struct PEsection { - char name[8]; - uint32_t vsize; - uint32_t vaddr; - uint32_t size; - uint32_t ofs; - uint32_t relocofs; - uint32_t lineofs; - uint16_t nreloc; - uint16_t nline; - uint32_t flags; -} PEsection; - -/* PE relocation. */ -typedef struct PEreloc { - uint32_t vaddr; - uint32_t symidx; - uint16_t type; -} PEreloc; - -/* Cannot use sizeof, because it pads up to the max. alignment. */ -#define PEOBJ_RELOC_SIZE (4+4+2) - -/* PE symbol table entry. */ -typedef struct PEsym { - union { - char name[8]; - uint32_t nameref[2]; - } n; - uint32_t value; - int16_t sect; - uint16_t type; - uint8_t scl; - uint8_t naux; -} PEsym; - -/* PE symbol table auxiliary entry for a section. */ -typedef struct PEsymaux { - uint32_t size; - uint16_t nreloc; - uint16_t nline; - uint32_t cksum; - uint16_t assoc; - uint8_t comdatsel; - uint8_t unused[3]; -} PEsymaux; - -/* Cannot use sizeof, because it pads up to the max. alignment. */ -#define PEOBJ_SYM_SIZE (8+4+2+2+1+1) - -/* PE object CPU specific defines. */ -#if LJ_TARGET_X86 -#define PEOBJ_ARCH_TARGET 0x014c -#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */ -#define PEOBJ_RELOC_DIR32 0x06 -#define PEOBJ_RELOC_OFS 0 -#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */ -#elif LJ_TARGET_X64 -#define PEOBJ_ARCH_TARGET 0x8664 -#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. */ -#define PEOBJ_RELOC_DIR32 0x02 -#define PEOBJ_RELOC_ADDR32NB 0x03 -#define PEOBJ_RELOC_OFS 0 -#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */ -#elif LJ_TARGET_PPC -#define PEOBJ_ARCH_TARGET 0x01f2 -#define PEOBJ_RELOC_REL32 0x06 -#define PEOBJ_RELOC_DIR32 0x02 -#define PEOBJ_RELOC_OFS (-4) -#define PEOBJ_TEXT_FLAGS 0x60400020 /* 60=r+x, 40=align8, 20=code. */ -#endif - -/* Section numbers (0-based). */ -enum { - PEOBJ_SECT_ABS = -2, - PEOBJ_SECT_UNDEF = -1, - PEOBJ_SECT_TEXT, -#if LJ_TARGET_X64 - PEOBJ_SECT_PDATA, - PEOBJ_SECT_XDATA, -#endif - PEOBJ_SECT_RDATA_Z, - PEOBJ_NSECTIONS -}; - -/* Symbol types. */ -#define PEOBJ_TYPE_NULL 0 -#define PEOBJ_TYPE_FUNC 0x20 - -/* Symbol storage class. */ -#define PEOBJ_SCL_EXTERN 2 -#define PEOBJ_SCL_STATIC 3 - -/* -- PE object emitter --------------------------------------------------- */ - -/* Emit PE object symbol. 
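The hand-counted PEOBJ_RELOC_SIZE (4+4+2) and PEOBJ_SYM_SIZE (8+4+2+2+1+1) above matter because the in-memory structs are padded: with natural alignment sizeof(PEreloc) is typically 12 rather than 10 and sizeof(PEsym) typically 20 rather than 18, so writing sizeof() bytes per record would produce malformed COFF tables. A minimal standalone illustration, assuming a typical ABI (the struct name is hypothetical and not part of the emitter):

#include <stdio.h>

/* Same field layout as PEreloc: two 32-bit fields plus a trailing 16-bit one. */
struct demo_reloc { unsigned int vaddr, symidx; unsigned short type; };

int main(void)
{
  /* Usually prints 12, not 10: the tail is padded up to 4-byte alignment,
  ** which is why owrite() is passed PEOBJ_RELOC_SIZE instead of sizeof(). */
  printf("%u\n", (unsigned int)sizeof(struct demo_reloc));
  return 0;
}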
*/ -static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value, - int sect, int type, int scl) -{ - PEsym sym; - size_t len = strlen(name); - if (!strtab) { /* Pass 1: only calculate string table length. */ - if (len > 8) strtabofs += len+1; - return; - } - if (len <= 8) { - memcpy(sym.n.name, name, len); - memset(sym.n.name+len, 0, 8-len); - } else { - sym.n.nameref[0] = 0; - sym.n.nameref[1] = (uint32_t)strtabofs; - memcpy(strtab + strtabofs, name, len); - strtab[strtabofs+len] = 0; - strtabofs += len+1; - } - sym.value = value; - sym.sect = (int16_t)(sect+1); /* 1-based section number. */ - sym.type = (uint16_t)type; - sym.scl = (uint8_t)scl; - sym.naux = 0; - owrite(ctx, &sym, PEOBJ_SYM_SIZE); -} - -/* Emit PE object section symbol. */ -static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect) -{ - PEsym sym; - PEsymaux aux; - if (!strtab) return; /* Pass 1: no output. */ - memcpy(sym.n.name, pesect[sect].name, 8); - sym.value = 0; - sym.sect = (int16_t)(sect+1); /* 1-based section number. */ - sym.type = PEOBJ_TYPE_NULL; - sym.scl = PEOBJ_SCL_STATIC; - sym.naux = 1; - owrite(ctx, &sym, PEOBJ_SYM_SIZE); - memset(&aux, 0, sizeof(PEsymaux)); - aux.size = pesect[sect].size; - aux.nreloc = pesect[sect].nreloc; - owrite(ctx, &aux, PEOBJ_SYM_SIZE); -} - -/* Emit Windows PE object file. */ -void emit_peobj(BuildCtx *ctx) -{ - PEheader pehdr; - PEsection pesect[PEOBJ_NSECTIONS]; - uint32_t sofs; - int i, nrsym; - union { uint8_t b; uint32_t u; } host_endian; - - sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection); - - /* Fill in PE sections. */ - memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection)); - memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1); - pesect[PEOBJ_SECT_TEXT].ofs = sofs; - sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz); - pesect[PEOBJ_SECT_TEXT].relocofs = sofs; - sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE; - /* Flags: 60 = read+execute, 50 = align16, 20 = code. */ - pesect[PEOBJ_SECT_TEXT].flags = PEOBJ_TEXT_FLAGS; - -#if LJ_TARGET_X64 - memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1); - pesect[PEOBJ_SECT_PDATA].ofs = sofs; - sofs += (pesect[PEOBJ_SECT_PDATA].size = 6*4); - pesect[PEOBJ_SECT_PDATA].relocofs = sofs; - sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 6) * PEOBJ_RELOC_SIZE; - /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ - pesect[PEOBJ_SECT_PDATA].flags = 0x40300040; - - memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1); - pesect[PEOBJ_SECT_XDATA].ofs = sofs; - sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4+6*2); /* See below. */ - pesect[PEOBJ_SECT_XDATA].relocofs = sofs; - sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE; - /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ - pesect[PEOBJ_SECT_XDATA].flags = 0x40300040; -#endif - - memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1); - pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs; - sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1); - /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ - pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040; - - /* Fill in PE header. */ - pehdr.arch = PEOBJ_ARCH_TARGET; - pehdr.nsects = PEOBJ_NSECTIONS; - pehdr.time = 0; /* Timestamp is optional. 
*/ - pehdr.symtabofs = sofs; - pehdr.opthdrsz = 0; - pehdr.flags = 0; - - /* Compute the size of the symbol table: - ** @feat.00 + nsections*2 - ** + asm_start + nsym - ** + nrsym - */ - nrsym = ctx->nrelocsym; - pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym; -#if LJ_TARGET_X64 - pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */ -#endif - - /* Write PE object header and all sections. */ - owrite(ctx, &pehdr, sizeof(PEheader)); - owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS); - - /* Write .text section. */ - host_endian.u = 1; - if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) { -#if LJ_TARGET_PPC - uint32_t *p = (uint32_t *)ctx->code; - int n = (int)(ctx->codesz >> 2); - for (i = 0; i < n; i++, p++) - *p = lj_bswap(*p); /* Byteswap .text section. */ -#else - fprintf(stderr, "Error: different byte order for host and target\n"); - exit(1); -#endif - } - owrite(ctx, ctx->code, ctx->codesz); - for (i = 0; i < ctx->nreloc; i++) { - PEreloc reloc; - reloc.vaddr = (uint32_t)ctx->reloc[i].ofs + PEOBJ_RELOC_OFS; - reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */ - reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - } - -#if LJ_TARGET_X64 - { /* Write .pdata section. */ - uint32_t fcofs = (uint32_t)ctx->sym[ctx->nsym-1].ofs; - uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */ - PEreloc reloc; - pdata[0] = 0; pdata[1] = fcofs; pdata[2] = 0; - owrite(ctx, &pdata, sizeof(pdata)); - pdata[0] = fcofs; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 20; - owrite(ctx, &pdata, sizeof(pdata)); - reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - reloc.vaddr = 12; reloc.symidx = 1+2+nrsym+2+2+1; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - reloc.vaddr = 16; reloc.symidx = 1+2+nrsym+2+2+1; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - reloc.vaddr = 20; reloc.symidx = 1+2+nrsym+2; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - } - { /* Write .xdata section. */ - uint16_t xdata[8+2+6]; - PEreloc reloc; - xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhandler/ehandler, prolog size 0. */ - xdata[1] = 0x0005; /* Number of unwind codes, no frame pointer. */ - xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */ - xdata[3] = 0x3000; /* Push rbx. */ - xdata[4] = 0x6000; /* Push rsi. */ - xdata[5] = 0x7000; /* Push rdi. */ - xdata[6] = 0x5000; /* Push rbp. */ - xdata[7] = 0; /* Alignment. */ - xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */ - xdata[10] = 0x01; /* Ver. 1, no handler, prolog size 0. */ - xdata[11] = 0x1504; /* Number of unwind codes, fp = rbp, fpofs = 16. */ - xdata[12] = 0x0300; /* set_fpreg. */ - xdata[13] = 0x0200; /* stack offset 0*8+8 = aword*1. */ - xdata[14] = 0x3000; /* Push rbx. */ - xdata[15] = 0x5000; /* Push rbp. */ - owrite(ctx, &xdata, sizeof(xdata)); - reloc.vaddr = 2*8; reloc.symidx = 1+2+nrsym+2+2; - reloc.type = PEOBJ_RELOC_ADDR32NB; - owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); - } -#endif - - /* Write .rdata$Z section. */ - owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1); - - /* Write symbol table. 
*/ - strtab = NULL; /* 1st pass: collect string sizes. */ - for (;;) { - strtabofs = 4; - /* Mark as SafeSEH compliant. */ - emit_peobj_sym(ctx, "@feat.00", 1, - PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC); - - emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT); - for (i = 0; i < nrsym; i++) - emit_peobj_sym(ctx, ctx->relocsym[i], 0, - PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); - -#if LJ_TARGET_X64 - emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA); - emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA); - emit_peobj_sym(ctx, "lj_err_unwind_win64", 0, - PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); -#endif - - emit_peobj_sym(ctx, ctx->beginsym, 0, - PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN); - for (i = 0; i < ctx->nsym; i++) - emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs, - PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); - - emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z); - - if (strtab) - break; - /* 2nd pass: alloc strtab, write syms and copy strings. */ - strtab = (char *)malloc(strtabofs); - *(uint32_t *)strtab = (uint32_t)strtabofs; - } - - /* Write string table. */ - owrite(ctx, strtab, strtabofs); -} - -#else - -void emit_peobj(BuildCtx *ctx) -{ - UNUSED(ctx); - fprintf(stderr, "Error: no PE object support for this target\n"); - exit(1); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genlibbc.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genlibbc.lua deleted file mode 100644 index 4398d8e74d9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genlibbc.lua +++ /dev/null @@ -1,197 +0,0 @@ ----------------------------------------------------------------------------- --- Lua script to dump the bytecode of the library functions written in Lua. --- The resulting 'buildvm_libbc.h' is used for the build process of LuaJIT. ----------------------------------------------------------------------------- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- - -local ffi = require("ffi") -local bit = require("bit") -local vmdef = require("jit.vmdef") -local bcnames = vmdef.bcnames - -local format = string.format - -local isbe = (string.byte(string.dump(function() end), 5) % 2 == 1) - -local function usage(arg) - io.stderr:write("Usage: ", arg and arg[0] or "genlibbc", - " [-o buildvm_libbc.h] lib_*.c\n") - os.exit(1) -end - -local function parse_arg(arg) - local outfile = "-" - if not (arg and arg[1]) then - usage(arg) - end - if arg[1] == "-o" then - outfile = arg[2] - if not outfile then usage(arg) end - table.remove(arg, 1) - table.remove(arg, 1) - end - return outfile -end - -local function read_files(names) - local src = "" - for _,name in ipairs(names) do - local fp = assert(io.open(name)) - src = src .. 
fp:read("*a") - fp:close() - end - return src -end - -local function transform_lua(code) - local fixup = {} - local n = -30000 - code = string.gsub(code, "CHECK_(%w*)%((.-)%)", function(tp, var) - n = n + 1 - fixup[n] = { "CHECK", tp } - return format("%s=%d", var, n) - end) - code = string.gsub(code, "PAIRS%((.-)%)", function(var) - fixup.PAIRS = true - return format("nil, %s, 0", var) - end) - return "return "..code, fixup -end - -local function read_uleb128(p) - local v = p[0]; p = p + 1 - if v >= 128 then - local sh = 7; v = v - 128 - repeat - local r = p[0] - v = v + bit.lshift(bit.band(r, 127), sh) - sh = sh + 7 - p = p + 1 - until r < 128 - end - return p, v -end - --- ORDER LJ_T -local name2itype = { - str = 5, func = 9, tab = 12, int = 14, num = 15 -} - -local BC = {} -for i=0,#bcnames/6-1 do - BC[string.gsub(string.sub(bcnames, i*6+1, i*6+6), " ", "")] = i -end -local xop, xra = isbe and 3 or 0, isbe and 2 or 1 -local xrc, xrb = isbe and 1 or 2, isbe and 0 or 3 - -local function fixup_dump(dump, fixup) - local buf = ffi.new("uint8_t[?]", #dump+1, dump) - local p = buf+5 - local n, sizebc - p, n = read_uleb128(p) - local start = p - p = p + 4 - p = read_uleb128(p) - p = read_uleb128(p) - p, sizebc = read_uleb128(p) - local rawtab = {} - for i=0,sizebc-1 do - local op = p[xop] - if op == BC.KSHORT then - local rd = p[xrc] + 256*p[xrb] - rd = bit.arshift(bit.lshift(rd, 16), 16) - local f = fixup[rd] - if f then - if f[1] == "CHECK" then - local tp = f[2] - if tp == "tab" then rawtab[p[xra]] = true end - p[xop] = tp == "num" and BC.ISNUM or BC.ISTYPE - p[xrb] = 0 - p[xrc] = name2itype[tp] - else - error("unhandled fixup type: "..f[1]) - end - end - elseif op == BC.TGETV then - if rawtab[p[xrb]] then - p[xop] = BC.TGETR - end - elseif op == BC.TSETV then - if rawtab[p[xrb]] then - p[xop] = BC.TSETR - end - elseif op == BC.ITERC then - if fixup.PAIRS then - p[xop] = BC.ITERN - end - end - p = p + 4 - end - return ffi.string(start, n) -end - -local function find_defs(src) - local defs = {} - for name, code in string.gmatch(src, "LJLIB_LUA%(([^)]*)%)%s*/%*(.-)%*/") do - local env = {} - local tcode, fixup = transform_lua(code) - local func = assert(load(tcode, "", nil, env))() - defs[name] = fixup_dump(string.dump(func, true), fixup) - defs[#defs+1] = name - end - return defs -end - -local function gen_header(defs) - local t = {} - local function w(x) t[#t+1] = x end - w("/* This is a generated file. DO NOT EDIT! */\n\n") - w("static const int libbc_endian = ") w(isbe and 1 or 0) w(";\n\n") - local s = "" - for _,name in ipairs(defs) do - s = s .. 
defs[name] - end - w("static const uint8_t libbc_code[] = {\n") - local n = 0 - for i=1,#s do - local x = string.byte(s, i) - w(x); w(",") - n = n + (x < 10 and 2 or (x < 100 and 3 or 4)) - if n >= 75 then n = 0; w("\n") end - end - w("0\n};\n\n") - w("static const struct { const char *name; int ofs; } libbc_map[] = {\n") - local m = 0 - for _,name in ipairs(defs) do - w('{"'); w(name); w('",'); w(m) w('},\n') - m = m + #defs[name] - end - w("{NULL,"); w(m); w("}\n};\n\n") - return table.concat(t) -end - -local function write_file(name, data) - if name == "-" then - assert(io.write(data)) - assert(io.flush()) - else - local fp = io.open(name) - if fp then - local old = fp:read("*a") - fp:close() - if data == old then return end - end - fp = assert(io.open(name, "w")) - assert(fp:write(data)) - assert(fp:close()) - end -end - -local outfile = parse_arg(arg) -local src = read_files(arg) -local defs = find_defs(src) -local hdr = gen_header(defs) -write_file(outfile, hdr) - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genminilua.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genminilua.lua deleted file mode 100644 index cd0d946628c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/host/genminilua.lua +++ /dev/null @@ -1,428 +0,0 @@ ----------------------------------------------------------------------------- --- Lua script to generate a customized, minified version of Lua. --- The resulting 'minilua' is used for the build process of LuaJIT. ----------------------------------------------------------------------------- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- - -local sub, match, gsub = string.sub, string.match, string.gsub - -local LUA_VERSION = "5.1.5" -local LUA_SOURCE - -local function usage() - io.stderr:write("Usage: ", arg and arg[0] or "genminilua", - " lua-", LUA_VERSION, "-source-dir\n") - os.exit(1) -end - -local function find_sources() - LUA_SOURCE = arg and arg[1] - if not LUA_SOURCE then usage() end - if sub(LUA_SOURCE, -1) ~= "/" then LUA_SOURCE = LUA_SOURCE.."/" end - local fp = io.open(LUA_SOURCE .. "lua.h") - if not fp then - LUA_SOURCE = LUA_SOURCE.."src/" - fp = io.open(LUA_SOURCE .. 
"lua.h") - if not fp then usage() end - end - local all = fp:read("*a") - fp:close() - if not match(all, 'LUA_RELEASE%s*"Lua '..LUA_VERSION..'"') then - io.stderr:write("Error: version mismatch\n") - usage() - end -end - -local LUA_FILES = { -"lmem.c", "lobject.c", "ltm.c", "lfunc.c", "ldo.c", "lstring.c", "ltable.c", -"lgc.c", "lstate.c", "ldebug.c", "lzio.c", "lopcodes.c", -"llex.c", "lcode.c", "lparser.c", "lvm.c", "lapi.c", "lauxlib.c", -"lbaselib.c", "ltablib.c", "liolib.c", "loslib.c", "lstrlib.c", "linit.c", -} - -local REMOVE_LIB = {} -gsub([[ -collectgarbage dofile gcinfo getfenv getmetatable load print rawequal rawset -select tostring xpcall -foreach foreachi getn maxn setn -popen tmpfile seek setvbuf __tostring -clock date difftime execute getenv rename setlocale time tmpname -dump gfind len reverse -LUA_LOADLIBNAME LUA_MATHLIBNAME LUA_DBLIBNAME -]], "%S+", function(name) - REMOVE_LIB[name] = true -end) - -local REMOVE_EXTINC = { ["<assert.h>"] = true, ["<locale.h>"] = true, } - -local CUSTOM_MAIN = [[ -typedef unsigned int UB; -static UB barg(lua_State *L,int idx){ -union{lua_Number n;U64 b;}bn; -bn.n=lua_tonumber(L,idx)+6755399441055744.0; -if (bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number"); -return(UB)bn.b; -} -#define BRET(b) lua_pushnumber(L,(lua_Number)(int)(b));return 1; -static int tobit(lua_State *L){ -BRET(barg(L,1))} -static int bnot(lua_State *L){ -BRET(~barg(L,1))} -static int band(lua_State *L){ -int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)} -static int bor(lua_State *L){ -int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)} -static int bxor(lua_State *L){ -int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)} -static int lshift(lua_State *L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)} -static int rshift(lua_State *L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)} -static int arshift(lua_State *L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)} -static int rol(lua_State *L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))} -static int ror(lua_State *L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))} -static int bswap(lua_State *L){ -UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)} -static int tohex(lua_State *L){ -UB b=barg(L,1); -int n=lua_isnone(L,2)?8:(int)barg(L,2); -const char *hexdigits="0123456789abcdef"; -char buf[8]; -int i; -if(n<0){n=-n;hexdigits="0123456789ABCDEF";} -if(n>8)n=8; -for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;} -lua_pushlstring(L,buf,(size_t)n); -return 1; -} -static const struct luaL_Reg bitlib[] = { -{"tobit",tobit}, -{"bnot",bnot}, -{"band",band}, -{"bor",bor}, -{"bxor",bxor}, -{"lshift",lshift}, -{"rshift",rshift}, -{"arshift",arshift}, -{"rol",rol}, -{"ror",ror}, -{"bswap",bswap}, -{"tohex",tohex}, -{NULL,NULL} -}; -int main(int argc, char **argv){ - lua_State *L = luaL_newstate(); - int i; - luaL_openlibs(L); - luaL_register(L, "bit", bitlib); - if (argc < 2) return sizeof(void *); - lua_createtable(L, 0, 1); - lua_pushstring(L, argv[1]); - lua_rawseti(L, -2, 0); - lua_setglobal(L, "arg"); - if (luaL_loadfile(L, argv[1])) - goto err; - for (i = 2; i < argc; i++) - lua_pushstring(L, argv[i]); - if (lua_pcall(L, argc - 2, 0, 0)) { - err: - fprintf(stderr, "Error: %s\n", lua_tostring(L, -1)); - return 1; - } - lua_close(L); - return 0; -} -]] - -local function read_sources() - local t = {} - for i, name in ipairs(LUA_FILES) do - local fp = assert(io.open(LUA_SOURCE..name, "r")) - t[i] = fp:read("*a") - assert(fp:close()) - end - t[#t+1] = CUSTOM_MAIN - return table.concat(t) -end - -local includes = {} - -local 
function merge_includes(src) - return gsub(src, '#include%s*"([^"]*)"%s*\n', function(name) - if includes[name] then return "" end - includes[name] = true - local fp = assert(io.open(LUA_SOURCE..name, "r")) - local src = fp:read("*a") - assert(fp:close()) - src = gsub(src, "#ifndef%s+%w+_h\n#define%s+%w+_h\n", "") - src = gsub(src, "#endif%s*$", "") - return merge_includes(src) - end) -end - -local function get_license(src) - return match(src, "/%*+\n%* Copyright %(.-%*/\n") -end - -local function fold_lines(src) - return gsub(src, "\\\n", " ") -end - -local strings = {} - -local function save_str(str) - local n = #strings+1 - strings[n] = str - return "\1"..n.."\2" -end - -local function save_strings(src) - src = gsub(src, '"[^"\n]*"', save_str) - return gsub(src, "'[^'\n]*'", save_str) -end - -local function restore_strings(src) - return gsub(src, "\1(%d+)\2", function(numstr) - return strings[tonumber(numstr)] - end) -end - -local function def_istrue(def) - return def == "INT_MAX > 2147483640L" or - def == "LUAI_BITSINT >= 32" or - def == "SIZE_Bx < LUAI_BITSINT-1" or - def == "cast" or - def == "defined(LUA_CORE)" or - def == "MINSTRTABSIZE" or - def == "LUA_MINBUFFER" or - def == "HARDSTACKTESTS" or - def == "UNUSED" -end - -local head, defs = {[[ -#ifdef _MSC_VER -typedef unsigned __int64 U64; -#else -typedef unsigned long long U64; -#endif -int _CRT_glob = 0; -]]}, {} - -local function preprocess(src) - local t = { match(src, "^(.-)#") } - local lvl, on, oldon = 0, true, {} - for pp, def, txt in string.gmatch(src, "#(%w+) *([^\n]*)\n([^#]*)") do - if pp == "if" or pp == "ifdef" or pp == "ifndef" then - lvl = lvl + 1 - oldon[lvl] = on - on = def_istrue(def) - elseif pp == "else" then - if oldon[lvl] then - if on == false then on = true else on = false end - end - elseif pp == "elif" then - if oldon[lvl] then - on = def_istrue(def) - end - elseif pp == "endif" then - on = oldon[lvl] - lvl = lvl - 1 - elseif on then - if pp == "include" then - if not head[def] and not REMOVE_EXTINC[def] then - head[def] = true - head[#head+1] = "#include "..def.."\n" - end - elseif pp == "define" then - local k, sp, v = match(def, "([%w_]+)(%s*)(.*)") - if k and not (sp == "" and sub(v, 1, 1) == "(") then - defs[k] = gsub(v, "%a[%w_]*", function(tok) - return defs[tok] or tok - end) - else - t[#t+1] = "#define "..def.."\n" - end - elseif pp ~= "undef" then - error("unexpected directive: "..pp.." "..def) - end - end - if on then t[#t+1] = txt end - end - return gsub(table.concat(t), "%a[%w_]*", function(tok) - return defs[tok] or tok - end) -end - -local function merge_header(src, license) - local hdr = string.format([[ -/* This is a heavily customized and minimized copy of Lua %s. */ -/* It's only used to build LuaJIT. It does NOT have all standard functions! 
*/ -]], LUA_VERSION) - return hdr..license..table.concat(head)..src -end - -local function strip_unused1(src) - return gsub(src, '( {"?([%w_]+)"?,%s+%a[%w_]*},\n)', function(line, func) - return REMOVE_LIB[func] and "" or line - end) -end - -local function strip_unused2(src) - return gsub(src, "Symbolic Execution.-}=", "") -end - -local function strip_unused3(src) - src = gsub(src, "extern", "static") - src = gsub(src, "\nstatic([^\n]-)%(([^)]*)%)%(", "\nstatic%1 %2(") - src = gsub(src, "#define lua_assert[^\n]*\n", "") - src = gsub(src, "lua_assert%b();?", "") - src = gsub(src, "default:\n}", "default:;\n}") - src = gsub(src, "lua_lock%b();", "") - src = gsub(src, "lua_unlock%b();", "") - src = gsub(src, "luai_threadyield%b();", "") - src = gsub(src, "luai_userstateopen%b();", "{}") - src = gsub(src, "luai_userstate%w+%b();", "") - src = gsub(src, "%(%(c==.*luaY_parser%)", "luaY_parser") - src = gsub(src, "trydecpoint%(ls,seminfo%)", - "luaX_lexerror(ls,\"malformed number\",TK_NUMBER)") - src = gsub(src, "int c=luaZ_lookahead%b();", "") - src = gsub(src, "luaL_register%(L,[^,]*,co_funcs%);\nreturn 2;", - "return 1;") - src = gsub(src, "getfuncname%b():", "NULL:") - src = gsub(src, "getobjname%b():", "NULL:") - src = gsub(src, "if%([^\n]*hookmask[^\n]*%)\n[^\n]*\n", "") - src = gsub(src, "if%([^\n]*hookmask[^\n]*%)%b{}\n", "") - src = gsub(src, "if%([^\n]*hookmask[^\n]*&&\n[^\n]*%b{}\n", "") - src = gsub(src, "(twoto%b()%()", "%1(size_t)") - src = gsub(src, "i -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -typedef enum{ -TM_INDEX, -TM_NEWINDEX, -TM_GC, -TM_MODE, -TM_EQ, -TM_ADD, -TM_SUB, -TM_MUL, -TM_DIV, -TM_MOD, -TM_POW, -TM_UNM, -TM_LEN, -TM_LT, -TM_LE, -TM_CONCAT, -TM_CALL, -TM_N -}TMS; -enum OpMode{iABC,iABx,iAsBx}; -typedef enum{ -OP_MOVE, -OP_LOADK, -OP_LOADBOOL, -OP_LOADNIL, -OP_GETUPVAL, -OP_GETGLOBAL, -OP_GETTABLE, -OP_SETGLOBAL, -OP_SETUPVAL, -OP_SETTABLE, -OP_NEWTABLE, -OP_SELF, -OP_ADD, -OP_SUB, -OP_MUL, -OP_DIV, -OP_MOD, -OP_POW, -OP_UNM, -OP_NOT, -OP_LEN, -OP_CONCAT, -OP_JMP, -OP_EQ, -OP_LT, -OP_LE, -OP_TEST, -OP_TESTSET, -OP_CALL, -OP_TAILCALL, -OP_RETURN, -OP_FORLOOP, -OP_FORPREP, -OP_TFORLOOP, -OP_SETLIST, -OP_CLOSE, -OP_CLOSURE, -OP_VARARG -}OpCode; -enum OpArgMask{ -OpArgN, -OpArgU, -OpArgR, -OpArgK -}; -typedef enum{ -VVOID, -VNIL, -VTRUE, -VFALSE, -VK, -VKNUM, -VLOCAL, -VUPVAL, -VGLOBAL, -VINDEXED, -VJMP, -VRELOCABLE, -VNONRELOC, -VCALL, -VVARARG -}expkind; -enum RESERVED{ -TK_AND=257,TK_BREAK, -TK_DO,TK_ELSE,TK_ELSEIF,TK_END,TK_FALSE,TK_FOR,TK_FUNCTION, -TK_IF,TK_IN,TK_LOCAL,TK_NIL,TK_NOT,TK_OR,TK_REPEAT, -TK_RETURN,TK_THEN,TK_TRUE,TK_UNTIL,TK_WHILE, -TK_CONCAT,TK_DOTS,TK_EQ,TK_GE,TK_LE,TK_NE,TK_NUMBER, -TK_NAME,TK_STRING,TK_EOS -}; -typedef enum BinOpr{ -OPR_ADD,OPR_SUB,OPR_MUL,OPR_DIV,OPR_MOD,OPR_POW, -OPR_CONCAT, -OPR_NE,OPR_EQ, -OPR_LT,OPR_LE,OPR_GT,OPR_GE, -OPR_AND,OPR_OR, -OPR_NOBINOPR -}BinOpr; -typedef enum UnOpr{OPR_MINUS,OPR_NOT,OPR_LEN,OPR_NOUNOPR}UnOpr; -#define LUA_QL(x)"'"x"'" -#define luai_apicheck(L,o){(void)L;} -#define lua_number2str(s,n)sprintf((s),"%.14g",(n)) -#define lua_str2number(s,p)strtod((s),(p)) -#define luai_numadd(a,b)((a)+(b)) -#define luai_numsub(a,b)((a)-(b)) -#define luai_nummul(a,b)((a)*(b)) -#define luai_numdiv(a,b)((a)/(b)) -#define luai_nummod(a,b)((a)-floor((a)/(b))*(b)) -#define luai_numpow(a,b)(pow(a,b)) -#define luai_numunm(a)(-(a)) -#define luai_numeq(a,b)((a)==(b)) -#define luai_numlt(a,b)((a)<(b)) -#define luai_numle(a,b)((a)<=(b)) -#define 
luai_numisnan(a)(!luai_numeq((a),(a))) -#define lua_number2int(i,d)((i)=(int)(d)) -#define lua_number2integer(i,d)((i)=(lua_Integer)(d)) -#define LUAI_THROW(L,c)longjmp((c)->b,1) -#define LUAI_TRY(L,c,a)if(setjmp((c)->b)==0){a} -#define lua_pclose(L,file)((void)((void)L,file),0) -#define lua_upvalueindex(i)((-10002)-(i)) -typedef struct lua_State lua_State; -typedef int(*lua_CFunction)(lua_State*L); -typedef const char*(*lua_Reader)(lua_State*L,void*ud,size_t*sz); -typedef void*(*lua_Alloc)(void*ud,void*ptr,size_t osize,size_t nsize); -typedef double lua_Number; -typedef ptrdiff_t lua_Integer; -static void lua_settop(lua_State*L,int idx); -static int lua_type(lua_State*L,int idx); -static const char* lua_tolstring(lua_State*L,int idx,size_t*len); -static size_t lua_objlen(lua_State*L,int idx); -static void lua_pushlstring(lua_State*L,const char*s,size_t l); -static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n); -static void lua_createtable(lua_State*L,int narr,int nrec); -static void lua_setfield(lua_State*L,int idx,const char*k); -#define lua_pop(L,n)lua_settop(L,-(n)-1) -#define lua_newtable(L)lua_createtable(L,0,0) -#define lua_pushcfunction(L,f)lua_pushcclosure(L,(f),0) -#define lua_strlen(L,i)lua_objlen(L,(i)) -#define lua_isfunction(L,n)(lua_type(L,(n))==6) -#define lua_istable(L,n)(lua_type(L,(n))==5) -#define lua_isnil(L,n)(lua_type(L,(n))==0) -#define lua_isboolean(L,n)(lua_type(L,(n))==1) -#define lua_isnone(L,n)(lua_type(L,(n))==(-1)) -#define lua_isnoneornil(L,n)(lua_type(L,(n))<=0) -#define lua_pushliteral(L,s)lua_pushlstring(L,""s,(sizeof(s)/sizeof(char))-1) -#define lua_setglobal(L,s)lua_setfield(L,(-10002),(s)) -#define lua_tostring(L,i)lua_tolstring(L,(i),NULL) -typedef struct lua_Debug lua_Debug; -typedef void(*lua_Hook)(lua_State*L,lua_Debug*ar); -struct lua_Debug{ -int event; -const char*name; -const char*namewhat; -const char*what; -const char*source; -int currentline; -int nups; -int linedefined; -int lastlinedefined; -char short_src[60]; -int i_ci; -}; -typedef unsigned int lu_int32; -typedef size_t lu_mem; -typedef ptrdiff_t l_mem; -typedef unsigned char lu_byte; -#define IntPoint(p)((unsigned int)(lu_mem)(p)) -typedef union{double u;void*s;long l;}L_Umaxalign; -typedef double l_uacNumber; -#define check_exp(c,e)(e) -#define UNUSED(x)((void)(x)) -#define cast(t,exp)((t)(exp)) -#define cast_byte(i)cast(lu_byte,(i)) -#define cast_num(i)cast(lua_Number,(i)) -#define cast_int(i)cast(int,(i)) -typedef lu_int32 Instruction; -#define condhardstacktests(x)((void)0) -typedef union GCObject GCObject; -typedef struct GCheader{ -GCObject*next;lu_byte tt;lu_byte marked; -}GCheader; -typedef union{ -GCObject*gc; -void*p; -lua_Number n; -int b; -}Value; -typedef struct lua_TValue{ -Value value;int tt; -}TValue; -#define ttisnil(o)(ttype(o)==0) -#define ttisnumber(o)(ttype(o)==3) -#define ttisstring(o)(ttype(o)==4) -#define ttistable(o)(ttype(o)==5) -#define ttisfunction(o)(ttype(o)==6) -#define ttisboolean(o)(ttype(o)==1) -#define ttisuserdata(o)(ttype(o)==7) -#define ttisthread(o)(ttype(o)==8) -#define ttislightuserdata(o)(ttype(o)==2) -#define ttype(o)((o)->tt) -#define gcvalue(o)check_exp(iscollectable(o),(o)->value.gc) -#define pvalue(o)check_exp(ttislightuserdata(o),(o)->value.p) -#define nvalue(o)check_exp(ttisnumber(o),(o)->value.n) -#define rawtsvalue(o)check_exp(ttisstring(o),&(o)->value.gc->ts) -#define tsvalue(o)(&rawtsvalue(o)->tsv) -#define rawuvalue(o)check_exp(ttisuserdata(o),&(o)->value.gc->u) -#define uvalue(o)(&rawuvalue(o)->uv) -#define 
clvalue(o)check_exp(ttisfunction(o),&(o)->value.gc->cl) -#define hvalue(o)check_exp(ttistable(o),&(o)->value.gc->h) -#define bvalue(o)check_exp(ttisboolean(o),(o)->value.b) -#define thvalue(o)check_exp(ttisthread(o),&(o)->value.gc->th) -#define l_isfalse(o)(ttisnil(o)||(ttisboolean(o)&&bvalue(o)==0)) -#define checkconsistency(obj) -#define checkliveness(g,obj) -#define setnilvalue(obj)((obj)->tt=0) -#define setnvalue(obj,x){TValue*i_o=(obj);i_o->value.n=(x);i_o->tt=3;} -#define setbvalue(obj,x){TValue*i_o=(obj);i_o->value.b=(x);i_o->tt=1;} -#define setsvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=4;checkliveness(G(L),i_o);} -#define setuvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=7;checkliveness(G(L),i_o);} -#define setthvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=8;checkliveness(G(L),i_o);} -#define setclvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=6;checkliveness(G(L),i_o);} -#define sethvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=5;checkliveness(G(L),i_o);} -#define setptvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=(8+1);checkliveness(G(L),i_o);} -#define setobj(L,obj1,obj2){const TValue*o2=(obj2);TValue*o1=(obj1);o1->value=o2->value;o1->tt=o2->tt;checkliveness(G(L),o1);} -#define setttype(obj,tt)(ttype(obj)=(tt)) -#define iscollectable(o)(ttype(o)>=4) -typedef TValue*StkId; -typedef union TString{ -L_Umaxalign dummy; -struct{ -GCObject*next;lu_byte tt;lu_byte marked; -lu_byte reserved; -unsigned int hash; -size_t len; -}tsv; -}TString; -#define getstr(ts)cast(const char*,(ts)+1) -#define svalue(o)getstr(rawtsvalue(o)) -typedef union Udata{ -L_Umaxalign dummy; -struct{ -GCObject*next;lu_byte tt;lu_byte marked; -struct Table*metatable; -struct Table*env; -size_t len; -}uv; -}Udata; -typedef struct Proto{ -GCObject*next;lu_byte tt;lu_byte marked; -TValue*k; -Instruction*code; -struct Proto**p; -int*lineinfo; -struct LocVar*locvars; -TString**upvalues; -TString*source; -int sizeupvalues; -int sizek; -int sizecode; -int sizelineinfo; -int sizep; -int sizelocvars; -int linedefined; -int lastlinedefined; -GCObject*gclist; -lu_byte nups; -lu_byte numparams; -lu_byte is_vararg; -lu_byte maxstacksize; -}Proto; -typedef struct LocVar{ -TString*varname; -int startpc; -int endpc; -}LocVar; -typedef struct UpVal{ -GCObject*next;lu_byte tt;lu_byte marked; -TValue*v; -union{ -TValue value; -struct{ -struct UpVal*prev; -struct UpVal*next; -}l; -}u; -}UpVal; -typedef struct CClosure{ -GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env; -lua_CFunction f; -TValue upvalue[1]; -}CClosure; -typedef struct LClosure{ -GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env; -struct Proto*p; -UpVal*upvals[1]; -}LClosure; -typedef union Closure{ -CClosure c; -LClosure l; -}Closure; -#define iscfunction(o)(ttype(o)==6&&clvalue(o)->c.isC) -typedef union TKey{ -struct{ -Value value;int tt; -struct Node*next; -}nk; -TValue tvk; -}TKey; -typedef struct Node{ -TValue i_val; -TKey i_key; -}Node; -typedef struct Table{ -GCObject*next;lu_byte tt;lu_byte marked; -lu_byte flags; -lu_byte lsizenode; -struct Table*metatable; -TValue*array; -Node*node; -Node*lastfree; -GCObject*gclist; -int sizearray; -}Table; -#define lmod(s,size)(check_exp((size&(size-1))==0,(cast(int,(s)&((size)-1))))) -#define twoto(x)((size_t)1<<(x)) -#define 
sizenode(t)(twoto((t)->lsizenode)) -static const TValue luaO_nilobject_; -#define ceillog2(x)(luaO_log2((x)-1)+1) -static int luaO_log2(unsigned int x); -#define gfasttm(g,et,e)((et)==NULL?NULL:((et)->flags&(1u<<(e)))?NULL:luaT_gettm(et,e,(g)->tmname[e])) -#define fasttm(l,et,e)gfasttm(G(l),et,e) -static const TValue*luaT_gettm(Table*events,TMS event,TString*ename); -#define luaM_reallocv(L,b,on,n,e)((cast(size_t,(n)+1)<=((size_t)(~(size_t)0)-2)/(e))?luaM_realloc_(L,(b),(on)*(e),(n)*(e)):luaM_toobig(L)) -#define luaM_freemem(L,b,s)luaM_realloc_(L,(b),(s),0) -#define luaM_free(L,b)luaM_realloc_(L,(b),sizeof(*(b)),0) -#define luaM_freearray(L,b,n,t)luaM_reallocv(L,(b),n,0,sizeof(t)) -#define luaM_malloc(L,t)luaM_realloc_(L,NULL,0,(t)) -#define luaM_new(L,t)cast(t*,luaM_malloc(L,sizeof(t))) -#define luaM_newvector(L,n,t)cast(t*,luaM_reallocv(L,NULL,0,n,sizeof(t))) -#define luaM_growvector(L,v,nelems,size,t,limit,e)if((nelems)+1>(size))((v)=cast(t*,luaM_growaux_(L,v,&(size),sizeof(t),limit,e))) -#define luaM_reallocvector(L,v,oldn,n,t)((v)=cast(t*,luaM_reallocv(L,v,oldn,n,sizeof(t)))) -static void*luaM_realloc_(lua_State*L,void*block,size_t oldsize, -size_t size); -static void*luaM_toobig(lua_State*L); -static void*luaM_growaux_(lua_State*L,void*block,int*size, -size_t size_elem,int limit, -const char*errormsg); -typedef struct Zio ZIO; -#define char2int(c)cast(int,cast(unsigned char,(c))) -#define zgetc(z)(((z)->n--)>0?char2int(*(z)->p++):luaZ_fill(z)) -typedef struct Mbuffer{ -char*buffer; -size_t n; -size_t buffsize; -}Mbuffer; -#define luaZ_initbuffer(L,buff)((buff)->buffer=NULL,(buff)->buffsize=0) -#define luaZ_buffer(buff)((buff)->buffer) -#define luaZ_sizebuffer(buff)((buff)->buffsize) -#define luaZ_bufflen(buff)((buff)->n) -#define luaZ_resetbuffer(buff)((buff)->n=0) -#define luaZ_resizebuffer(L,buff,size)(luaM_reallocvector(L,(buff)->buffer,(buff)->buffsize,size,char),(buff)->buffsize=size) -#define luaZ_freebuffer(L,buff)luaZ_resizebuffer(L,buff,0) -struct Zio{ -size_t n; -const char*p; -lua_Reader reader; -void*data; -lua_State*L; -}; -static int luaZ_fill(ZIO*z); -struct lua_longjmp; -#define gt(L)(&L->l_gt) -#define registry(L)(&G(L)->l_registry) -typedef struct stringtable{ -GCObject**hash; -lu_int32 nuse; -int size; -}stringtable; -typedef struct CallInfo{ -StkId base; -StkId func; -StkId top; -const Instruction*savedpc; -int nresults; -int tailcalls; -}CallInfo; -#define curr_func(L)(clvalue(L->ci->func)) -#define ci_func(ci)(clvalue((ci)->func)) -#define f_isLua(ci)(!ci_func(ci)->c.isC) -#define isLua(ci)(ttisfunction((ci)->func)&&f_isLua(ci)) -typedef struct global_State{ -stringtable strt; -lua_Alloc frealloc; -void*ud; -lu_byte currentwhite; -lu_byte gcstate; -int sweepstrgc; -GCObject*rootgc; -GCObject**sweepgc; -GCObject*gray; -GCObject*grayagain; -GCObject*weak; -GCObject*tmudata; -Mbuffer buff; -lu_mem GCthreshold; -lu_mem totalbytes; -lu_mem estimate; -lu_mem gcdept; -int gcpause; -int gcstepmul; -lua_CFunction panic; -TValue l_registry; -struct lua_State*mainthread; -UpVal uvhead; -struct Table*mt[(8+1)]; -TString*tmname[TM_N]; -}global_State; -struct lua_State{ -GCObject*next;lu_byte tt;lu_byte marked; -lu_byte status; -StkId top; -StkId base; -global_State*l_G; -CallInfo*ci; -const Instruction*savedpc; -StkId stack_last; -StkId stack; -CallInfo*end_ci; -CallInfo*base_ci; -int stacksize; -int size_ci; -unsigned short nCcalls; -unsigned short baseCcalls; -lu_byte hookmask; -lu_byte allowhook; -int basehookcount; -int hookcount; -lua_Hook hook; -TValue l_gt; -TValue 
env; -GCObject*openupval; -GCObject*gclist; -struct lua_longjmp*errorJmp; -ptrdiff_t errfunc; -}; -#define G(L)(L->l_G) -union GCObject{ -GCheader gch; -union TString ts; -union Udata u; -union Closure cl; -struct Table h; -struct Proto p; -struct UpVal uv; -struct lua_State th; -}; -#define rawgco2ts(o)check_exp((o)->gch.tt==4,&((o)->ts)) -#define gco2ts(o)(&rawgco2ts(o)->tsv) -#define rawgco2u(o)check_exp((o)->gch.tt==7,&((o)->u)) -#define gco2u(o)(&rawgco2u(o)->uv) -#define gco2cl(o)check_exp((o)->gch.tt==6,&((o)->cl)) -#define gco2h(o)check_exp((o)->gch.tt==5,&((o)->h)) -#define gco2p(o)check_exp((o)->gch.tt==(8+1),&((o)->p)) -#define gco2uv(o)check_exp((o)->gch.tt==(8+2),&((o)->uv)) -#define ngcotouv(o)check_exp((o)==NULL||(o)->gch.tt==(8+2),&((o)->uv)) -#define gco2th(o)check_exp((o)->gch.tt==8,&((o)->th)) -#define obj2gco(v)(cast(GCObject*,(v))) -static void luaE_freethread(lua_State*L,lua_State*L1); -#define pcRel(pc,p)(cast(int,(pc)-(p)->code)-1) -#define getline_(f,pc)(((f)->lineinfo)?(f)->lineinfo[pc]:0) -#define resethookcount(L)(L->hookcount=L->basehookcount) -static void luaG_typeerror(lua_State*L,const TValue*o, -const char*opname); -static void luaG_runerror(lua_State*L,const char*fmt,...); -#define luaD_checkstack(L,n)if((char*)L->stack_last-(char*)L->top<=(n)*(int)sizeof(TValue))luaD_growstack(L,n);else condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1)); -#define incr_top(L){luaD_checkstack(L,1);L->top++;} -#define savestack(L,p)((char*)(p)-(char*)L->stack) -#define restorestack(L,n)((TValue*)((char*)L->stack+(n))) -#define saveci(L,p)((char*)(p)-(char*)L->base_ci) -#define restoreci(L,n)((CallInfo*)((char*)L->base_ci+(n))) -typedef void(*Pfunc)(lua_State*L,void*ud); -static int luaD_poscall(lua_State*L,StkId firstResult); -static void luaD_reallocCI(lua_State*L,int newsize); -static void luaD_reallocstack(lua_State*L,int newsize); -static void luaD_growstack(lua_State*L,int n); -static void luaD_throw(lua_State*L,int errcode); -static void*luaM_growaux_(lua_State*L,void*block,int*size,size_t size_elems, -int limit,const char*errormsg){ -void*newblock; -int newsize; -if(*size>=limit/2){ -if(*size>=limit) -luaG_runerror(L,errormsg); -newsize=limit; -} -else{ -newsize=(*size)*2; -if(newsize<4) -newsize=4; -} -newblock=luaM_reallocv(L,block,*size,newsize,size_elems); -*size=newsize; -return newblock; -} -static void*luaM_toobig(lua_State*L){ -luaG_runerror(L,"memory allocation error: block too big"); -return NULL; -} -static void*luaM_realloc_(lua_State*L,void*block,size_t osize,size_t nsize){ -global_State*g=G(L); -block=(*g->frealloc)(g->ud,block,osize,nsize); -if(block==NULL&&nsize>0) -luaD_throw(L,4); -g->totalbytes=(g->totalbytes-osize)+nsize; -return block; -} -#define resetbits(x,m)((x)&=cast(lu_byte,~(m))) -#define setbits(x,m)((x)|=(m)) -#define testbits(x,m)((x)&(m)) -#define bitmask(b)(1<<(b)) -#define bit2mask(b1,b2)(bitmask(b1)|bitmask(b2)) -#define l_setbit(x,b)setbits(x,bitmask(b)) -#define resetbit(x,b)resetbits(x,bitmask(b)) -#define testbit(x,b)testbits(x,bitmask(b)) -#define set2bits(x,b1,b2)setbits(x,(bit2mask(b1,b2))) -#define reset2bits(x,b1,b2)resetbits(x,(bit2mask(b1,b2))) -#define test2bits(x,b1,b2)testbits(x,(bit2mask(b1,b2))) -#define iswhite(x)test2bits((x)->gch.marked,0,1) -#define isblack(x)testbit((x)->gch.marked,2) -#define isgray(x)(!isblack(x)&&!iswhite(x)) -#define otherwhite(g)(g->currentwhite^bit2mask(0,1)) -#define isdead(g,v)((v)->gch.marked&otherwhite(g)&bit2mask(0,1)) -#define changewhite(x)((x)->gch.marked^=bit2mask(0,1)) 
-#define gray2black(x)l_setbit((x)->gch.marked,2) -#define valiswhite(x)(iscollectable(x)&&iswhite(gcvalue(x))) -#define luaC_white(g)cast(lu_byte,(g)->currentwhite&bit2mask(0,1)) -#define luaC_checkGC(L){condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));if(G(L)->totalbytes>=G(L)->GCthreshold)luaC_step(L);} -#define luaC_barrier(L,p,v){if(valiswhite(v)&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),gcvalue(v));} -#define luaC_barriert(L,t,v){if(valiswhite(v)&&isblack(obj2gco(t)))luaC_barrierback(L,t);} -#define luaC_objbarrier(L,p,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),obj2gco(o));} -#define luaC_objbarriert(L,t,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(t)))luaC_barrierback(L,t);} -static void luaC_step(lua_State*L); -static void luaC_link(lua_State*L,GCObject*o,lu_byte tt); -static void luaC_linkupval(lua_State*L,UpVal*uv); -static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v); -static void luaC_barrierback(lua_State*L,Table*t); -#define sizestring(s)(sizeof(union TString)+((s)->len+1)*sizeof(char)) -#define sizeudata(u)(sizeof(union Udata)+(u)->len) -#define luaS_new(L,s)(luaS_newlstr(L,s,strlen(s))) -#define luaS_newliteral(L,s)(luaS_newlstr(L,""s,(sizeof(s)/sizeof(char))-1)) -#define luaS_fix(s)l_setbit((s)->tsv.marked,5) -static TString*luaS_newlstr(lua_State*L,const char*str,size_t l); -#define tostring(L,o)((ttype(o)==4)||(luaV_tostring(L,o))) -#define tonumber(o,n)(ttype(o)==3||(((o)=luaV_tonumber(o,n))!=NULL)) -#define equalobj(L,o1,o2)(ttype(o1)==ttype(o2)&&luaV_equalval(L,o1,o2)) -static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2); -static const TValue*luaV_tonumber(const TValue*obj,TValue*n); -static int luaV_tostring(lua_State*L,StkId obj); -static void luaV_execute(lua_State*L,int nexeccalls); -static void luaV_concat(lua_State*L,int total,int last); -static const TValue luaO_nilobject_={{NULL},0}; -static int luaO_int2fb(unsigned int x){ -int e=0; -while(x>=16){ -x=(x+1)>>1; -e++; -} -if(x<8)return x; -else return((e+1)<<3)|(cast_int(x)-8); -} -static int luaO_fb2int(int x){ -int e=(x>>3)&31; -if(e==0)return x; -else return((x&7)+8)<<(e-1); -} -static int luaO_log2(unsigned int x){ -static const lu_byte log_2[256]={ -0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, -6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, -7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, -7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, -8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, -8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, -8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, -8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 -}; -int l=-1; -while(x>=256){l+=8;x>>=8;} -return l+log_2[x]; -} -static int luaO_rawequalObj(const TValue*t1,const TValue*t2){ -if(ttype(t1)!=ttype(t2))return 0; -else switch(ttype(t1)){ -case 0: -return 1; -case 3: -return luai_numeq(nvalue(t1),nvalue(t2)); -case 1: -return bvalue(t1)==bvalue(t2); -case 2: -return pvalue(t1)==pvalue(t2); -default: -return gcvalue(t1)==gcvalue(t2); -} -} -static int luaO_str2d(const char*s,lua_Number*result){ -char*endptr; -*result=lua_str2number(s,&endptr); -if(endptr==s)return 0; -if(*endptr=='x'||*endptr=='X') -*result=cast_num(strtoul(s,&endptr,16)); -if(*endptr=='\0')return 1; -while(isspace(cast(unsigned char,*endptr)))endptr++; -if(*endptr!='\0')return 0; -return 1; -} -static void pushstr(lua_State*L,const char*str){ 
-setsvalue(L,L->top,luaS_new(L,str)); -incr_top(L); -} -static const char*luaO_pushvfstring(lua_State*L,const char*fmt,va_list argp){ -int n=1; -pushstr(L,""); -for(;;){ -const char*e=strchr(fmt,'%'); -if(e==NULL)break; -setsvalue(L,L->top,luaS_newlstr(L,fmt,e-fmt)); -incr_top(L); -switch(*(e+1)){ -case's':{ -const char*s=va_arg(argp,char*); -if(s==NULL)s="(null)"; -pushstr(L,s); -break; -} -case'c':{ -char buff[2]; -buff[0]=cast(char,va_arg(argp,int)); -buff[1]='\0'; -pushstr(L,buff); -break; -} -case'd':{ -setnvalue(L->top,cast_num(va_arg(argp,int))); -incr_top(L); -break; -} -case'f':{ -setnvalue(L->top,cast_num(va_arg(argp,l_uacNumber))); -incr_top(L); -break; -} -case'p':{ -char buff[4*sizeof(void*)+8]; -sprintf(buff,"%p",va_arg(argp,void*)); -pushstr(L,buff); -break; -} -case'%':{ -pushstr(L,"%"); -break; -} -default:{ -char buff[3]; -buff[0]='%'; -buff[1]=*(e+1); -buff[2]='\0'; -pushstr(L,buff); -break; -} -} -n+=2; -fmt=e+2; -} -pushstr(L,fmt); -luaV_concat(L,n+1,cast_int(L->top-L->base)-1); -L->top-=n; -return svalue(L->top-1); -} -static const char*luaO_pushfstring(lua_State*L,const char*fmt,...){ -const char*msg; -va_list argp; -va_start(argp,fmt); -msg=luaO_pushvfstring(L,fmt,argp); -va_end(argp); -return msg; -} -static void luaO_chunkid(char*out,const char*source,size_t bufflen){ -if(*source=='='){ -strncpy(out,source+1,bufflen); -out[bufflen-1]='\0'; -} -else{ -if(*source=='@'){ -size_t l; -source++; -bufflen-=sizeof(" '...' "); -l=strlen(source); -strcpy(out,""); -if(l>bufflen){ -source+=(l-bufflen); -strcat(out,"..."); -} -strcat(out,source); -} -else{ -size_t len=strcspn(source,"\n\r"); -bufflen-=sizeof(" [string \"...\"] "); -if(len>bufflen)len=bufflen; -strcpy(out,"[string \""); -if(source[len]!='\0'){ -strncat(out,source,len); -strcat(out,"..."); -} -else -strcat(out,source); -strcat(out,"\"]"); -} -} -} -#define gnode(t,i)(&(t)->node[i]) -#define gkey(n)(&(n)->i_key.nk) -#define gval(n)(&(n)->i_val) -#define gnext(n)((n)->i_key.nk.next) -#define key2tval(n)(&(n)->i_key.tvk) -static TValue*luaH_setnum(lua_State*L,Table*t,int key); -static const TValue*luaH_getstr(Table*t,TString*key); -static TValue*luaH_set(lua_State*L,Table*t,const TValue*key); -static const char*const luaT_typenames[]={ -"nil","boolean","userdata","number", -"string","table","function","userdata","thread", -"proto","upval" -}; -static void luaT_init(lua_State*L){ -static const char*const luaT_eventname[]={ -"__index","__newindex", -"__gc","__mode","__eq", -"__add","__sub","__mul","__div","__mod", -"__pow","__unm","__len","__lt","__le", -"__concat","__call" -}; -int i; -for(i=0;i<TM_N;i++){ -G(L)->tmname[i]=luaS_new(L,luaT_eventname[i]); -luaS_fix(G(L)->tmname[i]); -} -} -static const TValue*luaT_gettm(Table*events,TMS event,TString*ename){ -const TValue*tm=luaH_getstr(events,ename); -if(ttisnil(tm)){ -events->flags|=cast_byte(1u<<event); -return NULL; -} -else return tm; -} -static const TValue*luaT_gettmbyobj(lua_State*L,const TValue*o,TMS event){ -Table*mt; -switch(ttype(o)){ -case 5: -mt=hvalue(o)->metatable; -break; -case 7: -mt=uvalue(o)->metatable; -break; -default: -mt=G(L)->mt[ttype(o)]; -} -return(mt?luaH_getstr(mt,G(L)->tmname[event]):(&luaO_nilobject_)); -} -#define sizeCclosure(n)(cast(int,sizeof(CClosure))+cast(int,sizeof(TValue)*((n)-1))) -#define sizeLclosure(n)(cast(int,sizeof(LClosure))+cast(int,sizeof(TValue*)*((n)-1))) -static Closure*luaF_newCclosure(lua_State*L,int nelems,Table*e){ -Closure*c=cast(Closure*,luaM_malloc(L,sizeCclosure(nelems))); -luaC_link(L,obj2gco(c),6); -c->c.isC=1; -c->c.env=e; -c->c.nupvalues=cast_byte(nelems); -return c; -} -static Closure*luaF_newLclosure(lua_State*L,int nelems,Table*e){ 
-Closure*c=cast(Closure*,luaM_malloc(L,sizeLclosure(nelems))); -luaC_link(L,obj2gco(c),6); -c->l.isC=0; -c->l.env=e; -c->l.nupvalues=cast_byte(nelems); -while(nelems--)c->l.upvals[nelems]=NULL; -return c; -} -static UpVal*luaF_newupval(lua_State*L){ -UpVal*uv=luaM_new(L,UpVal); -luaC_link(L,obj2gco(uv),(8+2)); -uv->v=&uv->u.value; -setnilvalue(uv->v); -return uv; -} -static UpVal*luaF_findupval(lua_State*L,StkId level){ -global_State*g=G(L); -GCObject**pp=&L->openupval; -UpVal*p; -UpVal*uv; -while(*pp!=NULL&&(p=ngcotouv(*pp))->v>=level){ -if(p->v==level){ -if(isdead(g,obj2gco(p))) -changewhite(obj2gco(p)); -return p; -} -pp=&p->next; -} -uv=luaM_new(L,UpVal); -uv->tt=(8+2); -uv->marked=luaC_white(g); -uv->v=level; -uv->next=*pp; -*pp=obj2gco(uv); -uv->u.l.prev=&g->uvhead; -uv->u.l.next=g->uvhead.u.l.next; -uv->u.l.next->u.l.prev=uv; -g->uvhead.u.l.next=uv; -return uv; -} -static void unlinkupval(UpVal*uv){ -uv->u.l.next->u.l.prev=uv->u.l.prev; -uv->u.l.prev->u.l.next=uv->u.l.next; -} -static void luaF_freeupval(lua_State*L,UpVal*uv){ -if(uv->v!=&uv->u.value) -unlinkupval(uv); -luaM_free(L,uv); -} -static void luaF_close(lua_State*L,StkId level){ -UpVal*uv; -global_State*g=G(L); -while(L->openupval!=NULL&&(uv=ngcotouv(L->openupval))->v>=level){ -GCObject*o=obj2gco(uv); -L->openupval=uv->next; -if(isdead(g,o)) -luaF_freeupval(L,uv); -else{ -unlinkupval(uv); -setobj(L,&uv->u.value,uv->v); -uv->v=&uv->u.value; -luaC_linkupval(L,uv); -} -} -} -static Proto*luaF_newproto(lua_State*L){ -Proto*f=luaM_new(L,Proto); -luaC_link(L,obj2gco(f),(8+1)); -f->k=NULL; -f->sizek=0; -f->p=NULL; -f->sizep=0; -f->code=NULL; -f->sizecode=0; -f->sizelineinfo=0; -f->sizeupvalues=0; -f->nups=0; -f->upvalues=NULL; -f->numparams=0; -f->is_vararg=0; -f->maxstacksize=0; -f->lineinfo=NULL; -f->sizelocvars=0; -f->locvars=NULL; -f->linedefined=0; -f->lastlinedefined=0; -f->source=NULL; -return f; -} -static void luaF_freeproto(lua_State*L,Proto*f){ -luaM_freearray(L,f->code,f->sizecode,Instruction); -luaM_freearray(L,f->p,f->sizep,Proto*); -luaM_freearray(L,f->k,f->sizek,TValue); -luaM_freearray(L,f->lineinfo,f->sizelineinfo,int); -luaM_freearray(L,f->locvars,f->sizelocvars,struct LocVar); -luaM_freearray(L,f->upvalues,f->sizeupvalues,TString*); -luaM_free(L,f); -} -static void luaF_freeclosure(lua_State*L,Closure*c){ -int size=(c->c.isC)?sizeCclosure(c->c.nupvalues): -sizeLclosure(c->l.nupvalues); -luaM_freemem(L,c,size); -} -#define MASK1(n,p)((~((~(Instruction)0)<>0)&MASK1(6,0))) -#define SET_OPCODE(i,o)((i)=(((i)&MASK0(6,0))|((cast(Instruction,o)<<0)&MASK1(6,0)))) -#define GETARG_A(i)(cast(int,((i)>>(0+6))&MASK1(8,0))) -#define SETARG_A(i,u)((i)=(((i)&MASK0(8,(0+6)))|((cast(Instruction,u)<<(0+6))&MASK1(8,(0+6))))) -#define GETARG_B(i)(cast(int,((i)>>(((0+6)+8)+9))&MASK1(9,0))) -#define SETARG_B(i,b)((i)=(((i)&MASK0(9,(((0+6)+8)+9)))|((cast(Instruction,b)<<(((0+6)+8)+9))&MASK1(9,(((0+6)+8)+9))))) -#define GETARG_C(i)(cast(int,((i)>>((0+6)+8))&MASK1(9,0))) -#define SETARG_C(i,b)((i)=(((i)&MASK0(9,((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1(9,((0+6)+8))))) -#define GETARG_Bx(i)(cast(int,((i)>>((0+6)+8))&MASK1((9+9),0))) -#define SETARG_Bx(i,b)((i)=(((i)&MASK0((9+9),((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1((9+9),((0+6)+8))))) -#define GETARG_sBx(i)(GETARG_Bx(i)-(((1<<(9+9))-1)>>1)) -#define SETARG_sBx(i,b)SETARG_Bx((i),cast(unsigned int,(b)+(((1<<(9+9))-1)>>1))) -#define 
CREATE_ABC(o,a,b,c)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,b)<<(((0+6)+8)+9))|(cast(Instruction,c)<<((0+6)+8))) -#define CREATE_ABx(o,a,bc)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,bc)<<((0+6)+8))) -#define ISK(x)((x)&(1<<(9-1))) -#define INDEXK(r)((int)(r)&~(1<<(9-1))) -#define RKASK(x)((x)|(1<<(9-1))) -static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]; -#define getBMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>4)&3)) -#define getCMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>2)&3)) -#define testTMode(m)(luaP_opmodes[m]&(1<<7)) -typedef struct expdesc{ -expkind k; -union{ -struct{int info,aux;}s; -lua_Number nval; -}u; -int t; -int f; -}expdesc; -typedef struct upvaldesc{ -lu_byte k; -lu_byte info; -}upvaldesc; -struct BlockCnt; -typedef struct FuncState{ -Proto*f; -Table*h; -struct FuncState*prev; -struct LexState*ls; -struct lua_State*L; -struct BlockCnt*bl; -int pc; -int lasttarget; -int jpc; -int freereg; -int nk; -int np; -short nlocvars; -lu_byte nactvar; -upvaldesc upvalues[60]; -unsigned short actvar[200]; -}FuncState; -static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff, -const char*name); -struct lua_longjmp{ -struct lua_longjmp*previous; -jmp_buf b; -volatile int status; -}; -static void luaD_seterrorobj(lua_State*L,int errcode,StkId oldtop){ -switch(errcode){ -case 4:{ -setsvalue(L,oldtop,luaS_newliteral(L,"not enough memory")); -break; -} -case 5:{ -setsvalue(L,oldtop,luaS_newliteral(L,"error in error handling")); -break; -} -case 3: -case 2:{ -setobj(L,oldtop,L->top-1); -break; -} -} -L->top=oldtop+1; -} -static void restore_stack_limit(lua_State*L){ -if(L->size_ci>20000){ -int inuse=cast_int(L->ci-L->base_ci); -if(inuse+1<20000) -luaD_reallocCI(L,20000); -} -} -static void resetstack(lua_State*L,int status){ -L->ci=L->base_ci; -L->base=L->ci->base; -luaF_close(L,L->base); -luaD_seterrorobj(L,status,L->base); -L->nCcalls=L->baseCcalls; -L->allowhook=1; -restore_stack_limit(L); -L->errfunc=0; -L->errorJmp=NULL; -} -static void luaD_throw(lua_State*L,int errcode){ -if(L->errorJmp){ -L->errorJmp->status=errcode; -LUAI_THROW(L,L->errorJmp); -} -else{ -L->status=cast_byte(errcode); -if(G(L)->panic){ -resetstack(L,errcode); -G(L)->panic(L); -} -exit(EXIT_FAILURE); -} -} -static int luaD_rawrunprotected(lua_State*L,Pfunc f,void*ud){ -struct lua_longjmp lj; -lj.status=0; -lj.previous=L->errorJmp; -L->errorJmp=&lj; -LUAI_TRY(L,&lj, -(*f)(L,ud); -); -L->errorJmp=lj.previous; -return lj.status; -} -static void correctstack(lua_State*L,TValue*oldstack){ -CallInfo*ci; -GCObject*up; -L->top=(L->top-oldstack)+L->stack; -for(up=L->openupval;up!=NULL;up=up->gch.next) -gco2uv(up)->v=(gco2uv(up)->v-oldstack)+L->stack; -for(ci=L->base_ci;ci<=L->ci;ci++){ -ci->top=(ci->top-oldstack)+L->stack; -ci->base=(ci->base-oldstack)+L->stack; -ci->func=(ci->func-oldstack)+L->stack; -} -L->base=(L->base-oldstack)+L->stack; -} -static void luaD_reallocstack(lua_State*L,int newsize){ -TValue*oldstack=L->stack; -int realsize=newsize+1+5; -luaM_reallocvector(L,L->stack,L->stacksize,realsize,TValue); -L->stacksize=realsize; -L->stack_last=L->stack+newsize; -correctstack(L,oldstack); -} -static void luaD_reallocCI(lua_State*L,int newsize){ -CallInfo*oldci=L->base_ci; -luaM_reallocvector(L,L->base_ci,L->size_ci,newsize,CallInfo); -L->size_ci=newsize; -L->ci=(L->ci-oldci)+L->base_ci; -L->end_ci=L->base_ci+L->size_ci-1; -} -static void luaD_growstack(lua_State*L,int n){ -if(n<=L->stacksize) -luaD_reallocstack(L,2*L->stacksize); -else 
-luaD_reallocstack(L,L->stacksize+n); -} -static CallInfo*growCI(lua_State*L){ -if(L->size_ci>20000) -luaD_throw(L,5); -else{ -luaD_reallocCI(L,2*L->size_ci); -if(L->size_ci>20000) -luaG_runerror(L,"stack overflow"); -} -return++L->ci; -} -static StkId adjust_varargs(lua_State*L,Proto*p,int actual){ -int i; -int nfixargs=p->numparams; -Table*htab=NULL; -StkId base,fixed; -for(;actual<nfixargs;++actual) -setnilvalue(L->top++); -fixed=L->top-actual; -base=L->top; -for(i=0;i<nfixargs;i++){ -setobj(L,L->top++,fixed+i); -setnilvalue(fixed+i); -} -if(htab){ -sethvalue(L,L->top++,htab); -} -return base; -} -static StkId tryfuncTM(lua_State*L,StkId func){ -const TValue*tm=luaT_gettmbyobj(L,func,TM_CALL); -StkId p; -ptrdiff_t funcr=savestack(L,func); -if(!ttisfunction(tm)) -luaG_typeerror(L,func,"call"); -for(p=L->top;p>func;p--)setobj(L,p,p-1); -incr_top(L); -func=restorestack(L,funcr); -setobj(L,func,tm); -return func; -} -#define inc_ci(L)((L->ci==L->end_ci)?growCI(L):(condhardstacktests(luaD_reallocCI(L,L->size_ci)),++L->ci)) -static int luaD_precall(lua_State*L,StkId func,int nresults){ -LClosure*cl; -ptrdiff_t funcr; -if(!ttisfunction(func)) -func=tryfuncTM(L,func); -funcr=savestack(L,func); -cl=&clvalue(func)->l; -L->ci->savedpc=L->savedpc; -if(!cl->isC){ -CallInfo*ci; -StkId st,base; -Proto*p=cl->p; -luaD_checkstack(L,p->maxstacksize); -func=restorestack(L,funcr); -if(!p->is_vararg){ -base=func+1; -if(L->top>base+p->numparams) -L->top=base+p->numparams; -} -else{ -int nargs=cast_int(L->top-func)-1; -base=adjust_varargs(L,p,nargs); -func=restorestack(L,funcr); -} -ci=inc_ci(L); -ci->func=func; -L->base=ci->base=base; -ci->top=L->base+p->maxstacksize; -L->savedpc=p->code; -ci->tailcalls=0; -ci->nresults=nresults; -for(st=L->top;st<ci->top;st++) -setnilvalue(st); -L->top=ci->top; -return 0; -} -else{ -CallInfo*ci; -int n; -luaD_checkstack(L,20); -ci=inc_ci(L); -ci->func=restorestack(L,funcr); -L->base=ci->base=ci->func+1; -ci->top=L->top+20; -ci->nresults=nresults; -n=(*curr_func(L)->c.f)(L); -if(n<0) -return 2; -else{ -luaD_poscall(L,L->top-n); -return 1; -} -} -} -static int luaD_poscall(lua_State*L,StkId firstResult){ -StkId res; -int wanted,i; -CallInfo*ci; -ci=L->ci--; -res=ci->func; -wanted=ci->nresults; -L->base=(ci-1)->base; -L->savedpc=(ci-1)->savedpc; -for(i=wanted;i!=0&&firstResult<L->top;i--) -setobj(L,res++,firstResult++); -while(i-->0) -setnilvalue(res++); -L->top=res; -return(wanted-(-1)); -} -static void luaD_call(lua_State*L,StkId func,int nResults){ -if(++L->nCcalls>=200){ -if(L->nCcalls==200) -luaG_runerror(L,"C stack overflow"); -else if(L->nCcalls>=(200+(200>>3))) -luaD_throw(L,5); -} -if(luaD_precall(L,func,nResults)==0) -luaV_execute(L,1); -L->nCcalls--; -luaC_checkGC(L); -} -static int luaD_pcall(lua_State*L,Pfunc func,void*u, -ptrdiff_t old_top,ptrdiff_t ef){ -int status; -unsigned short oldnCcalls=L->nCcalls; -ptrdiff_t old_ci=saveci(L,L->ci); -lu_byte old_allowhooks=L->allowhook; -ptrdiff_t old_errfunc=L->errfunc; -L->errfunc=ef; -status=luaD_rawrunprotected(L,func,u); -if(status!=0){ -StkId oldtop=restorestack(L,old_top); -luaF_close(L,oldtop); -luaD_seterrorobj(L,status,oldtop); -L->nCcalls=oldnCcalls; -L->ci=restoreci(L,old_ci); -L->base=L->ci->base; -L->savedpc=L->ci->savedpc; -L->allowhook=old_allowhooks; -restore_stack_limit(L); -} -L->errfunc=old_errfunc; -return status; -} -struct SParser{ -ZIO*z; -Mbuffer buff; -const char*name; -}; -static void f_parser(lua_State*L,void*ud){ -int i; -Proto*tf; -Closure*cl; -struct SParser*p=cast(struct SParser*,ud); -luaC_checkGC(L); -tf=luaY_parser(L,p->z, -&p->buff,p->name); 
-cl=luaF_newLclosure(L,tf->nups,hvalue(gt(L))); -cl->l.p=tf; -for(i=0;inups;i++) -cl->l.upvals[i]=luaF_newupval(L); -setclvalue(L,L->top,cl); -incr_top(L); -} -static int luaD_protectedparser(lua_State*L,ZIO*z,const char*name){ -struct SParser p; -int status; -p.z=z;p.name=name; -luaZ_initbuffer(L,&p.buff); -status=luaD_pcall(L,f_parser,&p,savestack(L,L->top),L->errfunc); -luaZ_freebuffer(L,&p.buff); -return status; -} -static void luaS_resize(lua_State*L,int newsize){ -GCObject**newhash; -stringtable*tb; -int i; -if(G(L)->gcstate==2) -return; -newhash=luaM_newvector(L,newsize,GCObject*); -tb=&G(L)->strt; -for(i=0;isize;i++){ -GCObject*p=tb->hash[i]; -while(p){ -GCObject*next=p->gch.next; -unsigned int h=gco2ts(p)->hash; -int h1=lmod(h,newsize); -p->gch.next=newhash[h1]; -newhash[h1]=p; -p=next; -} -} -luaM_freearray(L,tb->hash,tb->size,TString*); -tb->size=newsize; -tb->hash=newhash; -} -static TString*newlstr(lua_State*L,const char*str,size_t l, -unsigned int h){ -TString*ts; -stringtable*tb; -if(l+1>(((size_t)(~(size_t)0)-2)-sizeof(TString))/sizeof(char)) -luaM_toobig(L); -ts=cast(TString*,luaM_malloc(L,(l+1)*sizeof(char)+sizeof(TString))); -ts->tsv.len=l; -ts->tsv.hash=h; -ts->tsv.marked=luaC_white(G(L)); -ts->tsv.tt=4; -ts->tsv.reserved=0; -memcpy(ts+1,str,l*sizeof(char)); -((char*)(ts+1))[l]='\0'; -tb=&G(L)->strt; -h=lmod(h,tb->size); -ts->tsv.next=tb->hash[h]; -tb->hash[h]=obj2gco(ts); -tb->nuse++; -if(tb->nuse>cast(lu_int32,tb->size)&&tb->size<=(INT_MAX-2)/2) -luaS_resize(L,tb->size*2); -return ts; -} -static TString*luaS_newlstr(lua_State*L,const char*str,size_t l){ -GCObject*o; -unsigned int h=cast(unsigned int,l); -size_t step=(l>>5)+1; -size_t l1; -for(l1=l;l1>=step;l1-=step) -h=h^((h<<5)+(h>>2)+cast(unsigned char,str[l1-1])); -for(o=G(L)->strt.hash[lmod(h,G(L)->strt.size)]; -o!=NULL; -o=o->gch.next){ -TString*ts=rawgco2ts(o); -if(ts->tsv.len==l&&(memcmp(str,getstr(ts),l)==0)){ -if(isdead(G(L),o))changewhite(o); -return ts; -} -} -return newlstr(L,str,l,h); -} -static Udata*luaS_newudata(lua_State*L,size_t s,Table*e){ -Udata*u; -if(s>((size_t)(~(size_t)0)-2)-sizeof(Udata)) -luaM_toobig(L); -u=cast(Udata*,luaM_malloc(L,s+sizeof(Udata))); -u->uv.marked=luaC_white(G(L)); -u->uv.tt=7; -u->uv.len=s; -u->uv.metatable=NULL; -u->uv.env=e; -u->uv.next=G(L)->mainthread->next; -G(L)->mainthread->next=obj2gco(u); -return u; -} -#define hashpow2(t,n)(gnode(t,lmod((n),sizenode(t)))) -#define hashstr(t,str)hashpow2(t,(str)->tsv.hash) -#define hashboolean(t,p)hashpow2(t,p) -#define hashmod(t,n)(gnode(t,((n)%((sizenode(t)-1)|1)))) -#define hashpointer(t,p)hashmod(t,IntPoint(p)) -static const Node dummynode_={ -{{NULL},0}, -{{{NULL},0,NULL}} -}; -static Node*hashnum(const Table*t,lua_Number n){ -unsigned int a[cast_int(sizeof(lua_Number)/sizeof(int))]; -int i; -if(luai_numeq(n,0)) -return gnode(t,0); -memcpy(a,&n,sizeof(a)); -for(i=1;isizearray) -return i-1; -else{ -Node*n=mainposition(t,key); -do{ -if(luaO_rawequalObj(key2tval(n),key)|| -(ttype(gkey(n))==(8+3)&&iscollectable(key)&& -gcvalue(gkey(n))==gcvalue(key))){ -i=cast_int(n-gnode(t,0)); -return i+t->sizearray; -} -else n=gnext(n); -}while(n); -luaG_runerror(L,"invalid key to "LUA_QL("next")); -return 0; -} -} -static int luaH_next(lua_State*L,Table*t,StkId key){ -int i=findindex(L,t,key); -for(i++;isizearray;i++){ -if(!ttisnil(&t->array[i])){ -setnvalue(key,cast_num(i+1)); -setobj(L,key+1,&t->array[i]); -return 1; -} -} -for(i-=t->sizearray;i<(int)sizenode(t);i++){ -if(!ttisnil(gval(gnode(t,i)))){ -setobj(L,key,key2tval(gnode(t,i))); 
-setobj(L,key+1,gval(gnode(t,i))); -return 1; -} -} -return 0; -} -static int computesizes(int nums[],int*narray){ -int i; -int twotoi; -int a=0; -int na=0; -int n=0; -for(i=0,twotoi=1;twotoi/2<*narray;i++,twotoi*=2){ -if(nums[i]>0){ -a+=nums[i]; -if(a>twotoi/2){ -n=twotoi; -na=a; -} -} -if(a==*narray)break; -} -*narray=n; -return na; -} -static int countint(const TValue*key,int*nums){ -int k=arrayindex(key); -if(0t->sizearray){ -lim=t->sizearray; -if(i>lim) -break; -} -for(;i<=lim;i++){ -if(!ttisnil(&t->array[i-1])) -lc++; -} -nums[lg]+=lc; -ause+=lc; -} -return ause; -} -static int numusehash(const Table*t,int*nums,int*pnasize){ -int totaluse=0; -int ause=0; -int i=sizenode(t); -while(i--){ -Node*n=&t->node[i]; -if(!ttisnil(gval(n))){ -ause+=countint(key2tval(n),nums); -totaluse++; -} -} -*pnasize+=ause; -return totaluse; -} -static void setarrayvector(lua_State*L,Table*t,int size){ -int i; -luaM_reallocvector(L,t->array,t->sizearray,size,TValue); -for(i=t->sizearray;iarray[i]); -t->sizearray=size; -} -static void setnodevector(lua_State*L,Table*t,int size){ -int lsize; -if(size==0){ -t->node=cast(Node*,(&dummynode_)); -lsize=0; -} -else{ -int i; -lsize=ceillog2(size); -if(lsize>(32-2)) -luaG_runerror(L,"table overflow"); -size=twoto(lsize); -t->node=luaM_newvector(L,size,Node); -for(i=0;ilsizenode=cast_byte(lsize); -t->lastfree=gnode(t,size); -} -static void resize(lua_State*L,Table*t,int nasize,int nhsize){ -int i; -int oldasize=t->sizearray; -int oldhsize=t->lsizenode; -Node*nold=t->node; -if(nasize>oldasize) -setarrayvector(L,t,nasize); -setnodevector(L,t,nhsize); -if(nasizesizearray=nasize; -for(i=nasize;iarray[i])) -setobj(L,luaH_setnum(L,t,i+1),&t->array[i]); -} -luaM_reallocvector(L,t->array,oldasize,nasize,TValue); -} -for(i=twoto(oldhsize)-1;i>=0;i--){ -Node*old=nold+i; -if(!ttisnil(gval(old))) -setobj(L,luaH_set(L,t,key2tval(old)),gval(old)); -} -if(nold!=(&dummynode_)) -luaM_freearray(L,nold,twoto(oldhsize),Node); -} -static void luaH_resizearray(lua_State*L,Table*t,int nasize){ -int nsize=(t->node==(&dummynode_))?0:sizenode(t); -resize(L,t,nasize,nsize); -} -static void rehash(lua_State*L,Table*t,const TValue*ek){ -int nasize,na; -int nums[(32-2)+1]; -int i; -int totaluse; -for(i=0;i<=(32-2);i++)nums[i]=0; -nasize=numusearray(t,nums); -totaluse=nasize; -totaluse+=numusehash(t,nums,&nasize); -nasize+=countint(ek,nums); -totaluse++; -na=computesizes(nums,&nasize); -resize(L,t,nasize,totaluse-na); -} -static Table*luaH_new(lua_State*L,int narray,int nhash){ -Table*t=luaM_new(L,Table); -luaC_link(L,obj2gco(t),5); -t->metatable=NULL; -t->flags=cast_byte(~0); -t->array=NULL; -t->sizearray=0; -t->lsizenode=0; -t->node=cast(Node*,(&dummynode_)); -setarrayvector(L,t,narray); -setnodevector(L,t,nhash); -return t; -} -static void luaH_free(lua_State*L,Table*t){ -if(t->node!=(&dummynode_)) -luaM_freearray(L,t->node,sizenode(t),Node); -luaM_freearray(L,t->array,t->sizearray,TValue); -luaM_free(L,t); -} -static Node*getfreepos(Table*t){ -while(t->lastfree-->t->node){ -if(ttisnil(gkey(t->lastfree))) -return t->lastfree; -} -return NULL; -} -static TValue*newkey(lua_State*L,Table*t,const TValue*key){ -Node*mp=mainposition(t,key); -if(!ttisnil(gval(mp))||mp==(&dummynode_)){ -Node*othern; -Node*n=getfreepos(t); -if(n==NULL){ -rehash(L,t,key); -return luaH_set(L,t,key); -} -othern=mainposition(t,key2tval(mp)); -if(othern!=mp){ -while(gnext(othern)!=mp)othern=gnext(othern); -gnext(othern)=n; -*n=*mp; -gnext(mp)=NULL; -setnilvalue(gval(mp)); -} -else{ -gnext(n)=gnext(mp); -gnext(mp)=n; -mp=n; -} 
-} -gkey(mp)->value=key->value;gkey(mp)->tt=key->tt; -luaC_barriert(L,t,key); -return gval(mp); -} -static const TValue*luaH_getnum(Table*t,int key){ -if(cast(unsigned int,key-1)sizearray)) -return&t->array[key-1]; -else{ -lua_Number nk=cast_num(key); -Node*n=hashnum(t,nk); -do{ -if(ttisnumber(gkey(n))&&luai_numeq(nvalue(gkey(n)),nk)) -return gval(n); -else n=gnext(n); -}while(n); -return(&luaO_nilobject_); -} -} -static const TValue*luaH_getstr(Table*t,TString*key){ -Node*n=hashstr(t,key); -do{ -if(ttisstring(gkey(n))&&rawtsvalue(gkey(n))==key) -return gval(n); -else n=gnext(n); -}while(n); -return(&luaO_nilobject_); -} -static const TValue*luaH_get(Table*t,const TValue*key){ -switch(ttype(key)){ -case 0:return(&luaO_nilobject_); -case 4:return luaH_getstr(t,rawtsvalue(key)); -case 3:{ -int k; -lua_Number n=nvalue(key); -lua_number2int(k,n); -if(luai_numeq(cast_num(k),nvalue(key))) -return luaH_getnum(t,k); -} -default:{ -Node*n=mainposition(t,key); -do{ -if(luaO_rawequalObj(key2tval(n),key)) -return gval(n); -else n=gnext(n); -}while(n); -return(&luaO_nilobject_); -} -} -} -static TValue*luaH_set(lua_State*L,Table*t,const TValue*key){ -const TValue*p=luaH_get(t,key); -t->flags=0; -if(p!=(&luaO_nilobject_)) -return cast(TValue*,p); -else{ -if(ttisnil(key))luaG_runerror(L,"table index is nil"); -else if(ttisnumber(key)&&luai_numisnan(nvalue(key))) -luaG_runerror(L,"table index is NaN"); -return newkey(L,t,key); -} -} -static TValue*luaH_setnum(lua_State*L,Table*t,int key){ -const TValue*p=luaH_getnum(t,key); -if(p!=(&luaO_nilobject_)) -return cast(TValue*,p); -else{ -TValue k; -setnvalue(&k,cast_num(key)); -return newkey(L,t,&k); -} -} -static TValue*luaH_setstr(lua_State*L,Table*t,TString*key){ -const TValue*p=luaH_getstr(t,key); -if(p!=(&luaO_nilobject_)) -return cast(TValue*,p); -else{ -TValue k; -setsvalue(L,&k,key); -return newkey(L,t,&k); -} -} -static int unbound_search(Table*t,unsigned int j){ -unsigned int i=j; -j++; -while(!ttisnil(luaH_getnum(t,j))){ -i=j; -j*=2; -if(j>cast(unsigned int,(INT_MAX-2))){ -i=1; -while(!ttisnil(luaH_getnum(t,i)))i++; -return i-1; -} -} -while(j-i>1){ -unsigned int m=(i+j)/2; -if(ttisnil(luaH_getnum(t,m)))j=m; -else i=m; -} -return i; -} -static int luaH_getn(Table*t){ -unsigned int j=t->sizearray; -if(j>0&&ttisnil(&t->array[j-1])){ -unsigned int i=0; -while(j-i>1){ -unsigned int m=(i+j)/2; -if(ttisnil(&t->array[m-1]))j=m; -else i=m; -} -return i; -} -else if(t->node==(&dummynode_)) -return j; -else return unbound_search(t,j); -} -#define makewhite(g,x)((x)->gch.marked=cast_byte(((x)->gch.marked&cast_byte(~(bitmask(2)|bit2mask(0,1))))|luaC_white(g))) -#define white2gray(x)reset2bits((x)->gch.marked,0,1) -#define black2gray(x)resetbit((x)->gch.marked,2) -#define stringmark(s)reset2bits((s)->tsv.marked,0,1) -#define isfinalized(u)testbit((u)->marked,3) -#define markfinalized(u)l_setbit((u)->marked,3) -#define markvalue(g,o){checkconsistency(o);if(iscollectable(o)&&iswhite(gcvalue(o)))reallymarkobject(g,gcvalue(o));} -#define markobject(g,t){if(iswhite(obj2gco(t)))reallymarkobject(g,obj2gco(t));} -#define setthreshold(g)(g->GCthreshold=(g->estimate/100)*g->gcpause) -static void removeentry(Node*n){ -if(iscollectable(gkey(n))) -setttype(gkey(n),(8+3)); -} -static void reallymarkobject(global_State*g,GCObject*o){ -white2gray(o); -switch(o->gch.tt){ -case 4:{ -return; -} -case 7:{ -Table*mt=gco2u(o)->metatable; -gray2black(o); -if(mt)markobject(g,mt); -markobject(g,gco2u(o)->env); -return; -} -case(8+2):{ -UpVal*uv=gco2uv(o); -markvalue(g,uv->v); 
-if(uv->v==&uv->u.value) -gray2black(o); -return; -} -case 6:{ -gco2cl(o)->c.gclist=g->gray; -g->gray=o; -break; -} -case 5:{ -gco2h(o)->gclist=g->gray; -g->gray=o; -break; -} -case 8:{ -gco2th(o)->gclist=g->gray; -g->gray=o; -break; -} -case(8+1):{ -gco2p(o)->gclist=g->gray; -g->gray=o; -break; -} -default:; -} -} -static void marktmu(global_State*g){ -GCObject*u=g->tmudata; -if(u){ -do{ -u=u->gch.next; -makewhite(g,u); -reallymarkobject(g,u); -}while(u!=g->tmudata); -} -} -static size_t luaC_separateudata(lua_State*L,int all){ -global_State*g=G(L); -size_t deadmem=0; -GCObject**p=&g->mainthread->next; -GCObject*curr; -while((curr=*p)!=NULL){ -if(!(iswhite(curr)||all)||isfinalized(gco2u(curr))) -p=&curr->gch.next; -else if(fasttm(L,gco2u(curr)->metatable,TM_GC)==NULL){ -markfinalized(gco2u(curr)); -p=&curr->gch.next; -} -else{ -deadmem+=sizeudata(gco2u(curr)); -markfinalized(gco2u(curr)); -*p=curr->gch.next; -if(g->tmudata==NULL) -g->tmudata=curr->gch.next=curr; -else{ -curr->gch.next=g->tmudata->gch.next; -g->tmudata->gch.next=curr; -g->tmudata=curr; -} -} -} -return deadmem; -} -static int traversetable(global_State*g,Table*h){ -int i; -int weakkey=0; -int weakvalue=0; -const TValue*mode; -if(h->metatable) -markobject(g,h->metatable); -mode=gfasttm(g,h->metatable,TM_MODE); -if(mode&&ttisstring(mode)){ -weakkey=(strchr(svalue(mode),'k')!=NULL); -weakvalue=(strchr(svalue(mode),'v')!=NULL); -if(weakkey||weakvalue){ -h->marked&=~(bitmask(3)|bitmask(4)); -h->marked|=cast_byte((weakkey<<3)| -(weakvalue<<4)); -h->gclist=g->weak; -g->weak=obj2gco(h); -} -} -if(weakkey&&weakvalue)return 1; -if(!weakvalue){ -i=h->sizearray; -while(i--) -markvalue(g,&h->array[i]); -} -i=sizenode(h); -while(i--){ -Node*n=gnode(h,i); -if(ttisnil(gval(n))) -removeentry(n); -else{ -if(!weakkey)markvalue(g,gkey(n)); -if(!weakvalue)markvalue(g,gval(n)); -} -} -return weakkey||weakvalue; -} -static void traverseproto(global_State*g,Proto*f){ -int i; -if(f->source)stringmark(f->source); -for(i=0;isizek;i++) -markvalue(g,&f->k[i]); -for(i=0;isizeupvalues;i++){ -if(f->upvalues[i]) -stringmark(f->upvalues[i]); -} -for(i=0;isizep;i++){ -if(f->p[i]) -markobject(g,f->p[i]); -} -for(i=0;isizelocvars;i++){ -if(f->locvars[i].varname) -stringmark(f->locvars[i].varname); -} -} -static void traverseclosure(global_State*g,Closure*cl){ -markobject(g,cl->c.env); -if(cl->c.isC){ -int i; -for(i=0;ic.nupvalues;i++) -markvalue(g,&cl->c.upvalue[i]); -} -else{ -int i; -markobject(g,cl->l.p); -for(i=0;il.nupvalues;i++) -markobject(g,cl->l.upvals[i]); -} -} -static void checkstacksizes(lua_State*L,StkId max){ -int ci_used=cast_int(L->ci-L->base_ci); -int s_used=cast_int(max-L->stack); -if(L->size_ci>20000) -return; -if(4*ci_usedsize_ci&&2*8size_ci) -luaD_reallocCI(L,L->size_ci/2); -condhardstacktests(luaD_reallocCI(L,ci_used+1)); -if(4*s_usedstacksize&& -2*((2*20)+5)stacksize) -luaD_reallocstack(L,L->stacksize/2); -condhardstacktests(luaD_reallocstack(L,s_used)); -} -static void traversestack(global_State*g,lua_State*l){ -StkId o,lim; -CallInfo*ci; -markvalue(g,gt(l)); -lim=l->top; -for(ci=l->base_ci;ci<=l->ci;ci++){ -if(limtop)lim=ci->top; -} -for(o=l->stack;otop;o++) -markvalue(g,o); -for(;o<=lim;o++) -setnilvalue(o); -checkstacksizes(l,lim); -} -static l_mem propagatemark(global_State*g){ -GCObject*o=g->gray; -gray2black(o); -switch(o->gch.tt){ -case 5:{ -Table*h=gco2h(o); -g->gray=h->gclist; -if(traversetable(g,h)) -black2gray(o); -return sizeof(Table)+sizeof(TValue)*h->sizearray+ -sizeof(Node)*sizenode(h); -} -case 6:{ 
-Closure*cl=gco2cl(o); -g->gray=cl->c.gclist; -traverseclosure(g,cl); -return(cl->c.isC)?sizeCclosure(cl->c.nupvalues): -sizeLclosure(cl->l.nupvalues); -} -case 8:{ -lua_State*th=gco2th(o); -g->gray=th->gclist; -th->gclist=g->grayagain; -g->grayagain=o; -black2gray(o); -traversestack(g,th); -return sizeof(lua_State)+sizeof(TValue)*th->stacksize+ -sizeof(CallInfo)*th->size_ci; -} -case(8+1):{ -Proto*p=gco2p(o); -g->gray=p->gclist; -traverseproto(g,p); -return sizeof(Proto)+sizeof(Instruction)*p->sizecode+ -sizeof(Proto*)*p->sizep+ -sizeof(TValue)*p->sizek+ -sizeof(int)*p->sizelineinfo+ -sizeof(LocVar)*p->sizelocvars+ -sizeof(TString*)*p->sizeupvalues; -} -default:return 0; -} -} -static size_t propagateall(global_State*g){ -size_t m=0; -while(g->gray)m+=propagatemark(g); -return m; -} -static int iscleared(const TValue*o,int iskey){ -if(!iscollectable(o))return 0; -if(ttisstring(o)){ -stringmark(rawtsvalue(o)); -return 0; -} -return iswhite(gcvalue(o))|| -(ttisuserdata(o)&&(!iskey&&isfinalized(uvalue(o)))); -} -static void cleartable(GCObject*l){ -while(l){ -Table*h=gco2h(l); -int i=h->sizearray; -if(testbit(h->marked,4)){ -while(i--){ -TValue*o=&h->array[i]; -if(iscleared(o,0)) -setnilvalue(o); -} -} -i=sizenode(h); -while(i--){ -Node*n=gnode(h,i); -if(!ttisnil(gval(n))&& -(iscleared(key2tval(n),1)||iscleared(gval(n),0))){ -setnilvalue(gval(n)); -removeentry(n); -} -} -l=h->gclist; -} -} -static void freeobj(lua_State*L,GCObject*o){ -switch(o->gch.tt){ -case(8+1):luaF_freeproto(L,gco2p(o));break; -case 6:luaF_freeclosure(L,gco2cl(o));break; -case(8+2):luaF_freeupval(L,gco2uv(o));break; -case 5:luaH_free(L,gco2h(o));break; -case 8:{ -luaE_freethread(L,gco2th(o)); -break; -} -case 4:{ -G(L)->strt.nuse--; -luaM_freemem(L,o,sizestring(gco2ts(o))); -break; -} -case 7:{ -luaM_freemem(L,o,sizeudata(gco2u(o))); -break; -} -default:; -} -} -#define sweepwholelist(L,p)sweeplist(L,p,((lu_mem)(~(lu_mem)0)-2)) -static GCObject**sweeplist(lua_State*L,GCObject**p,lu_mem count){ -GCObject*curr; -global_State*g=G(L); -int deadmask=otherwhite(g); -while((curr=*p)!=NULL&&count-->0){ -if(curr->gch.tt==8) -sweepwholelist(L,&gco2th(curr)->openupval); -if((curr->gch.marked^bit2mask(0,1))&deadmask){ -makewhite(g,curr); -p=&curr->gch.next; -} -else{ -*p=curr->gch.next; -if(curr==g->rootgc) -g->rootgc=curr->gch.next; -freeobj(L,curr); -} -} -return p; -} -static void checkSizes(lua_State*L){ -global_State*g=G(L); -if(g->strt.nusestrt.size/4)&& -g->strt.size>32*2) -luaS_resize(L,g->strt.size/2); -if(luaZ_sizebuffer(&g->buff)>32*2){ -size_t newsize=luaZ_sizebuffer(&g->buff)/2; -luaZ_resizebuffer(L,&g->buff,newsize); -} -} -static void GCTM(lua_State*L){ -global_State*g=G(L); -GCObject*o=g->tmudata->gch.next; -Udata*udata=rawgco2u(o); -const TValue*tm; -if(o==g->tmudata) -g->tmudata=NULL; -else -g->tmudata->gch.next=udata->uv.next; -udata->uv.next=g->mainthread->next; -g->mainthread->next=o; -makewhite(g,o); -tm=fasttm(L,udata->uv.metatable,TM_GC); -if(tm!=NULL){ -lu_byte oldah=L->allowhook; -lu_mem oldt=g->GCthreshold; -L->allowhook=0; -g->GCthreshold=2*g->totalbytes; -setobj(L,L->top,tm); -setuvalue(L,L->top+1,udata); -L->top+=2; -luaD_call(L,L->top-2,0); -L->allowhook=oldah; -g->GCthreshold=oldt; -} -} -static void luaC_callGCTM(lua_State*L){ -while(G(L)->tmudata) -GCTM(L); -} -static void luaC_freeall(lua_State*L){ -global_State*g=G(L); -int i; -g->currentwhite=bit2mask(0,1)|bitmask(6); -sweepwholelist(L,&g->rootgc); -for(i=0;istrt.size;i++) -sweepwholelist(L,&g->strt.hash[i]); -} -static void 
markmt(global_State*g){ -int i; -for(i=0;i<(8+1);i++) -if(g->mt[i])markobject(g,g->mt[i]); -} -static void markroot(lua_State*L){ -global_State*g=G(L); -g->gray=NULL; -g->grayagain=NULL; -g->weak=NULL; -markobject(g,g->mainthread); -markvalue(g,gt(g->mainthread)); -markvalue(g,registry(L)); -markmt(g); -g->gcstate=1; -} -static void remarkupvals(global_State*g){ -UpVal*uv; -for(uv=g->uvhead.u.l.next;uv!=&g->uvhead;uv=uv->u.l.next){ -if(isgray(obj2gco(uv))) -markvalue(g,uv->v); -} -} -static void atomic(lua_State*L){ -global_State*g=G(L); -size_t udsize; -remarkupvals(g); -propagateall(g); -g->gray=g->weak; -g->weak=NULL; -markobject(g,L); -markmt(g); -propagateall(g); -g->gray=g->grayagain; -g->grayagain=NULL; -propagateall(g); -udsize=luaC_separateudata(L,0); -marktmu(g); -udsize+=propagateall(g); -cleartable(g->weak); -g->currentwhite=cast_byte(otherwhite(g)); -g->sweepstrgc=0; -g->sweepgc=&g->rootgc; -g->gcstate=2; -g->estimate=g->totalbytes-udsize; -} -static l_mem singlestep(lua_State*L){ -global_State*g=G(L); -switch(g->gcstate){ -case 0:{ -markroot(L); -return 0; -} -case 1:{ -if(g->gray) -return propagatemark(g); -else{ -atomic(L); -return 0; -} -} -case 2:{ -lu_mem old=g->totalbytes; -sweepwholelist(L,&g->strt.hash[g->sweepstrgc++]); -if(g->sweepstrgc>=g->strt.size) -g->gcstate=3; -g->estimate-=old-g->totalbytes; -return 10; -} -case 3:{ -lu_mem old=g->totalbytes; -g->sweepgc=sweeplist(L,g->sweepgc,40); -if(*g->sweepgc==NULL){ -checkSizes(L); -g->gcstate=4; -} -g->estimate-=old-g->totalbytes; -return 40*10; -} -case 4:{ -if(g->tmudata){ -GCTM(L); -if(g->estimate>100) -g->estimate-=100; -return 100; -} -else{ -g->gcstate=0; -g->gcdept=0; -return 0; -} -} -default:return 0; -} -} -static void luaC_step(lua_State*L){ -global_State*g=G(L); -l_mem lim=(1024u/100)*g->gcstepmul; -if(lim==0) -lim=(((lu_mem)(~(lu_mem)0)-2)-1)/2; -g->gcdept+=g->totalbytes-g->GCthreshold; -do{ -lim-=singlestep(L); -if(g->gcstate==0) -break; -}while(lim>0); -if(g->gcstate!=0){ -if(g->gcdept<1024u) -g->GCthreshold=g->totalbytes+1024u; -else{ -g->gcdept-=1024u; -g->GCthreshold=g->totalbytes; -} -} -else{ -setthreshold(g); -} -} -static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v){ -global_State*g=G(L); -if(g->gcstate==1) -reallymarkobject(g,v); -else -makewhite(g,o); -} -static void luaC_barrierback(lua_State*L,Table*t){ -global_State*g=G(L); -GCObject*o=obj2gco(t); -black2gray(o); -t->gclist=g->grayagain; -g->grayagain=o; -} -static void luaC_link(lua_State*L,GCObject*o,lu_byte tt){ -global_State*g=G(L); -o->gch.next=g->rootgc; -g->rootgc=o; -o->gch.marked=luaC_white(g); -o->gch.tt=tt; -} -static void luaC_linkupval(lua_State*L,UpVal*uv){ -global_State*g=G(L); -GCObject*o=obj2gco(uv); -o->gch.next=g->rootgc; -g->rootgc=o; -if(isgray(o)){ -if(g->gcstate==1){ -gray2black(o); -luaC_barrier(L,uv,uv->v); -} -else{ -makewhite(g,o); -} -} -} -typedef union{ -lua_Number r; -TString*ts; -}SemInfo; -typedef struct Token{ -int token; -SemInfo seminfo; -}Token; -typedef struct LexState{ -int current; -int linenumber; -int lastline; -Token t; -Token lookahead; -struct FuncState*fs; -struct lua_State*L; -ZIO*z; -Mbuffer*buff; -TString*source; -char decpoint; -}LexState; -static void luaX_init(lua_State*L); -static void luaX_lexerror(LexState*ls,const char*msg,int token); -#define state_size(x)(sizeof(x)+0) -#define fromstate(l)(cast(lu_byte*,(l))-0) -#define tostate(l)(cast(lua_State*,cast(lu_byte*,l)+0)) -typedef struct LG{ -lua_State l; -global_State g; -}LG; -static void stack_init(lua_State*L1,lua_State*L){ 
-L1->base_ci=luaM_newvector(L,8,CallInfo); -L1->ci=L1->base_ci; -L1->size_ci=8; -L1->end_ci=L1->base_ci+L1->size_ci-1; -L1->stack=luaM_newvector(L,(2*20)+5,TValue); -L1->stacksize=(2*20)+5; -L1->top=L1->stack; -L1->stack_last=L1->stack+(L1->stacksize-5)-1; -L1->ci->func=L1->top; -setnilvalue(L1->top++); -L1->base=L1->ci->base=L1->top; -L1->ci->top=L1->top+20; -} -static void freestack(lua_State*L,lua_State*L1){ -luaM_freearray(L,L1->base_ci,L1->size_ci,CallInfo); -luaM_freearray(L,L1->stack,L1->stacksize,TValue); -} -static void f_luaopen(lua_State*L,void*ud){ -global_State*g=G(L); -UNUSED(ud); -stack_init(L,L); -sethvalue(L,gt(L),luaH_new(L,0,2)); -sethvalue(L,registry(L),luaH_new(L,0,2)); -luaS_resize(L,32); -luaT_init(L); -luaX_init(L); -luaS_fix(luaS_newliteral(L,"not enough memory")); -g->GCthreshold=4*g->totalbytes; -} -static void preinit_state(lua_State*L,global_State*g){ -G(L)=g; -L->stack=NULL; -L->stacksize=0; -L->errorJmp=NULL; -L->hook=NULL; -L->hookmask=0; -L->basehookcount=0; -L->allowhook=1; -resethookcount(L); -L->openupval=NULL; -L->size_ci=0; -L->nCcalls=L->baseCcalls=0; -L->status=0; -L->base_ci=L->ci=NULL; -L->savedpc=NULL; -L->errfunc=0; -setnilvalue(gt(L)); -} -static void close_state(lua_State*L){ -global_State*g=G(L); -luaF_close(L,L->stack); -luaC_freeall(L); -luaM_freearray(L,G(L)->strt.hash,G(L)->strt.size,TString*); -luaZ_freebuffer(L,&g->buff); -freestack(L,L); -(*g->frealloc)(g->ud,fromstate(L),state_size(LG),0); -} -static void luaE_freethread(lua_State*L,lua_State*L1){ -luaF_close(L1,L1->stack); -freestack(L,L1); -luaM_freemem(L,fromstate(L1),state_size(lua_State)); -} -static lua_State*lua_newstate(lua_Alloc f,void*ud){ -int i; -lua_State*L; -global_State*g; -void*l=(*f)(ud,NULL,0,state_size(LG)); -if(l==NULL)return NULL; -L=tostate(l); -g=&((LG*)L)->g; -L->next=NULL; -L->tt=8; -g->currentwhite=bit2mask(0,5); -L->marked=luaC_white(g); -set2bits(L->marked,5,6); -preinit_state(L,g); -g->frealloc=f; -g->ud=ud; -g->mainthread=L; -g->uvhead.u.l.prev=&g->uvhead; -g->uvhead.u.l.next=&g->uvhead; -g->GCthreshold=0; -g->strt.size=0; -g->strt.nuse=0; -g->strt.hash=NULL; -setnilvalue(registry(L)); -luaZ_initbuffer(L,&g->buff); -g->panic=NULL; -g->gcstate=0; -g->rootgc=obj2gco(L); -g->sweepstrgc=0; -g->sweepgc=&g->rootgc; -g->gray=NULL; -g->grayagain=NULL; -g->weak=NULL; -g->tmudata=NULL; -g->totalbytes=sizeof(LG); -g->gcpause=200; -g->gcstepmul=200; -g->gcdept=0; -for(i=0;i<(8+1);i++)g->mt[i]=NULL; -if(luaD_rawrunprotected(L,f_luaopen,NULL)!=0){ -close_state(L); -L=NULL; -} -else -{} -return L; -} -static void callallgcTM(lua_State*L,void*ud){ -UNUSED(ud); -luaC_callGCTM(L); -} -static void lua_close(lua_State*L){ -L=G(L)->mainthread; -luaF_close(L,L->stack); -luaC_separateudata(L,1); -L->errfunc=0; -do{ -L->ci=L->base_ci; -L->base=L->top=L->ci->base; -L->nCcalls=L->baseCcalls=0; -}while(luaD_rawrunprotected(L,callallgcTM,NULL)!=0); -close_state(L); -} -#define getcode(fs,e)((fs)->f->code[(e)->u.s.info]) -#define luaK_codeAsBx(fs,o,A,sBx)luaK_codeABx(fs,o,A,(sBx)+(((1<<(9+9))-1)>>1)) -#define luaK_setmultret(fs,e)luaK_setreturns(fs,e,(-1)) -static int luaK_codeABx(FuncState*fs,OpCode o,int A,unsigned int Bx); -static int luaK_codeABC(FuncState*fs,OpCode o,int A,int B,int C); -static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults); -static void luaK_patchtohere(FuncState*fs,int list); -static void luaK_concat(FuncState*fs,int*l1,int l2); -static int currentpc(lua_State*L,CallInfo*ci){ -if(!isLua(ci))return-1; -if(ci==L->ci) -ci->savedpc=L->savedpc; -return 
pcRel(ci->savedpc,ci_func(ci)->l.p); -} -static int currentline(lua_State*L,CallInfo*ci){ -int pc=currentpc(L,ci); -if(pc<0) -return-1; -else -return getline_(ci_func(ci)->l.p,pc); -} -static int lua_getstack(lua_State*L,int level,lua_Debug*ar){ -int status; -CallInfo*ci; -for(ci=L->ci;level>0&&ci>L->base_ci;ci--){ -level--; -if(f_isLua(ci)) -level-=ci->tailcalls; -} -if(level==0&&ci>L->base_ci){ -status=1; -ar->i_ci=cast_int(ci-L->base_ci); -} -else if(level<0){ -status=1; -ar->i_ci=0; -} -else status=0; -return status; -} -static Proto*getluaproto(CallInfo*ci){ -return(isLua(ci)?ci_func(ci)->l.p:NULL); -} -static void funcinfo(lua_Debug*ar,Closure*cl){ -if(cl->c.isC){ -ar->source="=[C]"; -ar->linedefined=-1; -ar->lastlinedefined=-1; -ar->what="C"; -} -else{ -ar->source=getstr(cl->l.p->source); -ar->linedefined=cl->l.p->linedefined; -ar->lastlinedefined=cl->l.p->lastlinedefined; -ar->what=(ar->linedefined==0)?"main":"Lua"; -} -luaO_chunkid(ar->short_src,ar->source,60); -} -static void info_tailcall(lua_Debug*ar){ -ar->name=ar->namewhat=""; -ar->what="tail"; -ar->lastlinedefined=ar->linedefined=ar->currentline=-1; -ar->source="=(tail call)"; -luaO_chunkid(ar->short_src,ar->source,60); -ar->nups=0; -} -static void collectvalidlines(lua_State*L,Closure*f){ -if(f==NULL||f->c.isC){ -setnilvalue(L->top); -} -else{ -Table*t=luaH_new(L,0,0); -int*lineinfo=f->l.p->lineinfo; -int i; -for(i=0;il.p->sizelineinfo;i++) -setbvalue(luaH_setnum(L,t,lineinfo[i]),1); -sethvalue(L,L->top,t); -} -incr_top(L); -} -static int auxgetinfo(lua_State*L,const char*what,lua_Debug*ar, -Closure*f,CallInfo*ci){ -int status=1; -if(f==NULL){ -info_tailcall(ar); -return status; -} -for(;*what;what++){ -switch(*what){ -case'S':{ -funcinfo(ar,f); -break; -} -case'l':{ -ar->currentline=(ci)?currentline(L,ci):-1; -break; -} -case'u':{ -ar->nups=f->c.nupvalues; -break; -} -case'n':{ -ar->namewhat=(ci)?NULL:NULL; -if(ar->namewhat==NULL){ -ar->namewhat=""; -ar->name=NULL; -} -break; -} -case'L': -case'f': -break; -default:status=0; -} -} -return status; -} -static int lua_getinfo(lua_State*L,const char*what,lua_Debug*ar){ -int status; -Closure*f=NULL; -CallInfo*ci=NULL; -if(*what=='>'){ -StkId func=L->top-1; -luai_apicheck(L,ttisfunction(func)); -what++; -f=clvalue(func); -L->top--; -} -else if(ar->i_ci!=0){ -ci=L->base_ci+ar->i_ci; -f=clvalue(ci->func); -} -status=auxgetinfo(L,what,ar,f,ci); -if(strchr(what,'f')){ -if(f==NULL)setnilvalue(L->top); -else setclvalue(L,L->top,f); -incr_top(L); -} -if(strchr(what,'L')) -collectvalidlines(L,f); -return status; -} -static int isinstack(CallInfo*ci,const TValue*o){ -StkId p; -for(p=ci->base;ptop;p++) -if(o==p)return 1; -return 0; -} -static void luaG_typeerror(lua_State*L,const TValue*o,const char*op){ -const char*name=NULL; -const char*t=luaT_typenames[ttype(o)]; -const char*kind=(isinstack(L->ci,o))? 
-NULL: -NULL; -if(kind) -luaG_runerror(L,"attempt to %s %s "LUA_QL("%s")" (a %s value)", -op,kind,name,t); -else -luaG_runerror(L,"attempt to %s a %s value",op,t); -} -static void luaG_concaterror(lua_State*L,StkId p1,StkId p2){ -if(ttisstring(p1)||ttisnumber(p1))p1=p2; -luaG_typeerror(L,p1,"concatenate"); -} -static void luaG_aritherror(lua_State*L,const TValue*p1,const TValue*p2){ -TValue temp; -if(luaV_tonumber(p1,&temp)==NULL) -p2=p1; -luaG_typeerror(L,p2,"perform arithmetic on"); -} -static int luaG_ordererror(lua_State*L,const TValue*p1,const TValue*p2){ -const char*t1=luaT_typenames[ttype(p1)]; -const char*t2=luaT_typenames[ttype(p2)]; -if(t1[2]==t2[2]) -luaG_runerror(L,"attempt to compare two %s values",t1); -else -luaG_runerror(L,"attempt to compare %s with %s",t1,t2); -return 0; -} -static void addinfo(lua_State*L,const char*msg){ -CallInfo*ci=L->ci; -if(isLua(ci)){ -char buff[60]; -int line=currentline(L,ci); -luaO_chunkid(buff,getstr(getluaproto(ci)->source),60); -luaO_pushfstring(L,"%s:%d: %s",buff,line,msg); -} -} -static void luaG_errormsg(lua_State*L){ -if(L->errfunc!=0){ -StkId errfunc=restorestack(L,L->errfunc); -if(!ttisfunction(errfunc))luaD_throw(L,5); -setobj(L,L->top,L->top-1); -setobj(L,L->top-1,errfunc); -incr_top(L); -luaD_call(L,L->top-2,1); -} -luaD_throw(L,2); -} -static void luaG_runerror(lua_State*L,const char*fmt,...){ -va_list argp; -va_start(argp,fmt); -addinfo(L,luaO_pushvfstring(L,fmt,argp)); -va_end(argp); -luaG_errormsg(L); -} -static int luaZ_fill(ZIO*z){ -size_t size; -lua_State*L=z->L; -const char*buff; -buff=z->reader(L,z->data,&size); -if(buff==NULL||size==0)return(-1); -z->n=size-1; -z->p=buff; -return char2int(*(z->p++)); -} -static void luaZ_init(lua_State*L,ZIO*z,lua_Reader reader,void*data){ -z->L=L; -z->reader=reader; -z->data=data; -z->n=0; -z->p=NULL; -} -static char*luaZ_openspace(lua_State*L,Mbuffer*buff,size_t n){ -if(n>buff->buffsize){ -if(n<32)n=32; -luaZ_resizebuffer(L,buff,n); -} -return buff->buffer; -} -#define opmode(t,a,b,c,m)(((t)<<7)|((a)<<6)|((b)<<4)|((c)<<2)|(m)) -static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]={ -opmode(0,1,OpArgR,OpArgN,iABC) -,opmode(0,1,OpArgK,OpArgN,iABx) -,opmode(0,1,OpArgU,OpArgU,iABC) -,opmode(0,1,OpArgR,OpArgN,iABC) -,opmode(0,1,OpArgU,OpArgN,iABC) -,opmode(0,1,OpArgK,OpArgN,iABx) -,opmode(0,1,OpArgR,OpArgK,iABC) -,opmode(0,0,OpArgK,OpArgN,iABx) -,opmode(0,0,OpArgU,OpArgN,iABC) -,opmode(0,0,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgU,OpArgU,iABC) -,opmode(0,1,OpArgR,OpArgK,iABC) -,opmode(0,1,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgK,OpArgK,iABC) -,opmode(0,1,OpArgR,OpArgN,iABC) -,opmode(0,1,OpArgR,OpArgN,iABC) -,opmode(0,1,OpArgR,OpArgN,iABC) -,opmode(0,1,OpArgR,OpArgR,iABC) -,opmode(0,0,OpArgR,OpArgN,iAsBx) -,opmode(1,0,OpArgK,OpArgK,iABC) -,opmode(1,0,OpArgK,OpArgK,iABC) -,opmode(1,0,OpArgK,OpArgK,iABC) -,opmode(1,1,OpArgR,OpArgU,iABC) -,opmode(1,1,OpArgR,OpArgU,iABC) -,opmode(0,1,OpArgU,OpArgU,iABC) -,opmode(0,1,OpArgU,OpArgU,iABC) -,opmode(0,0,OpArgU,OpArgN,iABC) -,opmode(0,1,OpArgR,OpArgN,iAsBx) -,opmode(0,1,OpArgR,OpArgN,iAsBx) -,opmode(1,0,OpArgN,OpArgU,iABC) -,opmode(0,0,OpArgU,OpArgU,iABC) -,opmode(0,0,OpArgN,OpArgN,iABC) -,opmode(0,1,OpArgU,OpArgN,iABx) -,opmode(0,1,OpArgU,OpArgN,iABC) -}; -#define next(ls)(ls->current=zgetc(ls->z)) -#define currIsNewline(ls)(ls->current=='\n'||ls->current=='\r') -static const char*const luaX_tokens[]={ 
-"and","break","do","else","elseif", -"end","false","for","function","if", -"in","local","nil","not","or","repeat", -"return","then","true","until","while", -"..","...","==",">=","<=","~=", -"","","","", -NULL -}; -#define save_and_next(ls)(save(ls,ls->current),next(ls)) -static void save(LexState*ls,int c){ -Mbuffer*b=ls->buff; -if(b->n+1>b->buffsize){ -size_t newsize; -if(b->buffsize>=((size_t)(~(size_t)0)-2)/2) -luaX_lexerror(ls,"lexical element too long",0); -newsize=b->buffsize*2; -luaZ_resizebuffer(ls->L,b,newsize); -} -b->buffer[b->n++]=cast(char,c); -} -static void luaX_init(lua_State*L){ -int i; -for(i=0;i<(cast(int,TK_WHILE-257+1));i++){ -TString*ts=luaS_new(L,luaX_tokens[i]); -luaS_fix(ts); -ts->tsv.reserved=cast_byte(i+1); -} -} -static const char*luaX_token2str(LexState*ls,int token){ -if(token<257){ -return(iscntrl(token))?luaO_pushfstring(ls->L,"char(%d)",token): -luaO_pushfstring(ls->L,"%c",token); -} -else -return luaX_tokens[token-257]; -} -static const char*txtToken(LexState*ls,int token){ -switch(token){ -case TK_NAME: -case TK_STRING: -case TK_NUMBER: -save(ls,'\0'); -return luaZ_buffer(ls->buff); -default: -return luaX_token2str(ls,token); -} -} -static void luaX_lexerror(LexState*ls,const char*msg,int token){ -char buff[80]; -luaO_chunkid(buff,getstr(ls->source),80); -msg=luaO_pushfstring(ls->L,"%s:%d: %s",buff,ls->linenumber,msg); -if(token) -luaO_pushfstring(ls->L,"%s near "LUA_QL("%s"),msg,txtToken(ls,token)); -luaD_throw(ls->L,3); -} -static void luaX_syntaxerror(LexState*ls,const char*msg){ -luaX_lexerror(ls,msg,ls->t.token); -} -static TString*luaX_newstring(LexState*ls,const char*str,size_t l){ -lua_State*L=ls->L; -TString*ts=luaS_newlstr(L,str,l); -TValue*o=luaH_setstr(L,ls->fs->h,ts); -if(ttisnil(o)){ -setbvalue(o,1); -luaC_checkGC(L); -} -return ts; -} -static void inclinenumber(LexState*ls){ -int old=ls->current; -next(ls); -if(currIsNewline(ls)&&ls->current!=old) -next(ls); -if(++ls->linenumber>=(INT_MAX-2)) -luaX_syntaxerror(ls,"chunk has too many lines"); -} -static void luaX_setinput(lua_State*L,LexState*ls,ZIO*z,TString*source){ -ls->decpoint='.'; -ls->L=L; -ls->lookahead.token=TK_EOS; -ls->z=z; -ls->fs=NULL; -ls->linenumber=1; -ls->lastline=1; -ls->source=source; -luaZ_resizebuffer(ls->L,ls->buff,32); -next(ls); -} -static int check_next(LexState*ls,const char*set){ -if(!strchr(set,ls->current)) -return 0; -save_and_next(ls); -return 1; -} -static void buffreplace(LexState*ls,char from,char to){ -size_t n=luaZ_bufflen(ls->buff); -char*p=luaZ_buffer(ls->buff); -while(n--) -if(p[n]==from)p[n]=to; -} -static void read_numeral(LexState*ls,SemInfo*seminfo){ -do{ -save_and_next(ls); -}while(isdigit(ls->current)||ls->current=='.'); -if(check_next(ls,"Ee")) -check_next(ls,"+-"); -while(isalnum(ls->current)||ls->current=='_') -save_and_next(ls); -save(ls,'\0'); -buffreplace(ls,'.',ls->decpoint); -if(!luaO_str2d(luaZ_buffer(ls->buff),&seminfo->r)) -luaX_lexerror(ls,"malformed number",TK_NUMBER); -} -static int skip_sep(LexState*ls){ -int count=0; -int s=ls->current; -save_and_next(ls); -while(ls->current=='='){ -save_and_next(ls); -count++; -} -return(ls->current==s)?count:(-count)-1; -} -static void read_long_string(LexState*ls,SemInfo*seminfo,int sep){ -int cont=0; -(void)(cont); -save_and_next(ls); -if(currIsNewline(ls)) -inclinenumber(ls); -for(;;){ -switch(ls->current){ -case(-1): -luaX_lexerror(ls,(seminfo)?"unfinished long string": -"unfinished long comment",TK_EOS); -break; -case']':{ -if(skip_sep(ls)==sep){ -save_and_next(ls); -goto endloop; -} 
-break; -} -case'\n': -case'\r':{ -save(ls,'\n'); -inclinenumber(ls); -if(!seminfo)luaZ_resetbuffer(ls->buff); -break; -} -default:{ -if(seminfo)save_and_next(ls); -else next(ls); -} -} -}endloop: -if(seminfo) -seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+(2+sep), -luaZ_bufflen(ls->buff)-2*(2+sep)); -} -static void read_string(LexState*ls,int del,SemInfo*seminfo){ -save_and_next(ls); -while(ls->current!=del){ -switch(ls->current){ -case(-1): -luaX_lexerror(ls,"unfinished string",TK_EOS); -continue; -case'\n': -case'\r': -luaX_lexerror(ls,"unfinished string",TK_STRING); -continue; -case'\\':{ -int c; -next(ls); -switch(ls->current){ -case'a':c='\a';break; -case'b':c='\b';break; -case'f':c='\f';break; -case'n':c='\n';break; -case'r':c='\r';break; -case't':c='\t';break; -case'v':c='\v';break; -case'\n': -case'\r':save(ls,'\n');inclinenumber(ls);continue; -case(-1):continue; -default:{ -if(!isdigit(ls->current)) -save_and_next(ls); -else{ -int i=0; -c=0; -do{ -c=10*c+(ls->current-'0'); -next(ls); -}while(++i<3&&isdigit(ls->current)); -if(c>UCHAR_MAX) -luaX_lexerror(ls,"escape sequence too large",TK_STRING); -save(ls,c); -} -continue; -} -} -save(ls,c); -next(ls); -continue; -} -default: -save_and_next(ls); -} -} -save_and_next(ls); -seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+1, -luaZ_bufflen(ls->buff)-2); -} -static int llex(LexState*ls,SemInfo*seminfo){ -luaZ_resetbuffer(ls->buff); -for(;;){ -switch(ls->current){ -case'\n': -case'\r':{ -inclinenumber(ls); -continue; -} -case'-':{ -next(ls); -if(ls->current!='-')return'-'; -next(ls); -if(ls->current=='['){ -int sep=skip_sep(ls); -luaZ_resetbuffer(ls->buff); -if(sep>=0){ -read_long_string(ls,NULL,sep); -luaZ_resetbuffer(ls->buff); -continue; -} -} -while(!currIsNewline(ls)&&ls->current!=(-1)) -next(ls); -continue; -} -case'[':{ -int sep=skip_sep(ls); -if(sep>=0){ -read_long_string(ls,seminfo,sep); -return TK_STRING; -} -else if(sep==-1)return'['; -else luaX_lexerror(ls,"invalid long string delimiter",TK_STRING); -} -case'=':{ -next(ls); -if(ls->current!='=')return'='; -else{next(ls);return TK_EQ;} -} -case'<':{ -next(ls); -if(ls->current!='=')return'<'; -else{next(ls);return TK_LE;} -} -case'>':{ -next(ls); -if(ls->current!='=')return'>'; -else{next(ls);return TK_GE;} -} -case'~':{ -next(ls); -if(ls->current!='=')return'~'; -else{next(ls);return TK_NE;} -} -case'"': -case'\'':{ -read_string(ls,ls->current,seminfo); -return TK_STRING; -} -case'.':{ -save_and_next(ls); -if(check_next(ls,".")){ -if(check_next(ls,".")) -return TK_DOTS; -else return TK_CONCAT; -} -else if(!isdigit(ls->current))return'.'; -else{ -read_numeral(ls,seminfo); -return TK_NUMBER; -} -} -case(-1):{ -return TK_EOS; -} -default:{ -if(isspace(ls->current)){ -next(ls); -continue; -} -else if(isdigit(ls->current)){ -read_numeral(ls,seminfo); -return TK_NUMBER; -} -else if(isalpha(ls->current)||ls->current=='_'){ -TString*ts; -do{ -save_and_next(ls); -}while(isalnum(ls->current)||ls->current=='_'); -ts=luaX_newstring(ls,luaZ_buffer(ls->buff), -luaZ_bufflen(ls->buff)); -if(ts->tsv.reserved>0) -return ts->tsv.reserved-1+257; -else{ -seminfo->ts=ts; -return TK_NAME; -} -} -else{ -int c=ls->current; -next(ls); -return c; -} -} -} -} -} -static void luaX_next(LexState*ls){ -ls->lastline=ls->linenumber; -if(ls->lookahead.token!=TK_EOS){ -ls->t=ls->lookahead; -ls->lookahead.token=TK_EOS; -} -else -ls->t.token=llex(ls,&ls->t.seminfo); -} -static void luaX_lookahead(LexState*ls){ -ls->lookahead.token=llex(ls,&ls->lookahead.seminfo); -} -#define hasjumps(e)((e)->t!=(e)->f) 
-static int isnumeral(expdesc*e){ -return(e->k==VKNUM&&e->t==(-1)&&e->f==(-1)); -} -static void luaK_nil(FuncState*fs,int from,int n){ -Instruction*previous; -if(fs->pc>fs->lasttarget){ -if(fs->pc==0){ -if(from>=fs->nactvar) -return; -} -else{ -previous=&fs->f->code[fs->pc-1]; -if(GET_OPCODE(*previous)==OP_LOADNIL){ -int pfrom=GETARG_A(*previous); -int pto=GETARG_B(*previous); -if(pfrom<=from&&from<=pto+1){ -if(from+n-1>pto) -SETARG_B(*previous,from+n-1); -return; -} -} -} -} -luaK_codeABC(fs,OP_LOADNIL,from,from+n-1,0); -} -static int luaK_jump(FuncState*fs){ -int jpc=fs->jpc; -int j; -fs->jpc=(-1); -j=luaK_codeAsBx(fs,OP_JMP,0,(-1)); -luaK_concat(fs,&j,jpc); -return j; -} -static void luaK_ret(FuncState*fs,int first,int nret){ -luaK_codeABC(fs,OP_RETURN,first,nret+1,0); -} -static int condjump(FuncState*fs,OpCode op,int A,int B,int C){ -luaK_codeABC(fs,op,A,B,C); -return luaK_jump(fs); -} -static void fixjump(FuncState*fs,int pc,int dest){ -Instruction*jmp=&fs->f->code[pc]; -int offset=dest-(pc+1); -if(abs(offset)>(((1<<(9+9))-1)>>1)) -luaX_syntaxerror(fs->ls,"control structure too long"); -SETARG_sBx(*jmp,offset); -} -static int luaK_getlabel(FuncState*fs){ -fs->lasttarget=fs->pc; -return fs->pc; -} -static int getjump(FuncState*fs,int pc){ -int offset=GETARG_sBx(fs->f->code[pc]); -if(offset==(-1)) -return(-1); -else -return(pc+1)+offset; -} -static Instruction*getjumpcontrol(FuncState*fs,int pc){ -Instruction*pi=&fs->f->code[pc]; -if(pc>=1&&testTMode(GET_OPCODE(*(pi-1)))) -return pi-1; -else -return pi; -} -static int need_value(FuncState*fs,int list){ -for(;list!=(-1);list=getjump(fs,list)){ -Instruction i=*getjumpcontrol(fs,list); -if(GET_OPCODE(i)!=OP_TESTSET)return 1; -} -return 0; -} -static int patchtestreg(FuncState*fs,int node,int reg){ -Instruction*i=getjumpcontrol(fs,node); -if(GET_OPCODE(*i)!=OP_TESTSET) -return 0; -if(reg!=((1<<8)-1)&®!=GETARG_B(*i)) -SETARG_A(*i,reg); -else -*i=CREATE_ABC(OP_TEST,GETARG_B(*i),0,GETARG_C(*i)); -return 1; -} -static void removevalues(FuncState*fs,int list){ -for(;list!=(-1);list=getjump(fs,list)) -patchtestreg(fs,list,((1<<8)-1)); -} -static void patchlistaux(FuncState*fs,int list,int vtarget,int reg, -int dtarget){ -while(list!=(-1)){ -int next=getjump(fs,list); -if(patchtestreg(fs,list,reg)) -fixjump(fs,list,vtarget); -else -fixjump(fs,list,dtarget); -list=next; -} -} -static void dischargejpc(FuncState*fs){ -patchlistaux(fs,fs->jpc,fs->pc,((1<<8)-1),fs->pc); -fs->jpc=(-1); -} -static void luaK_patchlist(FuncState*fs,int list,int target){ -if(target==fs->pc) -luaK_patchtohere(fs,list); -else{ -patchlistaux(fs,list,target,((1<<8)-1),target); -} -} -static void luaK_patchtohere(FuncState*fs,int list){ -luaK_getlabel(fs); -luaK_concat(fs,&fs->jpc,list); -} -static void luaK_concat(FuncState*fs,int*l1,int l2){ -if(l2==(-1))return; -else if(*l1==(-1)) -*l1=l2; -else{ -int list=*l1; -int next; -while((next=getjump(fs,list))!=(-1)) -list=next; -fixjump(fs,list,l2); -} -} -static void luaK_checkstack(FuncState*fs,int n){ -int newstack=fs->freereg+n; -if(newstack>fs->f->maxstacksize){ -if(newstack>=250) -luaX_syntaxerror(fs->ls,"function or expression too complex"); -fs->f->maxstacksize=cast_byte(newstack); -} -} -static void luaK_reserveregs(FuncState*fs,int n){ -luaK_checkstack(fs,n); -fs->freereg+=n; -} -static void freereg(FuncState*fs,int reg){ -if(!ISK(reg)&®>=fs->nactvar){ -fs->freereg--; -} -} -static void freeexp(FuncState*fs,expdesc*e){ -if(e->k==VNONRELOC) -freereg(fs,e->u.s.info); -} -static int addk(FuncState*fs,TValue*k,TValue*v){ 
-lua_State*L=fs->L; -TValue*idx=luaH_set(L,fs->h,k); -Proto*f=fs->f; -int oldsize=f->sizek; -if(ttisnumber(idx)){ -return cast_int(nvalue(idx)); -} -else{ -setnvalue(idx,cast_num(fs->nk)); -luaM_growvector(L,f->k,fs->nk,f->sizek,TValue, -((1<<(9+9))-1),"constant table overflow"); -while(oldsizesizek)setnilvalue(&f->k[oldsize++]); -setobj(L,&f->k[fs->nk],v); -luaC_barrier(L,f,v); -return fs->nk++; -} -} -static int luaK_stringK(FuncState*fs,TString*s){ -TValue o; -setsvalue(fs->L,&o,s); -return addk(fs,&o,&o); -} -static int luaK_numberK(FuncState*fs,lua_Number r){ -TValue o; -setnvalue(&o,r); -return addk(fs,&o,&o); -} -static int boolK(FuncState*fs,int b){ -TValue o; -setbvalue(&o,b); -return addk(fs,&o,&o); -} -static int nilK(FuncState*fs){ -TValue k,v; -setnilvalue(&v); -sethvalue(fs->L,&k,fs->h); -return addk(fs,&k,&v); -} -static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults){ -if(e->k==VCALL){ -SETARG_C(getcode(fs,e),nresults+1); -} -else if(e->k==VVARARG){ -SETARG_B(getcode(fs,e),nresults+1); -SETARG_A(getcode(fs,e),fs->freereg); -luaK_reserveregs(fs,1); -} -} -static void luaK_setoneret(FuncState*fs,expdesc*e){ -if(e->k==VCALL){ -e->k=VNONRELOC; -e->u.s.info=GETARG_A(getcode(fs,e)); -} -else if(e->k==VVARARG){ -SETARG_B(getcode(fs,e),2); -e->k=VRELOCABLE; -} -} -static void luaK_dischargevars(FuncState*fs,expdesc*e){ -switch(e->k){ -case VLOCAL:{ -e->k=VNONRELOC; -break; -} -case VUPVAL:{ -e->u.s.info=luaK_codeABC(fs,OP_GETUPVAL,0,e->u.s.info,0); -e->k=VRELOCABLE; -break; -} -case VGLOBAL:{ -e->u.s.info=luaK_codeABx(fs,OP_GETGLOBAL,0,e->u.s.info); -e->k=VRELOCABLE; -break; -} -case VINDEXED:{ -freereg(fs,e->u.s.aux); -freereg(fs,e->u.s.info); -e->u.s.info=luaK_codeABC(fs,OP_GETTABLE,0,e->u.s.info,e->u.s.aux); -e->k=VRELOCABLE; -break; -} -case VVARARG: -case VCALL:{ -luaK_setoneret(fs,e); -break; -} -default:break; -} -} -static int code_label(FuncState*fs,int A,int b,int jump){ -luaK_getlabel(fs); -return luaK_codeABC(fs,OP_LOADBOOL,A,b,jump); -} -static void discharge2reg(FuncState*fs,expdesc*e,int reg){ -luaK_dischargevars(fs,e); -switch(e->k){ -case VNIL:{ -luaK_nil(fs,reg,1); -break; -} -case VFALSE:case VTRUE:{ -luaK_codeABC(fs,OP_LOADBOOL,reg,e->k==VTRUE,0); -break; -} -case VK:{ -luaK_codeABx(fs,OP_LOADK,reg,e->u.s.info); -break; -} -case VKNUM:{ -luaK_codeABx(fs,OP_LOADK,reg,luaK_numberK(fs,e->u.nval)); -break; -} -case VRELOCABLE:{ -Instruction*pc=&getcode(fs,e); -SETARG_A(*pc,reg); -break; -} -case VNONRELOC:{ -if(reg!=e->u.s.info) -luaK_codeABC(fs,OP_MOVE,reg,e->u.s.info,0); -break; -} -default:{ -return; -} -} -e->u.s.info=reg; -e->k=VNONRELOC; -} -static void discharge2anyreg(FuncState*fs,expdesc*e){ -if(e->k!=VNONRELOC){ -luaK_reserveregs(fs,1); -discharge2reg(fs,e,fs->freereg-1); -} -} -static void exp2reg(FuncState*fs,expdesc*e,int reg){ -discharge2reg(fs,e,reg); -if(e->k==VJMP) -luaK_concat(fs,&e->t,e->u.s.info); -if(hasjumps(e)){ -int final; -int p_f=(-1); -int p_t=(-1); -if(need_value(fs,e->t)||need_value(fs,e->f)){ -int fj=(e->k==VJMP)?(-1):luaK_jump(fs); -p_f=code_label(fs,reg,0,1); -p_t=code_label(fs,reg,1,0); -luaK_patchtohere(fs,fj); -} -final=luaK_getlabel(fs); -patchlistaux(fs,e->f,final,reg,p_f); -patchlistaux(fs,e->t,final,reg,p_t); -} -e->f=e->t=(-1); -e->u.s.info=reg; -e->k=VNONRELOC; -} -static void luaK_exp2nextreg(FuncState*fs,expdesc*e){ -luaK_dischargevars(fs,e); -freeexp(fs,e); -luaK_reserveregs(fs,1); -exp2reg(fs,e,fs->freereg-1); -} -static int luaK_exp2anyreg(FuncState*fs,expdesc*e){ -luaK_dischargevars(fs,e); 
-if(e->k==VNONRELOC){ -if(!hasjumps(e))return e->u.s.info; -if(e->u.s.info>=fs->nactvar){ -exp2reg(fs,e,e->u.s.info); -return e->u.s.info; -} -} -luaK_exp2nextreg(fs,e); -return e->u.s.info; -} -static void luaK_exp2val(FuncState*fs,expdesc*e){ -if(hasjumps(e)) -luaK_exp2anyreg(fs,e); -else -luaK_dischargevars(fs,e); -} -static int luaK_exp2RK(FuncState*fs,expdesc*e){ -luaK_exp2val(fs,e); -switch(e->k){ -case VKNUM: -case VTRUE: -case VFALSE: -case VNIL:{ -if(fs->nk<=((1<<(9-1))-1)){ -e->u.s.info=(e->k==VNIL)?nilK(fs): -(e->k==VKNUM)?luaK_numberK(fs,e->u.nval): -boolK(fs,(e->k==VTRUE)); -e->k=VK; -return RKASK(e->u.s.info); -} -else break; -} -case VK:{ -if(e->u.s.info<=((1<<(9-1))-1)) -return RKASK(e->u.s.info); -else break; -} -default:break; -} -return luaK_exp2anyreg(fs,e); -} -static void luaK_storevar(FuncState*fs,expdesc*var,expdesc*ex){ -switch(var->k){ -case VLOCAL:{ -freeexp(fs,ex); -exp2reg(fs,ex,var->u.s.info); -return; -} -case VUPVAL:{ -int e=luaK_exp2anyreg(fs,ex); -luaK_codeABC(fs,OP_SETUPVAL,e,var->u.s.info,0); -break; -} -case VGLOBAL:{ -int e=luaK_exp2anyreg(fs,ex); -luaK_codeABx(fs,OP_SETGLOBAL,e,var->u.s.info); -break; -} -case VINDEXED:{ -int e=luaK_exp2RK(fs,ex); -luaK_codeABC(fs,OP_SETTABLE,var->u.s.info,var->u.s.aux,e); -break; -} -default:{ -break; -} -} -freeexp(fs,ex); -} -static void luaK_self(FuncState*fs,expdesc*e,expdesc*key){ -int func; -luaK_exp2anyreg(fs,e); -freeexp(fs,e); -func=fs->freereg; -luaK_reserveregs(fs,2); -luaK_codeABC(fs,OP_SELF,func,e->u.s.info,luaK_exp2RK(fs,key)); -freeexp(fs,key); -e->u.s.info=func; -e->k=VNONRELOC; -} -static void invertjump(FuncState*fs,expdesc*e){ -Instruction*pc=getjumpcontrol(fs,e->u.s.info); -SETARG_A(*pc,!(GETARG_A(*pc))); -} -static int jumponcond(FuncState*fs,expdesc*e,int cond){ -if(e->k==VRELOCABLE){ -Instruction ie=getcode(fs,e); -if(GET_OPCODE(ie)==OP_NOT){ -fs->pc--; -return condjump(fs,OP_TEST,GETARG_B(ie),0,!cond); -} -} -discharge2anyreg(fs,e); -freeexp(fs,e); -return condjump(fs,OP_TESTSET,((1<<8)-1),e->u.s.info,cond); -} -static void luaK_goiftrue(FuncState*fs,expdesc*e){ -int pc; -luaK_dischargevars(fs,e); -switch(e->k){ -case VK:case VKNUM:case VTRUE:{ -pc=(-1); -break; -} -case VJMP:{ -invertjump(fs,e); -pc=e->u.s.info; -break; -} -default:{ -pc=jumponcond(fs,e,0); -break; -} -} -luaK_concat(fs,&e->f,pc); -luaK_patchtohere(fs,e->t); -e->t=(-1); -} -static void luaK_goiffalse(FuncState*fs,expdesc*e){ -int pc; -luaK_dischargevars(fs,e); -switch(e->k){ -case VNIL:case VFALSE:{ -pc=(-1); -break; -} -case VJMP:{ -pc=e->u.s.info; -break; -} -default:{ -pc=jumponcond(fs,e,1); -break; -} -} -luaK_concat(fs,&e->t,pc); -luaK_patchtohere(fs,e->f); -e->f=(-1); -} -static void codenot(FuncState*fs,expdesc*e){ -luaK_dischargevars(fs,e); -switch(e->k){ -case VNIL:case VFALSE:{ -e->k=VTRUE; -break; -} -case VK:case VKNUM:case VTRUE:{ -e->k=VFALSE; -break; -} -case VJMP:{ -invertjump(fs,e); -break; -} -case VRELOCABLE: -case VNONRELOC:{ -discharge2anyreg(fs,e); -freeexp(fs,e); -e->u.s.info=luaK_codeABC(fs,OP_NOT,0,e->u.s.info,0); -e->k=VRELOCABLE; -break; -} -default:{ -break; -} -} -{int temp=e->f;e->f=e->t;e->t=temp;} -removevalues(fs,e->f); -removevalues(fs,e->t); -} -static void luaK_indexed(FuncState*fs,expdesc*t,expdesc*k){ -t->u.s.aux=luaK_exp2RK(fs,k); -t->k=VINDEXED; -} -static int constfolding(OpCode op,expdesc*e1,expdesc*e2){ -lua_Number v1,v2,r; -if(!isnumeral(e1)||!isnumeral(e2))return 0; -v1=e1->u.nval; -v2=e2->u.nval; -switch(op){ -case OP_ADD:r=luai_numadd(v1,v2);break; -case 
OP_SUB:r=luai_numsub(v1,v2);break; -case OP_MUL:r=luai_nummul(v1,v2);break; -case OP_DIV: -if(v2==0)return 0; -r=luai_numdiv(v1,v2);break; -case OP_MOD: -if(v2==0)return 0; -r=luai_nummod(v1,v2);break; -case OP_POW:r=luai_numpow(v1,v2);break; -case OP_UNM:r=luai_numunm(v1);break; -case OP_LEN:return 0; -default:r=0;break; -} -if(luai_numisnan(r))return 0; -e1->u.nval=r; -return 1; -} -static void codearith(FuncState*fs,OpCode op,expdesc*e1,expdesc*e2){ -if(constfolding(op,e1,e2)) -return; -else{ -int o2=(op!=OP_UNM&&op!=OP_LEN)?luaK_exp2RK(fs,e2):0; -int o1=luaK_exp2RK(fs,e1); -if(o1>o2){ -freeexp(fs,e1); -freeexp(fs,e2); -} -else{ -freeexp(fs,e2); -freeexp(fs,e1); -} -e1->u.s.info=luaK_codeABC(fs,op,0,o1,o2); -e1->k=VRELOCABLE; -} -} -static void codecomp(FuncState*fs,OpCode op,int cond,expdesc*e1, -expdesc*e2){ -int o1=luaK_exp2RK(fs,e1); -int o2=luaK_exp2RK(fs,e2); -freeexp(fs,e2); -freeexp(fs,e1); -if(cond==0&&op!=OP_EQ){ -int temp; -temp=o1;o1=o2;o2=temp; -cond=1; -} -e1->u.s.info=condjump(fs,op,cond,o1,o2); -e1->k=VJMP; -} -static void luaK_prefix(FuncState*fs,UnOpr op,expdesc*e){ -expdesc e2; -e2.t=e2.f=(-1);e2.k=VKNUM;e2.u.nval=0; -switch(op){ -case OPR_MINUS:{ -if(!isnumeral(e)) -luaK_exp2anyreg(fs,e); -codearith(fs,OP_UNM,e,&e2); -break; -} -case OPR_NOT:codenot(fs,e);break; -case OPR_LEN:{ -luaK_exp2anyreg(fs,e); -codearith(fs,OP_LEN,e,&e2); -break; -} -default:; -} -} -static void luaK_infix(FuncState*fs,BinOpr op,expdesc*v){ -switch(op){ -case OPR_AND:{ -luaK_goiftrue(fs,v); -break; -} -case OPR_OR:{ -luaK_goiffalse(fs,v); -break; -} -case OPR_CONCAT:{ -luaK_exp2nextreg(fs,v); -break; -} -case OPR_ADD:case OPR_SUB:case OPR_MUL:case OPR_DIV: -case OPR_MOD:case OPR_POW:{ -if(!isnumeral(v))luaK_exp2RK(fs,v); -break; -} -default:{ -luaK_exp2RK(fs,v); -break; -} -} -} -static void luaK_posfix(FuncState*fs,BinOpr op,expdesc*e1,expdesc*e2){ -switch(op){ -case OPR_AND:{ -luaK_dischargevars(fs,e2); -luaK_concat(fs,&e2->f,e1->f); -*e1=*e2; -break; -} -case OPR_OR:{ -luaK_dischargevars(fs,e2); -luaK_concat(fs,&e2->t,e1->t); -*e1=*e2; -break; -} -case OPR_CONCAT:{ -luaK_exp2val(fs,e2); -if(e2->k==VRELOCABLE&&GET_OPCODE(getcode(fs,e2))==OP_CONCAT){ -freeexp(fs,e1); -SETARG_B(getcode(fs,e2),e1->u.s.info); -e1->k=VRELOCABLE;e1->u.s.info=e2->u.s.info; -} -else{ -luaK_exp2nextreg(fs,e2); -codearith(fs,OP_CONCAT,e1,e2); -} -break; -} -case OPR_ADD:codearith(fs,OP_ADD,e1,e2);break; -case OPR_SUB:codearith(fs,OP_SUB,e1,e2);break; -case OPR_MUL:codearith(fs,OP_MUL,e1,e2);break; -case OPR_DIV:codearith(fs,OP_DIV,e1,e2);break; -case OPR_MOD:codearith(fs,OP_MOD,e1,e2);break; -case OPR_POW:codearith(fs,OP_POW,e1,e2);break; -case OPR_EQ:codecomp(fs,OP_EQ,1,e1,e2);break; -case OPR_NE:codecomp(fs,OP_EQ,0,e1,e2);break; -case OPR_LT:codecomp(fs,OP_LT,1,e1,e2);break; -case OPR_LE:codecomp(fs,OP_LE,1,e1,e2);break; -case OPR_GT:codecomp(fs,OP_LT,0,e1,e2);break; -case OPR_GE:codecomp(fs,OP_LE,0,e1,e2);break; -default:; -} -} -static void luaK_fixline(FuncState*fs,int line){ -fs->f->lineinfo[fs->pc-1]=line; -} -static int luaK_code(FuncState*fs,Instruction i,int line){ -Proto*f=fs->f; -dischargejpc(fs); -luaM_growvector(fs->L,f->code,fs->pc,f->sizecode,Instruction, -(INT_MAX-2),"code size overflow"); -f->code[fs->pc]=i; -luaM_growvector(fs->L,f->lineinfo,fs->pc,f->sizelineinfo,int, -(INT_MAX-2),"code size overflow"); -f->lineinfo[fs->pc]=line; -return fs->pc++; -} -static int luaK_codeABC(FuncState*fs,OpCode o,int a,int b,int c){ -return luaK_code(fs,CREATE_ABC(o,a,b,c),fs->ls->lastline); -} -static int 
luaK_codeABx(FuncState*fs,OpCode o,int a,unsigned int bc){ -return luaK_code(fs,CREATE_ABx(o,a,bc),fs->ls->lastline); -} -static void luaK_setlist(FuncState*fs,int base,int nelems,int tostore){ -int c=(nelems-1)/50+1; -int b=(tostore==(-1))?0:tostore; -if(c<=((1<<9)-1)) -luaK_codeABC(fs,OP_SETLIST,base,b,c); -else{ -luaK_codeABC(fs,OP_SETLIST,base,b,0); -luaK_code(fs,cast(Instruction,c),fs->ls->lastline); -} -fs->freereg=base+1; -} -#define hasmultret(k)((k)==VCALL||(k)==VVARARG) -#define getlocvar(fs,i)((fs)->f->locvars[(fs)->actvar[i]]) -#define luaY_checklimit(fs,v,l,m)if((v)>(l))errorlimit(fs,l,m) -typedef struct BlockCnt{ -struct BlockCnt*previous; -int breaklist; -lu_byte nactvar; -lu_byte upval; -lu_byte isbreakable; -}BlockCnt; -static void chunk(LexState*ls); -static void expr(LexState*ls,expdesc*v); -static void anchor_token(LexState*ls){ -if(ls->t.token==TK_NAME||ls->t.token==TK_STRING){ -TString*ts=ls->t.seminfo.ts; -luaX_newstring(ls,getstr(ts),ts->tsv.len); -} -} -static void error_expected(LexState*ls,int token){ -luaX_syntaxerror(ls, -luaO_pushfstring(ls->L,LUA_QL("%s")" expected",luaX_token2str(ls,token))); -} -static void errorlimit(FuncState*fs,int limit,const char*what){ -const char*msg=(fs->f->linedefined==0)? -luaO_pushfstring(fs->L,"main function has more than %d %s",limit,what): -luaO_pushfstring(fs->L,"function at line %d has more than %d %s", -fs->f->linedefined,limit,what); -luaX_lexerror(fs->ls,msg,0); -} -static int testnext(LexState*ls,int c){ -if(ls->t.token==c){ -luaX_next(ls); -return 1; -} -else return 0; -} -static void check(LexState*ls,int c){ -if(ls->t.token!=c) -error_expected(ls,c); -} -static void checknext(LexState*ls,int c){ -check(ls,c); -luaX_next(ls); -} -#define check_condition(ls,c,msg){if(!(c))luaX_syntaxerror(ls,msg);} -static void check_match(LexState*ls,int what,int who,int where){ -if(!testnext(ls,what)){ -if(where==ls->linenumber) -error_expected(ls,what); -else{ -luaX_syntaxerror(ls,luaO_pushfstring(ls->L, -LUA_QL("%s")" expected (to close "LUA_QL("%s")" at line %d)", -luaX_token2str(ls,what),luaX_token2str(ls,who),where)); -} -} -} -static TString*str_checkname(LexState*ls){ -TString*ts; -check(ls,TK_NAME); -ts=ls->t.seminfo.ts; -luaX_next(ls); -return ts; -} -static void init_exp(expdesc*e,expkind k,int i){ -e->f=e->t=(-1); -e->k=k; -e->u.s.info=i; -} -static void codestring(LexState*ls,expdesc*e,TString*s){ -init_exp(e,VK,luaK_stringK(ls->fs,s)); -} -static void checkname(LexState*ls,expdesc*e){ -codestring(ls,e,str_checkname(ls)); -} -static int registerlocalvar(LexState*ls,TString*varname){ -FuncState*fs=ls->fs; -Proto*f=fs->f; -int oldsize=f->sizelocvars; -luaM_growvector(ls->L,f->locvars,fs->nlocvars,f->sizelocvars, -LocVar,SHRT_MAX,"too many local variables"); -while(oldsizesizelocvars)f->locvars[oldsize++].varname=NULL; -f->locvars[fs->nlocvars].varname=varname; -luaC_objbarrier(ls->L,f,varname); -return fs->nlocvars++; -} -#define new_localvarliteral(ls,v,n)new_localvar(ls,luaX_newstring(ls,""v,(sizeof(v)/sizeof(char))-1),n) -static void new_localvar(LexState*ls,TString*name,int n){ -FuncState*fs=ls->fs; -luaY_checklimit(fs,fs->nactvar+n+1,200,"local variables"); -fs->actvar[fs->nactvar+n]=cast(unsigned short,registerlocalvar(ls,name)); -} -static void adjustlocalvars(LexState*ls,int nvars){ -FuncState*fs=ls->fs; -fs->nactvar=cast_byte(fs->nactvar+nvars); -for(;nvars;nvars--){ -getlocvar(fs,fs->nactvar-nvars).startpc=fs->pc; -} -} -static void removevars(LexState*ls,int tolevel){ -FuncState*fs=ls->fs; 
-while(fs->nactvar>tolevel) -getlocvar(fs,--fs->nactvar).endpc=fs->pc; -} -static int indexupvalue(FuncState*fs,TString*name,expdesc*v){ -int i; -Proto*f=fs->f; -int oldsize=f->sizeupvalues; -for(i=0;inups;i++){ -if(fs->upvalues[i].k==v->k&&fs->upvalues[i].info==v->u.s.info){ -return i; -} -} -luaY_checklimit(fs,f->nups+1,60,"upvalues"); -luaM_growvector(fs->L,f->upvalues,f->nups,f->sizeupvalues, -TString*,(INT_MAX-2),""); -while(oldsizesizeupvalues)f->upvalues[oldsize++]=NULL; -f->upvalues[f->nups]=name; -luaC_objbarrier(fs->L,f,name); -fs->upvalues[f->nups].k=cast_byte(v->k); -fs->upvalues[f->nups].info=cast_byte(v->u.s.info); -return f->nups++; -} -static int searchvar(FuncState*fs,TString*n){ -int i; -for(i=fs->nactvar-1;i>=0;i--){ -if(n==getlocvar(fs,i).varname) -return i; -} -return-1; -} -static void markupval(FuncState*fs,int level){ -BlockCnt*bl=fs->bl; -while(bl&&bl->nactvar>level)bl=bl->previous; -if(bl)bl->upval=1; -} -static int singlevaraux(FuncState*fs,TString*n,expdesc*var,int base){ -if(fs==NULL){ -init_exp(var,VGLOBAL,((1<<8)-1)); -return VGLOBAL; -} -else{ -int v=searchvar(fs,n); -if(v>=0){ -init_exp(var,VLOCAL,v); -if(!base) -markupval(fs,v); -return VLOCAL; -} -else{ -if(singlevaraux(fs->prev,n,var,0)==VGLOBAL) -return VGLOBAL; -var->u.s.info=indexupvalue(fs,n,var); -var->k=VUPVAL; -return VUPVAL; -} -} -} -static void singlevar(LexState*ls,expdesc*var){ -TString*varname=str_checkname(ls); -FuncState*fs=ls->fs; -if(singlevaraux(fs,varname,var,1)==VGLOBAL) -var->u.s.info=luaK_stringK(fs,varname); -} -static void adjust_assign(LexState*ls,int nvars,int nexps,expdesc*e){ -FuncState*fs=ls->fs; -int extra=nvars-nexps; -if(hasmultret(e->k)){ -extra++; -if(extra<0)extra=0; -luaK_setreturns(fs,e,extra); -if(extra>1)luaK_reserveregs(fs,extra-1); -} -else{ -if(e->k!=VVOID)luaK_exp2nextreg(fs,e); -if(extra>0){ -int reg=fs->freereg; -luaK_reserveregs(fs,extra); -luaK_nil(fs,reg,extra); -} -} -} -static void enterlevel(LexState*ls){ -if(++ls->L->nCcalls>200) -luaX_lexerror(ls,"chunk has too many syntax levels",0); -} -#define leavelevel(ls)((ls)->L->nCcalls--) -static void enterblock(FuncState*fs,BlockCnt*bl,lu_byte isbreakable){ -bl->breaklist=(-1); -bl->isbreakable=isbreakable; -bl->nactvar=fs->nactvar; -bl->upval=0; -bl->previous=fs->bl; -fs->bl=bl; -} -static void leaveblock(FuncState*fs){ -BlockCnt*bl=fs->bl; -fs->bl=bl->previous; -removevars(fs->ls,bl->nactvar); -if(bl->upval) -luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0); -fs->freereg=fs->nactvar; -luaK_patchtohere(fs,bl->breaklist); -} -static void pushclosure(LexState*ls,FuncState*func,expdesc*v){ -FuncState*fs=ls->fs; -Proto*f=fs->f; -int oldsize=f->sizep; -int i; -luaM_growvector(ls->L,f->p,fs->np,f->sizep,Proto*, -((1<<(9+9))-1),"constant table overflow"); -while(oldsizesizep)f->p[oldsize++]=NULL; -f->p[fs->np++]=func->f; -luaC_objbarrier(ls->L,f,func->f); -init_exp(v,VRELOCABLE,luaK_codeABx(fs,OP_CLOSURE,0,fs->np-1)); -for(i=0;if->nups;i++){ -OpCode o=(func->upvalues[i].k==VLOCAL)?OP_MOVE:OP_GETUPVAL; -luaK_codeABC(fs,o,0,func->upvalues[i].info,0); -} -} -static void open_func(LexState*ls,FuncState*fs){ -lua_State*L=ls->L; -Proto*f=luaF_newproto(L); -fs->f=f; -fs->prev=ls->fs; -fs->ls=ls; -fs->L=L; -ls->fs=fs; -fs->pc=0; -fs->lasttarget=-1; -fs->jpc=(-1); -fs->freereg=0; -fs->nk=0; -fs->np=0; -fs->nlocvars=0; -fs->nactvar=0; -fs->bl=NULL; -f->source=ls->source; -f->maxstacksize=2; -fs->h=luaH_new(L,0,0); -sethvalue(L,L->top,fs->h); -incr_top(L); -setptvalue(L,L->top,f); -incr_top(L); -} -static void 
close_func(LexState*ls){ -lua_State*L=ls->L; -FuncState*fs=ls->fs; -Proto*f=fs->f; -removevars(ls,0); -luaK_ret(fs,0,0); -luaM_reallocvector(L,f->code,f->sizecode,fs->pc,Instruction); -f->sizecode=fs->pc; -luaM_reallocvector(L,f->lineinfo,f->sizelineinfo,fs->pc,int); -f->sizelineinfo=fs->pc; -luaM_reallocvector(L,f->k,f->sizek,fs->nk,TValue); -f->sizek=fs->nk; -luaM_reallocvector(L,f->p,f->sizep,fs->np,Proto*); -f->sizep=fs->np; -luaM_reallocvector(L,f->locvars,f->sizelocvars,fs->nlocvars,LocVar); -f->sizelocvars=fs->nlocvars; -luaM_reallocvector(L,f->upvalues,f->sizeupvalues,f->nups,TString*); -f->sizeupvalues=f->nups; -ls->fs=fs->prev; -if(fs)anchor_token(ls); -L->top-=2; -} -static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,const char*name){ -struct LexState lexstate; -struct FuncState funcstate; -lexstate.buff=buff; -luaX_setinput(L,&lexstate,z,luaS_new(L,name)); -open_func(&lexstate,&funcstate); -funcstate.f->is_vararg=2; -luaX_next(&lexstate); -chunk(&lexstate); -check(&lexstate,TK_EOS); -close_func(&lexstate); -return funcstate.f; -} -static void field(LexState*ls,expdesc*v){ -FuncState*fs=ls->fs; -expdesc key; -luaK_exp2anyreg(fs,v); -luaX_next(ls); -checkname(ls,&key); -luaK_indexed(fs,v,&key); -} -static void yindex(LexState*ls,expdesc*v){ -luaX_next(ls); -expr(ls,v); -luaK_exp2val(ls->fs,v); -checknext(ls,']'); -} -struct ConsControl{ -expdesc v; -expdesc*t; -int nh; -int na; -int tostore; -}; -static void recfield(LexState*ls,struct ConsControl*cc){ -FuncState*fs=ls->fs; -int reg=ls->fs->freereg; -expdesc key,val; -int rkkey; -if(ls->t.token==TK_NAME){ -luaY_checklimit(fs,cc->nh,(INT_MAX-2),"items in a constructor"); -checkname(ls,&key); -} -else -yindex(ls,&key); -cc->nh++; -checknext(ls,'='); -rkkey=luaK_exp2RK(fs,&key); -expr(ls,&val); -luaK_codeABC(fs,OP_SETTABLE,cc->t->u.s.info,rkkey,luaK_exp2RK(fs,&val)); -fs->freereg=reg; -} -static void closelistfield(FuncState*fs,struct ConsControl*cc){ -if(cc->v.k==VVOID)return; -luaK_exp2nextreg(fs,&cc->v); -cc->v.k=VVOID; -if(cc->tostore==50){ -luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore); -cc->tostore=0; -} -} -static void lastlistfield(FuncState*fs,struct ConsControl*cc){ -if(cc->tostore==0)return; -if(hasmultret(cc->v.k)){ -luaK_setmultret(fs,&cc->v); -luaK_setlist(fs,cc->t->u.s.info,cc->na,(-1)); -cc->na--; -} -else{ -if(cc->v.k!=VVOID) -luaK_exp2nextreg(fs,&cc->v); -luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore); -} -} -static void listfield(LexState*ls,struct ConsControl*cc){ -expr(ls,&cc->v); -luaY_checklimit(ls->fs,cc->na,(INT_MAX-2),"items in a constructor"); -cc->na++; -cc->tostore++; -} -static void constructor(LexState*ls,expdesc*t){ -FuncState*fs=ls->fs; -int line=ls->linenumber; -int pc=luaK_codeABC(fs,OP_NEWTABLE,0,0,0); -struct ConsControl cc; -cc.na=cc.nh=cc.tostore=0; -cc.t=t; -init_exp(t,VRELOCABLE,pc); -init_exp(&cc.v,VVOID,0); -luaK_exp2nextreg(ls->fs,t); -checknext(ls,'{'); -do{ -if(ls->t.token=='}')break; -closelistfield(fs,&cc); -switch(ls->t.token){ -case TK_NAME:{ -luaX_lookahead(ls); -if(ls->lookahead.token!='=') -listfield(ls,&cc); -else -recfield(ls,&cc); -break; -} -case'[':{ -recfield(ls,&cc); -break; -} -default:{ -listfield(ls,&cc); -break; -} -} -}while(testnext(ls,',')||testnext(ls,';')); -check_match(ls,'}','{',line); -lastlistfield(fs,&cc); -SETARG_B(fs->f->code[pc],luaO_int2fb(cc.na)); -SETARG_C(fs->f->code[pc],luaO_int2fb(cc.nh)); -} -static void parlist(LexState*ls){ -FuncState*fs=ls->fs; -Proto*f=fs->f; -int nparams=0; -f->is_vararg=0; -if(ls->t.token!=')'){ -do{ 
-switch(ls->t.token){ -case TK_NAME:{ -new_localvar(ls,str_checkname(ls),nparams++); -break; -} -case TK_DOTS:{ -luaX_next(ls); -f->is_vararg|=2; -break; -} -default:luaX_syntaxerror(ls," or "LUA_QL("...")" expected"); -} -}while(!f->is_vararg&&testnext(ls,',')); -} -adjustlocalvars(ls,nparams); -f->numparams=cast_byte(fs->nactvar-(f->is_vararg&1)); -luaK_reserveregs(fs,fs->nactvar); -} -static void body(LexState*ls,expdesc*e,int needself,int line){ -FuncState new_fs; -open_func(ls,&new_fs); -new_fs.f->linedefined=line; -checknext(ls,'('); -if(needself){ -new_localvarliteral(ls,"self",0); -adjustlocalvars(ls,1); -} -parlist(ls); -checknext(ls,')'); -chunk(ls); -new_fs.f->lastlinedefined=ls->linenumber; -check_match(ls,TK_END,TK_FUNCTION,line); -close_func(ls); -pushclosure(ls,&new_fs,e); -} -static int explist1(LexState*ls,expdesc*v){ -int n=1; -expr(ls,v); -while(testnext(ls,',')){ -luaK_exp2nextreg(ls->fs,v); -expr(ls,v); -n++; -} -return n; -} -static void funcargs(LexState*ls,expdesc*f){ -FuncState*fs=ls->fs; -expdesc args; -int base,nparams; -int line=ls->linenumber; -switch(ls->t.token){ -case'(':{ -if(line!=ls->lastline) -luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)"); -luaX_next(ls); -if(ls->t.token==')') -args.k=VVOID; -else{ -explist1(ls,&args); -luaK_setmultret(fs,&args); -} -check_match(ls,')','(',line); -break; -} -case'{':{ -constructor(ls,&args); -break; -} -case TK_STRING:{ -codestring(ls,&args,ls->t.seminfo.ts); -luaX_next(ls); -break; -} -default:{ -luaX_syntaxerror(ls,"function arguments expected"); -return; -} -} -base=f->u.s.info; -if(hasmultret(args.k)) -nparams=(-1); -else{ -if(args.k!=VVOID) -luaK_exp2nextreg(fs,&args); -nparams=fs->freereg-(base+1); -} -init_exp(f,VCALL,luaK_codeABC(fs,OP_CALL,base,nparams+1,2)); -luaK_fixline(fs,line); -fs->freereg=base+1; -} -static void prefixexp(LexState*ls,expdesc*v){ -switch(ls->t.token){ -case'(':{ -int line=ls->linenumber; -luaX_next(ls); -expr(ls,v); -check_match(ls,')','(',line); -luaK_dischargevars(ls->fs,v); -return; -} -case TK_NAME:{ -singlevar(ls,v); -return; -} -default:{ -luaX_syntaxerror(ls,"unexpected symbol"); -return; -} -} -} -static void primaryexp(LexState*ls,expdesc*v){ -FuncState*fs=ls->fs; -prefixexp(ls,v); -for(;;){ -switch(ls->t.token){ -case'.':{ -field(ls,v); -break; -} -case'[':{ -expdesc key; -luaK_exp2anyreg(fs,v); -yindex(ls,&key); -luaK_indexed(fs,v,&key); -break; -} -case':':{ -expdesc key; -luaX_next(ls); -checkname(ls,&key); -luaK_self(fs,v,&key); -funcargs(ls,v); -break; -} -case'(':case TK_STRING:case'{':{ -luaK_exp2nextreg(fs,v); -funcargs(ls,v); -break; -} -default:return; -} -} -} -static void simpleexp(LexState*ls,expdesc*v){ -switch(ls->t.token){ -case TK_NUMBER:{ -init_exp(v,VKNUM,0); -v->u.nval=ls->t.seminfo.r; -break; -} -case TK_STRING:{ -codestring(ls,v,ls->t.seminfo.ts); -break; -} -case TK_NIL:{ -init_exp(v,VNIL,0); -break; -} -case TK_TRUE:{ -init_exp(v,VTRUE,0); -break; -} -case TK_FALSE:{ -init_exp(v,VFALSE,0); -break; -} -case TK_DOTS:{ -FuncState*fs=ls->fs; -check_condition(ls,fs->f->is_vararg, -"cannot use "LUA_QL("...")" outside a vararg function"); -fs->f->is_vararg&=~4; -init_exp(v,VVARARG,luaK_codeABC(fs,OP_VARARG,0,1,0)); -break; -} -case'{':{ -constructor(ls,v); -return; -} -case TK_FUNCTION:{ -luaX_next(ls); -body(ls,v,0,ls->linenumber); -return; -} -default:{ -primaryexp(ls,v); -return; -} -} -luaX_next(ls); -} -static UnOpr getunopr(int op){ -switch(op){ -case TK_NOT:return OPR_NOT; -case'-':return OPR_MINUS; -case'#':return OPR_LEN; 
-default:return OPR_NOUNOPR; -} -} -static BinOpr getbinopr(int op){ -switch(op){ -case'+':return OPR_ADD; -case'-':return OPR_SUB; -case'*':return OPR_MUL; -case'/':return OPR_DIV; -case'%':return OPR_MOD; -case'^':return OPR_POW; -case TK_CONCAT:return OPR_CONCAT; -case TK_NE:return OPR_NE; -case TK_EQ:return OPR_EQ; -case'<':return OPR_LT; -case TK_LE:return OPR_LE; -case'>':return OPR_GT; -case TK_GE:return OPR_GE; -case TK_AND:return OPR_AND; -case TK_OR:return OPR_OR; -default:return OPR_NOBINOPR; -} -} -static const struct{ -lu_byte left; -lu_byte right; -}priority[]={ -{6,6},{6,6},{7,7},{7,7},{7,7}, -{10,9},{5,4}, -{3,3},{3,3}, -{3,3},{3,3},{3,3},{3,3}, -{2,2},{1,1} -}; -static BinOpr subexpr(LexState*ls,expdesc*v,unsigned int limit){ -BinOpr op; -UnOpr uop; -enterlevel(ls); -uop=getunopr(ls->t.token); -if(uop!=OPR_NOUNOPR){ -luaX_next(ls); -subexpr(ls,v,8); -luaK_prefix(ls->fs,uop,v); -} -else simpleexp(ls,v); -op=getbinopr(ls->t.token); -while(op!=OPR_NOBINOPR&&priority[op].left>limit){ -expdesc v2; -BinOpr nextop; -luaX_next(ls); -luaK_infix(ls->fs,op,v); -nextop=subexpr(ls,&v2,priority[op].right); -luaK_posfix(ls->fs,op,v,&v2); -op=nextop; -} -leavelevel(ls); -return op; -} -static void expr(LexState*ls,expdesc*v){ -subexpr(ls,v,0); -} -static int block_follow(int token){ -switch(token){ -case TK_ELSE:case TK_ELSEIF:case TK_END: -case TK_UNTIL:case TK_EOS: -return 1; -default:return 0; -} -} -static void block(LexState*ls){ -FuncState*fs=ls->fs; -BlockCnt bl; -enterblock(fs,&bl,0); -chunk(ls); -leaveblock(fs); -} -struct LHS_assign{ -struct LHS_assign*prev; -expdesc v; -}; -static void check_conflict(LexState*ls,struct LHS_assign*lh,expdesc*v){ -FuncState*fs=ls->fs; -int extra=fs->freereg; -int conflict=0; -for(;lh;lh=lh->prev){ -if(lh->v.k==VINDEXED){ -if(lh->v.u.s.info==v->u.s.info){ -conflict=1; -lh->v.u.s.info=extra; -} -if(lh->v.u.s.aux==v->u.s.info){ -conflict=1; -lh->v.u.s.aux=extra; -} -} -} -if(conflict){ -luaK_codeABC(fs,OP_MOVE,fs->freereg,v->u.s.info,0); -luaK_reserveregs(fs,1); -} -} -static void assignment(LexState*ls,struct LHS_assign*lh,int nvars){ -expdesc e; -check_condition(ls,VLOCAL<=lh->v.k&&lh->v.k<=VINDEXED, -"syntax error"); -if(testnext(ls,',')){ -struct LHS_assign nv; -nv.prev=lh; -primaryexp(ls,&nv.v); -if(nv.v.k==VLOCAL) -check_conflict(ls,lh,&nv.v); -luaY_checklimit(ls->fs,nvars,200-ls->L->nCcalls, -"variables in assignment"); -assignment(ls,&nv,nvars+1); -} -else{ -int nexps; -checknext(ls,'='); -nexps=explist1(ls,&e); -if(nexps!=nvars){ -adjust_assign(ls,nvars,nexps,&e); -if(nexps>nvars) -ls->fs->freereg-=nexps-nvars; -} -else{ -luaK_setoneret(ls->fs,&e); -luaK_storevar(ls->fs,&lh->v,&e); -return; -} -} -init_exp(&e,VNONRELOC,ls->fs->freereg-1); -luaK_storevar(ls->fs,&lh->v,&e); -} -static int cond(LexState*ls){ -expdesc v; -expr(ls,&v); -if(v.k==VNIL)v.k=VFALSE; -luaK_goiftrue(ls->fs,&v); -return v.f; -} -static void breakstat(LexState*ls){ -FuncState*fs=ls->fs; -BlockCnt*bl=fs->bl; -int upval=0; -while(bl&&!bl->isbreakable){ -upval|=bl->upval; -bl=bl->previous; -} -if(!bl) -luaX_syntaxerror(ls,"no loop to break"); -if(upval) -luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0); -luaK_concat(fs,&bl->breaklist,luaK_jump(fs)); -} -static void whilestat(LexState*ls,int line){ -FuncState*fs=ls->fs; -int whileinit; -int condexit; -BlockCnt bl; -luaX_next(ls); -whileinit=luaK_getlabel(fs); -condexit=cond(ls); -enterblock(fs,&bl,1); -checknext(ls,TK_DO); -block(ls); -luaK_patchlist(fs,luaK_jump(fs),whileinit); -check_match(ls,TK_END,TK_WHILE,line); 
-leaveblock(fs); -luaK_patchtohere(fs,condexit); -} -static void repeatstat(LexState*ls,int line){ -int condexit; -FuncState*fs=ls->fs; -int repeat_init=luaK_getlabel(fs); -BlockCnt bl1,bl2; -enterblock(fs,&bl1,1); -enterblock(fs,&bl2,0); -luaX_next(ls); -chunk(ls); -check_match(ls,TK_UNTIL,TK_REPEAT,line); -condexit=cond(ls); -if(!bl2.upval){ -leaveblock(fs); -luaK_patchlist(ls->fs,condexit,repeat_init); -} -else{ -breakstat(ls); -luaK_patchtohere(ls->fs,condexit); -leaveblock(fs); -luaK_patchlist(ls->fs,luaK_jump(fs),repeat_init); -} -leaveblock(fs); -} -static int exp1(LexState*ls){ -expdesc e; -int k; -expr(ls,&e); -k=e.k; -luaK_exp2nextreg(ls->fs,&e); -return k; -} -static void forbody(LexState*ls,int base,int line,int nvars,int isnum){ -BlockCnt bl; -FuncState*fs=ls->fs; -int prep,endfor; -adjustlocalvars(ls,3); -checknext(ls,TK_DO); -prep=isnum?luaK_codeAsBx(fs,OP_FORPREP,base,(-1)):luaK_jump(fs); -enterblock(fs,&bl,0); -adjustlocalvars(ls,nvars); -luaK_reserveregs(fs,nvars); -block(ls); -leaveblock(fs); -luaK_patchtohere(fs,prep); -endfor=(isnum)?luaK_codeAsBx(fs,OP_FORLOOP,base,(-1)): -luaK_codeABC(fs,OP_TFORLOOP,base,0,nvars); -luaK_fixline(fs,line); -luaK_patchlist(fs,(isnum?endfor:luaK_jump(fs)),prep+1); -} -static void fornum(LexState*ls,TString*varname,int line){ -FuncState*fs=ls->fs; -int base=fs->freereg; -new_localvarliteral(ls,"(for index)",0); -new_localvarliteral(ls,"(for limit)",1); -new_localvarliteral(ls,"(for step)",2); -new_localvar(ls,varname,3); -checknext(ls,'='); -exp1(ls); -checknext(ls,','); -exp1(ls); -if(testnext(ls,',')) -exp1(ls); -else{ -luaK_codeABx(fs,OP_LOADK,fs->freereg,luaK_numberK(fs,1)); -luaK_reserveregs(fs,1); -} -forbody(ls,base,line,1,1); -} -static void forlist(LexState*ls,TString*indexname){ -FuncState*fs=ls->fs; -expdesc e; -int nvars=0; -int line; -int base=fs->freereg; -new_localvarliteral(ls,"(for generator)",nvars++); -new_localvarliteral(ls,"(for state)",nvars++); -new_localvarliteral(ls,"(for control)",nvars++); -new_localvar(ls,indexname,nvars++); -while(testnext(ls,',')) -new_localvar(ls,str_checkname(ls),nvars++); -checknext(ls,TK_IN); -line=ls->linenumber; -adjust_assign(ls,3,explist1(ls,&e),&e); -luaK_checkstack(fs,3); -forbody(ls,base,line,nvars-3,0); -} -static void forstat(LexState*ls,int line){ -FuncState*fs=ls->fs; -TString*varname; -BlockCnt bl; -enterblock(fs,&bl,1); -luaX_next(ls); -varname=str_checkname(ls); -switch(ls->t.token){ -case'=':fornum(ls,varname,line);break; -case',':case TK_IN:forlist(ls,varname);break; -default:luaX_syntaxerror(ls,LUA_QL("=")" or "LUA_QL("in")" expected"); -} -check_match(ls,TK_END,TK_FOR,line); -leaveblock(fs); -} -static int test_then_block(LexState*ls){ -int condexit; -luaX_next(ls); -condexit=cond(ls); -checknext(ls,TK_THEN); -block(ls); -return condexit; -} -static void ifstat(LexState*ls,int line){ -FuncState*fs=ls->fs; -int flist; -int escapelist=(-1); -flist=test_then_block(ls); -while(ls->t.token==TK_ELSEIF){ -luaK_concat(fs,&escapelist,luaK_jump(fs)); -luaK_patchtohere(fs,flist); -flist=test_then_block(ls); -} -if(ls->t.token==TK_ELSE){ -luaK_concat(fs,&escapelist,luaK_jump(fs)); -luaK_patchtohere(fs,flist); -luaX_next(ls); -block(ls); -} -else -luaK_concat(fs,&escapelist,flist); -luaK_patchtohere(fs,escapelist); -check_match(ls,TK_END,TK_IF,line); -} -static void localfunc(LexState*ls){ -expdesc v,b; -FuncState*fs=ls->fs; -new_localvar(ls,str_checkname(ls),0); -init_exp(&v,VLOCAL,fs->freereg); -luaK_reserveregs(fs,1); -adjustlocalvars(ls,1); -body(ls,&b,0,ls->linenumber); 
-luaK_storevar(fs,&v,&b); -getlocvar(fs,fs->nactvar-1).startpc=fs->pc; -} -static void localstat(LexState*ls){ -int nvars=0; -int nexps; -expdesc e; -do{ -new_localvar(ls,str_checkname(ls),nvars++); -}while(testnext(ls,',')); -if(testnext(ls,'=')) -nexps=explist1(ls,&e); -else{ -e.k=VVOID; -nexps=0; -} -adjust_assign(ls,nvars,nexps,&e); -adjustlocalvars(ls,nvars); -} -static int funcname(LexState*ls,expdesc*v){ -int needself=0; -singlevar(ls,v); -while(ls->t.token=='.') -field(ls,v); -if(ls->t.token==':'){ -needself=1; -field(ls,v); -} -return needself; -} -static void funcstat(LexState*ls,int line){ -int needself; -expdesc v,b; -luaX_next(ls); -needself=funcname(ls,&v); -body(ls,&b,needself,line); -luaK_storevar(ls->fs,&v,&b); -luaK_fixline(ls->fs,line); -} -static void exprstat(LexState*ls){ -FuncState*fs=ls->fs; -struct LHS_assign v; -primaryexp(ls,&v.v); -if(v.v.k==VCALL) -SETARG_C(getcode(fs,&v.v),1); -else{ -v.prev=NULL; -assignment(ls,&v,1); -} -} -static void retstat(LexState*ls){ -FuncState*fs=ls->fs; -expdesc e; -int first,nret; -luaX_next(ls); -if(block_follow(ls->t.token)||ls->t.token==';') -first=nret=0; -else{ -nret=explist1(ls,&e); -if(hasmultret(e.k)){ -luaK_setmultret(fs,&e); -if(e.k==VCALL&&nret==1){ -SET_OPCODE(getcode(fs,&e),OP_TAILCALL); -} -first=fs->nactvar; -nret=(-1); -} -else{ -if(nret==1) -first=luaK_exp2anyreg(fs,&e); -else{ -luaK_exp2nextreg(fs,&e); -first=fs->nactvar; -} -} -} -luaK_ret(fs,first,nret); -} -static int statement(LexState*ls){ -int line=ls->linenumber; -switch(ls->t.token){ -case TK_IF:{ -ifstat(ls,line); -return 0; -} -case TK_WHILE:{ -whilestat(ls,line); -return 0; -} -case TK_DO:{ -luaX_next(ls); -block(ls); -check_match(ls,TK_END,TK_DO,line); -return 0; -} -case TK_FOR:{ -forstat(ls,line); -return 0; -} -case TK_REPEAT:{ -repeatstat(ls,line); -return 0; -} -case TK_FUNCTION:{ -funcstat(ls,line); -return 0; -} -case TK_LOCAL:{ -luaX_next(ls); -if(testnext(ls,TK_FUNCTION)) -localfunc(ls); -else -localstat(ls); -return 0; -} -case TK_RETURN:{ -retstat(ls); -return 1; -} -case TK_BREAK:{ -luaX_next(ls); -breakstat(ls); -return 1; -} -default:{ -exprstat(ls); -return 0; -} -} -} -static void chunk(LexState*ls){ -int islast=0; -enterlevel(ls); -while(!islast&&!block_follow(ls->t.token)){ -islast=statement(ls); -testnext(ls,';'); -ls->fs->freereg=ls->fs->nactvar; -} -leavelevel(ls); -} -static const TValue*luaV_tonumber(const TValue*obj,TValue*n){ -lua_Number num; -if(ttisnumber(obj))return obj; -if(ttisstring(obj)&&luaO_str2d(svalue(obj),&num)){ -setnvalue(n,num); -return n; -} -else -return NULL; -} -static int luaV_tostring(lua_State*L,StkId obj){ -if(!ttisnumber(obj)) -return 0; -else{ -char s[32]; -lua_Number n=nvalue(obj); -lua_number2str(s,n); -setsvalue(L,obj,luaS_new(L,s)); -return 1; -} -} -static void callTMres(lua_State*L,StkId res,const TValue*f, -const TValue*p1,const TValue*p2){ -ptrdiff_t result=savestack(L,res); -setobj(L,L->top,f); -setobj(L,L->top+1,p1); -setobj(L,L->top+2,p2); -luaD_checkstack(L,3); -L->top+=3; -luaD_call(L,L->top-3,1); -res=restorestack(L,result); -L->top--; -setobj(L,res,L->top); -} -static void callTM(lua_State*L,const TValue*f,const TValue*p1, -const TValue*p2,const TValue*p3){ -setobj(L,L->top,f); -setobj(L,L->top+1,p1); -setobj(L,L->top+2,p2); -setobj(L,L->top+3,p3); -luaD_checkstack(L,4); -L->top+=4; -luaD_call(L,L->top-4,0); -} -static void luaV_gettable(lua_State*L,const TValue*t,TValue*key,StkId val){ -int loop; -for(loop=0;loop<100;loop++){ -const TValue*tm; -if(ttistable(t)){ -Table*h=hvalue(t); 
-const TValue*res=luaH_get(h,key); -if(!ttisnil(res)|| -(tm=fasttm(L,h->metatable,TM_INDEX))==NULL){ -setobj(L,val,res); -return; -} -} -else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_INDEX))) -luaG_typeerror(L,t,"index"); -if(ttisfunction(tm)){ -callTMres(L,val,tm,t,key); -return; -} -t=tm; -} -luaG_runerror(L,"loop in gettable"); -} -static void luaV_settable(lua_State*L,const TValue*t,TValue*key,StkId val){ -int loop; -TValue temp; -for(loop=0;loop<100;loop++){ -const TValue*tm; -if(ttistable(t)){ -Table*h=hvalue(t); -TValue*oldval=luaH_set(L,h,key); -if(!ttisnil(oldval)|| -(tm=fasttm(L,h->metatable,TM_NEWINDEX))==NULL){ -setobj(L,oldval,val); -h->flags=0; -luaC_barriert(L,h,val); -return; -} -} -else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_NEWINDEX))) -luaG_typeerror(L,t,"index"); -if(ttisfunction(tm)){ -callTM(L,tm,t,key,val); -return; -} -setobj(L,&temp,tm); -t=&temp; -} -luaG_runerror(L,"loop in settable"); -} -static int call_binTM(lua_State*L,const TValue*p1,const TValue*p2, -StkId res,TMS event){ -const TValue*tm=luaT_gettmbyobj(L,p1,event); -if(ttisnil(tm)) -tm=luaT_gettmbyobj(L,p2,event); -if(ttisnil(tm))return 0; -callTMres(L,res,tm,p1,p2); -return 1; -} -static const TValue*get_compTM(lua_State*L,Table*mt1,Table*mt2, -TMS event){ -const TValue*tm1=fasttm(L,mt1,event); -const TValue*tm2; -if(tm1==NULL)return NULL; -if(mt1==mt2)return tm1; -tm2=fasttm(L,mt2,event); -if(tm2==NULL)return NULL; -if(luaO_rawequalObj(tm1,tm2)) -return tm1; -return NULL; -} -static int call_orderTM(lua_State*L,const TValue*p1,const TValue*p2, -TMS event){ -const TValue*tm1=luaT_gettmbyobj(L,p1,event); -const TValue*tm2; -if(ttisnil(tm1))return-1; -tm2=luaT_gettmbyobj(L,p2,event); -if(!luaO_rawequalObj(tm1,tm2)) -return-1; -callTMres(L,L->top,tm1,p1,p2); -return!l_isfalse(L->top); -} -static int l_strcmp(const TString*ls,const TString*rs){ -const char*l=getstr(ls); -size_t ll=ls->tsv.len; -const char*r=getstr(rs); -size_t lr=rs->tsv.len; -for(;;){ -int temp=strcoll(l,r); -if(temp!=0)return temp; -else{ -size_t len=strlen(l); -if(len==lr) -return(len==ll)?0:1; -else if(len==ll) -return-1; -len++; -l+=len;ll-=len;r+=len;lr-=len; -} -} -} -static int luaV_lessthan(lua_State*L,const TValue*l,const TValue*r){ -int res; -if(ttype(l)!=ttype(r)) -return luaG_ordererror(L,l,r); -else if(ttisnumber(l)) -return luai_numlt(nvalue(l),nvalue(r)); -else if(ttisstring(l)) -return l_strcmp(rawtsvalue(l),rawtsvalue(r))<0; -else if((res=call_orderTM(L,l,r,TM_LT))!=-1) -return res; -return luaG_ordererror(L,l,r); -} -static int lessequal(lua_State*L,const TValue*l,const TValue*r){ -int res; -if(ttype(l)!=ttype(r)) -return luaG_ordererror(L,l,r); -else if(ttisnumber(l)) -return luai_numle(nvalue(l),nvalue(r)); -else if(ttisstring(l)) -return l_strcmp(rawtsvalue(l),rawtsvalue(r))<=0; -else if((res=call_orderTM(L,l,r,TM_LE))!=-1) -return res; -else if((res=call_orderTM(L,r,l,TM_LT))!=-1) -return!res; -return luaG_ordererror(L,l,r); -} -static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2){ -const TValue*tm; -switch(ttype(t1)){ -case 0:return 1; -case 3:return luai_numeq(nvalue(t1),nvalue(t2)); -case 1:return bvalue(t1)==bvalue(t2); -case 2:return pvalue(t1)==pvalue(t2); -case 7:{ -if(uvalue(t1)==uvalue(t2))return 1; -tm=get_compTM(L,uvalue(t1)->metatable,uvalue(t2)->metatable, -TM_EQ); -break; -} -case 5:{ -if(hvalue(t1)==hvalue(t2))return 1; -tm=get_compTM(L,hvalue(t1)->metatable,hvalue(t2)->metatable,TM_EQ); -break; -} -default:return gcvalue(t1)==gcvalue(t2); -} -if(tm==NULL)return 0; 
-callTMres(L,L->top,tm,t1,t2); -return!l_isfalse(L->top); -} -static void luaV_concat(lua_State*L,int total,int last){ -do{ -StkId top=L->base+last+1; -int n=2; -if(!(ttisstring(top-2)||ttisnumber(top-2))||!tostring(L,top-1)){ -if(!call_binTM(L,top-2,top-1,top-2,TM_CONCAT)) -luaG_concaterror(L,top-2,top-1); -}else if(tsvalue(top-1)->len==0) -(void)tostring(L,top-2); -else{ -size_t tl=tsvalue(top-1)->len; -char*buffer; -int i; -for(n=1;nlen; -if(l>=((size_t)(~(size_t)0)-2)-tl)luaG_runerror(L,"string length overflow"); -tl+=l; -} -buffer=luaZ_openspace(L,&G(L)->buff,tl); -tl=0; -for(i=n;i>0;i--){ -size_t l=tsvalue(top-i)->len; -memcpy(buffer+tl,svalue(top-i),l); -tl+=l; -} -setsvalue(L,top-n,luaS_newlstr(L,buffer,tl)); -} -total-=n-1; -last-=n-1; -}while(total>1); -} -static void Arith(lua_State*L,StkId ra,const TValue*rb, -const TValue*rc,TMS op){ -TValue tempb,tempc; -const TValue*b,*c; -if((b=luaV_tonumber(rb,&tempb))!=NULL&& -(c=luaV_tonumber(rc,&tempc))!=NULL){ -lua_Number nb=nvalue(b),nc=nvalue(c); -switch(op){ -case TM_ADD:setnvalue(ra,luai_numadd(nb,nc));break; -case TM_SUB:setnvalue(ra,luai_numsub(nb,nc));break; -case TM_MUL:setnvalue(ra,luai_nummul(nb,nc));break; -case TM_DIV:setnvalue(ra,luai_numdiv(nb,nc));break; -case TM_MOD:setnvalue(ra,luai_nummod(nb,nc));break; -case TM_POW:setnvalue(ra,luai_numpow(nb,nc));break; -case TM_UNM:setnvalue(ra,luai_numunm(nb));break; -default:break; -} -} -else if(!call_binTM(L,rb,rc,ra,op)) -luaG_aritherror(L,rb,rc); -} -#define runtime_check(L,c){if(!(c))break;} -#define RA(i)(base+GETARG_A(i)) -#define RB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgR,base+GETARG_B(i)) -#define RKB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_B(i))?k+INDEXK(GETARG_B(i)):base+GETARG_B(i)) -#define RKC(i)check_exp(getCMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_C(i))?k+INDEXK(GETARG_C(i)):base+GETARG_C(i)) -#define KBx(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,k+GETARG_Bx(i)) -#define dojump(L,pc,i){(pc)+=(i);} -#define Protect(x){L->savedpc=pc;{x;};base=L->base;} -#define arith_op(op,tm){TValue*rb=RKB(i);TValue*rc=RKC(i);if(ttisnumber(rb)&&ttisnumber(rc)){lua_Number nb=nvalue(rb),nc=nvalue(rc);setnvalue(ra,op(nb,nc));}else Protect(Arith(L,ra,rb,rc,tm));} -static void luaV_execute(lua_State*L,int nexeccalls){ -LClosure*cl; -StkId base; -TValue*k; -const Instruction*pc; -reentry: -pc=L->savedpc; -cl=&clvalue(L->ci->func)->l; -base=L->base; -k=cl->p->k; -for(;;){ -const Instruction i=*pc++; -StkId ra; -ra=RA(i); -switch(GET_OPCODE(i)){ -case OP_MOVE:{ -setobj(L,ra,RB(i)); -continue; -} -case OP_LOADK:{ -setobj(L,ra,KBx(i)); -continue; -} -case OP_LOADBOOL:{ -setbvalue(ra,GETARG_B(i)); -if(GETARG_C(i))pc++; -continue; -} -case OP_LOADNIL:{ -TValue*rb=RB(i); -do{ -setnilvalue(rb--); -}while(rb>=ra); -continue; -} -case OP_GETUPVAL:{ -int b=GETARG_B(i); -setobj(L,ra,cl->upvals[b]->v); -continue; -} -case OP_GETGLOBAL:{ -TValue g; -TValue*rb=KBx(i); -sethvalue(L,&g,cl->env); -Protect(luaV_gettable(L,&g,rb,ra)); -continue; -} -case OP_GETTABLE:{ -Protect(luaV_gettable(L,RB(i),RKC(i),ra)); -continue; -} -case OP_SETGLOBAL:{ -TValue g; -sethvalue(L,&g,cl->env); -Protect(luaV_settable(L,&g,KBx(i),ra)); -continue; -} -case OP_SETUPVAL:{ -UpVal*uv=cl->upvals[GETARG_B(i)]; -setobj(L,uv->v,ra); -luaC_barrier(L,uv,ra); -continue; -} -case OP_SETTABLE:{ -Protect(luaV_settable(L,ra,RKB(i),RKC(i))); -continue; -} -case OP_NEWTABLE:{ -int b=GETARG_B(i); -int c=GETARG_C(i); -sethvalue(L,ra,luaH_new(L,luaO_fb2int(b),luaO_fb2int(c))); -Protect(luaC_checkGC(L)); -continue; -} -case 
OP_SELF:{ -StkId rb=RB(i); -setobj(L,ra+1,rb); -Protect(luaV_gettable(L,rb,RKC(i),ra)); -continue; -} -case OP_ADD:{ -arith_op(luai_numadd,TM_ADD); -continue; -} -case OP_SUB:{ -arith_op(luai_numsub,TM_SUB); -continue; -} -case OP_MUL:{ -arith_op(luai_nummul,TM_MUL); -continue; -} -case OP_DIV:{ -arith_op(luai_numdiv,TM_DIV); -continue; -} -case OP_MOD:{ -arith_op(luai_nummod,TM_MOD); -continue; -} -case OP_POW:{ -arith_op(luai_numpow,TM_POW); -continue; -} -case OP_UNM:{ -TValue*rb=RB(i); -if(ttisnumber(rb)){ -lua_Number nb=nvalue(rb); -setnvalue(ra,luai_numunm(nb)); -} -else{ -Protect(Arith(L,ra,rb,rb,TM_UNM)); -} -continue; -} -case OP_NOT:{ -int res=l_isfalse(RB(i)); -setbvalue(ra,res); -continue; -} -case OP_LEN:{ -const TValue*rb=RB(i); -switch(ttype(rb)){ -case 5:{ -setnvalue(ra,cast_num(luaH_getn(hvalue(rb)))); -break; -} -case 4:{ -setnvalue(ra,cast_num(tsvalue(rb)->len)); -break; -} -default:{ -Protect( -if(!call_binTM(L,rb,(&luaO_nilobject_),ra,TM_LEN)) -luaG_typeerror(L,rb,"get length of"); -) -} -} -continue; -} -case OP_CONCAT:{ -int b=GETARG_B(i); -int c=GETARG_C(i); -Protect(luaV_concat(L,c-b+1,c);luaC_checkGC(L)); -setobj(L,RA(i),base+b); -continue; -} -case OP_JMP:{ -dojump(L,pc,GETARG_sBx(i)); -continue; -} -case OP_EQ:{ -TValue*rb=RKB(i); -TValue*rc=RKC(i); -Protect( -if(equalobj(L,rb,rc)==GETARG_A(i)) -dojump(L,pc,GETARG_sBx(*pc)); -) -pc++; -continue; -} -case OP_LT:{ -Protect( -if(luaV_lessthan(L,RKB(i),RKC(i))==GETARG_A(i)) -dojump(L,pc,GETARG_sBx(*pc)); -) -pc++; -continue; -} -case OP_LE:{ -Protect( -if(lessequal(L,RKB(i),RKC(i))==GETARG_A(i)) -dojump(L,pc,GETARG_sBx(*pc)); -) -pc++; -continue; -} -case OP_TEST:{ -if(l_isfalse(ra)!=GETARG_C(i)) -dojump(L,pc,GETARG_sBx(*pc)); -pc++; -continue; -} -case OP_TESTSET:{ -TValue*rb=RB(i); -if(l_isfalse(rb)!=GETARG_C(i)){ -setobj(L,ra,rb); -dojump(L,pc,GETARG_sBx(*pc)); -} -pc++; -continue; -} -case OP_CALL:{ -int b=GETARG_B(i); -int nresults=GETARG_C(i)-1; -if(b!=0)L->top=ra+b; -L->savedpc=pc; -switch(luaD_precall(L,ra,nresults)){ -case 0:{ -nexeccalls++; -goto reentry; -} -case 1:{ -if(nresults>=0)L->top=L->ci->top; -base=L->base; -continue; -} -default:{ -return; -} -} -} -case OP_TAILCALL:{ -int b=GETARG_B(i); -if(b!=0)L->top=ra+b; -L->savedpc=pc; -switch(luaD_precall(L,ra,(-1))){ -case 0:{ -CallInfo*ci=L->ci-1; -int aux; -StkId func=ci->func; -StkId pfunc=(ci+1)->func; -if(L->openupval)luaF_close(L,ci->base); -L->base=ci->base=ci->func+((ci+1)->base-pfunc); -for(aux=0;pfunc+auxtop;aux++) -setobj(L,func+aux,pfunc+aux); -ci->top=L->top=func+aux; -ci->savedpc=L->savedpc; -ci->tailcalls++; -L->ci--; -goto reentry; -} -case 1:{ -base=L->base; -continue; -} -default:{ -return; -} -} -} -case OP_RETURN:{ -int b=GETARG_B(i); -if(b!=0)L->top=ra+b-1; -if(L->openupval)luaF_close(L,base); -L->savedpc=pc; -b=luaD_poscall(L,ra); -if(--nexeccalls==0) -return; -else{ -if(b)L->top=L->ci->top; -goto reentry; -} -} -case OP_FORLOOP:{ -lua_Number step=nvalue(ra+2); -lua_Number idx=luai_numadd(nvalue(ra),step); -lua_Number limit=nvalue(ra+1); -if(luai_numlt(0,step)?luai_numle(idx,limit) -:luai_numle(limit,idx)){ -dojump(L,pc,GETARG_sBx(i)); -setnvalue(ra,idx); -setnvalue(ra+3,idx); -} -continue; -} -case OP_FORPREP:{ -const TValue*init=ra; -const TValue*plimit=ra+1; -const TValue*pstep=ra+2; -L->savedpc=pc; -if(!tonumber(init,ra)) -luaG_runerror(L,LUA_QL("for")" initial value must be a number"); -else if(!tonumber(plimit,ra+1)) -luaG_runerror(L,LUA_QL("for")" limit must be a number"); -else if(!tonumber(pstep,ra+2)) 
-luaG_runerror(L,LUA_QL("for")" step must be a number"); -setnvalue(ra,luai_numsub(nvalue(ra),nvalue(pstep))); -dojump(L,pc,GETARG_sBx(i)); -continue; -} -case OP_TFORLOOP:{ -StkId cb=ra+3; -setobj(L,cb+2,ra+2); -setobj(L,cb+1,ra+1); -setobj(L,cb,ra); -L->top=cb+3; -Protect(luaD_call(L,cb,GETARG_C(i))); -L->top=L->ci->top; -cb=RA(i)+3; -if(!ttisnil(cb)){ -setobj(L,cb-1,cb); -dojump(L,pc,GETARG_sBx(*pc)); -} -pc++; -continue; -} -case OP_SETLIST:{ -int n=GETARG_B(i); -int c=GETARG_C(i); -int last; -Table*h; -if(n==0){ -n=cast_int(L->top-ra)-1; -L->top=L->ci->top; -} -if(c==0)c=cast_int(*pc++); -runtime_check(L,ttistable(ra)); -h=hvalue(ra); -last=((c-1)*50)+n; -if(last>h->sizearray) -luaH_resizearray(L,h,last); -for(;n>0;n--){ -TValue*val=ra+n; -setobj(L,luaH_setnum(L,h,last--),val); -luaC_barriert(L,h,val); -} -continue; -} -case OP_CLOSE:{ -luaF_close(L,ra); -continue; -} -case OP_CLOSURE:{ -Proto*p; -Closure*ncl; -int nup,j; -p=cl->p->p[GETARG_Bx(i)]; -nup=p->nups; -ncl=luaF_newLclosure(L,nup,cl->env); -ncl->l.p=p; -for(j=0;jl.upvals[j]=cl->upvals[GETARG_B(*pc)]; -else{ -ncl->l.upvals[j]=luaF_findupval(L,base+GETARG_B(*pc)); -} -} -setclvalue(L,ra,ncl); -Protect(luaC_checkGC(L)); -continue; -} -case OP_VARARG:{ -int b=GETARG_B(i)-1; -int j; -CallInfo*ci=L->ci; -int n=cast_int(ci->base-ci->func)-cl->p->numparams-1; -if(b==(-1)){ -Protect(luaD_checkstack(L,n)); -ra=RA(i); -b=n; -L->top=ra+n; -} -for(j=0;jbase-n+j); -} -else{ -setnilvalue(ra+j); -} -} -continue; -} -} -} -} -#define api_checknelems(L,n)luai_apicheck(L,(n)<=(L->top-L->base)) -#define api_checkvalidindex(L,i)luai_apicheck(L,(i)!=(&luaO_nilobject_)) -#define api_incr_top(L){luai_apicheck(L,L->topci->top);L->top++;} -static TValue*index2adr(lua_State*L,int idx){ -if(idx>0){ -TValue*o=L->base+(idx-1); -luai_apicheck(L,idx<=L->ci->top-L->base); -if(o>=L->top)return cast(TValue*,(&luaO_nilobject_)); -else return o; -} -else if(idx>(-10000)){ -luai_apicheck(L,idx!=0&&-idx<=L->top-L->base); -return L->top+idx; -} -else switch(idx){ -case(-10000):return registry(L); -case(-10001):{ -Closure*func=curr_func(L); -sethvalue(L,&L->env,func->c.env); -return&L->env; -} -case(-10002):return gt(L); -default:{ -Closure*func=curr_func(L); -idx=(-10002)-idx; -return(idx<=func->c.nupvalues) -?&func->c.upvalue[idx-1] -:cast(TValue*,(&luaO_nilobject_)); -} -} -} -static Table*getcurrenv(lua_State*L){ -if(L->ci==L->base_ci) -return hvalue(gt(L)); -else{ -Closure*func=curr_func(L); -return func->c.env; -} -} -static int lua_checkstack(lua_State*L,int size){ -int res=1; -if(size>8000||(L->top-L->base+size)>8000) -res=0; -else if(size>0){ -luaD_checkstack(L,size); -if(L->ci->toptop+size) -L->ci->top=L->top+size; -} -return res; -} -static lua_CFunction lua_atpanic(lua_State*L,lua_CFunction panicf){ -lua_CFunction old; -old=G(L)->panic; -G(L)->panic=panicf; -return old; -} -static int lua_gettop(lua_State*L){ -return cast_int(L->top-L->base); -} -static void lua_settop(lua_State*L,int idx){ -if(idx>=0){ -luai_apicheck(L,idx<=L->stack_last-L->base); -while(L->topbase+idx) -setnilvalue(L->top++); -L->top=L->base+idx; -} -else{ -luai_apicheck(L,-(idx+1)<=(L->top-L->base)); -L->top+=idx+1; -} -} -static void lua_remove(lua_State*L,int idx){ -StkId p; -p=index2adr(L,idx); -api_checkvalidindex(L,p); -while(++ptop)setobj(L,p-1,p); -L->top--; -} -static void lua_insert(lua_State*L,int idx){ -StkId p; -StkId q; -p=index2adr(L,idx); -api_checkvalidindex(L,p); -for(q=L->top;q>p;q--)setobj(L,q,q-1); -setobj(L,p,L->top); -} -static void lua_replace(lua_State*L,int 
idx){ -StkId o; -if(idx==(-10001)&&L->ci==L->base_ci) -luaG_runerror(L,"no calling environment"); -api_checknelems(L,1); -o=index2adr(L,idx); -api_checkvalidindex(L,o); -if(idx==(-10001)){ -Closure*func=curr_func(L); -luai_apicheck(L,ttistable(L->top-1)); -func->c.env=hvalue(L->top-1); -luaC_barrier(L,func,L->top-1); -} -else{ -setobj(L,o,L->top-1); -if(idx<(-10002)) -luaC_barrier(L,curr_func(L),L->top-1); -} -L->top--; -} -static void lua_pushvalue(lua_State*L,int idx){ -setobj(L,L->top,index2adr(L,idx)); -api_incr_top(L); -} -static int lua_type(lua_State*L,int idx){ -StkId o=index2adr(L,idx); -return(o==(&luaO_nilobject_))?(-1):ttype(o); -} -static const char*lua_typename(lua_State*L,int t){ -UNUSED(L); -return(t==(-1))?"no value":luaT_typenames[t]; -} -static int lua_iscfunction(lua_State*L,int idx){ -StkId o=index2adr(L,idx); -return iscfunction(o); -} -static int lua_isnumber(lua_State*L,int idx){ -TValue n; -const TValue*o=index2adr(L,idx); -return tonumber(o,&n); -} -static int lua_isstring(lua_State*L,int idx){ -int t=lua_type(L,idx); -return(t==4||t==3); -} -static int lua_rawequal(lua_State*L,int index1,int index2){ -StkId o1=index2adr(L,index1); -StkId o2=index2adr(L,index2); -return(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0 -:luaO_rawequalObj(o1,o2); -} -static int lua_lessthan(lua_State*L,int index1,int index2){ -StkId o1,o2; -int i; -o1=index2adr(L,index1); -o2=index2adr(L,index2); -i=(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0 -:luaV_lessthan(L,o1,o2); -return i; -} -static lua_Number lua_tonumber(lua_State*L,int idx){ -TValue n; -const TValue*o=index2adr(L,idx); -if(tonumber(o,&n)) -return nvalue(o); -else -return 0; -} -static lua_Integer lua_tointeger(lua_State*L,int idx){ -TValue n; -const TValue*o=index2adr(L,idx); -if(tonumber(o,&n)){ -lua_Integer res; -lua_Number num=nvalue(o); -lua_number2integer(res,num); -return res; -} -else -return 0; -} -static int lua_toboolean(lua_State*L,int idx){ -const TValue*o=index2adr(L,idx); -return!l_isfalse(o); -} -static const char*lua_tolstring(lua_State*L,int idx,size_t*len){ -StkId o=index2adr(L,idx); -if(!ttisstring(o)){ -if(!luaV_tostring(L,o)){ -if(len!=NULL)*len=0; -return NULL; -} -luaC_checkGC(L); -o=index2adr(L,idx); -} -if(len!=NULL)*len=tsvalue(o)->len; -return svalue(o); -} -static size_t lua_objlen(lua_State*L,int idx){ -StkId o=index2adr(L,idx); -switch(ttype(o)){ -case 4:return tsvalue(o)->len; -case 7:return uvalue(o)->len; -case 5:return luaH_getn(hvalue(o)); -case 3:{ -size_t l; -l=(luaV_tostring(L,o)?tsvalue(o)->len:0); -return l; -} -default:return 0; -} -} -static lua_CFunction lua_tocfunction(lua_State*L,int idx){ -StkId o=index2adr(L,idx); -return(!iscfunction(o))?NULL:clvalue(o)->c.f; -} -static void*lua_touserdata(lua_State*L,int idx){ -StkId o=index2adr(L,idx); -switch(ttype(o)){ -case 7:return(rawuvalue(o)+1); -case 2:return pvalue(o); -default:return NULL; -} -} -static void lua_pushnil(lua_State*L){ -setnilvalue(L->top); -api_incr_top(L); -} -static void lua_pushnumber(lua_State*L,lua_Number n){ -setnvalue(L->top,n); -api_incr_top(L); -} -static void lua_pushinteger(lua_State*L,lua_Integer n){ -setnvalue(L->top,cast_num(n)); -api_incr_top(L); -} -static void lua_pushlstring(lua_State*L,const char*s,size_t len){ -luaC_checkGC(L); -setsvalue(L,L->top,luaS_newlstr(L,s,len)); -api_incr_top(L); -} -static void lua_pushstring(lua_State*L,const char*s){ -if(s==NULL) -lua_pushnil(L); -else -lua_pushlstring(L,s,strlen(s)); -} -static const char*lua_pushvfstring(lua_State*L,const char*fmt, -va_list 
argp){ -const char*ret; -luaC_checkGC(L); -ret=luaO_pushvfstring(L,fmt,argp); -return ret; -} -static const char*lua_pushfstring(lua_State*L,const char*fmt,...){ -const char*ret; -va_list argp; -luaC_checkGC(L); -va_start(argp,fmt); -ret=luaO_pushvfstring(L,fmt,argp); -va_end(argp); -return ret; -} -static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n){ -Closure*cl; -luaC_checkGC(L); -api_checknelems(L,n); -cl=luaF_newCclosure(L,n,getcurrenv(L)); -cl->c.f=fn; -L->top-=n; -while(n--) -setobj(L,&cl->c.upvalue[n],L->top+n); -setclvalue(L,L->top,cl); -api_incr_top(L); -} -static void lua_pushboolean(lua_State*L,int b){ -setbvalue(L->top,(b!=0)); -api_incr_top(L); -} -static int lua_pushthread(lua_State*L){ -setthvalue(L,L->top,L); -api_incr_top(L); -return(G(L)->mainthread==L); -} -static void lua_gettable(lua_State*L,int idx){ -StkId t; -t=index2adr(L,idx); -api_checkvalidindex(L,t); -luaV_gettable(L,t,L->top-1,L->top-1); -} -static void lua_getfield(lua_State*L,int idx,const char*k){ -StkId t; -TValue key; -t=index2adr(L,idx); -api_checkvalidindex(L,t); -setsvalue(L,&key,luaS_new(L,k)); -luaV_gettable(L,t,&key,L->top); -api_incr_top(L); -} -static void lua_rawget(lua_State*L,int idx){ -StkId t; -t=index2adr(L,idx); -luai_apicheck(L,ttistable(t)); -setobj(L,L->top-1,luaH_get(hvalue(t),L->top-1)); -} -static void lua_rawgeti(lua_State*L,int idx,int n){ -StkId o; -o=index2adr(L,idx); -luai_apicheck(L,ttistable(o)); -setobj(L,L->top,luaH_getnum(hvalue(o),n)); -api_incr_top(L); -} -static void lua_createtable(lua_State*L,int narray,int nrec){ -luaC_checkGC(L); -sethvalue(L,L->top,luaH_new(L,narray,nrec)); -api_incr_top(L); -} -static int lua_getmetatable(lua_State*L,int objindex){ -const TValue*obj; -Table*mt=NULL; -int res; -obj=index2adr(L,objindex); -switch(ttype(obj)){ -case 5: -mt=hvalue(obj)->metatable; -break; -case 7: -mt=uvalue(obj)->metatable; -break; -default: -mt=G(L)->mt[ttype(obj)]; -break; -} -if(mt==NULL) -res=0; -else{ -sethvalue(L,L->top,mt); -api_incr_top(L); -res=1; -} -return res; -} -static void lua_getfenv(lua_State*L,int idx){ -StkId o; -o=index2adr(L,idx); -api_checkvalidindex(L,o); -switch(ttype(o)){ -case 6: -sethvalue(L,L->top,clvalue(o)->c.env); -break; -case 7: -sethvalue(L,L->top,uvalue(o)->env); -break; -case 8: -setobj(L,L->top,gt(thvalue(o))); -break; -default: -setnilvalue(L->top); -break; -} -api_incr_top(L); -} -static void lua_settable(lua_State*L,int idx){ -StkId t; -api_checknelems(L,2); -t=index2adr(L,idx); -api_checkvalidindex(L,t); -luaV_settable(L,t,L->top-2,L->top-1); -L->top-=2; -} -static void lua_setfield(lua_State*L,int idx,const char*k){ -StkId t; -TValue key; -api_checknelems(L,1); -t=index2adr(L,idx); -api_checkvalidindex(L,t); -setsvalue(L,&key,luaS_new(L,k)); -luaV_settable(L,t,&key,L->top-1); -L->top--; -} -static void lua_rawset(lua_State*L,int idx){ -StkId t; -api_checknelems(L,2); -t=index2adr(L,idx); -luai_apicheck(L,ttistable(t)); -setobj(L,luaH_set(L,hvalue(t),L->top-2),L->top-1); -luaC_barriert(L,hvalue(t),L->top-1); -L->top-=2; -} -static void lua_rawseti(lua_State*L,int idx,int n){ -StkId o; -api_checknelems(L,1); -o=index2adr(L,idx); -luai_apicheck(L,ttistable(o)); -setobj(L,luaH_setnum(L,hvalue(o),n),L->top-1); -luaC_barriert(L,hvalue(o),L->top-1); -L->top--; -} -static int lua_setmetatable(lua_State*L,int objindex){ -TValue*obj; -Table*mt; -api_checknelems(L,1); -obj=index2adr(L,objindex); -api_checkvalidindex(L,obj); -if(ttisnil(L->top-1)) -mt=NULL; -else{ -luai_apicheck(L,ttistable(L->top-1)); -mt=hvalue(L->top-1); -} 
-switch(ttype(obj)){ -case 5:{ -hvalue(obj)->metatable=mt; -if(mt) -luaC_objbarriert(L,hvalue(obj),mt); -break; -} -case 7:{ -uvalue(obj)->metatable=mt; -if(mt) -luaC_objbarrier(L,rawuvalue(obj),mt); -break; -} -default:{ -G(L)->mt[ttype(obj)]=mt; -break; -} -} -L->top--; -return 1; -} -static int lua_setfenv(lua_State*L,int idx){ -StkId o; -int res=1; -api_checknelems(L,1); -o=index2adr(L,idx); -api_checkvalidindex(L,o); -luai_apicheck(L,ttistable(L->top-1)); -switch(ttype(o)){ -case 6: -clvalue(o)->c.env=hvalue(L->top-1); -break; -case 7: -uvalue(o)->env=hvalue(L->top-1); -break; -case 8: -sethvalue(L,gt(thvalue(o)),hvalue(L->top-1)); -break; -default: -res=0; -break; -} -if(res)luaC_objbarrier(L,gcvalue(o),hvalue(L->top-1)); -L->top--; -return res; -} -#define adjustresults(L,nres){if(nres==(-1)&&L->top>=L->ci->top)L->ci->top=L->top;} -#define checkresults(L,na,nr)luai_apicheck(L,(nr)==(-1)||(L->ci->top-L->top>=(nr)-(na))) -static void lua_call(lua_State*L,int nargs,int nresults){ -StkId func; -api_checknelems(L,nargs+1); -checkresults(L,nargs,nresults); -func=L->top-(nargs+1); -luaD_call(L,func,nresults); -adjustresults(L,nresults); -} -struct CallS{ -StkId func; -int nresults; -}; -static void f_call(lua_State*L,void*ud){ -struct CallS*c=cast(struct CallS*,ud); -luaD_call(L,c->func,c->nresults); -} -static int lua_pcall(lua_State*L,int nargs,int nresults,int errfunc){ -struct CallS c; -int status; -ptrdiff_t func; -api_checknelems(L,nargs+1); -checkresults(L,nargs,nresults); -if(errfunc==0) -func=0; -else{ -StkId o=index2adr(L,errfunc); -api_checkvalidindex(L,o); -func=savestack(L,o); -} -c.func=L->top-(nargs+1); -c.nresults=nresults; -status=luaD_pcall(L,f_call,&c,savestack(L,c.func),func); -adjustresults(L,nresults); -return status; -} -static int lua_load(lua_State*L,lua_Reader reader,void*data, -const char*chunkname){ -ZIO z; -int status; -if(!chunkname)chunkname="?"; -luaZ_init(L,&z,reader,data); -status=luaD_protectedparser(L,&z,chunkname); -return status; -} -static int lua_error(lua_State*L){ -api_checknelems(L,1); -luaG_errormsg(L); -return 0; -} -static int lua_next(lua_State*L,int idx){ -StkId t; -int more; -t=index2adr(L,idx); -luai_apicheck(L,ttistable(t)); -more=luaH_next(L,hvalue(t),L->top-1); -if(more){ -api_incr_top(L); -} -else -L->top-=1; -return more; -} -static void lua_concat(lua_State*L,int n){ -api_checknelems(L,n); -if(n>=2){ -luaC_checkGC(L); -luaV_concat(L,n,cast_int(L->top-L->base)-1); -L->top-=(n-1); -} -else if(n==0){ -setsvalue(L,L->top,luaS_newlstr(L,"",0)); -api_incr_top(L); -} -} -static void*lua_newuserdata(lua_State*L,size_t size){ -Udata*u; -luaC_checkGC(L); -u=luaS_newudata(L,size,getcurrenv(L)); -setuvalue(L,L->top,u); -api_incr_top(L); -return u+1; -} -#define luaL_getn(L,i)((int)lua_objlen(L,i)) -#define luaL_setn(L,i,j)((void)0) -typedef struct luaL_Reg{ -const char*name; -lua_CFunction func; -}luaL_Reg; -static void luaI_openlib(lua_State*L,const char*libname, -const luaL_Reg*l,int nup); -static int luaL_argerror(lua_State*L,int numarg,const char*extramsg); -static const char* luaL_checklstring(lua_State*L,int numArg, -size_t*l); -static const char* luaL_optlstring(lua_State*L,int numArg, -const char*def,size_t*l); -static lua_Integer luaL_checkinteger(lua_State*L,int numArg); -static lua_Integer luaL_optinteger(lua_State*L,int nArg, -lua_Integer def); -static int luaL_error(lua_State*L,const char*fmt,...); -static const char* luaL_findtable(lua_State*L,int idx, -const char*fname,int szhint); -#define 
luaL_argcheck(L,cond,numarg,extramsg)((void)((cond)||luaL_argerror(L,(numarg),(extramsg)))) -#define luaL_checkstring(L,n)(luaL_checklstring(L,(n),NULL)) -#define luaL_optstring(L,n,d)(luaL_optlstring(L,(n),(d),NULL)) -#define luaL_checkint(L,n)((int)luaL_checkinteger(L,(n))) -#define luaL_optint(L,n,d)((int)luaL_optinteger(L,(n),(d))) -#define luaL_typename(L,i)lua_typename(L,lua_type(L,(i))) -#define luaL_getmetatable(L,n)(lua_getfield(L,(-10000),(n))) -#define luaL_opt(L,f,n,d)(lua_isnoneornil(L,(n))?(d):f(L,(n))) -typedef struct luaL_Buffer{ -char*p; -int lvl; -lua_State*L; -char buffer[BUFSIZ]; -}luaL_Buffer; -#define luaL_addchar(B,c)((void)((B)->p<((B)->buffer+BUFSIZ)||luaL_prepbuffer(B)),(*(B)->p++=(char)(c))) -#define luaL_addsize(B,n)((B)->p+=(n)) -static char* luaL_prepbuffer(luaL_Buffer*B); -static int luaL_argerror(lua_State*L,int narg,const char*extramsg){ -lua_Debug ar; -if(!lua_getstack(L,0,&ar)) -return luaL_error(L,"bad argument #%d (%s)",narg,extramsg); -lua_getinfo(L,"n",&ar); -if(strcmp(ar.namewhat,"method")==0){ -narg--; -if(narg==0) -return luaL_error(L,"calling "LUA_QL("%s")" on bad self (%s)", -ar.name,extramsg); -} -if(ar.name==NULL) -ar.name="?"; -return luaL_error(L,"bad argument #%d to "LUA_QL("%s")" (%s)", -narg,ar.name,extramsg); -} -static int luaL_typerror(lua_State*L,int narg,const char*tname){ -const char*msg=lua_pushfstring(L,"%s expected, got %s", -tname,luaL_typename(L,narg)); -return luaL_argerror(L,narg,msg); -} -static void tag_error(lua_State*L,int narg,int tag){ -luaL_typerror(L,narg,lua_typename(L,tag)); -} -static void luaL_where(lua_State*L,int level){ -lua_Debug ar; -if(lua_getstack(L,level,&ar)){ -lua_getinfo(L,"Sl",&ar); -if(ar.currentline>0){ -lua_pushfstring(L,"%s:%d: ",ar.short_src,ar.currentline); -return; -} -} -lua_pushliteral(L,""); -} -static int luaL_error(lua_State*L,const char*fmt,...){ -va_list argp; -va_start(argp,fmt); -luaL_where(L,1); -lua_pushvfstring(L,fmt,argp); -va_end(argp); -lua_concat(L,2); -return lua_error(L); -} -static int luaL_newmetatable(lua_State*L,const char*tname){ -lua_getfield(L,(-10000),tname); -if(!lua_isnil(L,-1)) -return 0; -lua_pop(L,1); -lua_newtable(L); -lua_pushvalue(L,-1); -lua_setfield(L,(-10000),tname); -return 1; -} -static void*luaL_checkudata(lua_State*L,int ud,const char*tname){ -void*p=lua_touserdata(L,ud); -if(p!=NULL){ -if(lua_getmetatable(L,ud)){ -lua_getfield(L,(-10000),tname); -if(lua_rawequal(L,-1,-2)){ -lua_pop(L,2); -return p; -} -} -} -luaL_typerror(L,ud,tname); -return NULL; -} -static void luaL_checkstack(lua_State*L,int space,const char*mes){ -if(!lua_checkstack(L,space)) -luaL_error(L,"stack overflow (%s)",mes); -} -static void luaL_checktype(lua_State*L,int narg,int t){ -if(lua_type(L,narg)!=t) -tag_error(L,narg,t); -} -static void luaL_checkany(lua_State*L,int narg){ -if(lua_type(L,narg)==(-1)) -luaL_argerror(L,narg,"value expected"); -} -static const char*luaL_checklstring(lua_State*L,int narg,size_t*len){ -const char*s=lua_tolstring(L,narg,len); -if(!s)tag_error(L,narg,4); -return s; -} -static const char*luaL_optlstring(lua_State*L,int narg, -const char*def,size_t*len){ -if(lua_isnoneornil(L,narg)){ -if(len) -*len=(def?strlen(def):0); -return def; -} -else return luaL_checklstring(L,narg,len); -} -static lua_Number luaL_checknumber(lua_State*L,int narg){ -lua_Number d=lua_tonumber(L,narg); -if(d==0&&!lua_isnumber(L,narg)) -tag_error(L,narg,3); -return d; -} -static lua_Integer luaL_checkinteger(lua_State*L,int narg){ -lua_Integer d=lua_tointeger(L,narg); 
-if(d==0&&!lua_isnumber(L,narg)) -tag_error(L,narg,3); -return d; -} -static lua_Integer luaL_optinteger(lua_State*L,int narg, -lua_Integer def){ -return luaL_opt(L,luaL_checkinteger,narg,def); -} -static int luaL_getmetafield(lua_State*L,int obj,const char*event){ -if(!lua_getmetatable(L,obj)) -return 0; -lua_pushstring(L,event); -lua_rawget(L,-2); -if(lua_isnil(L,-1)){ -lua_pop(L,2); -return 0; -} -else{ -lua_remove(L,-2); -return 1; -} -} -static void luaL_register(lua_State*L,const char*libname, -const luaL_Reg*l){ -luaI_openlib(L,libname,l,0); -} -static int libsize(const luaL_Reg*l){ -int size=0; -for(;l->name;l++)size++; -return size; -} -static void luaI_openlib(lua_State*L,const char*libname, -const luaL_Reg*l,int nup){ -if(libname){ -int size=libsize(l); -luaL_findtable(L,(-10000),"_LOADED",1); -lua_getfield(L,-1,libname); -if(!lua_istable(L,-1)){ -lua_pop(L,1); -if(luaL_findtable(L,(-10002),libname,size)!=NULL) -luaL_error(L,"name conflict for module "LUA_QL("%s"),libname); -lua_pushvalue(L,-1); -lua_setfield(L,-3,libname); -} -lua_remove(L,-2); -lua_insert(L,-(nup+1)); -} -for(;l->name;l++){ -int i; -for(i=0;ifunc,nup); -lua_setfield(L,-(nup+2),l->name); -} -lua_pop(L,nup); -} -static const char*luaL_findtable(lua_State*L,int idx, -const char*fname,int szhint){ -const char*e; -lua_pushvalue(L,idx); -do{ -e=strchr(fname,'.'); -if(e==NULL)e=fname+strlen(fname); -lua_pushlstring(L,fname,e-fname); -lua_rawget(L,-2); -if(lua_isnil(L,-1)){ -lua_pop(L,1); -lua_createtable(L,0,(*e=='.'?1:szhint)); -lua_pushlstring(L,fname,e-fname); -lua_pushvalue(L,-2); -lua_settable(L,-4); -} -else if(!lua_istable(L,-1)){ -lua_pop(L,2); -return fname; -} -lua_remove(L,-2); -fname=e+1; -}while(*e=='.'); -return NULL; -} -#define bufflen(B)((B)->p-(B)->buffer) -#define bufffree(B)((size_t)(BUFSIZ-bufflen(B))) -static int emptybuffer(luaL_Buffer*B){ -size_t l=bufflen(B); -if(l==0)return 0; -else{ -lua_pushlstring(B->L,B->buffer,l); -B->p=B->buffer; -B->lvl++; -return 1; -} -} -static void adjuststack(luaL_Buffer*B){ -if(B->lvl>1){ -lua_State*L=B->L; -int toget=1; -size_t toplen=lua_strlen(L,-1); -do{ -size_t l=lua_strlen(L,-(toget+1)); -if(B->lvl-toget+1>=(20/2)||toplen>l){ -toplen+=l; -toget++; -} -else break; -}while(togetlvl); -lua_concat(L,toget); -B->lvl=B->lvl-toget+1; -} -} -static char*luaL_prepbuffer(luaL_Buffer*B){ -if(emptybuffer(B)) -adjuststack(B); -return B->buffer; -} -static void luaL_addlstring(luaL_Buffer*B,const char*s,size_t l){ -while(l--) -luaL_addchar(B,*s++); -} -static void luaL_pushresult(luaL_Buffer*B){ -emptybuffer(B); -lua_concat(B->L,B->lvl); -B->lvl=1; -} -static void luaL_addvalue(luaL_Buffer*B){ -lua_State*L=B->L; -size_t vl; -const char*s=lua_tolstring(L,-1,&vl); -if(vl<=bufffree(B)){ -memcpy(B->p,s,vl); -B->p+=vl; -lua_pop(L,1); -} -else{ -if(emptybuffer(B)) -lua_insert(L,-2); -B->lvl++; -adjuststack(B); -} -} -static void luaL_buffinit(lua_State*L,luaL_Buffer*B){ -B->L=L; -B->p=B->buffer; -B->lvl=0; -} -typedef struct LoadF{ -int extraline; -FILE*f; -char buff[BUFSIZ]; -}LoadF; -static const char*getF(lua_State*L,void*ud,size_t*size){ -LoadF*lf=(LoadF*)ud; -(void)L; -if(lf->extraline){ -lf->extraline=0; -*size=1; -return"\n"; -} -if(feof(lf->f))return NULL; -*size=fread(lf->buff,1,sizeof(lf->buff),lf->f); -return(*size>0)?lf->buff:NULL; -} -static int errfile(lua_State*L,const char*what,int fnameindex){ -const char*serr=strerror(errno); -const char*filename=lua_tostring(L,fnameindex)+1; -lua_pushfstring(L,"cannot %s %s: %s",what,filename,serr); 
-lua_remove(L,fnameindex); -return(5+1); -} -static int luaL_loadfile(lua_State*L,const char*filename){ -LoadF lf; -int status,readstatus; -int c; -int fnameindex=lua_gettop(L)+1; -lf.extraline=0; -if(filename==NULL){ -lua_pushliteral(L,"=stdin"); -lf.f=stdin; -} -else{ -lua_pushfstring(L,"@%s",filename); -lf.f=fopen(filename,"r"); -if(lf.f==NULL)return errfile(L,"open",fnameindex); -} -c=getc(lf.f); -if(c=='#'){ -lf.extraline=1; -while((c=getc(lf.f))!=EOF&&c!='\n'); -if(c=='\n')c=getc(lf.f); -} -if(c=="\033Lua"[0]&&filename){ -lf.f=freopen(filename,"rb",lf.f); -if(lf.f==NULL)return errfile(L,"reopen",fnameindex); -while((c=getc(lf.f))!=EOF&&c!="\033Lua"[0]); -lf.extraline=0; -} -ungetc(c,lf.f); -status=lua_load(L,getF,&lf,lua_tostring(L,-1)); -readstatus=ferror(lf.f); -if(filename)fclose(lf.f); -if(readstatus){ -lua_settop(L,fnameindex); -return errfile(L,"read",fnameindex); -} -lua_remove(L,fnameindex); -return status; -} -typedef struct LoadS{ -const char*s; -size_t size; -}LoadS; -static const char*getS(lua_State*L,void*ud,size_t*size){ -LoadS*ls=(LoadS*)ud; -(void)L; -if(ls->size==0)return NULL; -*size=ls->size; -ls->size=0; -return ls->s; -} -static int luaL_loadbuffer(lua_State*L,const char*buff,size_t size, -const char*name){ -LoadS ls; -ls.s=buff; -ls.size=size; -return lua_load(L,getS,&ls,name); -} -static void*l_alloc(void*ud,void*ptr,size_t osize,size_t nsize){ -(void)ud; -(void)osize; -if(nsize==0){ -free(ptr); -return NULL; -} -else -return realloc(ptr,nsize); -} -static int panic(lua_State*L){ -(void)L; -fprintf(stderr,"PANIC: unprotected error in call to Lua API (%s)\n", -lua_tostring(L,-1)); -return 0; -} -static lua_State*luaL_newstate(void){ -lua_State*L=lua_newstate(l_alloc,NULL); -if(L)lua_atpanic(L,&panic); -return L; -} -static int luaB_tonumber(lua_State*L){ -int base=luaL_optint(L,2,10); -if(base==10){ -luaL_checkany(L,1); -if(lua_isnumber(L,1)){ -lua_pushnumber(L,lua_tonumber(L,1)); -return 1; -} -} -else{ -const char*s1=luaL_checkstring(L,1); -char*s2; -unsigned long n; -luaL_argcheck(L,2<=base&&base<=36,2,"base out of range"); -n=strtoul(s1,&s2,base); -if(s1!=s2){ -while(isspace((unsigned char)(*s2)))s2++; -if(*s2=='\0'){ -lua_pushnumber(L,(lua_Number)n); -return 1; -} -} -} -lua_pushnil(L); -return 1; -} -static int luaB_error(lua_State*L){ -int level=luaL_optint(L,2,1); -lua_settop(L,1); -if(lua_isstring(L,1)&&level>0){ -luaL_where(L,level); -lua_pushvalue(L,1); -lua_concat(L,2); -} -return lua_error(L); -} -static int luaB_setmetatable(lua_State*L){ -int t=lua_type(L,2); -luaL_checktype(L,1,5); -luaL_argcheck(L,t==0||t==5,2, -"nil or table expected"); -if(luaL_getmetafield(L,1,"__metatable")) -luaL_error(L,"cannot change a protected metatable"); -lua_settop(L,2); -lua_setmetatable(L,1); -return 1; -} -static void getfunc(lua_State*L,int opt){ -if(lua_isfunction(L,1))lua_pushvalue(L,1); -else{ -lua_Debug ar; -int level=opt?luaL_optint(L,1,1):luaL_checkint(L,1); -luaL_argcheck(L,level>=0,1,"level must be non-negative"); -if(lua_getstack(L,level,&ar)==0) -luaL_argerror(L,1,"invalid level"); -lua_getinfo(L,"f",&ar); -if(lua_isnil(L,-1)) -luaL_error(L,"no function environment for tail call at level %d", -level); -} -} -static int luaB_setfenv(lua_State*L){ -luaL_checktype(L,2,5); -getfunc(L,0); -lua_pushvalue(L,2); -if(lua_isnumber(L,1)&&lua_tonumber(L,1)==0){ -lua_pushthread(L); -lua_insert(L,-2); -lua_setfenv(L,-2); -return 0; -} -else if(lua_iscfunction(L,-2)||lua_setfenv(L,-2)==0) -luaL_error(L, -LUA_QL("setfenv")" cannot change environment of given object"); 
-return 1; -} -static int luaB_rawget(lua_State*L){ -luaL_checktype(L,1,5); -luaL_checkany(L,2); -lua_settop(L,2); -lua_rawget(L,1); -return 1; -} -static int luaB_type(lua_State*L){ -luaL_checkany(L,1); -lua_pushstring(L,luaL_typename(L,1)); -return 1; -} -static int luaB_next(lua_State*L){ -luaL_checktype(L,1,5); -lua_settop(L,2); -if(lua_next(L,1)) -return 2; -else{ -lua_pushnil(L); -return 1; -} -} -static int luaB_pairs(lua_State*L){ -luaL_checktype(L,1,5); -lua_pushvalue(L,lua_upvalueindex(1)); -lua_pushvalue(L,1); -lua_pushnil(L); -return 3; -} -static int ipairsaux(lua_State*L){ -int i=luaL_checkint(L,2); -luaL_checktype(L,1,5); -i++; -lua_pushinteger(L,i); -lua_rawgeti(L,1,i); -return(lua_isnil(L,-1))?0:2; -} -static int luaB_ipairs(lua_State*L){ -luaL_checktype(L,1,5); -lua_pushvalue(L,lua_upvalueindex(1)); -lua_pushvalue(L,1); -lua_pushinteger(L,0); -return 3; -} -static int load_aux(lua_State*L,int status){ -if(status==0) -return 1; -else{ -lua_pushnil(L); -lua_insert(L,-2); -return 2; -} -} -static int luaB_loadstring(lua_State*L){ -size_t l; -const char*s=luaL_checklstring(L,1,&l); -const char*chunkname=luaL_optstring(L,2,s); -return load_aux(L,luaL_loadbuffer(L,s,l,chunkname)); -} -static int luaB_loadfile(lua_State*L){ -const char*fname=luaL_optstring(L,1,NULL); -return load_aux(L,luaL_loadfile(L,fname)); -} -static int luaB_assert(lua_State*L){ -luaL_checkany(L,1); -if(!lua_toboolean(L,1)) -return luaL_error(L,"%s",luaL_optstring(L,2,"assertion failed!")); -return lua_gettop(L); -} -static int luaB_unpack(lua_State*L){ -int i,e,n; -luaL_checktype(L,1,5); -i=luaL_optint(L,2,1); -e=luaL_opt(L,luaL_checkint,3,luaL_getn(L,1)); -if(i>e)return 0; -n=e-i+1; -if(n<=0||!lua_checkstack(L,n)) -return luaL_error(L,"too many results to unpack"); -lua_rawgeti(L,1,i); -while(i++e)e=pos; -for(i=e;i>pos;i--){ -lua_rawgeti(L,1,i-1); -lua_rawseti(L,1,i); -} -break; -} -default:{ -return luaL_error(L,"wrong number of arguments to "LUA_QL("insert")); -} -} -luaL_setn(L,1,e); -lua_rawseti(L,1,pos); -return 0; -} -static int tremove(lua_State*L){ -int e=aux_getn(L,1); -int pos=luaL_optint(L,2,e); -if(!(1<=pos&&pos<=e)) -return 0; -luaL_setn(L,1,e-1); -lua_rawgeti(L,1,pos); -for(;posu)luaL_error(L,"invalid order function for sorting"); -lua_pop(L,1); -} -while(lua_rawgeti(L,1,--j),sort_comp(L,-3,-1)){ -if(j0); -} -l=strlen(p); -if(l==0||p[l-1]!='\n') -luaL_addsize(&b,l); -else{ -luaL_addsize(&b,l-1); -luaL_pushresult(&b); -return 1; -} -} -} -static int read_chars(lua_State*L,FILE*f,size_t n){ -size_t rlen; -size_t nr; -luaL_Buffer b; -luaL_buffinit(L,&b); -rlen=BUFSIZ; -do{ -char*p=luaL_prepbuffer(&b); -if(rlen>n)rlen=n; -nr=fread(p,sizeof(char),rlen,f); -luaL_addsize(&b,nr); -n-=nr; -}while(n>0&&nr==rlen); -luaL_pushresult(&b); -return(n==0||lua_objlen(L,-1)>0); -} -static int g_read(lua_State*L,FILE*f,int first){ -int nargs=lua_gettop(L)-1; -int success; -int n; -clearerr(f); -if(nargs==0){ -success=read_line(L,f); -n=first+1; -} -else{ -luaL_checkstack(L,nargs+20,"too many arguments"); -success=1; -for(n=first;nargs--&&success;n++){ -if(lua_type(L,n)==3){ -size_t l=(size_t)lua_tointeger(L,n); -success=(l==0)?test_eof(L,f):read_chars(L,f,l); -} -else{ -const char*p=lua_tostring(L,n); -luaL_argcheck(L,p&&p[0]=='*',n,"invalid option"); -switch(p[1]){ -case'n': -success=read_number(L,f); -break; -case'l': -success=read_line(L,f); -break; -case'a': -read_chars(L,f,~((size_t)0)); -success=1; -break; -default: -return luaL_argerror(L,n,"invalid format"); -} -} -} -} -if(ferror(f)) -return 
pushresult(L,0,NULL); -if(!success){ -lua_pop(L,1); -lua_pushnil(L); -} -return n-first; -} -static int io_read(lua_State*L){ -return g_read(L,getiofile(L,1),1); -} -static int f_read(lua_State*L){ -return g_read(L,tofile(L),2); -} -static int io_readline(lua_State*L){ -FILE*f=*(FILE**)lua_touserdata(L,lua_upvalueindex(1)); -int sucess; -if(f==NULL) -luaL_error(L,"file is already closed"); -sucess=read_line(L,f); -if(ferror(f)) -return luaL_error(L,"%s",strerror(errno)); -if(sucess)return 1; -else{ -if(lua_toboolean(L,lua_upvalueindex(2))){ -lua_settop(L,0); -lua_pushvalue(L,lua_upvalueindex(1)); -aux_close(L); -} -return 0; -} -} -static int g_write(lua_State*L,FILE*f,int arg){ -int nargs=lua_gettop(L)-1; -int status=1; -for(;nargs--;arg++){ -if(lua_type(L,arg)==3){ -status=status&& -fprintf(f,"%.14g",lua_tonumber(L,arg))>0; -} -else{ -size_t l; -const char*s=luaL_checklstring(L,arg,&l); -status=status&&(fwrite(s,sizeof(char),l,f)==l); -} -} -return pushresult(L,status,NULL); -} -static int io_write(lua_State*L){ -return g_write(L,getiofile(L,2),1); -} -static int f_write(lua_State*L){ -return g_write(L,tofile(L),2); -} -static int io_flush(lua_State*L){ -return pushresult(L,fflush(getiofile(L,2))==0,NULL); -} -static int f_flush(lua_State*L){ -return pushresult(L,fflush(tofile(L))==0,NULL); -} -static const luaL_Reg iolib[]={ -{"close",io_close}, -{"flush",io_flush}, -{"input",io_input}, -{"lines",io_lines}, -{"open",io_open}, -{"output",io_output}, -{"read",io_read}, -{"type",io_type}, -{"write",io_write}, -{NULL,NULL} -}; -static const luaL_Reg flib[]={ -{"close",io_close}, -{"flush",f_flush}, -{"lines",f_lines}, -{"read",f_read}, -{"write",f_write}, -{"__gc",io_gc}, -{NULL,NULL} -}; -static void createmeta(lua_State*L){ -luaL_newmetatable(L,"FILE*"); -lua_pushvalue(L,-1); -lua_setfield(L,-2,"__index"); -luaL_register(L,NULL,flib); -} -static void createstdfile(lua_State*L,FILE*f,int k,const char*fname){ -*newfile(L)=f; -if(k>0){ -lua_pushvalue(L,-1); -lua_rawseti(L,(-10001),k); -} -lua_pushvalue(L,-2); -lua_setfenv(L,-2); -lua_setfield(L,-3,fname); -} -static void newfenv(lua_State*L,lua_CFunction cls){ -lua_createtable(L,0,1); -lua_pushcfunction(L,cls); -lua_setfield(L,-2,"__close"); -} -static int luaopen_io(lua_State*L){ -createmeta(L); -newfenv(L,io_fclose); -lua_replace(L,(-10001)); -luaL_register(L,"io",iolib); -newfenv(L,io_noclose); -createstdfile(L,stdin,1,"stdin"); -createstdfile(L,stdout,2,"stdout"); -createstdfile(L,stderr,0,"stderr"); -lua_pop(L,1); -lua_getfield(L,-1,"popen"); -newfenv(L,io_pclose); -lua_setfenv(L,-2); -lua_pop(L,1); -return 1; -} -static int os_pushresult(lua_State*L,int i,const char*filename){ -int en=errno; -if(i){ -lua_pushboolean(L,1); -return 1; -} -else{ -lua_pushnil(L); -lua_pushfstring(L,"%s: %s",filename,strerror(en)); -lua_pushinteger(L,en); -return 3; -} -} -static int os_remove(lua_State*L){ -const char*filename=luaL_checkstring(L,1); -return os_pushresult(L,remove(filename)==0,filename); -} -static int os_exit(lua_State*L){ -exit(luaL_optint(L,1,EXIT_SUCCESS)); -} -static const luaL_Reg syslib[]={ -{"exit",os_exit}, -{"remove",os_remove}, -{NULL,NULL} -}; -static int luaopen_os(lua_State*L){ -luaL_register(L,"os",syslib); -return 1; -} -#define uchar(c)((unsigned char)(c)) -static ptrdiff_t posrelat(ptrdiff_t pos,size_t len){ -if(pos<0)pos+=(ptrdiff_t)len+1; -return(pos>=0)?pos:0; -} -static int str_sub(lua_State*L){ -size_t l; -const char*s=luaL_checklstring(L,1,&l); -ptrdiff_t start=posrelat(luaL_checkinteger(L,2),l); -ptrdiff_t 
end=posrelat(luaL_optinteger(L,3,-1),l); -if(start<1)start=1; -if(end>(ptrdiff_t)l)end=(ptrdiff_t)l; -if(start<=end) -lua_pushlstring(L,s+start-1,end-start+1); -else lua_pushliteral(L,""); -return 1; -} -static int str_lower(lua_State*L){ -size_t l; -size_t i; -luaL_Buffer b; -const char*s=luaL_checklstring(L,1,&l); -luaL_buffinit(L,&b); -for(i=0;i0) -luaL_addlstring(&b,s,l); -luaL_pushresult(&b); -return 1; -} -static int str_byte(lua_State*L){ -size_t l; -const char*s=luaL_checklstring(L,1,&l); -ptrdiff_t posi=posrelat(luaL_optinteger(L,2,1),l); -ptrdiff_t pose=posrelat(luaL_optinteger(L,3,posi),l); -int n,i; -if(posi<=0)posi=1; -if((size_t)pose>l)pose=l; -if(posi>pose)return 0; -n=(int)(pose-posi+1); -if(posi+n<=pose) -luaL_error(L,"string slice too long"); -luaL_checkstack(L,n,"string slice too long"); -for(i=0;i=ms->level||ms->capture[l].len==(-1)) -return luaL_error(ms->L,"invalid capture index"); -return l; -} -static int capture_to_close(MatchState*ms){ -int level=ms->level; -for(level--;level>=0;level--) -if(ms->capture[level].len==(-1))return level; -return luaL_error(ms->L,"invalid pattern capture"); -} -static const char*classend(MatchState*ms,const char*p){ -switch(*p++){ -case'%':{ -if(*p=='\0') -luaL_error(ms->L,"malformed pattern (ends with "LUA_QL("%%")")"); -return p+1; -} -case'[':{ -if(*p=='^')p++; -do{ -if(*p=='\0') -luaL_error(ms->L,"malformed pattern (missing "LUA_QL("]")")"); -if(*(p++)=='%'&&*p!='\0') -p++; -}while(*p!=']'); -return p+1; -} -default:{ -return p; -} -} -} -static int match_class(int c,int cl){ -int res; -switch(tolower(cl)){ -case'a':res=isalpha(c);break; -case'c':res=iscntrl(c);break; -case'd':res=isdigit(c);break; -case'l':res=islower(c);break; -case'p':res=ispunct(c);break; -case's':res=isspace(c);break; -case'u':res=isupper(c);break; -case'w':res=isalnum(c);break; -case'x':res=isxdigit(c);break; -case'z':res=(c==0);break; -default:return(cl==c); -} -return(islower(cl)?res:!res); -} -static int matchbracketclass(int c,const char*p,const char*ec){ -int sig=1; -if(*(p+1)=='^'){ -sig=0; -p++; -} -while(++pL,"unbalanced pattern"); -if(*s!=*p)return NULL; -else{ -int b=*p; -int e=*(p+1); -int cont=1; -while(++ssrc_end){ -if(*s==e){ -if(--cont==0)return s+1; -} -else if(*s==b)cont++; -} -} -return NULL; -} -static const char*max_expand(MatchState*ms,const char*s, -const char*p,const char*ep){ -ptrdiff_t i=0; -while((s+i)src_end&&singlematch(uchar(*(s+i)),p,ep)) -i++; -while(i>=0){ -const char*res=match(ms,(s+i),ep+1); -if(res)return res; -i--; -} -return NULL; -} -static const char*min_expand(MatchState*ms,const char*s, -const char*p,const char*ep){ -for(;;){ -const char*res=match(ms,s,ep+1); -if(res!=NULL) -return res; -else if(ssrc_end&&singlematch(uchar(*s),p,ep)) -s++; -else return NULL; -} -} -static const char*start_capture(MatchState*ms,const char*s, -const char*p,int what){ -const char*res; -int level=ms->level; -if(level>=32)luaL_error(ms->L,"too many captures"); -ms->capture[level].init=s; -ms->capture[level].len=what; -ms->level=level+1; -if((res=match(ms,s,p))==NULL) -ms->level--; -return res; -} -static const char*end_capture(MatchState*ms,const char*s, -const char*p){ -int l=capture_to_close(ms); -const char*res; -ms->capture[l].len=s-ms->capture[l].init; -if((res=match(ms,s,p))==NULL) -ms->capture[l].len=(-1); -return res; -} -static const char*match_capture(MatchState*ms,const char*s,int l){ -size_t len; -l=check_capture(ms,l); -len=ms->capture[l].len; -if((size_t)(ms->src_end-s)>=len&& -memcmp(ms->capture[l].init,s,len)==0) -return 
s+len; -else return NULL; -} -static const char*match(MatchState*ms,const char*s,const char*p){ -init: -switch(*p){ -case'(':{ -if(*(p+1)==')') -return start_capture(ms,s,p+2,(-2)); -else -return start_capture(ms,s,p+1,(-1)); -} -case')':{ -return end_capture(ms,s,p+1); -} -case'%':{ -switch(*(p+1)){ -case'b':{ -s=matchbalance(ms,s,p+2); -if(s==NULL)return NULL; -p+=4;goto init; -} -case'f':{ -const char*ep;char previous; -p+=2; -if(*p!='[') -luaL_error(ms->L,"missing "LUA_QL("[")" after " -LUA_QL("%%f")" in pattern"); -ep=classend(ms,p); -previous=(s==ms->src_init)?'\0':*(s-1); -if(matchbracketclass(uchar(previous),p,ep-1)|| -!matchbracketclass(uchar(*s),p,ep-1))return NULL; -p=ep;goto init; -} -default:{ -if(isdigit(uchar(*(p+1)))){ -s=match_capture(ms,s,uchar(*(p+1))); -if(s==NULL)return NULL; -p+=2;goto init; -} -goto dflt; -} -} -} -case'\0':{ -return s; -} -case'$':{ -if(*(p+1)=='\0') -return(s==ms->src_end)?s:NULL; -else goto dflt; -} -default:dflt:{ -const char*ep=classend(ms,p); -int m=ssrc_end&&singlematch(uchar(*s),p,ep); -switch(*ep){ -case'?':{ -const char*res; -if(m&&((res=match(ms,s+1,ep+1))!=NULL)) -return res; -p=ep+1;goto init; -} -case'*':{ -return max_expand(ms,s,p,ep); -} -case'+':{ -return(m?max_expand(ms,s+1,p,ep):NULL); -} -case'-':{ -return min_expand(ms,s,p,ep); -} -default:{ -if(!m)return NULL; -s++;p=ep;goto init; -} -} -} -} -} -static const char*lmemfind(const char*s1,size_t l1, -const char*s2,size_t l2){ -if(l2==0)return s1; -else if(l2>l1)return NULL; -else{ -const char*init; -l2--; -l1=l1-l2; -while(l1>0&&(init=(const char*)memchr(s1,*s2,l1))!=NULL){ -init++; -if(memcmp(init,s2+1,l2)==0) -return init-1; -else{ -l1-=init-s1; -s1=init; -} -} -return NULL; -} -} -static void push_onecapture(MatchState*ms,int i,const char*s, -const char*e){ -if(i>=ms->level){ -if(i==0) -lua_pushlstring(ms->L,s,e-s); -else -luaL_error(ms->L,"invalid capture index"); -} -else{ -ptrdiff_t l=ms->capture[i].len; -if(l==(-1))luaL_error(ms->L,"unfinished capture"); -if(l==(-2)) -lua_pushinteger(ms->L,ms->capture[i].init-ms->src_init+1); -else -lua_pushlstring(ms->L,ms->capture[i].init,l); -} -} -static int push_captures(MatchState*ms,const char*s,const char*e){ -int i; -int nlevels=(ms->level==0&&s)?1:ms->level; -luaL_checkstack(ms->L,nlevels,"too many captures"); -for(i=0;il1)init=(ptrdiff_t)l1; -if(find&&(lua_toboolean(L,4)|| -strpbrk(p,"^$*+?.([%-")==NULL)){ -const char*s2=lmemfind(s+init,l1-init,p,l2); -if(s2){ -lua_pushinteger(L,s2-s+1); -lua_pushinteger(L,s2-s+l2); -return 2; -} -} -else{ -MatchState ms; -int anchor=(*p=='^')?(p++,1):0; -const char*s1=s+init; -ms.L=L; -ms.src_init=s; -ms.src_end=s+l1; -do{ -const char*res; -ms.level=0; -if((res=match(&ms,s1,p))!=NULL){ -if(find){ -lua_pushinteger(L,s1-s+1); -lua_pushinteger(L,res-s); -return push_captures(&ms,NULL,0)+2; -} -else -return push_captures(&ms,s1,res); -} -}while(s1++L,3,&l); -for(i=0;iL; -switch(lua_type(L,3)){ -case 3: -case 4:{ -add_s(ms,b,s,e); -return; -} -case 6:{ -int n; -lua_pushvalue(L,3); -n=push_captures(ms,s,e); -lua_call(L,n,1); -break; -} -case 5:{ -push_onecapture(ms,0,s,e); -lua_gettable(L,3); -break; -} -} -if(!lua_toboolean(L,-1)){ -lua_pop(L,1); -lua_pushlstring(L,s,e-s); -} -else if(!lua_isstring(L,-1)) -luaL_error(L,"invalid replacement value (a %s)",luaL_typename(L,-1)); -luaL_addvalue(b); -} -static int str_gsub(lua_State*L){ -size_t srcl; -const char*src=luaL_checklstring(L,1,&srcl); -const char*p=luaL_checkstring(L,2); -int tr=lua_type(L,3); -int max_s=luaL_optint(L,4,srcl+1); -int 
anchor=(*p=='^')?(p++,1):0; -int n=0; -MatchState ms; -luaL_Buffer b; -luaL_argcheck(L,tr==3||tr==4|| -tr==6||tr==5,3, -"string/function/table expected"); -luaL_buffinit(L,&b); -ms.L=L; -ms.src_init=src; -ms.src_end=src+srcl; -while(nsrc) -src=e; -else if(src=sizeof("-+ #0")) -luaL_error(L,"invalid format (repeated flags)"); -if(isdigit(uchar(*p)))p++; -if(isdigit(uchar(*p)))p++; -if(*p=='.'){ -p++; -if(isdigit(uchar(*p)))p++; -if(isdigit(uchar(*p)))p++; -} -if(isdigit(uchar(*p))) -luaL_error(L,"invalid format (width or precision too long)"); -*(form++)='%'; -strncpy(form,strfrmt,p-strfrmt+1); -form+=p-strfrmt+1; -*form='\0'; -return p; -} -static void addintlen(char*form){ -size_t l=strlen(form); -char spec=form[l-1]; -strcpy(form+l-1,"l"); -form[l+sizeof("l")-2]=spec; -form[l+sizeof("l")-1]='\0'; -} -static int str_format(lua_State*L){ -int top=lua_gettop(L); -int arg=1; -size_t sfl; -const char*strfrmt=luaL_checklstring(L,arg,&sfl); -const char*strfrmt_end=strfrmt+sfl; -luaL_Buffer b; -luaL_buffinit(L,&b); -while(strfrmttop) -luaL_argerror(L,arg,"no value"); -strfrmt=scanformat(L,strfrmt,form); -switch(*strfrmt++){ -case'c':{ -sprintf(buff,form,(int)luaL_checknumber(L,arg)); -break; -} -case'd':case'i':{ -addintlen(form); -sprintf(buff,form,(long)luaL_checknumber(L,arg)); -break; -} -case'o':case'u':case'x':case'X':{ -addintlen(form); -sprintf(buff,form,(unsigned long)luaL_checknumber(L,arg)); -break; -} -case'e':case'E':case'f': -case'g':case'G':{ -sprintf(buff,form,(double)luaL_checknumber(L,arg)); -break; -} -case'q':{ -addquoted(L,&b,arg); -continue; -} -case's':{ -size_t l; -const char*s=luaL_checklstring(L,arg,&l); -if(!strchr(form,'.')&&l>=100){ -lua_pushvalue(L,arg); -luaL_addvalue(&b); -continue; -} -else{ -sprintf(buff,form,s); -break; -} -} -default:{ -return luaL_error(L,"invalid option "LUA_QL("%%%c")" to " -LUA_QL("format"),*(strfrmt-1)); -} -} -luaL_addlstring(&b,buff,strlen(buff)); -} -} -luaL_pushresult(&b); -return 1; -} -static const luaL_Reg strlib[]={ -{"byte",str_byte}, -{"char",str_char}, -{"find",str_find}, -{"format",str_format}, -{"gmatch",gmatch}, -{"gsub",str_gsub}, -{"lower",str_lower}, -{"match",str_match}, -{"rep",str_rep}, -{"sub",str_sub}, -{"upper",str_upper}, -{NULL,NULL} -}; -static void createmetatable(lua_State*L){ -lua_createtable(L,0,1); -lua_pushliteral(L,""); -lua_pushvalue(L,-2); -lua_setmetatable(L,-2); -lua_pop(L,1); -lua_pushvalue(L,-2); -lua_setfield(L,-2,"__index"); -lua_pop(L,1); -} -static int luaopen_string(lua_State*L){ -luaL_register(L,"string",strlib); -createmetatable(L); -return 1; -} -static const luaL_Reg lualibs[]={ -{"",luaopen_base}, -{"table",luaopen_table}, -{"io",luaopen_io}, -{"os",luaopen_os}, -{"string",luaopen_string}, -{NULL,NULL} -}; -static void luaL_openlibs(lua_State*L){ -const luaL_Reg*lib=lualibs; -for(;lib->func;lib++){ -lua_pushcfunction(L,lib->func); -lua_pushstring(L,lib->name); -lua_call(L,1,0); -} -} -typedef unsigned int UB; -static UB barg(lua_State*L,int idx){ -union{lua_Number n;U64 b;}bn; -bn.n=lua_tonumber(L,idx)+6755399441055744.0; -if(bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number"); -return(UB)bn.b; -} -#define BRET(b)lua_pushnumber(L,(lua_Number)(int)(b));return 1; -static int tobit(lua_State*L){ -BRET(barg(L,1))} -static int bnot(lua_State*L){ -BRET(~barg(L,1))} -static int band(lua_State*L){ -int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)} -static int bor(lua_State*L){ -int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)} -static int 
bxor(lua_State*L){ -int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)} -static int lshift(lua_State*L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET(b<>n)} -static int arshift(lua_State*L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)} -static int rol(lua_State*L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET((b<>(32-n)))} -static int ror(lua_State*L){ -UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))} -static int bswap(lua_State*L){ -UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)} -static int tohex(lua_State*L){ -UB b=barg(L,1); -int n=lua_isnone(L,2)?8:(int)barg(L,2); -const char*hexdigits="0123456789abcdef"; -char buf[8]; -int i; -if(n<0){n=-n;hexdigits="0123456789ABCDEF";} -if(n>8)n=8; -for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;} -lua_pushlstring(L,buf,(size_t)n); -return 1; -} -static const struct luaL_Reg bitlib[]={ -{"tobit",tobit}, -{"bnot",bnot}, -{"band",band}, -{"bor",bor}, -{"bxor",bxor}, -{"lshift",lshift}, -{"rshift",rshift}, -{"arshift",arshift}, -{"rol",rol}, -{"ror",ror}, -{"bswap",bswap}, -{"tohex",tohex}, -{NULL,NULL} -}; -int main(int argc,char**argv){ -lua_State*L=luaL_newstate(); -int i; -luaL_openlibs(L); -luaL_register(L,"bit",bitlib); -if(argc<2)return sizeof(void*); -lua_createtable(L,0,1); -lua_pushstring(L,argv[1]); -lua_rawseti(L,-2,0); -lua_setglobal(L,"arg"); -if(luaL_loadfile(L,argv[1])) -goto err; -for(i=2;i -- BYTECODE -- [...] --- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello" --- --- local out = { --- -- Do something with each line: --- write = function(t, ...) io.write(...) end, --- close = function(t) end, --- flush = function(t) end, --- } --- bc.dump(foo, out) --- ------------------------------------------------------------------------------- - --- Cache some library functions and objects. -local jit = require("jit") -assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") -local jutil = require("jit.util") -local vmdef = require("jit.vmdef") -local bit = require("bit") -local sub, gsub, format = string.sub, string.gsub, string.format -local byte, band, shr = string.byte, bit.band, bit.rshift -local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck -local funcuvname = jutil.funcuvname -local bcnames = vmdef.bcnames -local stdout, stderr = io.stdout, io.stderr - ------------------------------------------------------------------------------- - -local function ctlsub(c) - if c == "\n" then return "\\n" - elseif c == "\r" then return "\\r" - elseif c == "\t" then return "\\t" - else return format("\\%03d", byte(c)) - end -end - --- Return one bytecode line. 
-local function bcline(func, pc, prefix)
-  local ins, m = funcbc(func, pc)
-  if not ins then return end
-  local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128)
-  local a = band(shr(ins, 8), 0xff)
-  local oidx = 6*band(ins, 0xff)
-  local op = sub(bcnames, oidx+1, oidx+6)
-  local s = format("%04d %s %-6s %3s ",
-    pc, prefix or " ", op, ma == 0 and "" or a)
-  local d = shr(ins, 16)
-  if mc == 13*128 then -- BCMjump
-    return format("%s=> %04d\n", s, pc+d-0x7fff)
-  end
-  if mb ~= 0 then
-    d = band(d, 0xff)
-  elseif mc == 0 then
-    return s.."\n"
-  end
-  local kc
-  if mc == 10*128 then -- BCMstr
-    kc = funck(func, -d-1)
-    kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub))
-  elseif mc == 9*128 then -- BCMnum
-    kc = funck(func, d)
-    if op == "TSETM " then kc = kc - 2^52 end
-  elseif mc == 12*128 then -- BCMfunc
-    local fi = funcinfo(funck(func, -d-1))
-    if fi.ffid then
-      kc = vmdef.ffnames[fi.ffid]
-    else
-      kc = fi.loc
-    end
-  elseif mc == 5*128 then -- BCMuv
-    kc = funcuvname(func, d)
-  end
-  if ma == 5 then -- BCMuv
-    local ka = funcuvname(func, a)
-    if kc then kc = ka.." ; "..kc else kc = ka end
-  end
-  if mb ~= 0 then
-    local b = shr(ins, 24)
-    if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end
-    return format("%s%3d %3d\n", s, b, d)
-  end
-  if kc then return format("%s%3d ; %s\n", s, d, kc) end
-  if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits
-  return format("%s%3d\n", s, d)
-end
-
--- Collect branch targets of a function.
-local function bctargets(func)
-  local target = {}
-  for pc=1,1000000000 do
-    local ins, m = funcbc(func, pc)
-    if not ins then break end
-    if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end
-  end
-  return target
-end
-
--- Dump bytecode instructions of a function.
-local function bcdump(func, out, all)
-  if not out then out = stdout end
-  local fi = funcinfo(func)
-  if all and fi.children then
-    for n=-1,-1000000000,-1 do
-      local k = funck(func, n)
-      if not k then break end
-      if type(k) == "proto" then bcdump(k, out, true) end
-    end
-  end
-  out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined))
-  local target = bctargets(func)
-  for pc=1,1000000000 do
-    local s = bcline(func, pc, target[pc] and "=>")
-    if not s then break end
-    out:write(s)
-  end
-  out:write("\n")
-  out:flush()
-end
-
-------------------------------------------------------------------------------
-
--- Active flag and output file handle.
-local active, out
-
--- List handler.
-local function h_list(func)
-  return bcdump(func, out)
-end
-
--- Detach list handler.
-local function bclistoff()
-  if active then
-    active = false
-    jit.attach(h_list)
-    if out and out ~= stdout and out ~= stderr then out:close() end
-    out = nil
-  end
-end
-
--- Open the output file and attach list handler.
-local function bcliston(outfile)
-  if active then bclistoff() end
-  if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end
-  if outfile then
-    out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
-  else
-    out = stderr
-  end
-  jit.attach(h_list, "bc")
-  active = true
-end
-
--- Public module functions.
-return {
-  line = bcline,
-  dump = bcdump,
-  targets = bctargets,
-  on = bcliston,
-  off = bclistoff,
-  start = bcliston -- For -j command line option.
-}
-
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/bcsave.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/bcsave.lua
deleted file mode 100644
index 1fe9d389697..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/bcsave.lua
+++ /dev/null
@@ -1,661 +0,0 @@
-----------------------------------------------------------------------------
--- LuaJIT module to save/list bytecode.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
---
--- This module saves or lists the bytecode for an input file.
--- It's run by the -b command line option.
---
------------------------------------------------------------------------------
-
-local jit = require("jit")
-assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
-local bit = require("bit")
-
--- Symbol name prefix for LuaJIT bytecode.
-local LJBC_PREFIX = "luaJIT_BC_"
-
------------------------------------------------------------------------------
-
-local function usage()
-  io.stderr:write[[
-Save LuaJIT bytecode: luajit -b[options] input output
-  -l Only list bytecode.
-  -s Strip debug info (default).
-  -g Keep debug info.
-  -n name Set module name (default: auto-detect from input name).
-  -t type Set output file type (default: auto-detect from output name).
-  -a arch Override architecture for object files (default: native).
-  -o os Override OS for object files (default: native).
-  -e chunk Use chunk string as input.
-  -- Stop handling options.
-  - Use stdin as input and/or stdout as output.
-
-File types: c h obj o raw (default)
-]]
-  os.exit(1)
-end
-
-local function check(ok, ...)
-  if ok then return ok, ... end
-  io.stderr:write("luajit: ", ...)
- io.stderr:write("\n") - os.exit(1) -end - -local function readfile(input) - if type(input) == "function" then return input end - if input == "-" then input = nil end - return check(loadfile(input)) -end - -local function savefile(name, mode) - if name == "-" then return io.stdout end - return check(io.open(name, mode)) -end - ------------------------------------------------------------------------------- - -local map_type = { - raw = "raw", c = "c", h = "h", o = "obj", obj = "obj", -} - -local map_arch = { - x86 = true, x64 = true, arm = true, arm64 = true, ppc = true, - mips = true, mipsel = true, -} - -local map_os = { - linux = true, windows = true, osx = true, freebsd = true, netbsd = true, - openbsd = true, dragonfly = true, solaris = true, -} - -local function checkarg(str, map, err) - str = string.lower(str) - local s = check(map[str], "unknown ", err) - return s == true and str or s -end - -local function detecttype(str) - local ext = string.match(string.lower(str), "%.(%a+)$") - return map_type[ext] or "raw" -end - -local function checkmodname(str) - check(string.match(str, "^[%w_.%-]+$"), "bad module name") - return string.gsub(str, "[%.%-]", "_") -end - -local function detectmodname(str) - if type(str) == "string" then - local tail = string.match(str, "[^/\\]+$") - if tail then str = tail end - local head = string.match(str, "^(.*)%.[^.]*$") - if head then str = head end - str = string.match(str, "^[%w_.%-]+") - else - str = nil - end - check(str, "cannot derive module name, use -n name") - return string.gsub(str, "[%.%-]", "_") -end - ------------------------------------------------------------------------------- - -local function bcsave_tail(fp, output, s) - local ok, err = fp:write(s) - if ok and output ~= "-" then ok, err = fp:close() end - check(ok, "cannot write ", output, ": ", err) -end - -local function bcsave_raw(output, s) - local fp = savefile(output, "wb") - bcsave_tail(fp, output, s) -end - -local function bcsave_c(ctx, output, s) - local fp = savefile(output, "w") - if ctx.type == "c" then - fp:write(string.format([[ -#ifdef _cplusplus -extern "C" -#endif -#ifdef _WIN32 -__declspec(dllexport) -#endif -const char %s%s[] = { -]], LJBC_PREFIX, ctx.modname)) - else - fp:write(string.format([[ -#define %s%s_SIZE %d -static const char %s%s[] = { -]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname)) - end - local t, n, m = {}, 0, 0 - for i=1,#s do - local b = tostring(string.byte(s, i)) - m = m + #b + 1 - if m > 78 then - fp:write(table.concat(t, ",", 1, n), ",\n") - n, m = 0, #b + 1 - end - n = n + 1 - t[n] = b - end - bcsave_tail(fp, output, table.concat(t, ",", 1, n).."\n};\n") -end - -local function bcsave_elfobj(ctx, output, s, ffi) - ffi.cdef[[ -typedef struct { - uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7]; - uint16_t type, machine; - uint32_t version; - uint32_t entry, phofs, shofs; - uint32_t flags; - uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx; -} ELF32header; -typedef struct { - uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7]; - uint16_t type, machine; - uint32_t version; - uint64_t entry, phofs, shofs; - uint32_t flags; - uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx; -} ELF64header; -typedef struct { - uint32_t name, type, flags, addr, ofs, size, link, info, align, entsize; -} ELF32sectheader; -typedef struct { - uint32_t name, type; - uint64_t flags, addr, ofs, size; - uint32_t link, info; - uint64_t align, entsize; -} ELF64sectheader; -typedef struct { - 
uint32_t name, value, size; - uint8_t info, other; - uint16_t sectidx; -} ELF32symbol; -typedef struct { - uint32_t name; - uint8_t info, other; - uint16_t sectidx; - uint64_t value, size; -} ELF64symbol; -typedef struct { - ELF32header hdr; - ELF32sectheader sect[6]; - ELF32symbol sym[2]; - uint8_t space[4096]; -} ELF32obj; -typedef struct { - ELF64header hdr; - ELF64sectheader sect[6]; - ELF64symbol sym[2]; - uint8_t space[4096]; -} ELF64obj; -]] - local symname = LJBC_PREFIX..ctx.modname - local is64, isbe = false, false - if ctx.arch == "x64" then - is64 = true - elseif ctx.arch == "ppc" or ctx.arch == "mips" then - isbe = true - end - - -- Handle different host/target endianess. - local function f32(x) return x end - local f16, fofs = f32, f32 - if ffi.abi("be") ~= isbe then - f32 = bit.bswap - function f16(x) return bit.rshift(bit.bswap(x), 16) end - if is64 then - local two32 = ffi.cast("int64_t", 2^32) - function fofs(x) return bit.bswap(x)*two32 end - else - fofs = f32 - end - end - - -- Create ELF object and fill in header. - local o = ffi.new(is64 and "ELF64obj" or "ELF32obj") - local hdr = o.hdr - if ctx.os == "bsd" or ctx.os == "other" then -- Determine native hdr.eosabi. - local bf = assert(io.open("/bin/ls", "rb")) - local bs = bf:read(9) - bf:close() - ffi.copy(o, bs, 9) - check(hdr.emagic[0] == 127, "no support for writing native object files") - else - hdr.emagic = "\127ELF" - hdr.eosabi = ({ freebsd=9, netbsd=2, openbsd=12, solaris=6 })[ctx.os] or 0 - end - hdr.eclass = is64 and 2 or 1 - hdr.eendian = isbe and 2 or 1 - hdr.eversion = 1 - hdr.type = f16(1) - hdr.machine = f16(({ x86=3, x64=62, arm=40, arm64=183, ppc=20, mips=8, mipsel=8 })[ctx.arch]) - if ctx.arch == "mips" or ctx.arch == "mipsel" then - hdr.flags = 0x50001006 - end - hdr.version = f32(1) - hdr.shofs = fofs(ffi.offsetof(o, "sect")) - hdr.ehsize = f16(ffi.sizeof(hdr)) - hdr.shentsize = f16(ffi.sizeof(o.sect[0])) - hdr.shnum = f16(6) - hdr.shstridx = f16(2) - - -- Fill in sections and symbols. - local sofs, ofs = ffi.offsetof(o, "space"), 1 - for i,name in ipairs{ - ".symtab", ".shstrtab", ".strtab", ".rodata", ".note.GNU-stack", - } do - local sect = o.sect[i] - sect.align = fofs(1) - sect.name = f32(ofs) - ffi.copy(o.space+ofs, name) - ofs = ofs + #name+1 - end - o.sect[1].type = f32(2) -- .symtab - o.sect[1].link = f32(3) - o.sect[1].info = f32(1) - o.sect[1].align = fofs(8) - o.sect[1].ofs = fofs(ffi.offsetof(o, "sym")) - o.sect[1].entsize = fofs(ffi.sizeof(o.sym[0])) - o.sect[1].size = fofs(ffi.sizeof(o.sym)) - o.sym[1].name = f32(1) - o.sym[1].sectidx = f16(4) - o.sym[1].size = fofs(#s) - o.sym[1].info = 17 - o.sect[2].type = f32(3) -- .shstrtab - o.sect[2].ofs = fofs(sofs) - o.sect[2].size = fofs(ofs) - o.sect[3].type = f32(3) -- .strtab - o.sect[3].ofs = fofs(sofs + ofs) - o.sect[3].size = fofs(#symname+1) - ffi.copy(o.space+ofs+1, symname) - ofs = ofs + #symname + 2 - o.sect[4].type = f32(1) -- .rodata - o.sect[4].flags = fofs(2) - o.sect[4].ofs = fofs(sofs + ofs) - o.sect[4].size = fofs(#s) - o.sect[5].type = f32(1) -- .note.GNU-stack - o.sect[5].ofs = fofs(sofs + ofs + #s) - - -- Write ELF object file. 
- local fp = savefile(output, "wb") - fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs)) - bcsave_tail(fp, output, s) -end - -local function bcsave_peobj(ctx, output, s, ffi) - ffi.cdef[[ -typedef struct { - uint16_t arch, nsects; - uint32_t time, symtabofs, nsyms; - uint16_t opthdrsz, flags; -} PEheader; -typedef struct { - char name[8]; - uint32_t vsize, vaddr, size, ofs, relocofs, lineofs; - uint16_t nreloc, nline; - uint32_t flags; -} PEsection; -typedef struct __attribute((packed)) { - union { - char name[8]; - uint32_t nameref[2]; - }; - uint32_t value; - int16_t sect; - uint16_t type; - uint8_t scl, naux; -} PEsym; -typedef struct __attribute((packed)) { - uint32_t size; - uint16_t nreloc, nline; - uint32_t cksum; - uint16_t assoc; - uint8_t comdatsel, unused[3]; -} PEsymaux; -typedef struct { - PEheader hdr; - PEsection sect[2]; - // Must be an even number of symbol structs. - PEsym sym0; - PEsymaux sym0aux; - PEsym sym1; - PEsymaux sym1aux; - PEsym sym2; - PEsym sym3; - uint32_t strtabsize; - uint8_t space[4096]; -} PEobj; -]] - local symname = LJBC_PREFIX..ctx.modname - local is64 = false - if ctx.arch == "x86" then - symname = "_"..symname - elseif ctx.arch == "x64" then - is64 = true - end - local symexport = " /EXPORT:"..symname..",DATA " - - -- The file format is always little-endian. Swap if the host is big-endian. - local function f32(x) return x end - local f16 = f32 - if ffi.abi("be") then - f32 = bit.bswap - function f16(x) return bit.rshift(bit.bswap(x), 16) end - end - - -- Create PE object and fill in header. - local o = ffi.new("PEobj") - local hdr = o.hdr - hdr.arch = f16(({ x86=0x14c, x64=0x8664, arm=0x1c0, ppc=0x1f2, mips=0x366, mipsel=0x366 })[ctx.arch]) - hdr.nsects = f16(2) - hdr.symtabofs = f32(ffi.offsetof(o, "sym0")) - hdr.nsyms = f32(6) - - -- Fill in sections and symbols. - o.sect[0].name = ".drectve" - o.sect[0].size = f32(#symexport) - o.sect[0].flags = f32(0x00100a00) - o.sym0.sect = f16(1) - o.sym0.scl = 3 - o.sym0.name = ".drectve" - o.sym0.naux = 1 - o.sym0aux.size = f32(#symexport) - o.sect[1].name = ".rdata" - o.sect[1].size = f32(#s) - o.sect[1].flags = f32(0x40300040) - o.sym1.sect = f16(2) - o.sym1.scl = 3 - o.sym1.name = ".rdata" - o.sym1.naux = 1 - o.sym1aux.size = f32(#s) - o.sym2.sect = f16(2) - o.sym2.scl = 2 - o.sym2.nameref[1] = f32(4) - o.sym3.sect = f16(-1) - o.sym3.scl = 2 - o.sym3.value = f32(1) - o.sym3.name = "@feat.00" -- Mark as SafeSEH compliant. - ffi.copy(o.space, symname) - local ofs = #symname + 1 - o.strtabsize = f32(ofs + 4) - o.sect[0].ofs = f32(ffi.offsetof(o, "space") + ofs) - ffi.copy(o.space + ofs, symexport) - ofs = ofs + #symexport - o.sect[1].ofs = f32(ffi.offsetof(o, "space") + ofs) - - -- Write PE object file. 
- local fp = savefile(output, "wb") - fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs)) - bcsave_tail(fp, output, s) -end - -local function bcsave_machobj(ctx, output, s, ffi) - ffi.cdef[[ -typedef struct -{ - uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags; -} mach_header; -typedef struct -{ - mach_header; uint32_t reserved; -} mach_header_64; -typedef struct { - uint32_t cmd, cmdsize; - char segname[16]; - uint32_t vmaddr, vmsize, fileoff, filesize; - uint32_t maxprot, initprot, nsects, flags; -} mach_segment_command; -typedef struct { - uint32_t cmd, cmdsize; - char segname[16]; - uint64_t vmaddr, vmsize, fileoff, filesize; - uint32_t maxprot, initprot, nsects, flags; -} mach_segment_command_64; -typedef struct { - char sectname[16], segname[16]; - uint32_t addr, size; - uint32_t offset, align, reloff, nreloc, flags; - uint32_t reserved1, reserved2; -} mach_section; -typedef struct { - char sectname[16], segname[16]; - uint64_t addr, size; - uint32_t offset, align, reloff, nreloc, flags; - uint32_t reserved1, reserved2, reserved3; -} mach_section_64; -typedef struct { - uint32_t cmd, cmdsize, symoff, nsyms, stroff, strsize; -} mach_symtab_command; -typedef struct { - int32_t strx; - uint8_t type, sect; - int16_t desc; - uint32_t value; -} mach_nlist; -typedef struct { - uint32_t strx; - uint8_t type, sect; - uint16_t desc; - uint64_t value; -} mach_nlist_64; -typedef struct -{ - uint32_t magic, nfat_arch; -} mach_fat_header; -typedef struct -{ - uint32_t cputype, cpusubtype, offset, size, align; -} mach_fat_arch; -typedef struct { - struct { - mach_header hdr; - mach_segment_command seg; - mach_section sec; - mach_symtab_command sym; - } arch[1]; - mach_nlist sym_entry; - uint8_t space[4096]; -} mach_obj; -typedef struct { - struct { - mach_header_64 hdr; - mach_segment_command_64 seg; - mach_section_64 sec; - mach_symtab_command sym; - } arch[1]; - mach_nlist_64 sym_entry; - uint8_t space[4096]; -} mach_obj_64; -typedef struct { - mach_fat_header fat; - mach_fat_arch fat_arch[2]; - struct { - mach_header hdr; - mach_segment_command seg; - mach_section sec; - mach_symtab_command sym; - } arch[2]; - mach_nlist sym_entry; - uint8_t space[4096]; -} mach_fat_obj; -]] - local symname = '_'..LJBC_PREFIX..ctx.modname - local isfat, is64, align, mobj = false, false, 4, "mach_obj" - if ctx.arch == "x64" then - is64, align, mobj = true, 8, "mach_obj_64" - elseif ctx.arch == "arm" then - isfat, mobj = true, "mach_fat_obj" - elseif ctx.arch == "arm64" then - is64, align, isfat, mobj = true, 8, true, "mach_fat_obj" - else - check(ctx.arch == "x86", "unsupported architecture for OSX") - end - local function aligned(v, a) return bit.band(v+a-1, -a) end - local be32 = bit.bswap -- Mach-O FAT is BE, supported archs are LE. - - -- Create Mach-O object and fill in header. - local o = ffi.new(mobj) - local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align) - local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12}, arm64={0x01000007,0x0100000c} })[ctx.arch] - local cpusubtype = ({ x86={3}, x64={3}, arm={3,9}, arm64={3,0} })[ctx.arch] - if isfat then - o.fat.magic = be32(0xcafebabe) - o.fat.nfat_arch = be32(#cpusubtype) - end - - -- Fill in sections and symbols. - for i=0,#cpusubtype-1 do - local ofs = 0 - if isfat then - local a = o.fat_arch[i] - a.cputype = be32(cputype[i+1]) - a.cpusubtype = be32(cpusubtype[i+1]) - -- Subsequent slices overlap each other to share data. 
- ofs = ffi.offsetof(o, "arch") + i*ffi.sizeof(o.arch[0]) - a.offset = be32(ofs) - a.size = be32(mach_size-ofs+#s) - end - local a = o.arch[i] - a.hdr.magic = is64 and 0xfeedfacf or 0xfeedface - a.hdr.cputype = cputype[i+1] - a.hdr.cpusubtype = cpusubtype[i+1] - a.hdr.filetype = 1 - a.hdr.ncmds = 2 - a.hdr.sizeofcmds = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)+ffi.sizeof(a.sym) - a.seg.cmd = is64 and 0x19 or 0x1 - a.seg.cmdsize = ffi.sizeof(a.seg)+ffi.sizeof(a.sec) - a.seg.vmsize = #s - a.seg.fileoff = mach_size-ofs - a.seg.filesize = #s - a.seg.maxprot = 1 - a.seg.initprot = 1 - a.seg.nsects = 1 - ffi.copy(a.sec.sectname, "__data") - ffi.copy(a.sec.segname, "__DATA") - a.sec.size = #s - a.sec.offset = mach_size-ofs - a.sym.cmd = 2 - a.sym.cmdsize = ffi.sizeof(a.sym) - a.sym.symoff = ffi.offsetof(o, "sym_entry")-ofs - a.sym.nsyms = 1 - a.sym.stroff = ffi.offsetof(o, "sym_entry")+ffi.sizeof(o.sym_entry)-ofs - a.sym.strsize = aligned(#symname+2, align) - end - o.sym_entry.type = 0xf - o.sym_entry.sect = 1 - o.sym_entry.strx = 1 - ffi.copy(o.space+1, symname) - - -- Write Macho-O object file. - local fp = savefile(output, "wb") - fp:write(ffi.string(o, mach_size)) - bcsave_tail(fp, output, s) -end - -local function bcsave_obj(ctx, output, s) - local ok, ffi = pcall(require, "ffi") - check(ok, "FFI library required to write this file type") - if ctx.os == "windows" then - return bcsave_peobj(ctx, output, s, ffi) - elseif ctx.os == "osx" then - return bcsave_machobj(ctx, output, s, ffi) - else - return bcsave_elfobj(ctx, output, s, ffi) - end -end - ------------------------------------------------------------------------------- - -local function bclist(input, output) - local f = readfile(input) - require("jit.bc").dump(f, savefile(output, "w"), true) -end - -local function bcsave(ctx, input, output) - local f = readfile(input) - local s = string.dump(f, ctx.strip) - local t = ctx.type - if not t then - t = detecttype(output) - ctx.type = t - end - if t == "raw" then - bcsave_raw(output, s) - else - if not ctx.modname then ctx.modname = detectmodname(input) end - if t == "obj" then - bcsave_obj(ctx, output, s) - else - bcsave_c(ctx, output, s) - end - end -end - -local function docmd(...) 
- local arg = {...} - local n = 1 - local list = false - local ctx = { - strip = true, arch = jit.arch, os = string.lower(jit.os), - type = false, modname = false, - } - while n <= #arg do - local a = arg[n] - if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then - table.remove(arg, n) - if a == "--" then break end - for m=2,#a do - local opt = string.sub(a, m, m) - if opt == "l" then - list = true - elseif opt == "s" then - ctx.strip = true - elseif opt == "g" then - ctx.strip = false - else - if arg[n] == nil or m ~= #a then usage() end - if opt == "e" then - if n ~= 1 then usage() end - arg[1] = check(loadstring(arg[1])) - elseif opt == "n" then - ctx.modname = checkmodname(table.remove(arg, n)) - elseif opt == "t" then - ctx.type = checkarg(table.remove(arg, n), map_type, "file type") - elseif opt == "a" then - ctx.arch = checkarg(table.remove(arg, n), map_arch, "architecture") - elseif opt == "o" then - ctx.os = checkarg(table.remove(arg, n), map_os, "OS name") - else - usage() - end - end - end - else - n = n + 1 - end - end - if list then - if #arg == 0 or #arg > 2 then usage() end - bclist(arg[1], arg[2] or "-") - else - if #arg ~= 2 then usage() end - bcsave(ctx, arg[1], arg[2]) - end -end - ------------------------------------------------------------------------------- - --- Public module functions. -return { - start = docmd -- Process -b command line option. -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_arm.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_arm.lua deleted file mode 100644 index dfcbeeecf14..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_arm.lua +++ /dev/null @@ -1,689 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT ARM disassembler module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- This is a helper module used by the LuaJIT machine code dumper module. --- --- It disassembles most user-mode ARMv7 instructions --- NYI: Advanced SIMD and VFP instructions. 
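For reference, a minimal usage sketch of the dis_arm module deleted here (not part of the patch; the byte string and addresses are illustrative placeholders). It exercises the create/disass/regname functions exported at the bottom of this file, assuming the jit.* modules of a LuaJIT install are on package.path.

local dis_arm = require("jit.dis_arm")

-- Four little-endian bytes forming one ARM word; the value is only a sample input.
local code = string.char(0x04, 0xb0, 0x2d, 0xe5)

-- One-shot API: print each decoded instruction via the default io.write output.
dis_arm.disass(code, 0)

-- Extended API: keep a context around and disassemble a sub-range at a chosen base address.
local ctx = dis_arm.create(code, 0x10000, io.write)
ctx:disass(0, #code)

print(dis_arm.regname(0))  --> r0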
------------------------------------------------------------------------------- - -local type = type -local sub, byte, format = string.sub, string.byte, string.format -local match, gmatch, gsub = string.match, string.gmatch, string.gsub -local concat = table.concat -local bit = require("bit") -local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex -local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift - ------------------------------------------------------------------------------- --- Opcode maps ------------------------------------------------------------------------------- - -local map_loadc = { - shift = 8, mask = 15, - [10] = { - shift = 20, mask = 1, - [0] = { - shift = 23, mask = 3, - [0] = "vmovFmDN", "vstmFNdr", - _ = { - shift = 21, mask = 1, - [0] = "vstrFdl", - { shift = 16, mask = 15, [13] = "vpushFdr", _ = "vstmdbFNdr", } - }, - }, - { - shift = 23, mask = 3, - [0] = "vmovFDNm", - { shift = 16, mask = 15, [13] = "vpopFdr", _ = "vldmFNdr", }, - _ = { - shift = 21, mask = 1, - [0] = "vldrFdl", "vldmdbFNdr", - }, - }, - }, - [11] = { - shift = 20, mask = 1, - [0] = { - shift = 23, mask = 3, - [0] = "vmovGmDN", "vstmGNdr", - _ = { - shift = 21, mask = 1, - [0] = "vstrGdl", - { shift = 16, mask = 15, [13] = "vpushGdr", _ = "vstmdbGNdr", } - }, - }, - { - shift = 23, mask = 3, - [0] = "vmovGDNm", - { shift = 16, mask = 15, [13] = "vpopGdr", _ = "vldmGNdr", }, - _ = { - shift = 21, mask = 1, - [0] = "vldrGdl", "vldmdbGNdr", - }, - }, - }, - _ = { - shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc. - }, -} - -local map_vfps = { - shift = 6, mask = 0x2c001, - [0] = "vmlaF.dnm", "vmlsF.dnm", - [0x04000] = "vnmlsF.dnm", [0x04001] = "vnmlaF.dnm", - [0x08000] = "vmulF.dnm", [0x08001] = "vnmulF.dnm", - [0x0c000] = "vaddF.dnm", [0x0c001] = "vsubF.dnm", - [0x20000] = "vdivF.dnm", - [0x24000] = "vfnmsF.dnm", [0x24001] = "vfnmaF.dnm", - [0x28000] = "vfmaF.dnm", [0x28001] = "vfmsF.dnm", - [0x2c000] = "vmovF.dY", - [0x2c001] = { - shift = 7, mask = 0x1e01, - [0] = "vmovF.dm", "vabsF.dm", - [0x0200] = "vnegF.dm", [0x0201] = "vsqrtF.dm", - [0x0800] = "vcmpF.dm", [0x0801] = "vcmpeF.dm", - [0x0a00] = "vcmpzF.d", [0x0a01] = "vcmpzeF.d", - [0x0e01] = "vcvtG.dF.m", - [0x1000] = "vcvt.f32.u32Fdm", [0x1001] = "vcvt.f32.s32Fdm", - [0x1800] = "vcvtr.u32F.dm", [0x1801] = "vcvt.u32F.dm", - [0x1a00] = "vcvtr.s32F.dm", [0x1a01] = "vcvt.s32F.dm", - }, -} - -local map_vfpd = { - shift = 6, mask = 0x2c001, - [0] = "vmlaG.dnm", "vmlsG.dnm", - [0x04000] = "vnmlsG.dnm", [0x04001] = "vnmlaG.dnm", - [0x08000] = "vmulG.dnm", [0x08001] = "vnmulG.dnm", - [0x0c000] = "vaddG.dnm", [0x0c001] = "vsubG.dnm", - [0x20000] = "vdivG.dnm", - [0x24000] = "vfnmsG.dnm", [0x24001] = "vfnmaG.dnm", - [0x28000] = "vfmaG.dnm", [0x28001] = "vfmsG.dnm", - [0x2c000] = "vmovG.dY", - [0x2c001] = { - shift = 7, mask = 0x1e01, - [0] = "vmovG.dm", "vabsG.dm", - [0x0200] = "vnegG.dm", [0x0201] = "vsqrtG.dm", - [0x0800] = "vcmpG.dm", [0x0801] = "vcmpeG.dm", - [0x0a00] = "vcmpzG.d", [0x0a01] = "vcmpzeG.d", - [0x0e01] = "vcvtF.dG.m", - [0x1000] = "vcvt.f64.u32GdFm", [0x1001] = "vcvt.f64.s32GdFm", - [0x1800] = "vcvtr.u32FdG.m", [0x1801] = "vcvt.u32FdG.m", - [0x1a00] = "vcvtr.s32FdG.m", [0x1a01] = "vcvt.s32FdG.m", - }, -} - -local map_datac = { - shift = 24, mask = 1, - [0] = { - shift = 4, mask = 1, - [0] = { - shift = 8, mask = 15, - [10] = map_vfps, - [11] = map_vfpd, - -- NYI cdp, mcr, mrc. 
- }, - { - shift = 8, mask = 15, - [10] = { - shift = 20, mask = 15, - [0] = "vmovFnD", "vmovFDn", - [14] = "vmsrD", - [15] = { shift = 12, mask = 15, [15] = "vmrs", _ = "vmrsD", }, - }, - }, - }, - "svcT", -} - -local map_loadcu = { - shift = 0, mask = 0, -- NYI unconditional CP load/store. -} - -local map_datacu = { - shift = 0, mask = 0, -- NYI unconditional CP data. -} - -local map_simddata = { - shift = 0, mask = 0, -- NYI SIMD data. -} - -local map_simdload = { - shift = 0, mask = 0, -- NYI SIMD load/store, preload. -} - -local map_preload = { - shift = 0, mask = 0, -- NYI preload. -} - -local map_media = { - shift = 20, mask = 31, - [0] = false, - { --01 - shift = 5, mask = 7, - [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM", - "sadd8DNM", false, false, "ssub8DNM", - }, - { --02 - shift = 5, mask = 7, - [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM", - "qadd8DNM", false, false, "qsub8DNM", - }, - { --03 - shift = 5, mask = 7, - [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM", - "shadd8DNM", false, false, "shsub8DNM", - }, - false, - { --05 - shift = 5, mask = 7, - [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM", - "uadd8DNM", false, false, "usub8DNM", - }, - { --06 - shift = 5, mask = 7, - [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM", - "uqadd8DNM", false, false, "uqsub8DNM", - }, - { --07 - shift = 5, mask = 7, - [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM", - "uhadd8DNM", false, false, "uhsub8DNM", - }, - { --08 - shift = 5, mask = 7, - [0] = "pkhbtDNMU", false, "pkhtbDNMU", - { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", }, - "pkhbtDNMU", "selDNM", "pkhtbDNMU", - }, - false, - { --0a - shift = 5, mask = 7, - [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu", - { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", }, - "ssatDxMu", false, "ssatDxMu", - }, - { --0b - shift = 5, mask = 7, - [0] = "ssatDxMu", "revDM", "ssatDxMu", - { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", }, - "ssatDxMu", "rev16DM", "ssatDxMu", - }, - { --0c - shift = 5, mask = 7, - [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", }, - }, - false, - { --0e - shift = 5, mask = 7, - [0] = "usatDwMu", "usat16DwM", "usatDwMu", - { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", }, - "usatDwMu", false, "usatDwMu", - }, - { --0f - shift = 5, mask = 7, - [0] = "usatDwMu", "rbitDM", "usatDwMu", - { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", }, - "usatDwMu", "revshDM", "usatDwMu", - }, - { --10 - shift = 12, mask = 15, - [15] = { - shift = 5, mask = 7, - "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS", - }, - _ = { - shift = 5, mask = 7, - [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD", - }, - }, - false, false, false, - { --14 - shift = 5, mask = 7, - [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS", - }, - { --15 - shift = 5, mask = 7, - [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", }, - { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", }, - false, false, false, false, - "smmlsNMSD", "smmlsrNMSD", - }, - false, false, - { --18 - shift = 5, mask = 7, - [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", }, - }, - false, - { --1a - shift = 5, mask = 3, [2] = "sbfxDMvw", - }, - { --1b - shift = 5, mask = 3, [2] = "sbfxDMvw", - }, - { --1c - shift = 5, mask = 3, - [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", }, - }, - { --1d - shift = 5, mask = 3, - [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = 
"bfiDMvX", }, - }, - { --1e - shift = 5, mask = 3, [2] = "ubfxDMvw", - }, - { --1f - shift = 5, mask = 3, [2] = "ubfxDMvw", - }, -} - -local map_load = { - shift = 21, mask = 9, - { - shift = 20, mask = 5, - [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL", - }, - _ = { - shift = 20, mask = 5, - [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL", - } -} - -local map_load1 = { - shift = 4, mask = 1, - [0] = map_load, map_media, -} - -local map_loadm = { - shift = 20, mask = 1, - [0] = { - shift = 23, mask = 3, - [0] = "stmdaNR", "stmNR", - { shift = 16, mask = 63, [45] = "pushR", _ = "stmdbNR", }, "stmibNR", - }, - { - shift = 23, mask = 3, - [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", }, - "ldmdbNR", "ldmibNR", - }, -} - -local map_data = { - shift = 21, mask = 15, - [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs", - "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs", - "tstNP", "teqNP", "cmpNP", "cmnNP", - "orrDNPs", "movDPs", "bicDNPs", "mvnDPs", -} - -local map_mul = { - shift = 21, mask = 7, - [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS", - "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs", -} - -local map_sync = { - shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd. - [0] = "swpDMN", false, false, false, - "swpbDMN", false, false, false, - "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN", - "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN", -} - -local map_mulh = { - shift = 21, mask = 3, - [0] = { shift = 5, mask = 3, - [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", }, - { shift = 5, mask = 3, - [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", }, - { shift = 5, mask = 3, - [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", }, - { shift = 5, mask = 3, - [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", }, -} - -local map_misc = { - shift = 4, mask = 7, - -- NYI: decode PSR bits of msr. - [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", }, - { shift = 21, mask = 3, "bxM", false, "clzDM", }, - { shift = 21, mask = 3, "bxjM", }, - { shift = 21, mask = 3, "blxM", }, - false, - { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", }, - false, - { shift = 21, mask = 3, "bkptK", }, -} - -local map_datar = { - shift = 4, mask = 9, - [9] = { - shift = 5, mask = 3, - [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, }, - { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", }, - { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", }, - { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", }, - }, - _ = { - shift = 20, mask = 25, - [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, }, - _ = { - shift = 0, mask = 0xffffffff, - [bor(0xe1a00000)] = "nop", - _ = map_data, - } - }, -} - -local map_datai = { - shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12. - [16] = "movwDW", [20] = "movtDW", - [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", }, - [22] = "msrNW", - _ = map_data, -} - -local map_branch = { - shift = 24, mask = 1, - [0] = "bB", "blB" -} - -local map_condins = { - [0] = map_datar, map_datai, map_load, map_load1, - map_loadm, map_branch, map_loadc, map_datac -} - --- NYI: setend. 
-local map_uncondins = { - [0] = false, map_simddata, map_simdload, map_preload, - false, "blxB", map_loadcu, map_datacu, -} - ------------------------------------------------------------------------------- - -local map_gpr = { - [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc", -} - -local map_cond = { - [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", - "hi", "ls", "ge", "lt", "gt", "le", "al", -} - -local map_shift = { [0] = "lsl", "lsr", "asr", "ror", } - ------------------------------------------------------------------------------- - --- Output a nicely formatted line with an opcode and operands. -local function putop(ctx, text, operands) - local pos = ctx.pos - local extra = "" - if ctx.rel then - local sym = ctx.symtab[ctx.rel] - if sym then - extra = "\t->"..sym - elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then - extra = "\t; 0x"..tohex(ctx.rel) - end - end - if ctx.hexdump > 0 then - ctx.out(format("%08x %s %-5s %s%s\n", - ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) - else - ctx.out(format("%08x %-5s %s%s\n", - ctx.addr+pos, text, concat(operands, ", "), extra)) - end - ctx.pos = pos + 4 -end - --- Fallback for unknown opcodes. -local function unknown(ctx) - return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) -end - --- Format operand 2 of load/store opcodes. -local function fmtload(ctx, op, pos) - local base = map_gpr[band(rshift(op, 16), 15)] - local x, ofs - local ext = (band(op, 0x04000000) == 0) - if not ext and band(op, 0x02000000) == 0 then - ofs = band(op, 4095) - if band(op, 0x00800000) == 0 then ofs = -ofs end - if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end - ofs = "#"..ofs - elseif ext and band(op, 0x00400000) ~= 0 then - ofs = band(op, 15) + band(rshift(op, 4), 0xf0) - if band(op, 0x00800000) == 0 then ofs = -ofs end - if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end - ofs = "#"..ofs - else - ofs = map_gpr[band(op, 15)] - if ext or band(op, 0xfe0) == 0 then - elseif band(op, 0xfe0) == 0x60 then - ofs = format("%s, rrx", ofs) - else - local sh = band(rshift(op, 7), 31) - if sh == 0 then sh = 32 end - ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh) - end - if band(op, 0x00800000) == 0 then ofs = "-"..ofs end - end - if ofs == "#0" then - x = format("[%s]", base) - elseif band(op, 0x01000000) == 0 then - x = format("[%s], %s", base, ofs) - else - x = format("[%s, %s]", base, ofs) - end - if band(op, 0x01200000) == 0x01200000 then x = x.."!" end - return x -end - --- Format operand 2 of vector load/store opcodes. -local function fmtvload(ctx, op, pos) - local base = map_gpr[band(rshift(op, 16), 15)] - local ofs = band(op, 255)*4 - if band(op, 0x00800000) == 0 then ofs = -ofs end - if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end - if ofs == 0 then - return format("[%s]", base) - else - return format("[%s, #%d]", base, ofs) - end -end - -local function fmtvr(op, vr, sh0, sh1) - if vr == "s" then - return format("s%d", 2*band(rshift(op, sh0), 15)+band(rshift(op, sh1), 1)) - else - return format("d%d", band(rshift(op, sh0), 15)+band(rshift(op, sh1-4), 16)) - end -end - --- Disassemble a single instruction. 
-local function disass_ins(ctx) - local pos = ctx.pos - local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) - local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0) - local operands = {} - local suffix = "" - local last, name, pat - local vr - ctx.op = op - ctx.rel = nil - - local cond = rshift(op, 28) - local opat - if cond == 15 then - opat = map_uncondins[band(rshift(op, 25), 7)] - else - if cond ~= 14 then suffix = map_cond[cond] end - opat = map_condins[band(rshift(op, 25), 7)] - end - while type(opat) ~= "string" do - if not opat then return unknown(ctx) end - opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._ - end - name, pat = match(opat, "^([a-z0-9]*)(.*)") - if sub(pat, 1, 1) == "." then - local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)") - suffix = suffix..s2 - pat = p2 - end - - for p in gmatch(pat, ".") do - local x = nil - if p == "D" then - x = map_gpr[band(rshift(op, 12), 15)] - elseif p == "N" then - x = map_gpr[band(rshift(op, 16), 15)] - elseif p == "S" then - x = map_gpr[band(rshift(op, 8), 15)] - elseif p == "M" then - x = map_gpr[band(op, 15)] - elseif p == "d" then - x = fmtvr(op, vr, 12, 22) - elseif p == "n" then - x = fmtvr(op, vr, 16, 7) - elseif p == "m" then - x = fmtvr(op, vr, 0, 5) - elseif p == "P" then - if band(op, 0x02000000) ~= 0 then - x = ror(band(op, 255), 2*band(rshift(op, 8), 15)) - else - x = map_gpr[band(op, 15)] - if band(op, 0xff0) ~= 0 then - operands[#operands+1] = x - local s = map_shift[band(rshift(op, 5), 3)] - local r = nil - if band(op, 0xf90) == 0 then - if s == "ror" then s = "rrx" else r = "#32" end - elseif band(op, 0x10) == 0 then - r = "#"..band(rshift(op, 7), 31) - else - r = map_gpr[band(rshift(op, 8), 15)] - end - if name == "mov" then name = s; x = r - elseif r then x = format("%s %s", s, r) - else x = s end - end - end - elseif p == "L" then - x = fmtload(ctx, op, pos) - elseif p == "l" then - x = fmtvload(ctx, op, pos) - elseif p == "B" then - local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6) - if cond == 15 then addr = addr + band(rshift(op, 23), 2) end - ctx.rel = addr - x = "0x"..tohex(addr) - elseif p == "F" then - vr = "s" - elseif p == "G" then - vr = "d" - elseif p == "." then - suffix = suffix..(vr == "s" and ".f32" or ".f64") - elseif p == "R" then - if band(op, 0x00200000) ~= 0 and #operands == 1 then - operands[1] = operands[1].."!" - end - local t = {} - for i=0,15 do - if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end - end - x = "{"..concat(t, ", ").."}" - elseif p == "r" then - if band(op, 0x00200000) ~= 0 and #operands == 2 then - operands[1] = operands[1].."!" 
- end - local s = tonumber(sub(last, 2)) - local n = band(op, 255) - if vr == "d" then n = rshift(n, 1) end - operands[#operands] = format("{%s-%s%d}", last, vr, s+n-1) - elseif p == "W" then - x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000) - elseif p == "T" then - x = "#0x"..tohex(band(op, 0x00ffffff), 6) - elseif p == "U" then - x = band(rshift(op, 7), 31) - if x == 0 then x = nil end - elseif p == "u" then - x = band(rshift(op, 7), 31) - if band(op, 0x40) == 0 then - if x == 0 then x = nil else x = "lsl #"..x end - else - if x == 0 then x = "asr #32" else x = "asr #"..x end - end - elseif p == "v" then - x = band(rshift(op, 7), 31) - elseif p == "w" then - x = band(rshift(op, 16), 31) - elseif p == "x" then - x = band(rshift(op, 16), 31) + 1 - elseif p == "X" then - x = band(rshift(op, 16), 31) - last + 1 - elseif p == "Y" then - x = band(rshift(op, 12), 0xf0) + band(op, 0x0f) - elseif p == "K" then - x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4) - elseif p == "s" then - if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end - else - assert(false) - end - if x then - last = x - if type(x) == "number" then x = "#"..x end - operands[#operands+1] = x - end - end - - return putop(ctx, name..suffix, operands) -end - ------------------------------------------------------------------------------- - --- Disassemble a block of code. -local function disass_block(ctx, ofs, len) - if not ofs then ofs = 0 end - local stop = len and ofs+len or #ctx.code - ctx.pos = ofs - ctx.rel = nil - while ctx.pos < stop do disass_ins(ctx) end -end - --- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). -local function create(code, addr, out) - local ctx = {} - ctx.code = code - ctx.addr = addr or 0 - ctx.out = out or io.write - ctx.symtab = {} - ctx.disass = disass_block - ctx.hexdump = 8 - return ctx -end - --- Simple API: disassemble code (a string) at address and output via out. -local function disass(code, addr, out) - create(code, addr, out):disass() -end - --- Return register name for RID. -local function regname(r) - if r < 16 then return map_gpr[r] end - return "d"..(r-16) -end - --- Public module functions. -return { - create = create, - disass = disass, - regname = regname -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mips.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mips.lua deleted file mode 100644 index 9466f45c2c2..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mips.lua +++ /dev/null @@ -1,428 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT MIPS disassembler module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT/X license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- This is a helper module used by the LuaJIT machine code dumper module. --- --- It disassembles all standard MIPS32R1/R2 instructions. 
--- Default mode is big-endian, but see: dis_mipsel.lua ------------------------------------------------------------------------------- - -local type = type -local sub, byte, format = string.sub, string.byte, string.format -local match, gmatch, gsub = string.match, string.gmatch, string.gsub -local concat = table.concat -local bit = require("bit") -local band, bor, tohex = bit.band, bit.bor, bit.tohex -local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift - ------------------------------------------------------------------------------- --- Primary and extended opcode maps ------------------------------------------------------------------------------- - -local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", } -local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", } -local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", } - -local map_special = { - shift = 0, mask = 63, - [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" }, - map_movci, map_srl, "sraDTA", - "sllvDTS", false, map_srlv, "sravDTS", - "jrS", "jalrD1S", "movzDST", "movnDST", - "syscallY", "breakY", false, "sync", - "mfhiD", "mthiS", "mfloD", "mtloS", - false, false, false, false, - "multST", "multuST", "divST", "divuST", - false, false, false, false, - "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T", - "andDST", "orDST", "xorDST", "nor|notDST0", - false, false, "sltDST", "sltuDST", - false, false, false, false, - "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ", - "teqSTZ", false, "tneSTZ", -} - -local map_special2 = { - shift = 0, mask = 63, - [0] = "maddST", "madduST", "mulDST", false, - "msubST", "msubuST", - [32] = "clzDS", [33] = "cloDS", - [63] = "sdbbpY", -} - -local map_bshfl = { - shift = 6, mask = 31, - [2] = "wsbhDT", - [16] = "sebDT", - [24] = "sehDT", -} - -local map_special3 = { - shift = 0, mask = 63, - [0] = "extTSAK", [4] = "insTSAL", - [32] = map_bshfl, - [59] = "rdhwrTD", -} - -local map_regimm = { - shift = 16, mask = 31, - [0] = "bltzSB", "bgezSB", "bltzlSB", "bgezlSB", - false, false, false, false, - "tgeiSI", "tgeiuSI", "tltiSI", "tltiuSI", - "teqiSI", false, "tneiSI", false, - "bltzalSB", "bgezalSB", "bltzallSB", "bgezallSB", - false, false, false, false, - false, false, false, false, - false, false, false, "synciSO", -} - -local map_cop0 = { - shift = 25, mask = 1, - [0] = { - shift = 21, mask = 15, - [0] = "mfc0TDW", [4] = "mtc0TDW", - [10] = "rdpgprDT", - [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", }, - [14] = "wrpgprDT", - }, { - shift = 0, mask = 63, - [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp", - [24] = "eret", [31] = "deret", - [32] = "wait", - }, -} - -local map_cop1s = { - shift = 0, mask = 63, - [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH", - "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG", - "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG", - "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG", - false, - { shift = 16, mask = 1, [0] = "movf.sFGC", "movt.sFGC" }, - "movz.sFGT", "movn.sFGT", - false, "recip.sFG", "rsqrt.sFG", false, - false, false, false, false, - false, false, false, false, - false, "cvt.d.sFG", false, false, - "cvt.w.sFG", "cvt.l.sFG", "cvt.ps.sFGH", false, - false, false, false, false, - false, false, false, false, - "c.f.sVGH", "c.un.sVGH", "c.eq.sVGH", "c.ueq.sVGH", - "c.olt.sVGH", "c.ult.sVGH", "c.ole.sVGH", "c.ule.sVGH", - "c.sf.sVGH", "c.ngle.sVGH", "c.seq.sVGH", "c.ngl.sVGH", - "c.lt.sVGH", "c.nge.sVGH", "c.le.sVGH", "c.ngt.sVGH", -} - -local map_cop1d = { - shift = 0, 
mask = 63, - [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH", - "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG", - "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG", - "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG", - false, - { shift = 16, mask = 1, [0] = "movf.dFGC", "movt.dFGC" }, - "movz.dFGT", "movn.dFGT", - false, "recip.dFG", "rsqrt.dFG", false, - false, false, false, false, - false, false, false, false, - "cvt.s.dFG", false, false, false, - "cvt.w.dFG", "cvt.l.dFG", false, false, - false, false, false, false, - false, false, false, false, - "c.f.dVGH", "c.un.dVGH", "c.eq.dVGH", "c.ueq.dVGH", - "c.olt.dVGH", "c.ult.dVGH", "c.ole.dVGH", "c.ule.dVGH", - "c.df.dVGH", "c.ngle.dVGH", "c.deq.dVGH", "c.ngl.dVGH", - "c.lt.dVGH", "c.nge.dVGH", "c.le.dVGH", "c.ngt.dVGH", -} - -local map_cop1ps = { - shift = 0, mask = 63, - [0] = "add.psFGH", "sub.psFGH", "mul.psFGH", false, - false, "abs.psFG", "mov.psFG", "neg.psFG", - false, false, false, false, - false, false, false, false, - false, - { shift = 16, mask = 1, [0] = "movf.psFGC", "movt.psFGC" }, - "movz.psFGT", "movn.psFGT", - false, false, false, false, - false, false, false, false, - false, false, false, false, - "cvt.s.puFG", false, false, false, - false, false, false, false, - "cvt.s.plFG", false, false, false, - "pll.psFGH", "plu.psFGH", "pul.psFGH", "puu.psFGH", - "c.f.psVGH", "c.un.psVGH", "c.eq.psVGH", "c.ueq.psVGH", - "c.olt.psVGH", "c.ult.psVGH", "c.ole.psVGH", "c.ule.psVGH", - "c.psf.psVGH", "c.ngle.psVGH", "c.pseq.psVGH", "c.ngl.psVGH", - "c.lt.psVGH", "c.nge.psVGH", "c.le.psVGH", "c.ngt.psVGH", -} - -local map_cop1w = { - shift = 0, mask = 63, - [32] = "cvt.s.wFG", [33] = "cvt.d.wFG", -} - -local map_cop1l = { - shift = 0, mask = 63, - [32] = "cvt.s.lFG", [33] = "cvt.d.lFG", -} - -local map_cop1bc = { - shift = 16, mask = 3, - [0] = "bc1fCB", "bc1tCB", "bc1flCB", "bc1tlCB", -} - -local map_cop1 = { - shift = 21, mask = 31, - [0] = "mfc1TG", false, "cfc1TG", "mfhc1TG", - "mtc1TG", false, "ctc1TG", "mthc1TG", - map_cop1bc, false, false, false, - false, false, false, false, - map_cop1s, map_cop1d, false, false, - map_cop1w, map_cop1l, map_cop1ps, -} - -local map_cop1x = { - shift = 0, mask = 63, - [0] = "lwxc1FSX", "ldxc1FSX", false, false, - false, "luxc1FSX", false, false, - "swxc1FSX", "sdxc1FSX", false, false, - false, "suxc1FSX", false, "prefxMSX", - false, false, false, false, - false, false, false, false, - false, false, false, false, - false, false, "alnv.psFGHS", false, - "madd.sFRGH", "madd.dFRGH", false, false, - false, false, "madd.psFRGH", false, - "msub.sFRGH", "msub.dFRGH", false, false, - false, false, "msub.psFRGH", false, - "nmadd.sFRGH", "nmadd.dFRGH", false, false, - false, false, "nmadd.psFRGH", false, - "nmsub.sFRGH", "nmsub.dFRGH", false, false, - false, false, "nmsub.psFRGH", false, -} - -local map_pri = { - [0] = map_special, map_regimm, "jJ", "jalJ", - "beq|beqz|bST00B", "bne|bnezST0B", "blezSB", "bgtzSB", - "addiTSI", "addiu|liTS0I", "sltiTSI", "sltiuTSI", - "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU", - map_cop0, map_cop1, false, map_cop1x, - "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB", - false, false, false, false, - map_special2, false, false, map_special3, - "lbTSO", "lhTSO", "lwlTSO", "lwTSO", - "lbuTSO", "lhuTSO", "lwrTSO", false, - "sbTSO", "shTSO", "swlTSO", "swTSO", - false, false, "swrTSO", "cacheNSO", - "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO", - false, "ldc1HSO", "ldc2TSO", false, - "scTSO", "swc1HSO", "swc2TSO", false, - false, "sdc1HSO", "sdc2TSO", false, -} 
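For reference, a minimal sketch (not part of the deleted file; walk_maps and the sample word are illustrative) of the shift/mask walk that disass_ins below performs: the top six bits of an instruction word index map_pri, and nested tables are followed via their shift and mask fields until a pattern string, or false for an undefined encoding, is reached.

local bit = require("bit")
local band, rshift = bit.band, bit.rshift

local function walk_maps(map_pri, op)
  local opat = map_pri[rshift(op, 26)]
  while type(opat) == "table" do
    -- Extract the sub-field this table switches on and follow it, with _ as the fallback entry.
    opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
  end
  return opat  -- e.g. 0x00851021 (addu r2,r4,r5) should end at "addu|moveDST0"
end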
- ------------------------------------------------------------------------------- - -local map_gpr = { - [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", - "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", - "r24", "r25", "r26", "r27", "r28", "sp", "r30", "ra", -} - ------------------------------------------------------------------------------- - --- Output a nicely formatted line with an opcode and operands. -local function putop(ctx, text, operands) - local pos = ctx.pos - local extra = "" - if ctx.rel then - local sym = ctx.symtab[ctx.rel] - if sym then extra = "\t->"..sym end - end - if ctx.hexdump > 0 then - ctx.out(format("%08x %s %-7s %s%s\n", - ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) - else - ctx.out(format("%08x %-7s %s%s\n", - ctx.addr+pos, text, concat(operands, ", "), extra)) - end - ctx.pos = pos + 4 -end - --- Fallback for unknown opcodes. -local function unknown(ctx) - return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) -end - -local function get_be(ctx) - local pos = ctx.pos - local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) - return bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3) -end - -local function get_le(ctx) - local pos = ctx.pos - local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) - return bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0) -end - --- Disassemble a single instruction. -local function disass_ins(ctx) - local op = ctx:get() - local operands = {} - local last = nil - ctx.op = op - ctx.rel = nil - - local opat = map_pri[rshift(op, 26)] - while type(opat) ~= "string" do - if not opat then return unknown(ctx) end - opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._ - end - local name, pat = match(opat, "^([a-z0-9_.]*)(.*)") - local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)") - if altname then pat = pat2 end - - for p in gmatch(pat, ".") do - local x = nil - if p == "S" then - x = map_gpr[band(rshift(op, 21), 31)] - elseif p == "T" then - x = map_gpr[band(rshift(op, 16), 31)] - elseif p == "D" then - x = map_gpr[band(rshift(op, 11), 31)] - elseif p == "F" then - x = "f"..band(rshift(op, 6), 31) - elseif p == "G" then - x = "f"..band(rshift(op, 11), 31) - elseif p == "H" then - x = "f"..band(rshift(op, 16), 31) - elseif p == "R" then - x = "f"..band(rshift(op, 21), 31) - elseif p == "A" then - x = band(rshift(op, 6), 31) - elseif p == "M" then - x = band(rshift(op, 11), 31) - elseif p == "N" then - x = band(rshift(op, 16), 31) - elseif p == "C" then - x = band(rshift(op, 18), 7) - if x == 0 then x = nil end - elseif p == "K" then - x = band(rshift(op, 11), 31) + 1 - elseif p == "L" then - x = band(rshift(op, 11), 31) - last + 1 - elseif p == "I" then - x = arshift(lshift(op, 16), 16) - elseif p == "U" then - x = band(op, 0xffff) - elseif p == "O" then - local disp = arshift(lshift(op, 16), 16) - operands[#operands] = format("%d(%s)", disp, last) - elseif p == "X" then - local index = map_gpr[band(rshift(op, 16), 31)] - operands[#operands] = format("%s(%s)", index, last) - elseif p == "B" then - x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 16)*4 + 4 - ctx.rel = x - x = "0x"..tohex(x) - elseif p == "J" then - x = band(ctx.addr + ctx.pos, 0xf0000000) + band(op, 0x03ffffff)*4 - ctx.rel = x - x = "0x"..tohex(x) - elseif p == "V" then - x = band(rshift(op, 8), 7) - if x == 0 then x = nil end - elseif p == "W" then - x = band(op, 7) - if x == 0 then x = nil end - elseif p == "Y" then - x = band(rshift(op, 6), 0x000fffff) - if x == 0 
then x = nil end - elseif p == "Z" then - x = band(rshift(op, 6), 1023) - if x == 0 then x = nil end - elseif p == "0" then - if last == "r0" or last == 0 then - local n = #operands - operands[n] = nil - last = operands[n-1] - if altname then - local a1, a2 = match(altname, "([^|]*)|(.*)") - if a1 then name, altname = a1, a2 - else name = altname end - end - end - elseif p == "1" then - if last == "ra" then - operands[#operands] = nil - end - else - assert(false) - end - if x then operands[#operands+1] = x; last = x end - end - - return putop(ctx, name, operands) -end - ------------------------------------------------------------------------------- - --- Disassemble a block of code. -local function disass_block(ctx, ofs, len) - if not ofs then ofs = 0 end - local stop = len and ofs+len or #ctx.code - stop = stop - stop % 4 - ctx.pos = ofs - ofs % 4 - ctx.rel = nil - while ctx.pos < stop do disass_ins(ctx) end -end - --- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). -local function create(code, addr, out) - local ctx = {} - ctx.code = code - ctx.addr = addr or 0 - ctx.out = out or io.write - ctx.symtab = {} - ctx.disass = disass_block - ctx.hexdump = 8 - ctx.get = get_be - return ctx -end - -local function create_el(code, addr, out) - local ctx = create(code, addr, out) - ctx.get = get_le - return ctx -end - --- Simple API: disassemble code (a string) at address and output via out. -local function disass(code, addr, out) - create(code, addr, out):disass() -end - -local function disass_el(code, addr, out) - create_el(code, addr, out):disass() -end - --- Return register name for RID. -local function regname(r) - if r < 32 then return map_gpr[r] end - return "f"..(r-32) -end - --- Public module functions. -return { - create = create, - create_el = create_el, - disass = disass, - disass_el = disass_el, - regname = regname -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mipsel.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mipsel.lua deleted file mode 100644 index f06ffe855e3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_mipsel.lua +++ /dev/null @@ -1,17 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT MIPSEL disassembler wrapper module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- This module just exports the little-endian functions from the --- MIPS disassembler module. All the interesting stuff is there. ------------------------------------------------------------------------------- - -local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips") -return { - create = dis_mips.create_el, - disass = dis_mips.disass_el, - regname = dis_mips.regname -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_ppc.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_ppc.lua deleted file mode 100644 index e077d7acdd9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_ppc.lua +++ /dev/null @@ -1,591 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT PPC disassembler module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT/X license. 
See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- This is a helper module used by the LuaJIT machine code dumper module. --- --- It disassembles all common, non-privileged 32/64 bit PowerPC instructions --- plus the e500 SPE instructions and some Cell/Xenon extensions. --- --- NYI: VMX, VMX128 ------------------------------------------------------------------------------- - -local type = type -local sub, byte, format = string.sub, string.byte, string.format -local match, gmatch, gsub = string.match, string.gmatch, string.gsub -local concat = table.concat -local bit = require("bit") -local band, bor, tohex = bit.band, bit.bor, bit.tohex -local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift - ------------------------------------------------------------------------------- --- Primary and extended opcode maps ------------------------------------------------------------------------------- - -local map_crops = { - shift = 1, mask = 1023, - [0] = "mcrfXX", - [33] = "crnor|crnotCCC=", [129] = "crandcCCC", - [193] = "crxor|crclrCCC%", [225] = "crnandCCC", - [257] = "crandCCC", [289] = "creqv|crsetCCC%", - [417] = "crorcCCC", [449] = "cror|crmoveCCC=", - [16] = "b_lrKB", [528] = "b_ctrKB", - [150] = "isync", -} - -local map_rlwinm = setmetatable({ - shift = 0, mask = -1, -}, -{ __index = function(t, x) - local rot = band(rshift(x, 11), 31) - local mb = band(rshift(x, 6), 31) - local me = band(rshift(x, 1), 31) - if mb == 0 and me == 31-rot then - return "slwiRR~A." - elseif me == 31 and mb == 32-rot then - return "srwiRR~-A." - else - return "rlwinmRR~AAA." - end - end -}) - -local map_rld = { - shift = 2, mask = 7, - [0] = "rldiclRR~HM.", "rldicrRR~HM.", "rldicRR~HM.", "rldimiRR~HM.", - { - shift = 1, mask = 1, - [0] = "rldclRR~RM.", "rldcrRR~RM.", - }, -} - -local map_ext = setmetatable({ - shift = 1, mask = 1023, - - [0] = "cmp_YLRR", [32] = "cmpl_YLRR", - [4] = "twARR", [68] = "tdARR", - - [8] = "subfcRRR.", [40] = "subfRRR.", - [104] = "negRR.", [136] = "subfeRRR.", - [200] = "subfzeRR.", [232] = "subfmeRR.", - [520] = "subfcoRRR.", [552] = "subfoRRR.", - [616] = "negoRR.", [648] = "subfeoRRR.", - [712] = "subfzeoRR.", [744] = "subfmeoRR.", - - [9] = "mulhduRRR.", [73] = "mulhdRRR.", [233] = "mulldRRR.", - [457] = "divduRRR.", [489] = "divdRRR.", - [745] = "mulldoRRR.", - [969] = "divduoRRR.", [1001] = "divdoRRR.", - - [10] = "addcRRR.", [138] = "addeRRR.", - [202] = "addzeRR.", [234] = "addmeRR.", [266] = "addRRR.", - [522] = "addcoRRR.", [650] = "addeoRRR.", - [714] = "addzeoRR.", [746] = "addmeoRR.", [778] = "addoRRR.", - - [11] = "mulhwuRRR.", [75] = "mulhwRRR.", [235] = "mullwRRR.", - [459] = "divwuRRR.", [491] = "divwRRR.", - [747] = "mullwoRRR.", - [971] = "divwouRRR.", [1003] = "divwoRRR.", - - [15] = "iselltRRR", [47] = "iselgtRRR", [79] = "iseleqRRR", - - [144] = { shift = 20, mask = 1, [0] = "mtcrfRZ~", "mtocrfRZ~", }, - [19] = { shift = 20, mask = 1, [0] = "mfcrR", "mfocrfRZ", }, - [371] = { shift = 11, mask = 1023, [392] = "mftbR", [424] = "mftbuR", }, - [339] = { - shift = 11, mask = 1023, - [32] = "mferR", [256] = "mflrR", [288] = "mfctrR", [16] = "mfspefscrR", - }, - [467] = { - shift = 11, mask = 1023, - [32] = "mtxerR", [256] = "mtlrR", [288] = "mtctrR", [16] = "mtspefscrR", - }, - - [20] = "lwarxRR0R", [84] = "ldarxRR0R", - - [21] = "ldxRR0R", [53] = "lduxRRR", - [149] = "stdxRR0R", [181] = "stduxRRR", - [341] = "lwaxRR0R", [373] = "lwauxRRR", - - [23] = "lwzxRR0R", [55] = "lwzuxRRR", - [87] = 
"lbzxRR0R", [119] = "lbzuxRRR", - [151] = "stwxRR0R", [183] = "stwuxRRR", - [215] = "stbxRR0R", [247] = "stbuxRRR", - [279] = "lhzxRR0R", [311] = "lhzuxRRR", - [343] = "lhaxRR0R", [375] = "lhauxRRR", - [407] = "sthxRR0R", [439] = "sthuxRRR", - - [54] = "dcbst-R0R", [86] = "dcbf-R0R", - [150] = "stwcxRR0R.", [214] = "stdcxRR0R.", - [246] = "dcbtst-R0R", [278] = "dcbt-R0R", - [310] = "eciwxRR0R", [438] = "ecowxRR0R", - [470] = "dcbi-RR", - - [598] = { - shift = 21, mask = 3, - [0] = "sync", "lwsync", "ptesync", - }, - [758] = "dcba-RR", - [854] = "eieio", [982] = "icbi-R0R", [1014] = "dcbz-R0R", - - [26] = "cntlzwRR~", [58] = "cntlzdRR~", - [122] = "popcntbRR~", - [154] = "prtywRR~", [186] = "prtydRR~", - - [28] = "andRR~R.", [60] = "andcRR~R.", [124] = "nor|notRR~R=.", - [284] = "eqvRR~R.", [316] = "xorRR~R.", - [412] = "orcRR~R.", [444] = "or|mrRR~R=.", [476] = "nandRR~R.", - [508] = "cmpbRR~R", - - [512] = "mcrxrX", - - [532] = "ldbrxRR0R", [660] = "stdbrxRR0R", - - [533] = "lswxRR0R", [597] = "lswiRR0A", - [661] = "stswxRR0R", [725] = "stswiRR0A", - - [534] = "lwbrxRR0R", [662] = "stwbrxRR0R", - [790] = "lhbrxRR0R", [918] = "sthbrxRR0R", - - [535] = "lfsxFR0R", [567] = "lfsuxFRR", - [599] = "lfdxFR0R", [631] = "lfduxFRR", - [663] = "stfsxFR0R", [695] = "stfsuxFRR", - [727] = "stfdxFR0R", [759] = "stfduxFR0R", - [855] = "lfiwaxFR0R", - [983] = "stfiwxFR0R", - - [24] = "slwRR~R.", - - [27] = "sldRR~R.", [536] = "srwRR~R.", - [792] = "srawRR~R.", [824] = "srawiRR~A.", - - [794] = "sradRR~R.", [826] = "sradiRR~H.", [827] = "sradiRR~H.", - [922] = "extshRR~.", [954] = "extsbRR~.", [986] = "extswRR~.", - - [539] = "srdRR~R.", -}, -{ __index = function(t, x) - if band(x, 31) == 15 then return "iselRRRC" end - end -}) - -local map_ld = { - shift = 0, mask = 3, - [0] = "ldRRE", "lduRRE", "lwaRRE", -} - -local map_std = { - shift = 0, mask = 3, - [0] = "stdRRE", "stduRRE", -} - -local map_fps = { - shift = 5, mask = 1, - { - shift = 1, mask = 15, - [0] = false, false, "fdivsFFF.", false, - "fsubsFFF.", "faddsFFF.", "fsqrtsF-F.", false, - "fresF-F.", "fmulsFF-F.", "frsqrtesF-F.", false, - "fmsubsFFFF~.", "fmaddsFFFF~.", "fnmsubsFFFF~.", "fnmaddsFFFF~.", - } -} - -local map_fpd = { - shift = 5, mask = 1, - [0] = { - shift = 1, mask = 1023, - [0] = "fcmpuXFF", [32] = "fcmpoXFF", [64] = "mcrfsXX", - [38] = "mtfsb1A.", [70] = "mtfsb0A.", [134] = "mtfsfiA>>-A>", - [8] = "fcpsgnFFF.", [40] = "fnegF-F.", [72] = "fmrF-F.", - [136] = "fnabsF-F.", [264] = "fabsF-F.", - [12] = "frspF-F.", - [14] = "fctiwF-F.", [15] = "fctiwzF-F.", - [583] = "mffsF.", [711] = "mtfsfZF.", - [392] = "frinF-F.", [424] = "frizF-F.", - [456] = "fripF-F.", [488] = "frimF-F.", - [814] = "fctidF-F.", [815] = "fctidzF-F.", [846] = "fcfidF-F.", - }, - { - shift = 1, mask = 15, - [0] = false, false, "fdivFFF.", false, - "fsubFFF.", "faddFFF.", "fsqrtF-F.", "fselFFFF~.", - "freF-F.", "fmulFF-F.", "frsqrteF-F.", false, - "fmsubFFFF~.", "fmaddFFFF~.", "fnmsubFFFF~.", "fnmaddFFFF~.", - } -} - -local map_spe = { - shift = 0, mask = 2047, - - [512] = "evaddwRRR", [514] = "evaddiwRAR~", - [516] = "evsubwRRR~", [518] = "evsubiwRAR~", - [520] = "evabsRR", [521] = "evnegRR", - [522] = "evextsbRR", [523] = "evextshRR", [524] = "evrndwRR", - [525] = "evcntlzwRR", [526] = "evcntlswRR", - - [527] = "brincRRR", - - [529] = "evandRRR", [530] = "evandcRRR", [534] = "evxorRRR", - [535] = "evor|evmrRRR=", [536] = "evnor|evnotRRR=", - [537] = "eveqvRRR", [539] = "evorcRRR", [542] = "evnandRRR", - - [544] = "evsrwuRRR", [545] = "evsrwsRRR", - [546] = 
"evsrwiuRRA", [547] = "evsrwisRRA", - [548] = "evslwRRR", [550] = "evslwiRRA", - [552] = "evrlwRRR", [553] = "evsplatiRS", - [554] = "evrlwiRRA", [555] = "evsplatfiRS", - [556] = "evmergehiRRR", [557] = "evmergeloRRR", - [558] = "evmergehiloRRR", [559] = "evmergelohiRRR", - - [560] = "evcmpgtuYRR", [561] = "evcmpgtsYRR", - [562] = "evcmpltuYRR", [563] = "evcmpltsYRR", - [564] = "evcmpeqYRR", - - [632] = "evselRRR", [633] = "evselRRRW", - [634] = "evselRRRW", [635] = "evselRRRW", - [636] = "evselRRRW", [637] = "evselRRRW", - [638] = "evselRRRW", [639] = "evselRRRW", - - [640] = "evfsaddRRR", [641] = "evfssubRRR", - [644] = "evfsabsRR", [645] = "evfsnabsRR", [646] = "evfsnegRR", - [648] = "evfsmulRRR", [649] = "evfsdivRRR", - [652] = "evfscmpgtYRR", [653] = "evfscmpltYRR", [654] = "evfscmpeqYRR", - [656] = "evfscfuiR-R", [657] = "evfscfsiR-R", - [658] = "evfscfufR-R", [659] = "evfscfsfR-R", - [660] = "evfsctuiR-R", [661] = "evfsctsiR-R", - [662] = "evfsctufR-R", [663] = "evfsctsfR-R", - [664] = "evfsctuizR-R", [666] = "evfsctsizR-R", - [668] = "evfststgtYRR", [669] = "evfststltYRR", [670] = "evfststeqYRR", - - [704] = "efsaddRRR", [705] = "efssubRRR", - [708] = "efsabsRR", [709] = "efsnabsRR", [710] = "efsnegRR", - [712] = "efsmulRRR", [713] = "efsdivRRR", - [716] = "efscmpgtYRR", [717] = "efscmpltYRR", [718] = "efscmpeqYRR", - [719] = "efscfdR-R", - [720] = "efscfuiR-R", [721] = "efscfsiR-R", - [722] = "efscfufR-R", [723] = "efscfsfR-R", - [724] = "efsctuiR-R", [725] = "efsctsiR-R", - [726] = "efsctufR-R", [727] = "efsctsfR-R", - [728] = "efsctuizR-R", [730] = "efsctsizR-R", - [732] = "efststgtYRR", [733] = "efststltYRR", [734] = "efststeqYRR", - - [736] = "efdaddRRR", [737] = "efdsubRRR", - [738] = "efdcfuidR-R", [739] = "efdcfsidR-R", - [740] = "efdabsRR", [741] = "efdnabsRR", [742] = "efdnegRR", - [744] = "efdmulRRR", [745] = "efddivRRR", - [746] = "efdctuidzR-R", [747] = "efdctsidzR-R", - [748] = "efdcmpgtYRR", [749] = "efdcmpltYRR", [750] = "efdcmpeqYRR", - [751] = "efdcfsR-R", - [752] = "efdcfuiR-R", [753] = "efdcfsiR-R", - [754] = "efdcfufR-R", [755] = "efdcfsfR-R", - [756] = "efdctuiR-R", [757] = "efdctsiR-R", - [758] = "efdctufR-R", [759] = "efdctsfR-R", - [760] = "efdctuizR-R", [762] = "efdctsizR-R", - [764] = "efdtstgtYRR", [765] = "efdtstltYRR", [766] = "efdtsteqYRR", - - [768] = "evlddxRR0R", [769] = "evlddRR8", - [770] = "evldwxRR0R", [771] = "evldwRR8", - [772] = "evldhxRR0R", [773] = "evldhRR8", - [776] = "evlhhesplatxRR0R", [777] = "evlhhesplatRR2", - [780] = "evlhhousplatxRR0R", [781] = "evlhhousplatRR2", - [782] = "evlhhossplatxRR0R", [783] = "evlhhossplatRR2", - [784] = "evlwhexRR0R", [785] = "evlwheRR4", - [788] = "evlwhouxRR0R", [789] = "evlwhouRR4", - [790] = "evlwhosxRR0R", [791] = "evlwhosRR4", - [792] = "evlwwsplatxRR0R", [793] = "evlwwsplatRR4", - [796] = "evlwhsplatxRR0R", [797] = "evlwhsplatRR4", - - [800] = "evstddxRR0R", [801] = "evstddRR8", - [802] = "evstdwxRR0R", [803] = "evstdwRR8", - [804] = "evstdhxRR0R", [805] = "evstdhRR8", - [816] = "evstwhexRR0R", [817] = "evstwheRR4", - [820] = "evstwhoxRR0R", [821] = "evstwhoRR4", - [824] = "evstwwexRR0R", [825] = "evstwweRR4", - [828] = "evstwwoxRR0R", [829] = "evstwwoRR4", - - [1027] = "evmhessfRRR", [1031] = "evmhossfRRR", [1032] = "evmheumiRRR", - [1033] = "evmhesmiRRR", [1035] = "evmhesmfRRR", [1036] = "evmhoumiRRR", - [1037] = "evmhosmiRRR", [1039] = "evmhosmfRRR", [1059] = "evmhessfaRRR", - [1063] = "evmhossfaRRR", [1064] = "evmheumiaRRR", [1065] = "evmhesmiaRRR", - [1067] = "evmhesmfaRRR", [1068] = 
"evmhoumiaRRR", [1069] = "evmhosmiaRRR", - [1071] = "evmhosmfaRRR", [1095] = "evmwhssfRRR", [1096] = "evmwlumiRRR", - [1100] = "evmwhumiRRR", [1101] = "evmwhsmiRRR", [1103] = "evmwhsmfRRR", - [1107] = "evmwssfRRR", [1112] = "evmwumiRRR", [1113] = "evmwsmiRRR", - [1115] = "evmwsmfRRR", [1127] = "evmwhssfaRRR", [1128] = "evmwlumiaRRR", - [1132] = "evmwhumiaRRR", [1133] = "evmwhsmiaRRR", [1135] = "evmwhsmfaRRR", - [1139] = "evmwssfaRRR", [1144] = "evmwumiaRRR", [1145] = "evmwsmiaRRR", - [1147] = "evmwsmfaRRR", - - [1216] = "evaddusiaawRR", [1217] = "evaddssiaawRR", - [1218] = "evsubfusiaawRR", [1219] = "evsubfssiaawRR", - [1220] = "evmraRR", - [1222] = "evdivwsRRR", [1223] = "evdivwuRRR", - [1224] = "evaddumiaawRR", [1225] = "evaddsmiaawRR", - [1226] = "evsubfumiaawRR", [1227] = "evsubfsmiaawRR", - - [1280] = "evmheusiaawRRR", [1281] = "evmhessiaawRRR", - [1283] = "evmhessfaawRRR", [1284] = "evmhousiaawRRR", - [1285] = "evmhossiaawRRR", [1287] = "evmhossfaawRRR", - [1288] = "evmheumiaawRRR", [1289] = "evmhesmiaawRRR", - [1291] = "evmhesmfaawRRR", [1292] = "evmhoumiaawRRR", - [1293] = "evmhosmiaawRRR", [1295] = "evmhosmfaawRRR", - [1320] = "evmhegumiaaRRR", [1321] = "evmhegsmiaaRRR", - [1323] = "evmhegsmfaaRRR", [1324] = "evmhogumiaaRRR", - [1325] = "evmhogsmiaaRRR", [1327] = "evmhogsmfaaRRR", - [1344] = "evmwlusiaawRRR", [1345] = "evmwlssiaawRRR", - [1352] = "evmwlumiaawRRR", [1353] = "evmwlsmiaawRRR", - [1363] = "evmwssfaaRRR", [1368] = "evmwumiaaRRR", - [1369] = "evmwsmiaaRRR", [1371] = "evmwsmfaaRRR", - [1408] = "evmheusianwRRR", [1409] = "evmhessianwRRR", - [1411] = "evmhessfanwRRR", [1412] = "evmhousianwRRR", - [1413] = "evmhossianwRRR", [1415] = "evmhossfanwRRR", - [1416] = "evmheumianwRRR", [1417] = "evmhesmianwRRR", - [1419] = "evmhesmfanwRRR", [1420] = "evmhoumianwRRR", - [1421] = "evmhosmianwRRR", [1423] = "evmhosmfanwRRR", - [1448] = "evmhegumianRRR", [1449] = "evmhegsmianRRR", - [1451] = "evmhegsmfanRRR", [1452] = "evmhogumianRRR", - [1453] = "evmhogsmianRRR", [1455] = "evmhogsmfanRRR", - [1472] = "evmwlusianwRRR", [1473] = "evmwlssianwRRR", - [1480] = "evmwlumianwRRR", [1481] = "evmwlsmianwRRR", - [1491] = "evmwssfanRRR", [1496] = "evmwumianRRR", - [1497] = "evmwsmianRRR", [1499] = "evmwsmfanRRR", -} - -local map_pri = { - [0] = false, false, "tdiARI", "twiARI", - map_spe, false, false, "mulliRRI", - "subficRRI", false, "cmpl_iYLRU", "cmp_iYLRI", - "addicRRI", "addic.RRI", "addi|liRR0I", "addis|lisRR0I", - "b_KBJ", "sc", "bKJ", map_crops, - "rlwimiRR~AAA.", map_rlwinm, false, "rlwnmRR~RAA.", - "oriNRR~U", "orisRR~U", "xoriRR~U", "xorisRR~U", - "andi.RR~U", "andis.RR~U", map_rld, map_ext, - "lwzRRD", "lwzuRRD", "lbzRRD", "lbzuRRD", - "stwRRD", "stwuRRD", "stbRRD", "stbuRRD", - "lhzRRD", "lhzuRRD", "lhaRRD", "lhauRRD", - "sthRRD", "sthuRRD", "lmwRRD", "stmwRRD", - "lfsFRD", "lfsuFRD", "lfdFRD", "lfduFRD", - "stfsFRD", "stfsuFRD", "stfdFRD", "stfduFRD", - false, false, map_ld, map_fps, - false, false, map_std, map_fpd, -} - ------------------------------------------------------------------------------- - -local map_gpr = { - [0] = "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", - "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", - "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", -} - -local map_cond = { [0] = "lt", "gt", "eq", "so", "ge", "le", "ne", "ns", } - --- Format a condition bit. 
-local function condfmt(cond) - if cond <= 3 then - return map_cond[band(cond, 3)] - else - return format("4*cr%d+%s", rshift(cond, 2), map_cond[band(cond, 3)]) - end -end - ------------------------------------------------------------------------------- - --- Output a nicely formatted line with an opcode and operands. -local function putop(ctx, text, operands) - local pos = ctx.pos - local extra = "" - if ctx.rel then - local sym = ctx.symtab[ctx.rel] - if sym then extra = "\t->"..sym end - end - if ctx.hexdump > 0 then - ctx.out(format("%08x %s %-7s %s%s\n", - ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) - else - ctx.out(format("%08x %-7s %s%s\n", - ctx.addr+pos, text, concat(operands, ", "), extra)) - end - ctx.pos = pos + 4 -end - --- Fallback for unknown opcodes. -local function unknown(ctx) - return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) -end - --- Disassemble a single instruction. -local function disass_ins(ctx) - local pos = ctx.pos - local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) - local op = bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3) - local operands = {} - local last = nil - local rs = 21 - ctx.op = op - ctx.rel = nil - - local opat = map_pri[rshift(b0, 2)] - while type(opat) ~= "string" do - if not opat then return unknown(ctx) end - opat = opat[band(rshift(op, opat.shift), opat.mask)] - end - local name, pat = match(opat, "^([a-z0-9_.]*)(.*)") - local altname, pat2 = match(pat, "|([a-z0-9_.]*)(.*)") - if altname then pat = pat2 end - - for p in gmatch(pat, ".") do - local x = nil - if p == "R" then - x = map_gpr[band(rshift(op, rs), 31)] - rs = rs - 5 - elseif p == "F" then - x = "f"..band(rshift(op, rs), 31) - rs = rs - 5 - elseif p == "A" then - x = band(rshift(op, rs), 31) - rs = rs - 5 - elseif p == "S" then - x = arshift(lshift(op, 27-rs), 27) - rs = rs - 5 - elseif p == "I" then - x = arshift(lshift(op, 16), 16) - elseif p == "U" then - x = band(op, 0xffff) - elseif p == "D" or p == "E" then - local disp = arshift(lshift(op, 16), 16) - if p == "E" then disp = band(disp, -4) end - if last == "r0" then last = "0" end - operands[#operands] = format("%d(%s)", disp, last) - elseif p >= "2" and p <= "8" then - local disp = band(rshift(op, rs), 31) * p - if last == "r0" then last = "0" end - operands[#operands] = format("%d(%s)", disp, last) - elseif p == "H" then - x = band(rshift(op, rs), 31) + lshift(band(op, 2), 4) - rs = rs - 5 - elseif p == "M" then - x = band(rshift(op, rs), 31) + band(op, 0x20) - elseif p == "C" then - x = condfmt(band(rshift(op, rs), 31)) - rs = rs - 5 - elseif p == "B" then - local bo = rshift(op, 21) - local cond = band(rshift(op, 16), 31) - local cn = "" - rs = rs - 10 - if band(bo, 4) == 0 then - cn = band(bo, 2) == 0 and "dnz" or "dz" - if band(bo, 0x10) == 0 then - cn = cn..(band(bo, 8) == 0 and "f" or "t") - end - if band(bo, 0x10) == 0 then x = condfmt(cond) end - name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+") - elseif band(bo, 0x10) == 0 then - cn = map_cond[band(cond, 3) + (band(bo, 8) == 0 and 4 or 0)] - if cond > 3 then x = "cr"..rshift(cond, 2) end - name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+") - end - name = gsub(name, "_", cn) - elseif p == "J" then - x = arshift(lshift(op, 27-rs), 29-rs)*4 - if band(op, 2) == 0 then x = ctx.addr + pos + x end - ctx.rel = x - x = "0x"..tohex(x) - elseif p == "K" then - if band(op, 1) ~= 0 then name = name.."l" end - if band(op, 2) ~= 0 then name = name.."a" end - elseif p == "X" or p == "Y" then - x = 
band(rshift(op, rs+2), 7) - if x == 0 and p == "Y" then x = nil else x = "cr"..x end - rs = rs - 5 - elseif p == "W" then - x = "cr"..band(op, 7) - elseif p == "Z" then - x = band(rshift(op, rs-4), 255) - rs = rs - 10 - elseif p == ">" then - operands[#operands] = rshift(operands[#operands], 1) - elseif p == "0" then - if last == "r0" then - operands[#operands] = nil - if altname then name = altname end - end - elseif p == "L" then - name = gsub(name, "_", band(op, 0x00200000) ~= 0 and "d" or "w") - elseif p == "." then - if band(op, 1) == 1 then name = name.."." end - elseif p == "N" then - if op == 0x60000000 then name = "nop"; break end - elseif p == "~" then - local n = #operands - operands[n-1], operands[n] = operands[n], operands[n-1] - elseif p == "=" then - local n = #operands - if last == operands[n-1] then - operands[n] = nil - name = altname - end - elseif p == "%" then - local n = #operands - if last == operands[n-1] and last == operands[n-2] then - operands[n] = nil - operands[n-1] = nil - name = altname - end - elseif p == "-" then - rs = rs - 5 - else - assert(false) - end - if x then operands[#operands+1] = x; last = x end - end - - return putop(ctx, name, operands) -end - ------------------------------------------------------------------------------- - --- Disassemble a block of code. -local function disass_block(ctx, ofs, len) - if not ofs then ofs = 0 end - local stop = len and ofs+len or #ctx.code - stop = stop - stop % 4 - ctx.pos = ofs - ofs % 4 - ctx.rel = nil - while ctx.pos < stop do disass_ins(ctx) end -end - --- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). -local function create(code, addr, out) - local ctx = {} - ctx.code = code - ctx.addr = addr or 0 - ctx.out = out or io.write - ctx.symtab = {} - ctx.disass = disass_block - ctx.hexdump = 8 - return ctx -end - --- Simple API: disassemble code (a string) at address and output via out. -local function disass(code, addr, out) - create(code, addr, out):disass() -end - --- Return register name for RID. -local function regname(r) - if r < 32 then return map_gpr[r] end - return "f"..(r-32) -end - --- Public module functions. -return { - create = create, - disass = disass, - regname = regname -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x64.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x64.lua deleted file mode 100644 index 15d55243a85..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x64.lua +++ /dev/null @@ -1,17 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT x64 disassembler wrapper module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- This module just exports the 64 bit functions from the combined --- x86/x64 disassembler module. All the interesting stuff is there. 
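For reference, the jit.dis_* modules removed by this patch all expose the same small surface: disass(code, addr, out) for one-shot use, create(code, addr, out) returning a context whose hexdump, symtab and disass fields can be tweaked, and regname(rid) for register IDs. A minimal usage sketch in Lua, assuming a stock LuaJIT 2.1 runtime where jit.dis_ppc is still installed (the instruction word below is a hand-encoded example):

    local dis = require("jit.dis_ppc")
    -- 0x7C853214 is primary opcode 31 with XO 266, i.e. "add r4, r5, r6".
    local code = string.char(0x7C, 0x85, 0x32, 0x14)
    dis.disass(code)                   -- simple API: one line per word to stdout

    local ctx = dis.create(code, 0x10000, io.write)
    ctx.hexdump = 0                    -- suppress the opcode-word column
    ctx.symtab[0x10000] = "entry"      -- symbol names are appended to branch targets
    ctx:disass(0, 4)                   -- extended API: byte offset and length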
------------------------------------------------------------------------------- - -local dis_x86 = require((string.match(..., ".*%.") or "").."dis_x86") -return { - create = dis_x86.create64, - disass = dis_x86.disass64, - regname = dis_x86.regname64 -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x86.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x86.lua deleted file mode 100644 index 6bc38066fe3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dis_x86.lua +++ /dev/null @@ -1,838 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT x86/x64 disassembler module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- This is a helper module used by the LuaJIT machine code dumper module. --- --- Sending small code snippets to an external disassembler and mixing the --- output with our own stuff was too fragile. So I had to bite the bullet --- and write yet another x86 disassembler. Oh well ... --- --- The output format is very similar to what ndisasm generates. But it has --- been developed independently by looking at the opcode tables from the --- Intel and AMD manuals. The supported instruction set is quite extensive --- and reflects what a current generation Intel or AMD CPU implements in --- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3, --- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM) --- instructions. --- --- Notes: --- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported. --- * No attempt at optimization has been made -- it's fast enough for my needs. --- * The public API may change when more architectures are added. ------------------------------------------------------------------------------- - -local type = type -local sub, byte, format = string.sub, string.byte, string.format -local match, gmatch, gsub = string.match, string.gmatch, string.gsub -local lower, rep = string.lower, string.rep -local bit = require("bit") -local tohex = bit.tohex - --- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on. 
-local map_opc1_32 = { ---0x -[0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es", -"orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*", ---1x -"adcBmr","adcVmr","adcBrm","adcVrm","adcBai","adcVai","push ss","pop ss", -"sbbBmr","sbbVmr","sbbBrm","sbbVrm","sbbBai","sbbVai","push ds","pop ds", ---2x -"andBmr","andVmr","andBrm","andVrm","andBai","andVai","es:seg","daa", -"subBmr","subVmr","subBrm","subVrm","subBai","subVai","cs:seg","das", ---3x -"xorBmr","xorVmr","xorBrm","xorVrm","xorBai","xorVai","ss:seg","aaa", -"cmpBmr","cmpVmr","cmpBrm","cmpVrm","cmpBai","cmpVai","ds:seg","aas", ---4x -"incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR", -"decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR", ---5x -"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR", -"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR", ---6x -"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr", -"fs:seg","gs:seg","o16:","a16", -"pushUi","imulVrmi","pushBs","imulVrms", -"insb","insVS","outsb","outsVS", ---7x -"joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj", -"jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj", ---8x -"arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms", -"testBmr","testVmr","xchgBrm","xchgVrm", -"movBmr","movVmr","movBrm","movVrm", -"movVmg","leaVrm","movWgm","popUm", ---9x -"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR", -"xchgVaR","xchgVaR","xchgVaR","xchgVaR", -"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait", -"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf", ---Ax -"movBao","movVao","movBoa","movVoa", -"movsb","movsVS","cmpsb","cmpsVS", -"testBai","testVai","stosb","stosVS", -"lodsb","lodsVS","scasb","scasVS", ---Bx -"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi", -"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI", ---Cx -"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi", -"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS", ---Dx -"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb", -"fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7", ---Ex -"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj", -"inBau","inVau","outBua","outVua", -"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda", ---Fx -"lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm", -"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm", -} -assert(#map_opc1_32 == 255) - --- Map for 1st opcode byte in 64 bit mode (overrides only). -local map_opc1_64 = setmetatable({ - [0x06]=false, [0x07]=false, [0x0e]=false, - [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false, - [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false, - [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:", - [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb", - [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb", - [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb", - [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb", - [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false, - [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false, -}, { __index = map_opc1_32 }) - --- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you. 
--- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2 -local map_opc2 = { ---0x -[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret", -"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu", ---1x -"movupsXrm|movssXrm|movupdXrm|movsdXrm", -"movupsXmr|movssXmr|movupdXmr|movsdXmr", -"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm", -"movlpsXmr||movlpdXmr", -"unpcklpsXrm||unpcklpdXrm", -"unpckhpsXrm||unpckhpdXrm", -"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm", -"movhpsXmr||movhpdXmr", -"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm", -"hintnopVm","hintnopVm","hintnopVm","hintnopVm", ---2x -"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil, -"movapsXrm||movapdXrm", -"movapsXmr||movapdXmr", -"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt", -"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr", -"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm", -"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm", -"ucomissXrm||ucomisdXrm", -"comissXrm||comisdXrm", ---3x -"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec", -"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil, ---4x -"cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm", -"cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm", -"cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm", -"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm", ---5x -"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm", -"rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm", -"andpsXrm||andpdXrm","andnpsXrm||andnpdXrm", -"orpsXrm||orpdXrm","xorpsXrm||xorpdXrm", -"addpsXrm|addssXrm|addpdXrm|addsdXrm","mulpsXrm|mulssXrm|mulpdXrm|mulsdXrm", -"cvtps2pdXrm|cvtss2sdXrm|cvtpd2psXrm|cvtsd2ssXrm", -"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm", -"subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm", -"divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm", ---6x -"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm", -"pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm", -"punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm", -"||punpcklqdqXrm","||punpckhqdqXrm", -"movPrVSm","movqMrm|movdquXrm|movdqaXrm", ---7x -"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu", -"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu", -"pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|", -"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$", -nil,nil, -"||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm", -"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr", ---8x -"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj", -"jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj", ---9x -"setoBm","setnoBm","setbBm","setnbBm","setzBm","setnzBm","setbeBm","setaBm", -"setsBm","setnsBm","setpeBm","setpoBm","setlBm","setgeBm","setleBm","setgBm", ---Ax -"push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil, -"push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm", ---Bx -"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr", -"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt", -"|popcntVrm","ud2Dp","bt!Vmu","btcVmr", -"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt", ---Cx -"xaddBmr","xaddVmr", -"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|", -"pinsrwPrWmu","pextrwDrPmu", -"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp", -"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR", ---Dx -"||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm", -"paddqPrm","pmullwPrm", 
-"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm", -"psubusbPrm","psubuswPrm","pminubPrm","pandPrm", -"paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm", ---Ex -"pavgbPrm","psrawPrm","psradPrm","pavgwPrm", -"pmulhuwPrm","pmulhwPrm", -"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr", -"psubsbPrm","psubswPrm","pminswPrm","porPrm", -"paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm", ---Fx -"|||lddquXrm","psllwPrm","pslldPrm","psllqPrm", -"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$", -"psubbPrm","psubwPrm","psubdPrm","psubqPrm", -"paddbPrm","paddwPrm","padddPrm","ud", -} -assert(map_opc2[255] == "ud") - --- Map for three-byte opcodes. Can't wait for their next invention. -local map_opc3 = { -["38"] = { -- [66] 0f 38 xx ---0x -[0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm", -"pmaddubswPrm","phsubwPrm","phsubdPrm","phsubswPrm", -"psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm", -nil,nil,nil,nil, ---1x -"||pblendvbXrma",nil,nil,nil, -"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm", -nil,nil,nil,nil, -"pabsbPrm","pabswPrm","pabsdPrm",nil, ---2x -"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm", -"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil, -"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm", -nil,nil,nil,nil, ---3x -"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm", -"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm", -"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm", -"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm", ---4x -"||pmulddXrm","||phminposuwXrm", ---Fx -[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt", -}, - -["3a"] = { -- [66] 0f 3a xx ---0x -[0x00]=nil,nil,nil,nil,nil,nil,nil,nil, -"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu", -"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu", ---1x -nil,nil,nil,nil, -"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru", -nil,nil,nil,nil,nil,nil,nil,nil, ---2x -"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil, ---4x -[0x40] = "||dppsXrmu", -[0x41] = "||dppdXrmu", -[0x42] = "||mpsadbwXrmu", ---6x -[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu", -[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu", -}, -} - --- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands). -local map_opcvm = { -[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff", -[0xc8]="monitor",[0xc9]="mwait", -[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave", -[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga", -[0xf8]="swapgs",[0xf9]="rdtscp", -} - --- Map for FP opcodes. And you thought stack machines are simple? -local map_opcfp = { --- D8-DF 00-BF: opcodes with a memory operand. --- D8 -[0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm", -"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm", --- DA -"fiaddDm","fimulDm","ficomDm","ficompDm", -"fisubDm","fisubrDm","fidivDm","fidivrDm", --- DB -"fildDm","fisttpDm","fistDm","fistpDm",nil,"fld twordFmp",nil,"fstp twordFmp", --- DC -"faddGm","fmulGm","fcomGm","fcompGm","fsubGm","fsubrGm","fdivGm","fdivrGm", --- DD -"fldGm","fisttpQm","fstGm","fstpGm","frstorDmp",nil,"fnsaveDmp","fnstswWm", --- DE -"fiaddWm","fimulWm","ficomWm","ficompWm", -"fisubWm","fisubrWm","fidivWm","fidivrWm", --- DF -"fildWm","fisttpWm","fistWm","fistpWm", -"fbld twordFmp","fildQm","fbstp twordFmp","fistpQm", --- xx C0-FF: opcodes with a pseudo-register operand. 
--- D8 -"faddFf","fmulFf","fcomFf","fcompFf","fsubFf","fsubrFf","fdivFf","fdivrFf", --- D9 -"fldFf","fxchFf",{"fnop"},nil, -{"fchs","fabs",nil,nil,"ftst","fxam"}, -{"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz"}, -{"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp"}, -{"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"}, --- DA -"fcmovbFf","fcmoveFf","fcmovbeFf","fcmovuFf",nil,{nil,"fucompp"},nil,nil, --- DB -"fcmovnbFf","fcmovneFf","fcmovnbeFf","fcmovnuFf", -{nil,nil,"fnclex","fninit"},"fucomiFf","fcomiFf",nil, --- DC -"fadd toFf","fmul toFf",nil,nil, -"fsub toFf","fsubr toFf","fdivr toFf","fdiv toFf", --- DD -"ffreeFf",nil,"fstFf","fstpFf","fucomFf","fucompFf",nil,nil, --- DE -"faddpFf","fmulpFf",nil,{nil,"fcompp"}, -"fsubrpFf","fsubpFf","fdivrpFf","fdivpFf", --- DF -nil,nil,nil,nil,{"fnstsw ax"},"fucomipFf","fcomipFf",nil, -} -assert(map_opcfp[126] == "fcomipFf") - --- Map for opcode groups. The subkey is sp from the ModRM byte. -local map_opcgroup = { - arith = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" }, - shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" }, - testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" }, - testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" }, - incb = { "inc", "dec" }, - incd = { "inc", "dec", "callUmp", "$call farDmp", - "jmpUmp", "$jmp farDmp", "pushUm" }, - sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" }, - sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt", - "smsw", nil, "lmsw", "vm*$invlpg" }, - bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" }, - cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil, - nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" }, - pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" }, - pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" }, - pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" }, - pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" }, - fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr", - nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" }, - prefetch = { "prefetch", "prefetchw" }, - prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" }, -} - ------------------------------------------------------------------------------- - --- Maps for register names. -local map_regs = { - B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", - "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, - B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", - "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, - W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", - "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" }, - D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", - "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" }, - Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" }, - M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", - "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext! - X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", - "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" }, -} -local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" } - --- Maps for size names. 
-local map_sz2n = { - B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16, -} -local map_sz2prefix = { - B = "byte", W = "word", D = "dword", - Q = "qword", - M = "qword", X = "xword", - F = "dword", G = "qword", -- No need for sizes/register names for these two. -} - ------------------------------------------------------------------------------- - --- Output a nicely formatted line with an opcode and operands. -local function putop(ctx, text, operands) - local code, pos, hex = ctx.code, ctx.pos, "" - local hmax = ctx.hexdump - if hmax > 0 then - for i=ctx.start,pos-1 do - hex = hex..format("%02X", byte(code, i, i)) - end - if #hex > hmax then hex = sub(hex, 1, hmax)..". " - else hex = hex..rep(" ", hmax-#hex+2) end - end - if operands then text = text.." "..operands end - if ctx.o16 then text = "o16 "..text; ctx.o16 = false end - if ctx.a32 then text = "a32 "..text; ctx.a32 = false end - if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end - if ctx.rex then - local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "").. - (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "") - if t ~= "" then text = "rex."..t.." "..text end - ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false - ctx.rex = false - end - if ctx.seg then - local text2, n = gsub(text, "%[", "["..ctx.seg..":") - if n == 0 then text = ctx.seg.." "..text else text = text2 end - ctx.seg = false - end - if ctx.lock then text = "lock "..text; ctx.lock = false end - local imm = ctx.imm - if imm then - local sym = ctx.symtab[imm] - if sym then text = text.."\t->"..sym end - end - ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text)) - ctx.mrm = false - ctx.start = pos - ctx.imm = nil -end - --- Clear all prefix flags. -local function clearprefixes(ctx) - ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false - ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false - ctx.rex = false; ctx.a32 = false -end - --- Fallback for incomplete opcodes at the end. -local function incomplete(ctx) - ctx.pos = ctx.stop+1 - clearprefixes(ctx) - return putop(ctx, "(incomplete)") -end - --- Fallback for unknown opcodes. -local function unknown(ctx) - clearprefixes(ctx) - return putop(ctx, "(unknown)") -end - --- Return an immediate of the specified size. -local function getimm(ctx, pos, n) - if pos+n-1 > ctx.stop then return incomplete(ctx) end - local code = ctx.code - if n == 1 then - local b1 = byte(code, pos, pos) - return b1 - elseif n == 2 then - local b1, b2 = byte(code, pos, pos+1) - return b1+b2*256 - else - local b1, b2, b3, b4 = byte(code, pos, pos+3) - local imm = b1+b2*256+b3*65536+b4*16777216 - ctx.imm = imm - return imm - end -end - --- Process pattern string and generate the operands. 
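Each map entry above packs an opcode name and a pattern string: in "movVmr", for instance, "mov" is the name, V selects the operand size, and "m"/"r" stand for the ModRM memory-or-register operand and the register field. putpat, defined next, walks those pattern characters one by one. A standalone sketch of the ModRM split it performs, illustrative only and not part of the module:

    -- Split a ModRM byte into mode, reg and r/m fields, mirroring the
    -- arithmetic putpat uses below.
    local function split_modrm(mrm)
      local rm   = mrm % 8
      local reg  = math.floor(mrm / 8) % 8
      local mode = math.floor(mrm / 64)
      return mode, reg, rm
    end
    print(split_modrm(0xC8))   --> 3  1  0  (register-direct; reg field 1, r/m field 0)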
-local function putpat(ctx, name, pat) - local operands, regs, sz, mode, sp, rm, sc, rx, sdisp - local code, pos, stop = ctx.code, ctx.pos, ctx.stop - - -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz - for p in gmatch(pat, ".") do - local x = nil - if p == "V" or p == "U" then - if ctx.rexw then sz = "Q"; ctx.rexw = false - elseif ctx.o16 then sz = "W"; ctx.o16 = false - elseif p == "U" and ctx.x64 then sz = "Q" - else sz = "D" end - regs = map_regs[sz] - elseif p == "T" then - if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end - regs = map_regs[sz] - elseif p == "B" then - sz = "B" - regs = ctx.rex and map_regs.B64 or map_regs.B - elseif match(p, "[WDQMXFG]") then - sz = p - regs = map_regs[sz] - elseif p == "P" then - sz = ctx.o16 and "X" or "M"; ctx.o16 = false - regs = map_regs[sz] - elseif p == "S" then - name = name..lower(sz) - elseif p == "s" then - local imm = getimm(ctx, pos, 1); if not imm then return end - x = imm <= 127 and format("+0x%02x", imm) - or format("-0x%02x", 256-imm) - pos = pos+1 - elseif p == "u" then - local imm = getimm(ctx, pos, 1); if not imm then return end - x = format("0x%02x", imm) - pos = pos+1 - elseif p == "w" then - local imm = getimm(ctx, pos, 2); if not imm then return end - x = format("0x%x", imm) - pos = pos+2 - elseif p == "o" then -- [offset] - if ctx.x64 then - local imm1 = getimm(ctx, pos, 4); if not imm1 then return end - local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end - x = format("[0x%08x%08x]", imm2, imm1) - pos = pos+8 - else - local imm = getimm(ctx, pos, 4); if not imm then return end - x = format("[0x%08x]", imm) - pos = pos+4 - end - elseif p == "i" or p == "I" then - local n = map_sz2n[sz] - if n == 8 and ctx.x64 and p == "I" then - local imm1 = getimm(ctx, pos, 4); if not imm1 then return end - local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end - x = format("0x%08x%08x", imm2, imm1) - else - if n == 8 then n = 4 end - local imm = getimm(ctx, pos, n); if not imm then return end - if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then - imm = (0xffffffff+1)-imm - x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm) - else - x = format(imm > 65535 and "0x%08x" or "0x%x", imm) - end - end - pos = pos+n - elseif p == "j" then - local n = map_sz2n[sz] - if n == 8 then n = 4 end - local imm = getimm(ctx, pos, n); if not imm then return end - if sz == "B" and imm > 127 then imm = imm-256 - elseif imm > 2147483647 then imm = imm-4294967296 end - pos = pos+n - imm = imm + pos + ctx.addr - if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end - ctx.imm = imm - if sz == "W" then - x = format("word 0x%04x", imm%65536) - elseif ctx.x64 then - local lo = imm % 0x1000000 - x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo) - else - x = "0x"..tohex(imm) - end - elseif p == "R" then - local r = byte(code, pos-1, pos-1)%8 - if ctx.rexb then r = r + 8; ctx.rexb = false end - x = regs[r+1] - elseif p == "a" then x = regs[1] - elseif p == "c" then x = "cl" - elseif p == "d" then x = "dx" - elseif p == "1" then x = "1" - else - if not mode then - mode = ctx.mrm - if not mode then - if pos > stop then return incomplete(ctx) end - mode = byte(code, pos, pos) - pos = pos+1 - end - rm = mode%8; mode = (mode-rm)/8 - sp = mode%8; mode = (mode-sp)/8 - sdisp = "" - if mode < 3 then - if rm == 4 then - if pos > stop then return incomplete(ctx) end - sc = byte(code, pos, pos) - pos = pos+1 - rm = sc%8; sc = (sc-rm)/8 - rx = sc%8; sc = (sc-rx)/8 - if ctx.rexx then rx = rx + 8; ctx.rexx = false end - if rx == 4 then 
rx = nil end - end - if mode > 0 or rm == 5 then - local dsz = mode - if dsz ~= 1 then dsz = 4 end - local disp = getimm(ctx, pos, dsz); if not disp then return end - if mode == 0 then rm = nil end - if rm or rx or (not sc and ctx.x64 and not ctx.a32) then - if dsz == 1 and disp > 127 then - sdisp = format("-0x%x", 256-disp) - elseif disp >= 0 and disp <= 0x7fffffff then - sdisp = format("+0x%x", disp) - else - sdisp = format("-0x%x", (0xffffffff+1)-disp) - end - else - sdisp = format(ctx.x64 and not ctx.a32 and - not (disp >= 0 and disp <= 0x7fffffff) - and "0xffffffff%08x" or "0x%08x", disp) - end - pos = pos+dsz - end - end - if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end - if ctx.rexr then sp = sp + 8; ctx.rexr = false end - end - if p == "m" then - if mode == 3 then x = regs[rm+1] - else - local aregs = ctx.a32 and map_regs.D or ctx.aregs - local srm, srx = "", "" - if rm then srm = aregs[rm+1] - elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end - ctx.a32 = false - if rx then - if rm then srm = srm.."+" end - srx = aregs[rx+1] - if sc > 0 then srx = srx.."*"..(2^sc) end - end - x = format("[%s%s%s]", srm, srx, sdisp) - end - if mode < 3 and - (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck. - x = map_sz2prefix[sz].." "..x - end - elseif p == "r" then x = regs[sp+1] - elseif p == "g" then x = map_segregs[sp+1] - elseif p == "p" then -- Suppress prefix. - elseif p == "f" then x = "st"..rm - elseif p == "x" then - if sp == 0 and ctx.lock and not ctx.x64 then - x = "CR8"; ctx.lock = false - else - x = "CR"..sp - end - elseif p == "y" then x = "DR"..sp - elseif p == "z" then x = "TR"..sp - elseif p == "t" then - else - error("bad pattern `"..pat.."'") - end - end - if x then operands = operands and operands..", "..x or x end - end - ctx.pos = pos - return putop(ctx, name, operands) -end - --- Forward declaration. -local map_act - --- Fetch and cache MRM byte. -local function getmrm(ctx) - local mrm = ctx.mrm - if not mrm then - local pos = ctx.pos - if pos > ctx.stop then return nil end - mrm = byte(ctx.code, pos, pos) - ctx.pos = pos+1 - ctx.mrm = mrm - end - return mrm -end - --- Dispatch to handler depending on pattern. -local function dispatch(ctx, opat, patgrp) - if not opat then return unknown(ctx) end - if match(opat, "%|") then -- MMX/SSE variants depending on prefix. - local p - if ctx.rep then - p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)" - ctx.rep = false - elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false - else p = "^[^%|]*" end - opat = match(opat, p) - if not opat then return unknown(ctx) end --- ctx.rep = false; ctx.o16 = false - --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi] - --XXX remove in branches? - end - if match(opat, "%$") then -- reg$mem variants. - local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end - opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)") - if opat == "" then return unknown(ctx) end - end - if opat == "" then return unknown(ctx) end - local name, pat = match(opat, "^([a-z0-9 ]*)(.*)") - if pat == "" and patgrp then pat = patgrp end - return map_act[sub(pat, 1, 1)](ctx, name, pat) -end - --- Get a pattern from an opcode map and dispatch to handler. -local function dispatchmap(ctx, opcmap) - local pos = ctx.pos - local opat = opcmap[byte(ctx.code, pos, pos)] - pos = pos + 1 - ctx.pos = pos - return dispatch(ctx, opat) -end - --- Map for action codes. The key is the first char after the name. -map_act = { - -- Simple opcodes without operands. 
- [""] = function(ctx, name, pat) - return putop(ctx, name) - end, - - -- Operand size chars fall right through. - B = putpat, W = putpat, D = putpat, Q = putpat, - V = putpat, U = putpat, T = putpat, - M = putpat, X = putpat, P = putpat, - F = putpat, G = putpat, - - -- Collect prefixes. - [":"] = function(ctx, name, pat) - ctx[pat == ":" and name or sub(pat, 2)] = name - if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes. - end, - - -- Chain to special handler specified by name. - ["*"] = function(ctx, name, pat) - return map_act[name](ctx, name, sub(pat, 2)) - end, - - -- Use named subtable for opcode group. - ["!"] = function(ctx, name, pat) - local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end - return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2)) - end, - - -- o16,o32[,o64] variants. - sz = function(ctx, name, pat) - if ctx.o16 then ctx.o16 = false - else - pat = match(pat, ",(.*)") - if ctx.rexw then - local p = match(pat, ",(.*)") - if p then pat = p; ctx.rexw = false end - end - end - pat = match(pat, "^[^,]*") - return dispatch(ctx, pat) - end, - - -- Two-byte opcode dispatch. - opc2 = function(ctx, name, pat) - return dispatchmap(ctx, map_opc2) - end, - - -- Three-byte opcode dispatch. - opc3 = function(ctx, name, pat) - return dispatchmap(ctx, map_opc3[pat]) - end, - - -- VMX/SVM dispatch. - vm = function(ctx, name, pat) - return dispatch(ctx, map_opcvm[ctx.mrm]) - end, - - -- Floating point opcode dispatch. - fp = function(ctx, name, pat) - local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end - local rm = mrm%8 - local idx = pat*8 + ((mrm-rm)/8)%8 - if mrm >= 192 then idx = idx + 64 end - local opat = map_opcfp[idx] - if type(opat) == "table" then opat = opat[rm+1] end - return dispatch(ctx, opat) - end, - - -- REX prefix. - rex = function(ctx, name, pat) - if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed. - for p in gmatch(pat, ".") do ctx["rex"..p] = true end - ctx.rex = true - end, - - -- Special case for nop with REX prefix. - nop = function(ctx, name, pat) - return dispatch(ctx, ctx.rex and pat or "nop") - end, -} - ------------------------------------------------------------------------------- - --- Disassemble a block of code. -local function disass_block(ctx, ofs, len) - if not ofs then ofs = 0 end - local stop = len and ofs+len or #ctx.code - ofs = ofs + 1 - ctx.start = ofs - ctx.pos = ofs - ctx.stop = stop - ctx.imm = nil - ctx.mrm = false - clearprefixes(ctx) - while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end - if ctx.pos ~= ctx.start then incomplete(ctx) end -end - --- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). -local function create(code, addr, out) - local ctx = {} - ctx.code = code - ctx.addr = (addr or 0) - 1 - ctx.out = out or io.write - ctx.symtab = {} - ctx.disass = disass_block - ctx.hexdump = 16 - ctx.x64 = false - ctx.map1 = map_opc1_32 - ctx.aregs = map_regs.D - return ctx -end - -local function create64(code, addr, out) - local ctx = create(code, addr, out) - ctx.x64 = true - ctx.map1 = map_opc1_64 - ctx.aregs = map_regs.Q - return ctx -end - --- Simple API: disassemble code (a string) at address and output via out. -local function disass(code, addr, out) - create(code, addr, out):disass() -end - -local function disass64(code, addr, out) - create64(code, addr, out):disass() -end - --- Return register name for RID. 
-local function regname(r) - if r < 8 then return map_regs.D[r+1] end - return map_regs.X[r-7] -end - -local function regname64(r) - if r < 16 then return map_regs.Q[r+1] end - return map_regs.X[r-15] -end - --- Public module functions. -return { - create = create, - create64 = create64, - disass = disass, - disass64 = disass64, - regname = regname, - regname64 = regname64 -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dump.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dump.lua deleted file mode 100644 index 5f858492bc4..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/dump.lua +++ /dev/null @@ -1,706 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT compiler dump module. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- --- This module can be used to debug the JIT compiler itself. It dumps the --- code representations and structures used in various compiler stages. --- --- Example usage: --- --- luajit -jdump -e "local x=0; for i=1,1e6 do x=x+i end; print(x)" --- luajit -jdump=im -e "for i=1,1000 do for j=1,1000 do end end" | less -R --- luajit -jdump=is myapp.lua | less -R --- luajit -jdump=-b myapp.lua --- luajit -jdump=+aH,myapp.html myapp.lua --- luajit -jdump=ixT,myapp.dump myapp.lua --- --- The first argument specifies the dump mode. The second argument gives --- the output file name. Default output is to stdout, unless the environment --- variable LUAJIT_DUMPFILE is set. The file is overwritten every time the --- module is started. --- --- Different features can be turned on or off with the dump mode. If the --- mode starts with a '+', the following features are added to the default --- set of features; a '-' removes them. Otherwise the features are replaced. --- --- The following dump features are available (* marks the default): --- --- * t Print a line for each started, ended or aborted trace (see also -jv). --- * b Dump the traced bytecode. --- * i Dump the IR (intermediate representation). --- r Augment the IR with register/stack slots. --- s Dump the snapshot map. --- * m Dump the generated machine code. --- x Print each taken trace exit. --- X Print each taken trace exit and the contents of all registers. --- a Print the IR of aborted traces, too. --- --- The output format can be set with the following characters: --- --- T Plain text output. --- A ANSI-colored text output --- H Colorized HTML + CSS output. --- --- The default output format is plain text. It's set to ANSI-colored text --- if the COLORTERM variable is set. Note: this is independent of any output --- redirection, which is actually considered a feature. --- --- You probably want to use less -R to enjoy viewing ANSI-colored text from --- a pipe or a file. Add this to your ~/.bashrc: export LESS="-R" --- ------------------------------------------------------------------------------- - --- Cache some library functions and objects. 
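The -jdump command lines documented above have a programmatic equivalent: the on/off functions exported at the end of this file accept the same mode string and an optional output file name. A brief sketch:

    local dump = require("jit.dump")
    dump.on("+rs", "myapp.dump")   -- add register/snapshot details to the default set
    -- ... run the code to be traced ...
    dump.off()                     -- detach the handlers and close the file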
-local jit = require("jit") -assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") -local jutil = require("jit.util") -local vmdef = require("jit.vmdef") -local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc -local traceinfo, traceir, tracek = jutil.traceinfo, jutil.traceir, jutil.tracek -local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap -local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr -local bit = require("bit") -local band, shl, shr, tohex = bit.band, bit.lshift, bit.rshift, bit.tohex -local sub, gsub, format = string.sub, string.gsub, string.format -local byte, char, rep = string.byte, string.char, string.rep -local type, tostring = type, tostring -local stdout, stderr = io.stdout, io.stderr - --- Load other modules on-demand. -local bcline, disass - --- Active flag, output file handle and dump mode. -local active, out, dumpmode - ------------------------------------------------------------------------------- - -local symtabmt = { __index = false } -local symtab = {} -local nexitsym = 0 - --- Fill nested symbol table with per-trace exit stub addresses. -local function fillsymtab_tr(tr, nexit) - local t = {} - symtabmt.__index = t - if jit.arch == "mips" or jit.arch == "mipsel" then - t[traceexitstub(tr, 0)] = "exit" - return - end - for i=0,nexit-1 do - local addr = traceexitstub(tr, i) - if addr < 0 then addr = addr + 2^32 end - t[addr] = tostring(i) - end - local addr = traceexitstub(tr, nexit) - if addr then t[addr] = "stack_check" end -end - --- Fill symbol table with trace exit stub addresses. -local function fillsymtab(tr, nexit) - local t = symtab - if nexitsym == 0 then - local ircall = vmdef.ircall - for i=0,#ircall do - local addr = ircalladdr(i) - if addr ~= 0 then - if addr < 0 then addr = addr + 2^32 end - t[addr] = ircall[i] - end - end - end - if nexitsym == 1000000 then -- Per-trace exit stubs. - fillsymtab_tr(tr, nexit) - elseif nexit > nexitsym then -- Shared exit stubs. - for i=nexitsym,nexit-1 do - local addr = traceexitstub(i) - if addr == nil then -- Fall back to per-trace exit stubs. - fillsymtab_tr(tr, nexit) - setmetatable(symtab, symtabmt) - nexit = 1000000 - break - end - if addr < 0 then addr = addr + 2^32 end - t[addr] = tostring(i) - end - nexitsym = nexit - end - return t -end - -local function dumpwrite(s) - out:write(s) -end - --- Disassemble machine code. 
-local function dump_mcode(tr) - local info = traceinfo(tr) - if not info then return end - local mcode, addr, loop = tracemc(tr) - if not mcode then return end - if not disass then disass = require("jit.dis_"..jit.arch) end - if addr < 0 then addr = addr + 2^32 end - out:write("---- TRACE ", tr, " mcode ", #mcode, "\n") - local ctx = disass.create(mcode, addr, dumpwrite) - ctx.hexdump = 0 - ctx.symtab = fillsymtab(tr, info.nexit) - if loop ~= 0 then - symtab[addr+loop] = "LOOP" - ctx:disass(0, loop) - out:write("->LOOP:\n") - ctx:disass(loop, #mcode-loop) - symtab[addr+loop] = nil - else - ctx:disass(0, #mcode) - end -end - ------------------------------------------------------------------------------- - -local irtype_text = { - [0] = "nil", - "fal", - "tru", - "lud", - "str", - "p32", - "thr", - "pro", - "fun", - "p64", - "cdt", - "tab", - "udt", - "flt", - "num", - "i8 ", - "u8 ", - "i16", - "u16", - "int", - "u32", - "i64", - "u64", - "sfp", -} - -local colortype_ansi = { - [0] = "%s", - "%s", - "%s", - "\027[36m%s\027[m", - "\027[32m%s\027[m", - "%s", - "\027[1m%s\027[m", - "%s", - "\027[1m%s\027[m", - "%s", - "\027[33m%s\027[m", - "\027[31m%s\027[m", - "\027[36m%s\027[m", - "\027[34m%s\027[m", - "\027[34m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", - "\027[35m%s\027[m", -} - -local function colorize_text(s, t) - return s -end - -local function colorize_ansi(s, t) - return format(colortype_ansi[t], s) -end - -local irtype_ansi = setmetatable({}, - { __index = function(tab, t) - local s = colorize_ansi(irtype_text[t], t); tab[t] = s; return s; end }) - -local html_escape = { ["<"] = "<", [">"] = ">", ["&"] = "&", } - -local function colorize_html(s, t) - s = gsub(s, "[<>&]", html_escape) - return format('%s', irtype_text[t], s) -end - -local irtype_html = setmetatable({}, - { __index = function(tab, t) - local s = colorize_html(irtype_text[t], t); tab[t] = s; return s; end }) - -local header_html = [[ - -]] - -local colorize, irtype - --- Lookup tables to convert some literals into names. -local litname = { - ["SLOAD "] = setmetatable({}, { __index = function(t, mode) - local s = "" - if band(mode, 1) ~= 0 then s = s.."P" end - if band(mode, 2) ~= 0 then s = s.."F" end - if band(mode, 4) ~= 0 then s = s.."T" end - if band(mode, 8) ~= 0 then s = s.."C" end - if band(mode, 16) ~= 0 then s = s.."R" end - if band(mode, 32) ~= 0 then s = s.."I" end - t[mode] = s - return s - end}), - ["XLOAD "] = { [0] = "", "R", "V", "RV", "U", "RU", "VU", "RVU", }, - ["CONV "] = setmetatable({}, { __index = function(t, mode) - local s = irtype[band(mode, 31)] - s = irtype[band(shr(mode, 5), 31)].."."..s - if band(mode, 0x800) ~= 0 then s = s.." sext" end - local c = shr(mode, 14) - if c == 2 then s = s.." index" elseif c == 3 then s = s.." 
check" end - t[mode] = s - return s - end}), - ["FLOAD "] = vmdef.irfield, - ["FREF "] = vmdef.irfield, - ["FPMATH"] = vmdef.irfpm, - ["BUFHDR"] = { [0] = "RESET", "APPEND" }, - ["TOSTR "] = { [0] = "INT", "NUM", "CHAR" }, -} - -local function ctlsub(c) - if c == "\n" then return "\\n" - elseif c == "\r" then return "\\r" - elseif c == "\t" then return "\\t" - else return format("\\%03d", byte(c)) - end -end - -local function fmtfunc(func, pc) - local fi = funcinfo(func, pc) - if fi.loc then - return fi.loc - elseif fi.ffid then - return vmdef.ffnames[fi.ffid] - elseif fi.addr then - return format("C:%x", fi.addr) - else - return "(?)" - end -end - -local function formatk(tr, idx) - local k, t, slot = tracek(tr, idx) - local tn = type(k) - local s - if tn == "number" then - if k == 2^52+2^51 then - s = "bias" - else - s = format("%+.14g", k) - end - elseif tn == "string" then - s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub)) - elseif tn == "function" then - s = fmtfunc(k) - elseif tn == "table" then - s = format("{%p}", k) - elseif tn == "userdata" then - if t == 12 then - s = format("userdata:%p", k) - else - s = format("[%p]", k) - if s == "[0x00000000]" then s = "NULL" end - end - elseif t == 21 then -- int64_t - s = sub(tostring(k), 1, -3) - if sub(s, 1, 1) ~= "-" then s = "+"..s end - else - s = tostring(k) -- For primitives. - end - s = colorize(format("%-4s", s), t) - if slot then - s = format("%s @%d", s, slot) - end - return s -end - -local function printsnap(tr, snap) - local n = 2 - for s=0,snap[1]-1 do - local sn = snap[n] - if shr(sn, 24) == s then - n = n + 1 - local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS - if ref < 0 then - out:write(formatk(tr, ref)) - elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM - out:write(colorize(format("%04d/%04d", ref, ref+1), 14)) - else - local m, ot, op1, op2 = traceir(tr, ref) - out:write(colorize(format("%04d", ref), band(ot, 31))) - end - out:write(band(sn, 0x10000) == 0 and " " or "|") -- SNAP_FRAME - else - out:write("---- ") - end - end - out:write("]\n") -end - --- Dump snapshots (not interleaved with IR). -local function dump_snap(tr) - out:write("---- TRACE ", tr, " snapshots\n") - for i=0,1000000000 do - local snap = tracesnap(tr, i) - if not snap then break end - out:write(format("#%-3d %04d [ ", i, snap[0])) - printsnap(tr, snap) - end -end - --- Return a register name or stack slot for a rid/sp location. -local function ridsp_name(ridsp, ins) - if not disass then disass = require("jit.dis_"..jit.arch) end - local rid, slot = band(ridsp, 0xff), shr(ridsp, 8) - if rid == 253 or rid == 254 then - return (slot == 0 or slot == 255) and " {sink" or format(" {%04d", ins-slot) - end - if ridsp > 255 then return format("[%x]", slot*4) end - if rid < 128 then return disass.regname(rid) end - return "" -end - --- Dump CALL* function ref and return optional ctype. -local function dumpcallfunc(tr, ins) - local ctype - if ins > 0 then - local m, ot, op1, op2 = traceir(tr, ins) - if band(ot, 31) == 0 then -- nil type means CARG(func, ctype). - ins = op1 - ctype = formatk(tr, op2) - end - end - if ins < 0 then - out:write(format("[0x%x](", tonumber((tracek(tr, ins))))) - else - out:write(format("%04d (", ins)) - end - return ctype -end - --- Recursively gather CALL* args and dump them. 
-local function dumpcallargs(tr, ins) - if ins < 0 then - out:write(formatk(tr, ins)) - else - local m, ot, op1, op2 = traceir(tr, ins) - local oidx = 6*shr(ot, 8) - local op = sub(vmdef.irnames, oidx+1, oidx+6) - if op == "CARG " then - dumpcallargs(tr, op1) - if op2 < 0 then - out:write(" ", formatk(tr, op2)) - else - out:write(" ", format("%04d", op2)) - end - else - out:write(format("%04d", ins)) - end - end -end - --- Dump IR and interleaved snapshots. -local function dump_ir(tr, dumpsnap, dumpreg) - local info = traceinfo(tr) - if not info then return end - local nins = info.nins - out:write("---- TRACE ", tr, " IR\n") - local irnames = vmdef.irnames - local snapref = 65536 - local snap, snapno - if dumpsnap then - snap = tracesnap(tr, 0) - snapref = snap[0] - snapno = 0 - end - for ins=1,nins do - if ins >= snapref then - if dumpreg then - out:write(format(".... SNAP #%-3d [ ", snapno)) - else - out:write(format(".... SNAP #%-3d [ ", snapno)) - end - printsnap(tr, snap) - snapno = snapno + 1 - snap = tracesnap(tr, snapno) - snapref = snap and snap[0] or 65536 - end - local m, ot, op1, op2, ridsp = traceir(tr, ins) - local oidx, t = 6*shr(ot, 8), band(ot, 31) - local op = sub(irnames, oidx+1, oidx+6) - if op == "LOOP " then - if dumpreg then - out:write(format("%04d ------------ LOOP ------------\n", ins)) - else - out:write(format("%04d ------ LOOP ------------\n", ins)) - end - elseif op ~= "NOP " and op ~= "CARG " and - (dumpreg or op ~= "RENAME") then - local rid = band(ridsp, 255) - if dumpreg then - out:write(format("%04d %-6s", ins, ridsp_name(ridsp, ins))) - else - out:write(format("%04d ", ins)) - end - out:write(format("%s%s %s %s ", - (rid == 254 or rid == 253) and "}" or - (band(ot, 128) == 0 and " " or ">"), - band(ot, 64) == 0 and " " or "+", - irtype[t], op)) - local m1, m2 = band(m, 3), band(m, 3*4) - if sub(op, 1, 4) == "CALL" then - local ctype - if m2 == 1*4 then -- op2 == IRMlit - out:write(format("%-10s (", vmdef.ircall[op2])) - else - ctype = dumpcallfunc(tr, op2) - end - if op1 ~= -1 then dumpcallargs(tr, op1) end - out:write(")") - if ctype then out:write(" ctype ", ctype) end - elseif op == "CNEW " and op2 == -1 then - out:write(formatk(tr, op1)) - elseif m1 ~= 3 then -- op1 != IRMnone - if op1 < 0 then - out:write(formatk(tr, op1)) - else - out:write(format(m1 == 0 and "%04d" or "#%-3d", op1)) - end - if m2 ~= 3*4 then -- op2 != IRMnone - if m2 == 1*4 then -- op2 == IRMlit - local litn = litname[op] - if litn and litn[op2] then - out:write(" ", litn[op2]) - elseif op == "UREFO " or op == "UREFC " then - out:write(format(" #%-3d", shr(op2, 8))) - else - out:write(format(" #%-3d", op2)) - end - elseif op2 < 0 then - out:write(" ", formatk(tr, op2)) - else - out:write(format(" %04d", op2)) - end - end - end - out:write("\n") - end - end - if snap then - if dumpreg then - out:write(format(".... SNAP #%-3d [ ", snapno)) - else - out:write(format(".... SNAP #%-3d [ ", snapno)) - end - printsnap(tr, snap) - end -end - ------------------------------------------------------------------------------- - -local recprefix = "" -local recdepth = 0 - --- Format trace error message. -local function fmterr(err, info) - if type(err) == "number" then - if type(info) == "function" then info = fmtfunc(info) end - err = format(vmdef.traceerr[err], info) - end - return err -end - --- Dump trace states. 
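dump_trace below, together with dump_record and dump_texit, is hooked up through jit.attach in dumpon further down; the same events can drive a custom handler. A minimal sketch, not part of this module:

    local jit = require("jit")
    -- Log one line per trace event; the parameters match dump_trace below.
    local function mytrace(what, tr, func, pc, otr, oex)
      io.stderr:write("TRACE ", tostring(tr or "?"), " ", what, "\n")
    end
    jit.attach(mytrace, "trace")   -- attach to the "trace" event
    -- ...
    jit.attach(mytrace)            -- calling attach without an event name detaches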
-local function dump_trace(what, tr, func, pc, otr, oex)
-  if what == "stop" or (what == "abort" and dumpmode.a) then
-    if dumpmode.i then dump_ir(tr, dumpmode.s, dumpmode.r and what == "stop")
-    elseif dumpmode.s then dump_snap(tr) end
-    if dumpmode.m then dump_mcode(tr) end
-  end
-  if what == "start" then
-    if dumpmode.H then out:write('<pre class="ljdump">\n') end
-    out:write("---- TRACE ", tr, " ", what)
-    if otr then out:write(" ", otr, "/", oex) end
-    out:write(" ", fmtfunc(func, pc), "\n")
-  elseif what == "stop" or what == "abort" then
-    out:write("---- TRACE ", tr, " ", what)
-    if what == "abort" then
-      out:write(" ", fmtfunc(func, pc), " -- ", fmterr(otr, oex), "\n")
-    else
-      local info = traceinfo(tr)
-      local link, ltype = info.link, info.linktype
-      if link == tr or link == 0 then
-	out:write(" -> ", ltype, "\n")
-      elseif ltype == "root" then
-	out:write(" -> ", link, "\n")
-      else
-	out:write(" -> ", link, " ", ltype, "\n")
-      end
-    end
-    if dumpmode.H then out:write("</pre>
\n\n") else out:write("\n") end - else - out:write("---- TRACE ", what, "\n\n") - end - out:flush() -end - --- Dump recorded bytecode. -local function dump_record(tr, func, pc, depth, callee) - if depth ~= recdepth then - recdepth = depth - recprefix = rep(" .", depth) - end - local line - if pc >= 0 then - line = bcline(func, pc, recprefix) - if dumpmode.H then line = gsub(line, "[<>&]", html_escape) end - else - line = "0000 "..recprefix.." FUNCC \n" - callee = func - end - if pc <= 0 then - out:write(sub(line, 1, -2), " ; ", fmtfunc(func), "\n") - else - out:write(line) - end - if pc >= 0 and band(funcbc(func, pc), 0xff) < 16 then -- ORDER BC - out:write(bcline(func, pc+1, recprefix)) -- Write JMP for cond. - end -end - ------------------------------------------------------------------------------- - --- Dump taken trace exits. -local function dump_texit(tr, ex, ngpr, nfpr, ...) - out:write("---- TRACE ", tr, " exit ", ex, "\n") - if dumpmode.X then - local regs = {...} - if jit.arch == "x64" then - for i=1,ngpr do - out:write(format(" %016x", regs[i])) - if i % 4 == 0 then out:write("\n") end - end - else - for i=1,ngpr do - out:write(" ", tohex(regs[i])) - if i % 8 == 0 then out:write("\n") end - end - end - if jit.arch == "mips" or jit.arch == "mipsel" then - for i=1,nfpr,2 do - out:write(format(" %+17.14g", regs[ngpr+i])) - if i % 8 == 7 then out:write("\n") end - end - else - for i=1,nfpr do - out:write(format(" %+17.14g", regs[ngpr+i])) - if i % 4 == 0 then out:write("\n") end - end - end - end -end - ------------------------------------------------------------------------------- - --- Detach dump handlers. -local function dumpoff() - if active then - active = false - jit.attach(dump_texit) - jit.attach(dump_record) - jit.attach(dump_trace) - if out and out ~= stdout and out ~= stderr then out:close() end - out = nil - end -end - --- Open the output file and attach dump handlers. -local function dumpon(opt, outfile) - if active then dumpoff() end - - local colormode = os.getenv("COLORTERM") and "A" or "T" - if opt then - opt = gsub(opt, "[TAH]", function(mode) colormode = mode; return ""; end) - end - - local m = { t=true, b=true, i=true, m=true, } - if opt and opt ~= "" then - local o = sub(opt, 1, 1) - if o ~= "+" and o ~= "-" then m = {} end - for i=1,#opt do m[sub(opt, i, i)] = (o ~= "-") end - end - dumpmode = m - - if m.t or m.b or m.i or m.s or m.m then - jit.attach(dump_trace, "trace") - end - if m.b then - jit.attach(dump_record, "record") - if not bcline then bcline = require("jit.bc").line end - end - if m.x or m.X then - jit.attach(dump_texit, "texit") - end - - if not outfile then outfile = os.getenv("LUAJIT_DUMPFILE") end - if outfile then - out = outfile == "-" and stdout or assert(io.open(outfile, "w")) - else - out = stdout - end - - m[colormode] = true - if colormode == "A" then - colorize = colorize_ansi - irtype = irtype_ansi - elseif colormode == "H" then - colorize = colorize_html - irtype = irtype_html - out:write(header_html) - else - colorize = colorize_text - irtype = irtype_text - end - - active = true -end - --- Public module functions. -return { - on = dumpon, - off = dumpoff, - start = dumpon -- For -j command line option. 
-} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/p.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/p.lua deleted file mode 100644 index 97d4ccdf87c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/p.lua +++ /dev/null @@ -1,310 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT profiler. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- --- This module is a simple command line interface to the built-in --- low-overhead profiler of LuaJIT. --- --- The lower-level API of the profiler is accessible via the "jit.profile" --- module or the luaJIT_profile_* C API. --- --- Example usage: --- --- luajit -jp myapp.lua --- luajit -jp=s myapp.lua --- luajit -jp=-s myapp.lua --- luajit -jp=vl myapp.lua --- luajit -jp=G,profile.txt myapp.lua --- --- The following dump features are available: --- --- f Stack dump: function name, otherwise module:line. Default mode. --- F Stack dump: ditto, but always prepend module. --- l Stack dump: module:line. --- stack dump depth (callee < caller). Default: 1. --- - Inverse stack dump depth (caller > callee). --- s Split stack dump after first stack level. Implies abs(depth) >= 2. --- p Show full path for module names. --- v Show VM states. Can be combined with stack dumps, e.g. vf or fv. --- z Show zones. Can be combined with stack dumps, e.g. zf or fz. --- r Show raw sample counts. Default: show percentages. --- a Annotate excerpts from source code files. --- A Annotate complete source code files. --- G Produce raw output suitable for graphical tools (e.g. flame graphs). --- m Minimum sample percentage to be shown. Default: 3. --- i Sampling interval in milliseconds. Default: 10. --- ----------------------------------------------------------------------------- - --- Cache some library functions and objects. -local jit = require("jit") -assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") -local profile = require("jit.profile") -local vmdef = require("jit.vmdef") -local math = math -local pairs, ipairs, tonumber, floor = pairs, ipairs, tonumber, math.floor -local sort, format = table.sort, string.format -local stdout = io.stdout -local zone -- Load jit.zone module on demand. - --- Output file handle. -local out - ------------------------------------------------------------------------------- - -local prof_ud -local prof_states, prof_split, prof_min, prof_raw, prof_fmt, prof_depth -local prof_ann, prof_count1, prof_count2, prof_samples - -local map_vmmode = { - N = "Compiled", - I = "Interpreted", - C = "C code", - G = "Garbage Collector", - J = "JIT Compiler", -} - --- Profiler callback. -local function prof_cb(th, samples, vmmode) - prof_samples = prof_samples + samples - local key_stack, key_stack2, key_state - -- Collect keys for sample. 
- if prof_states then - if prof_states == "v" then - key_state = map_vmmode[vmmode] or vmmode - else - key_state = zone:get() or "(none)" - end - end - if prof_fmt then - key_stack = profile.dumpstack(th, prof_fmt, prof_depth) - key_stack = key_stack:gsub("%[builtin#(%d+)%]", function(x) - return vmdef.ffnames[tonumber(x)] - end) - if prof_split == 2 then - local k1, k2 = key_stack:match("(.-) [<>] (.*)") - if k2 then key_stack, key_stack2 = k1, k2 end - elseif prof_split == 3 then - key_stack2 = profile.dumpstack(th, "l", 1) - end - end - -- Order keys. - local k1, k2 - if prof_split == 1 then - if key_state then - k1 = key_state - if key_stack then k2 = key_stack end - end - elseif key_stack then - k1 = key_stack - if key_stack2 then k2 = key_stack2 elseif key_state then k2 = key_state end - end - -- Coalesce samples in one or two levels. - if k1 then - local t1 = prof_count1 - t1[k1] = (t1[k1] or 0) + samples - if k2 then - local t2 = prof_count2 - local t3 = t2[k1] - if not t3 then t3 = {}; t2[k1] = t3 end - t3[k2] = (t3[k2] or 0) + samples - end - end -end - ------------------------------------------------------------------------------- - --- Show top N list. -local function prof_top(count1, count2, samples, indent) - local t, n = {}, 0 - for k, v in pairs(count1) do - n = n + 1 - t[n] = k - end - sort(t, function(a, b) return count1[a] > count1[b] end) - for i=1,n do - local k = t[i] - local v = count1[k] - local pct = floor(v*100/samples + 0.5) - if pct < prof_min then break end - if not prof_raw then - out:write(format("%s%2d%% %s\n", indent, pct, k)) - elseif prof_raw == "r" then - out:write(format("%s%5d %s\n", indent, v, k)) - else - out:write(format("%s %d\n", k, v)) - end - if count2 then - local r = count2[k] - if r then - prof_top(r, nil, v, (prof_split == 3 or prof_split == 1) and " -- " or - (prof_depth < 0 and " -> " or " <- ")) - end - end - end -end - --- Annotate source code -local function prof_annotate(count1, samples) - local files = {} - local ms = 0 - for k, v in pairs(count1) do - local pct = floor(v*100/samples + 0.5) - ms = math.max(ms, v) - if pct >= prof_min then - local file, line = k:match("^(.*):(%d+)$") - local fl = files[file] - if not fl then fl = {}; files[file] = fl; files[#files+1] = file end - line = tonumber(line) - fl[line] = prof_raw and v or pct - end - end - sort(files) - local fmtv, fmtn = " %3d%% | %s\n", " | %s\n" - if prof_raw then - local n = math.max(5, math.ceil(math.log10(ms))) - fmtv = "%"..n.."d | %s\n" - fmtn = (" "):rep(n).." 
| %s\n" - end - local ann = prof_ann - for _, file in ipairs(files) do - local f0 = file:byte() - if f0 == 40 or f0 == 91 then - out:write(format("\n====== %s ======\n[Cannot annotate non-file]\n", file)) - break - end - local fp, err = io.open(file) - if not fp then - out:write(format("====== ERROR: %s: %s\n", file, err)) - break - end - out:write(format("\n====== %s ======\n", file)) - local fl = files[file] - local n, show = 1, false - if ann ~= 0 then - for i=1,ann do - if fl[i] then show = true; out:write("@@ 1 @@\n"); break end - end - end - for line in fp:lines() do - if line:byte() == 27 then - out:write("[Cannot annotate bytecode file]\n") - break - end - local v = fl[n] - if ann ~= 0 then - local v2 = fl[n+ann] - if show then - if v2 then show = n+ann elseif v then show = n - elseif show+ann < n then show = false end - elseif v2 then - show = n+ann - out:write(format("@@ %d @@\n", n)) - end - if not show then goto next end - end - if v then - out:write(format(fmtv, v, line)) - else - out:write(format(fmtn, line)) - end - ::next:: - n = n + 1 - end - fp:close() - end -end - ------------------------------------------------------------------------------- - --- Finish profiling and dump result. -local function prof_finish() - if prof_ud then - profile.stop() - local samples = prof_samples - if samples == 0 then - if prof_raw ~= true then out:write("[No samples collected]\n") end - return - end - if prof_ann then - prof_annotate(prof_count1, samples) - else - prof_top(prof_count1, prof_count2, samples, "") - end - prof_count1 = nil - prof_count2 = nil - prof_ud = nil - end -end - --- Start profiling. -local function prof_start(mode) - local interval = "" - mode = mode:gsub("i%d*", function(s) interval = s; return "" end) - prof_min = 3 - mode = mode:gsub("m(%d+)", function(s) prof_min = tonumber(s); return "" end) - prof_depth = 1 - mode = mode:gsub("%-?%d+", function(s) prof_depth = tonumber(s); return "" end) - local m = {} - for c in mode:gmatch(".") do m[c] = c end - prof_states = m.z or m.v - if prof_states == "z" then zone = require("jit.zone") end - local scope = m.l or m.f or m.F or (prof_states and "" or "f") - local flags = (m.p or "") - prof_raw = m.r - if m.s then - prof_split = 2 - if prof_depth == -1 or m["-"] then prof_depth = -2 - elseif prof_depth == 1 then prof_depth = 2 end - elseif mode:find("[fF].*l") then - scope = "l" - prof_split = 3 - else - prof_split = (scope == "" or mode:find("[zv].*[lfF]")) and 1 or 0 - end - prof_ann = m.A and 0 or (m.a and 3) - if prof_ann then - scope = "l" - prof_fmt = "pl" - prof_split = 0 - prof_depth = 1 - elseif m.G and scope ~= "" then - prof_fmt = flags..scope.."Z;" - prof_depth = -100 - prof_raw = true - prof_min = 0 - elseif scope == "" then - prof_fmt = false - else - local sc = prof_split == 3 and m.f or m.F or scope - prof_fmt = flags..sc..(prof_depth >= 0 and "Z < " or "Z > ") - end - prof_count1 = {} - prof_count2 = {} - prof_samples = 0 - profile.start(scope:lower()..interval, prof_cb) - prof_ud = newproxy(true) - getmetatable(prof_ud).__gc = prof_finish -end - ------------------------------------------------------------------------------- - -local function start(mode, outfile) - if not outfile then outfile = os.getenv("LUAJIT_PROFILEFILE") end - if outfile then - out = outfile == "-" and stdout or assert(io.open(outfile, "w")) - else - out = stdout - end - prof_start(mode or "f") -end - --- Public module functions. -return { - start = start, -- For -j command line option. 
- stop = prof_finish -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/v.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/v.lua deleted file mode 100644 index 157c34bc643..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/v.lua +++ /dev/null @@ -1,170 +0,0 @@ ----------------------------------------------------------------------------- --- Verbose mode of the LuaJIT compiler. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- --- This module shows verbose information about the progress of the --- JIT compiler. It prints one line for each generated trace. This module --- is useful to see which code has been compiled or where the compiler --- punts and falls back to the interpreter. --- --- Example usage: --- --- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end" --- luajit -jv=myapp.out myapp.lua --- --- Default output is to stderr. To redirect the output to a file, pass a --- filename as an argument (use '-' for stdout) or set the environment --- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the --- module is started. --- --- The output from the first example should look like this: --- --- [TRACE 1 (command line):1 loop] --- [TRACE 2 (1/3) (command line):1 -> 1] --- --- The first number in each line is the internal trace number. Next are --- the file name ('(command line)') and the line number (':1') where the --- trace has started. Side traces also show the parent trace number and --- the exit number where they are attached to in parentheses ('(1/3)'). --- An arrow at the end shows where the trace links to ('-> 1'), unless --- it loops to itself. --- --- In this case the inner loop gets hot and is traced first, generating --- a root trace. Then the last exit from the 1st trace gets hot, too, --- and triggers generation of the 2nd trace. The side trace follows the --- path along the outer loop and *around* the inner loop, back to its --- start, and then links to the 1st trace. Yes, this may seem unusual, --- if you know how traditional compilers work. Trace compilers are full --- of surprises like this -- have fun! :-) --- --- Aborted traces are shown like this: --- --- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo:lua:50] --- --- Don't worry -- trace aborts are quite common, even in programs which --- can be fully compiled. The compiler may retry several times until it --- finds a suitable trace. --- --- Of course this doesn't work with features that are not-yet-implemented --- (NYI error messages). The VM simply falls back to the interpreter. This --- may not matter at all if the particular trace is not very high up in --- the CPU usage profile. Oh, and the interpreter is quite fast, too. --- --- Also check out the -jdump module, which prints all the gory details. --- ------------------------------------------------------------------------------- - --- Cache some library functions and objects. -local jit = require("jit") -assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") -local jutil = require("jit.util") -local vmdef = require("jit.vmdef") -local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo -local type, format = type, string.format -local stdout, stderr = io.stdout, io.stderr - --- Active flag and output file handle. 
-local active, out - ------------------------------------------------------------------------------- - -local startloc, startex - -local function fmtfunc(func, pc) - local fi = funcinfo(func, pc) - if fi.loc then - return fi.loc - elseif fi.ffid then - return vmdef.ffnames[fi.ffid] - elseif fi.addr then - return format("C:%x", fi.addr) - else - return "(?)" - end -end - --- Format trace error message. -local function fmterr(err, info) - if type(err) == "number" then - if type(info) == "function" then info = fmtfunc(info) end - err = format(vmdef.traceerr[err], info) - end - return err -end - --- Dump trace states. -local function dump_trace(what, tr, func, pc, otr, oex) - if what == "start" then - startloc = fmtfunc(func, pc) - startex = otr and "("..otr.."/"..oex..") " or "" - else - if what == "abort" then - local loc = fmtfunc(func, pc) - if loc ~= startloc then - out:write(format("[TRACE --- %s%s -- %s at %s]\n", - startex, startloc, fmterr(otr, oex), loc)) - else - out:write(format("[TRACE --- %s%s -- %s]\n", - startex, startloc, fmterr(otr, oex))) - end - elseif what == "stop" then - local info = traceinfo(tr) - local link, ltype = info.link, info.linktype - if ltype == "interpreter" then - out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n", - tr, startex, startloc)) - elseif ltype == "stitch" then - out:write(format("[TRACE %3s %s%s %s %s]\n", - tr, startex, startloc, ltype, fmtfunc(func, pc))) - elseif link == tr or link == 0 then - out:write(format("[TRACE %3s %s%s %s]\n", - tr, startex, startloc, ltype)) - elseif ltype == "root" then - out:write(format("[TRACE %3s %s%s -> %d]\n", - tr, startex, startloc, link)) - else - out:write(format("[TRACE %3s %s%s -> %d %s]\n", - tr, startex, startloc, link, ltype)) - end - else - out:write(format("[TRACE %s]\n", what)) - end - out:flush() - end -end - ------------------------------------------------------------------------------- - --- Detach dump handlers. -local function dumpoff() - if active then - active = false - jit.attach(dump_trace) - if out and out ~= stdout and out ~= stderr then out:close() end - out = nil - end -end - --- Open the output file and attach dump handlers. -local function dumpon(outfile) - if active then dumpoff() end - if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end - if outfile then - out = outfile == "-" and stdout or assert(io.open(outfile, "w")) - else - out = stderr - end - jit.attach(dump_trace, "trace") - active = true -end - --- Public module functions. -return { - on = dumpon, - off = dumpoff, - start = dumpon -- For -j command line option. -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/zone.lua b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/zone.lua deleted file mode 100644 index 69f0f169147..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/jit/zone.lua +++ /dev/null @@ -1,45 +0,0 @@ ----------------------------------------------------------------------------- --- LuaJIT profiler zones. --- --- Copyright (C) 2005-2015 Mike Pall. All rights reserved. --- Released under the MIT license. See Copyright Notice in luajit.h ----------------------------------------------------------------------------- --- --- This module implements a simple hierarchical zone model. --- --- Example usage: --- --- local zone = require("jit.zone") --- zone("AI") --- ... --- zone("A*") --- ... --- print(zone:get()) --> "A*" --- ... --- zone() --- ... --- print(zone:get()) --> "AI" --- ... 
--- zone() --- ----------------------------------------------------------------------------- - -local remove = table.remove - -return setmetatable({ - flush = function(t) - for i=#t,1,-1 do t[i] = nil end - end, - get = function(t) - return t[#t] - end -}, { - __call = function(t, zone) - if zone then - t[#t+1] = zone - else - return (assert(remove(t), "empty zone stack")) - end - end -}) - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lauxlib.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lauxlib.h deleted file mode 100644 index b6ed1a7d04a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lauxlib.h +++ /dev/null @@ -1,193 +0,0 @@ -/* -** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $ -** Auxiliary functions for building Lua libraries -** See Copyright Notice in lua.h -*/ - - -#ifndef lauxlib_h -#define lauxlib_h - - -#include -#include - -#include "lua.h" - - -#define luaL_getn(L,i) ((int)lua_objlen(L, i)) -#define luaL_setn(L,i,j) ((void)0) /* no op! */ - -/* extra error code for `luaL_load' */ -#define LUA_ERRFILE (LUA_ERRERR+1) - -typedef struct luaL_Reg { - const char *name; - lua_CFunction func; -} luaL_Reg; - -LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname, - const luaL_Reg *l, int nup); -LUALIB_API void (luaL_register) (lua_State *L, const char *libname, - const luaL_Reg *l); -LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e); -LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e); -LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname); -LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg); -LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg, - size_t *l); -LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg, - const char *def, size_t *l); -LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg); -LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def); - -LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg); -LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg, - lua_Integer def); - -LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg); -LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t); -LUALIB_API void (luaL_checkany) (lua_State *L, int narg); - -LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname); -LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname); - -LUALIB_API void (luaL_where) (lua_State *L, int lvl); -LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...); - -LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, - const char *const lst[]); - -LUALIB_API int (luaL_ref) (lua_State *L, int t); -LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); - -LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename); -LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz, - const char *name); -LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s); - -LUALIB_API lua_State *(luaL_newstate) (void); - - -LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p, - const char *r); - -LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx, - const char *fname, int szhint); - -/* From Lua 5.2. 
*/ -LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname); -LUALIB_API int luaL_execresult(lua_State *L, int stat); -LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename, - const char *mode); -LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz, - const char *name, const char *mode); -LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg, - int level); - - - -/* -** {====================================================== -** File handles for IO library -** ======================================================= -*/ - -/* -** A file handle is a userdata with metatable 'LUA_FILEHANDLE' and -** initial structure 'luaL_Stream' (it may contain other fields -** after that initial structure). -*/ - -#define LUA_FILEHANDLE "FILE*" - - -typedef struct luaL_Stream { - FILE *f; /* stream (NULL for incompletely created streams) */ - lua_CFunction closef; /* to close stream (NULL for closed streams) */ -} luaL_Stream; - -/* }====================================================== */ - - - - -/* -** =============================================================== -** some useful macros -** =============================================================== -*/ - -#define luaL_argcheck(L, cond,numarg,extramsg) \ - ((void)((cond) || luaL_argerror(L, (numarg), (extramsg)))) -#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL)) -#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL)) -#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n))) -#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d))) -#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n))) -#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d))) - -#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i))) - -#define luaL_dofile(L, fn) \ - (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0)) - -#define luaL_dostring(L, s) \ - (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0)) - -#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n))) - -#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n))) - -/* -** {====================================================== -** Generic Buffer manipulation -** ======================================================= -*/ - - - -typedef struct luaL_Buffer { - char *p; /* current position in buffer */ - int lvl; /* number of strings in the stack (level) */ - lua_State *L; - char buffer[LUAL_BUFFERSIZE]; -} luaL_Buffer; - -#define luaL_addchar(B,c) \ - ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \ - (*(B)->p++ = (char)(c))) - -/* compatibility only */ -#define luaL_putchar(B,c) luaL_addchar(B,c) - -#define luaL_addsize(B,n) ((B)->p += (n)) - -LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B); -LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B); -LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l); -LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s); -LUALIB_API void (luaL_addvalue) (luaL_Buffer *B); -LUALIB_API void (luaL_pushresult) (luaL_Buffer *B); - - -/* }====================================================== */ - - -/* compatibility with ref system */ - -/* pre-defined references */ -#define LUA_NOREF (-2) -#define LUA_REFNIL (-1) - -#define lua_ref(L,lock) ((lock) ? 
luaL_ref(L, LUA_REGISTRYINDEX) : \ - (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0)) - -#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref)) - -#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref)) - - -#define luaL_reg luaL_Reg - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_aux.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_aux.c deleted file mode 100644 index 4a1b70ddc09..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_aux.c +++ /dev/null @@ -1,356 +0,0 @@ -/* -** Auxiliary library for the Lua/C API. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major parts taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#include -#include -#include - -#define lib_aux_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" - -#include "lj_obj.h" -#include "lj_err.h" -#include "lj_state.h" -#include "lj_trace.h" -#include "lj_lib.h" - -#if LJ_TARGET_POSIX -#include -#endif - -/* -- I/O error handling -------------------------------------------------- */ - -LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname) -{ - if (stat) { - setboolV(L->top++, 1); - return 1; - } else { - int en = errno; /* Lua API calls may change this value. */ - setnilV(L->top++); - if (fname) - lua_pushfstring(L, "%s: %s", fname, strerror(en)); - else - lua_pushfstring(L, "%s", strerror(en)); - setintV(L->top++, en); - lj_trace_abort(G(L)); - return 3; - } -} - -LUALIB_API int luaL_execresult(lua_State *L, int stat) -{ - if (stat != -1) { -#if LJ_TARGET_POSIX - if (WIFSIGNALED(stat)) { - stat = WTERMSIG(stat); - setnilV(L->top++); - lua_pushliteral(L, "signal"); - } else { - if (WIFEXITED(stat)) - stat = WEXITSTATUS(stat); - if (stat == 0) - setboolV(L->top++, 1); - else - setnilV(L->top++); - lua_pushliteral(L, "exit"); - } -#else - if (stat == 0) - setboolV(L->top++, 1); - else - setnilV(L->top++); - lua_pushliteral(L, "exit"); -#endif - setintV(L->top++, stat); - return 3; - } - return luaL_fileresult(L, 0, NULL); -} - -/* -- Module registration ------------------------------------------------- */ - -LUALIB_API const char *luaL_findtable(lua_State *L, int idx, - const char *fname, int szhint) -{ - const char *e; - lua_pushvalue(L, idx); - do { - e = strchr(fname, '.'); - if (e == NULL) e = fname + strlen(fname); - lua_pushlstring(L, fname, (size_t)(e - fname)); - lua_rawget(L, -2); - if (lua_isnil(L, -1)) { /* no such field? */ - lua_pop(L, 1); /* remove this nil */ - lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */ - lua_pushlstring(L, fname, (size_t)(e - fname)); - lua_pushvalue(L, -2); - lua_settable(L, -4); /* set new table into field */ - } else if (!lua_istable(L, -1)) { /* field has a non-table value? 
*/ - lua_pop(L, 2); /* remove table and value */ - return fname; /* return problematic part of the name */ - } - lua_remove(L, -2); /* remove previous table */ - fname = e + 1; - } while (*e == '.'); - return NULL; -} - -static int libsize(const luaL_Reg *l) -{ - int size = 0; - for (; l->name; l++) size++; - return size; -} - -LUALIB_API void luaL_openlib(lua_State *L, const char *libname, - const luaL_Reg *l, int nup) -{ - lj_lib_checkfpu(L); - if (libname) { - int size = libsize(l); - /* check whether lib already exists */ - luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); - lua_getfield(L, -1, libname); /* get _LOADED[libname] */ - if (!lua_istable(L, -1)) { /* not found? */ - lua_pop(L, 1); /* remove previous result */ - /* try global variable (and create one if it does not exist) */ - if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL) - lj_err_callerv(L, LJ_ERR_BADMODN, libname); - lua_pushvalue(L, -1); - lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ - } - lua_remove(L, -2); /* remove _LOADED table */ - lua_insert(L, -(nup+1)); /* move library table to below upvalues */ - } - for (; l->name; l++) { - int i; - for (i = 0; i < nup; i++) /* copy upvalues to the top */ - lua_pushvalue(L, -nup); - lua_pushcclosure(L, l->func, nup); - lua_setfield(L, -(nup+2), l->name); - } - lua_pop(L, nup); /* remove upvalues */ -} - -LUALIB_API void luaL_register(lua_State *L, const char *libname, - const luaL_Reg *l) -{ - luaL_openlib(L, libname, l, 0); -} - -LUALIB_API const char *luaL_gsub(lua_State *L, const char *s, - const char *p, const char *r) -{ - const char *wild; - size_t l = strlen(p); - luaL_Buffer b; - luaL_buffinit(L, &b); - while ((wild = strstr(s, p)) != NULL) { - luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */ - luaL_addstring(&b, r); /* push replacement in place of pattern */ - s = wild + l; /* continue after `p' */ - } - luaL_addstring(&b, s); /* push last suffix */ - luaL_pushresult(&b); - return lua_tostring(L, -1); -} - -/* -- Buffer handling ----------------------------------------------------- */ - -#define bufflen(B) ((size_t)((B)->p - (B)->buffer)) -#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B))) - -static int emptybuffer(luaL_Buffer *B) -{ - size_t l = bufflen(B); - if (l == 0) - return 0; /* put nothing on stack */ - lua_pushlstring(B->L, B->buffer, l); - B->p = B->buffer; - B->lvl++; - return 1; -} - -static void adjuststack(luaL_Buffer *B) -{ - if (B->lvl > 1) { - lua_State *L = B->L; - int toget = 1; /* number of levels to concat */ - size_t toplen = lua_strlen(L, -1); - do { - size_t l = lua_strlen(L, -(toget+1)); - if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l)) - break; - toplen += l; - toget++; - } while (toget < B->lvl); - lua_concat(L, toget); - B->lvl = B->lvl - toget + 1; - } -} - -LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B) -{ - if (emptybuffer(B)) - adjuststack(B); - return B->buffer; -} - -LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l) -{ - while (l--) - luaL_addchar(B, *s++); -} - -LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s) -{ - luaL_addlstring(B, s, strlen(s)); -} - -LUALIB_API void luaL_pushresult(luaL_Buffer *B) -{ - emptybuffer(B); - lua_concat(B->L, B->lvl); - B->lvl = 1; -} - -LUALIB_API void luaL_addvalue(luaL_Buffer *B) -{ - lua_State *L = B->L; - size_t vl; - const char *s = lua_tolstring(L, -1, &vl); - if (vl <= bufffree(B)) { /* fit into buffer? 
*/ - memcpy(B->p, s, vl); /* put it there */ - B->p += vl; - lua_pop(L, 1); /* remove from stack */ - } else { - if (emptybuffer(B)) - lua_insert(L, -2); /* put buffer before new value */ - B->lvl++; /* add new value into B stack */ - adjuststack(B); - } -} - -LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B) -{ - B->L = L; - B->p = B->buffer; - B->lvl = 0; -} - -/* -- Reference management ------------------------------------------------ */ - -#define FREELIST_REF 0 - -/* Convert a stack index to an absolute index. */ -#define abs_index(L, i) \ - ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1) - -LUALIB_API int luaL_ref(lua_State *L, int t) -{ - int ref; - t = abs_index(L, t); - if (lua_isnil(L, -1)) { - lua_pop(L, 1); /* remove from stack */ - return LUA_REFNIL; /* `nil' has a unique fixed reference */ - } - lua_rawgeti(L, t, FREELIST_REF); /* get first free element */ - ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */ - lua_pop(L, 1); /* remove it from stack */ - if (ref != 0) { /* any free element? */ - lua_rawgeti(L, t, ref); /* remove it from list */ - lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */ - } else { /* no free elements */ - ref = (int)lua_objlen(L, t); - ref++; /* create new reference */ - } - lua_rawseti(L, t, ref); - return ref; -} - -LUALIB_API void luaL_unref(lua_State *L, int t, int ref) -{ - if (ref >= 0) { - t = abs_index(L, t); - lua_rawgeti(L, t, FREELIST_REF); - lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */ - lua_pushinteger(L, ref); - lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */ - } -} - -/* -- Default allocator and panic function -------------------------------- */ - -static int panic(lua_State *L) -{ - const char *s = lua_tostring(L, -1); - fputs("PANIC: unprotected error in call to Lua API (", stderr); - fputs(s ? s : "?", stderr); - fputc(')', stderr); fputc('\n', stderr); - fflush(stderr); - return 0; -} - -#ifdef LUAJIT_USE_SYSMALLOC - -#if LJ_64 && !defined(LUAJIT_USE_VALGRIND) -#error "Must use builtin allocator for 64 bit target" -#endif - -static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize) -{ - (void)ud; - (void)osize; - if (nsize == 0) { - free(ptr); - return NULL; - } else { - return realloc(ptr, nsize); - } -} - -LUALIB_API lua_State *luaL_newstate(void) -{ - lua_State *L = lua_newstate(mem_alloc, NULL); - if (L) G(L)->panic = panic; - return L; -} - -#else - -#include "lj_alloc.h" - -LUALIB_API lua_State *luaL_newstate(void) -{ - lua_State *L; - void *ud = lj_alloc_create(); - if (ud == NULL) return NULL; -#if LJ_64 - L = lj_state_newstate(lj_alloc_f, ud); -#else - L = lua_newstate(lj_alloc_f, ud); -#endif - if (L) G(L)->panic = panic; - return L; -} - -#if LJ_64 -LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) -{ - UNUSED(f); UNUSED(ud); - fputs("Must use luaL_newstate() for 64 bit target\n", stderr); - return NULL; -} -#endif - -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_base.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_base.c deleted file mode 100644 index 887fea7a58a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_base.c +++ /dev/null @@ -1,664 +0,0 @@ -/* -** Base and coroutine library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2011 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h -*/ - -#include - -#define lib_base_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_meta.h" -#include "lj_state.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_cconv.h" -#endif -#include "lj_bc.h" -#include "lj_ff.h" -#include "lj_dispatch.h" -#include "lj_char.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" -#include "lj_lib.h" - -/* -- Base library: checks ------------------------------------------------ */ - -#define LJLIB_MODULE_base - -LJLIB_ASM(assert) LJLIB_REC(.) -{ - GCstr *s; - lj_lib_checkany(L, 1); - s = lj_lib_optstr(L, 2); - if (s) - lj_err_callermsg(L, strdata(s)); - else - lj_err_caller(L, LJ_ERR_ASSERT); - return FFH_UNREACHABLE; -} - -/* ORDER LJ_T */ -LJLIB_PUSH("nil") -LJLIB_PUSH("boolean") -LJLIB_PUSH(top-1) /* boolean */ -LJLIB_PUSH("userdata") -LJLIB_PUSH("string") -LJLIB_PUSH("upval") -LJLIB_PUSH("thread") -LJLIB_PUSH("proto") -LJLIB_PUSH("function") -LJLIB_PUSH("trace") -LJLIB_PUSH("cdata") -LJLIB_PUSH("table") -LJLIB_PUSH(top-9) /* userdata */ -LJLIB_PUSH("number") -LJLIB_ASM_(type) LJLIB_REC(.) -/* Recycle the lj_lib_checkany(L, 1) from assert. */ - -/* -- Base library: iterators --------------------------------------------- */ - -/* This solves a circular dependency problem -- change FF_next_N as needed. */ -LJ_STATIC_ASSERT((int)FF_next == FF_next_N); - -LJLIB_ASM(next) -{ - lj_lib_checktab(L, 1); - return FFH_UNREACHABLE; -} - -#if LJ_52 || LJ_HASFFI -static int ffh_pairs(lua_State *L, MMS mm) -{ - TValue *o = lj_lib_checkany(L, 1); - cTValue *mo = lj_meta_lookup(L, o, mm); - if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) { - L->top = o+1; /* Only keep one argument. */ - copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */ - return FFH_TAILCALL; - } else { - if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE); - if (LJ_FR2) { copyTV(L, o-1, o); o--; } - setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1))); - if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0); - return FFH_RES(3); - } -} -#else -#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE) -#endif - -LJLIB_PUSH(lastcl) -LJLIB_ASM(pairs) LJLIB_REC(xpairs 0) -{ - return ffh_pairs(L, MM_pairs); -} - -LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.) -{ - lj_lib_checktab(L, 1); - lj_lib_checkint(L, 2); - return FFH_UNREACHABLE; -} - -LJLIB_PUSH(lastcl) -LJLIB_ASM(ipairs) LJLIB_REC(xpairs 1) -{ - return ffh_pairs(L, MM_ipairs); -} - -/* -- Base library: getters and setters ----------------------------------- */ - -LJLIB_ASM_(getmetatable) LJLIB_REC(.) -/* Recycle the lj_lib_checkany(L, 1) from assert. */ - -LJLIB_ASM(setmetatable) LJLIB_REC(.) -{ - GCtab *t = lj_lib_checktab(L, 1); - GCtab *mt = lj_lib_checktabornil(L, 2); - if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable))) - lj_err_caller(L, LJ_ERR_PROTMT); - setgcref(t->metatable, obj2gco(mt)); - if (mt) { lj_gc_objbarriert(L, t, mt); } - settabV(L, L->base-1-LJ_FR2, t); - return FFH_RES(1); -} - -LJLIB_CF(getfenv) LJLIB_REC(.) -{ - GCfunc *fn; - cTValue *o = L->base; - if (!(o < L->top && tvisfunc(o))) { - int level = lj_lib_optint(L, 1, 1); - o = lj_debug_frame(L, level, &level); - if (o == NULL) - lj_err_arg(L, 1, LJ_ERR_INVLVL); - if (LJ_FR2) o--; - } - fn = &gcval(o)->fn; - settabV(L, L->top++, isluafunc(fn) ? 
tabref(fn->l.env) : tabref(L->env)); - return 1; -} - -LJLIB_CF(setfenv) -{ - GCfunc *fn; - GCtab *t = lj_lib_checktab(L, 2); - cTValue *o = L->base; - if (!(o < L->top && tvisfunc(o))) { - int level = lj_lib_checkint(L, 1); - if (level == 0) { - /* NOBARRIER: A thread (i.e. L) is never black. */ - setgcref(L->env, obj2gco(t)); - return 0; - } - o = lj_debug_frame(L, level, &level); - if (o == NULL) - lj_err_arg(L, 1, LJ_ERR_INVLVL); - if (LJ_FR2) o--; - } - fn = &gcval(o)->fn; - if (!isluafunc(fn)) - lj_err_caller(L, LJ_ERR_SETFENV); - setgcref(fn->l.env, obj2gco(t)); - lj_gc_objbarrier(L, obj2gco(fn), t); - setfuncV(L, L->top++, fn); - return 1; -} - -LJLIB_ASM(rawget) LJLIB_REC(.) -{ - lj_lib_checktab(L, 1); - lj_lib_checkany(L, 2); - return FFH_UNREACHABLE; -} - -LJLIB_CF(rawset) LJLIB_REC(.) -{ - lj_lib_checktab(L, 1); - lj_lib_checkany(L, 2); - L->top = 1+lj_lib_checkany(L, 3); - lua_rawset(L, 1); - return 1; -} - -LJLIB_CF(rawequal) LJLIB_REC(.) -{ - cTValue *o1 = lj_lib_checkany(L, 1); - cTValue *o2 = lj_lib_checkany(L, 2); - setboolV(L->top-1, lj_obj_equal(o1, o2)); - return 1; -} - -#if LJ_52 -LJLIB_CF(rawlen) LJLIB_REC(.) -{ - cTValue *o = L->base; - int32_t len; - if (L->top > o && tvisstr(o)) - len = (int32_t)strV(o)->len; - else - len = (int32_t)lj_tab_len(lj_lib_checktab(L, 1)); - setintV(L->top-1, len); - return 1; -} -#endif - -LJLIB_CF(unpack) -{ - GCtab *t = lj_lib_checktab(L, 1); - int32_t n, i = lj_lib_optint(L, 2, 1); - int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ? - lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t); - if (i > e) return 0; - n = e - i + 1; - if (n <= 0 || !lua_checkstack(L, n)) - lj_err_caller(L, LJ_ERR_UNPACK); - do { - cTValue *tv = lj_tab_getint(t, i); - if (tv) { - copyTV(L, L->top++, tv); - } else { - setnilV(L->top++); - } - } while (i++ < e); - return n; -} - -LJLIB_CF(select) LJLIB_REC(.) -{ - int32_t n = (int32_t)(L->top - L->base); - if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') { - setintV(L->top-1, n-1); - return 1; - } else { - int32_t i = lj_lib_checkint(L, 1); - if (i < 0) i = n + i; else if (i > n) i = n; - if (i < 1) - lj_err_arg(L, 1, LJ_ERR_IDXRNG); - return n - i; - } -} - -/* -- Base library: conversions ------------------------------------------- */ - -LJLIB_ASM(tonumber) LJLIB_REC(.) 
-{ - int32_t base = lj_lib_optint(L, 2, 10); - if (base == 10) { - TValue *o = lj_lib_checkany(L, 1); - if (lj_strscan_numberobj(o)) { - copyTV(L, L->base-1-LJ_FR2, o); - return FFH_RES(1); - } -#if LJ_HASFFI - if (tviscdata(o)) { - CTState *cts = ctype_cts(L); - CType *ct = lj_ctype_rawref(cts, cdataV(o)->ctypeid); - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { - if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) && - ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) { - int32_t i; - lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0); - setintV(L->base-1-LJ_FR2, i); - return FFH_RES(1); - } - lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE), - (uint8_t *)&(L->base-1-LJ_FR2)->n, o, 0); - return FFH_RES(1); - } - } -#endif - } else { - const char *p = strdata(lj_lib_checkstr(L, 1)); - char *ep; - unsigned long ul; - if (base < 2 || base > 36) - lj_err_arg(L, 2, LJ_ERR_BASERNG); - ul = strtoul(p, &ep, base); - if (p != ep) { - while (lj_char_isspace((unsigned char)(*ep))) ep++; - if (*ep == '\0') { - if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u)) - setintV(L->base-1-LJ_FR2, (int32_t)ul); - else - setnumV(L->base-1-LJ_FR2, (lua_Number)ul); - return FFH_RES(1); - } - } - } - setnilV(L->base-1-LJ_FR2); - return FFH_RES(1); -} - -LJLIB_ASM(tostring) LJLIB_REC(.) -{ - TValue *o = lj_lib_checkany(L, 1); - cTValue *mo; - L->top = o+1; /* Only keep one argument. */ - if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { - copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */ - return FFH_TAILCALL; - } - lj_gc_check(L); - setstrV(L, L->base-1-LJ_FR2, lj_strfmt_obj(L, L->base)); - return FFH_RES(1); -} - -/* -- Base library: throw and catch errors -------------------------------- */ - -LJLIB_CF(error) -{ - int32_t level = lj_lib_optint(L, 2, 1); - lua_settop(L, 1); - if (lua_isstring(L, 1) && level > 0) { - luaL_where(L, level); - lua_pushvalue(L, 1); - lua_concat(L, 2); - } - return lua_error(L); -} - -LJLIB_ASM(pcall) LJLIB_REC(.) -{ - lj_lib_checkany(L, 1); - lj_lib_checkfunc(L, 2); /* For xpcall only. */ - return FFH_UNREACHABLE; -} -LJLIB_ASM_(xpcall) LJLIB_REC(.) - -/* -- Base library: load Lua code ----------------------------------------- */ - -static int load_aux(lua_State *L, int status, int envarg) -{ - if (status == 0) { - if (tvistab(L->base+envarg-1)) { - GCfunc *fn = funcV(L->top-1); - GCtab *t = tabV(L->base+envarg-1); - setgcref(fn->c.env, obj2gco(t)); - lj_gc_objbarrier(L, fn, t); - } - return 1; - } else { - setnilV(L->top-2); - return 2; - } -} - -LJLIB_CF(loadfile) -{ - GCstr *fname = lj_lib_optstr(L, 1); - GCstr *mode = lj_lib_optstr(L, 2); - int status; - lua_settop(L, 3); /* Ensure env arg exists. */ - status = luaL_loadfilex(L, fname ? strdata(fname) : NULL, - mode ? strdata(mode) : NULL); - return load_aux(L, status, 3); -} - -static const char *reader_func(lua_State *L, void *ud, size_t *size) -{ - UNUSED(ud); - luaL_checkstack(L, 2, "too many nested functions"); - copyTV(L, L->top++, L->base); - lua_call(L, 0, 1); /* Call user-supplied function. */ - L->top--; - if (tvisnil(L->top)) { - *size = 0; - return NULL; - } else if (tvisstr(L->top) || tvisnumber(L->top)) { - copyTV(L, L->base+4, L->top); /* Anchor string in reserved stack slot. 
*/ - return lua_tolstring(L, 5, size); - } else { - lj_err_caller(L, LJ_ERR_RDRSTR); - return NULL; - } -} - -LJLIB_CF(load) -{ - GCstr *name = lj_lib_optstr(L, 2); - GCstr *mode = lj_lib_optstr(L, 3); - int status; - if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base))) { - GCstr *s = lj_lib_checkstr(L, 1); - lua_settop(L, 4); /* Ensure env arg exists. */ - status = luaL_loadbufferx(L, strdata(s), s->len, strdata(name ? name : s), - mode ? strdata(mode) : NULL); - } else { - lj_lib_checkfunc(L, 1); - lua_settop(L, 5); /* Reserve a slot for the string from the reader. */ - status = lua_loadx(L, reader_func, NULL, name ? strdata(name) : "=(load)", - mode ? strdata(mode) : NULL); - } - return load_aux(L, status, 4); -} - -LJLIB_CF(loadstring) -{ - return lj_cf_load(L); -} - -LJLIB_CF(dofile) -{ - GCstr *fname = lj_lib_optstr(L, 1); - setnilV(L->top); - L->top = L->base+1; - if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0) - lua_error(L); - lua_call(L, 0, LUA_MULTRET); - return (int)(L->top - L->base) - 1; -} - -/* -- Base library: GC control -------------------------------------------- */ - -LJLIB_CF(gcinfo) -{ - setintV(L->top++, (int32_t)(G(L)->gc.total >> 10)); - return 1; -} - -LJLIB_CF(collectgarbage) -{ - int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */ - "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul"); - int32_t data = lj_lib_optint(L, 2, 0); - if (opt == LUA_GCCOUNT) { - setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0); - } else { - int res = lua_gc(L, opt, data); - if (opt == LUA_GCSTEP) - setboolV(L->top, res); - else - setintV(L->top, res); - } - L->top++; - return 1; -} - -/* -- Base library: miscellaneous functions ------------------------------- */ - -LJLIB_PUSH(top-2) /* Upvalue holds weak table. */ -LJLIB_CF(newproxy) -{ - lua_settop(L, 1); - lua_newuserdata(L, 0); - if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */ - return 1; - } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */ - lua_newtable(L); - lua_pushvalue(L, -1); - lua_pushboolean(L, 1); - lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */ - } else { /* newproxy(proxy): inherit metatable. 
*/ - int validproxy = 0; - if (lua_getmetatable(L, 1)) { - lua_rawget(L, lua_upvalueindex(1)); - validproxy = lua_toboolean(L, -1); - lua_pop(L, 1); - } - if (!validproxy) - lj_err_arg(L, 1, LJ_ERR_NOPROXY); - lua_getmetatable(L, 1); - } - lua_setmetatable(L, 2); - return 1; -} - -LJLIB_PUSH("tostring") -LJLIB_CF(print) -{ - ptrdiff_t i, nargs = L->top - L->base; - cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1))); - int shortcut; - if (tv && !tvisnil(tv)) { - copyTV(L, L->top++, tv); - } else { - setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1))); - lua_gettable(L, LUA_GLOBALSINDEX); - tv = L->top-1; - } - shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring); - for (i = 0; i < nargs; i++) { - cTValue *o = &L->base[i]; - char buf[STRFMT_MAXBUF_NUM]; - const char *str; - size_t size; - MSize len; - if (shortcut && (str = lj_strfmt_wstrnum(buf, o, &len)) != NULL) { - size = len; - } else { - copyTV(L, L->top+1, o); - copyTV(L, L->top, L->top-1); - L->top += 2; - lua_call(L, 1, 1); - str = lua_tolstring(L, -1, &size); - if (!str) - lj_err_caller(L, LJ_ERR_PRTOSTR); - L->top--; - } - if (i) - putchar('\t'); - fwrite(str, 1, size, stdout); - } - putchar('\n'); - return 0; -} - -LJLIB_PUSH(top-3) -LJLIB_SET(_VERSION) - -#include "lj_libdef.h" - -/* -- Coroutine library --------------------------------------------------- */ - -#define LJLIB_MODULE_coroutine - -LJLIB_CF(coroutine_status) -{ - const char *s; - lua_State *co; - if (!(L->top > L->base && tvisthread(L->base))) - lj_err_arg(L, 1, LJ_ERR_NOCORO); - co = threadV(L->base); - if (co == L) s = "running"; - else if (co->status == LUA_YIELD) s = "suspended"; - else if (co->status != 0) s = "dead"; - else if (co->base > tvref(co->stack)+1+LJ_FR2) s = "normal"; - else if (co->top == co->base) s = "dead"; - else s = "suspended"; - lua_pushstring(L, s); - return 1; -} - -LJLIB_CF(coroutine_running) -{ -#if LJ_52 - int ismain = lua_pushthread(L); - setboolV(L->top++, ismain); - return 2; -#else - if (lua_pushthread(L)) - setnilV(L->top++); - return 1; -#endif -} - -LJLIB_CF(coroutine_create) -{ - lua_State *L1; - if (!(L->base < L->top && tvisfunc(L->base))) - lj_err_argt(L, 1, LUA_TFUNCTION); - L1 = lua_newthread(L); - setfuncV(L, L1->top++, funcV(L->base)); - return 1; -} - -LJLIB_ASM(coroutine_yield) -{ - lj_err_caller(L, LJ_ERR_CYIELD); - return FFH_UNREACHABLE; -} - -static int ffh_resume(lua_State *L, lua_State *co, int wrap) -{ - if (co->cframe != NULL || co->status > LUA_YIELD || - (co->status == 0 && co->top == co->base)) { - ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD; - if (wrap) lj_err_caller(L, em); - setboolV(L->base-1-LJ_FR2, 0); - setstrV(L, L->base-LJ_FR2, lj_err_str(L, em)); - return FFH_RES(2); - } - lj_state_growstack(co, (MSize)(L->top - L->base)); - return FFH_RETRY; -} - -LJLIB_ASM(coroutine_resume) -{ - if (!(L->top > L->base && tvisthread(L->base))) - lj_err_arg(L, 1, LJ_ERR_NOCORO); - return ffh_resume(L, threadV(L->base), 0); -} - -LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux) -{ - return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1); -} - -/* Inline declarations. */ -LJ_ASMF void lj_ff_coroutine_wrap_aux(void); -#if !(LJ_TARGET_MIPS && defined(ljamalg_c)) -LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, - lua_State *co); -#endif - -/* Error handler, called from assembler VM. 
*/ -void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co) -{ - co->top--; copyTV(L, L->top, co->top); L->top++; - if (tvisstr(L->top-1)) - lj_err_callermsg(L, strVdata(L->top-1)); - else - lj_err_run(L); -} - -/* Forward declaration. */ -static void setpc_wrap_aux(lua_State *L, GCfunc *fn); - -LJLIB_CF(coroutine_wrap) -{ - GCfunc *fn; - lj_cf_coroutine_create(L); - fn = lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1); - setpc_wrap_aux(L, fn); - return 1; -} - -#include "lj_libdef.h" - -/* Fix the PC of wrap_aux. Really ugly workaround. */ -static void setpc_wrap_aux(lua_State *L, GCfunc *fn) -{ - setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]); -} - -/* ------------------------------------------------------------------------ */ - -static void newproxy_weaktable(lua_State *L) -{ - /* NOBARRIER: The table is new (marked white). */ - GCtab *t = lj_tab_new(L, 0, 1); - settabV(L, L->top++, t); - setgcref(t->metatable, obj2gco(t)); - setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")), - lj_str_newlit(L, "kv")); - t->nomm = (uint8_t)(~(1u<env); - settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env); - lua_pushliteral(L, LUA_VERSION); /* top-3. */ - newproxy_weaktable(L); /* top-2. */ - LJ_LIB_REG(L, "_G", base); - LJ_LIB_REG(L, LUA_COLIBNAME, coroutine); - return 2; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_bit.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_bit.c deleted file mode 100644 index 55cb2a845a8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_bit.c +++ /dev/null @@ -1,180 +0,0 @@ -/* -** Bit manipulation library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lib_bit_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_cdata.h" -#include "lj_cconv.h" -#include "lj_carith.h" -#endif -#include "lj_ff.h" -#include "lj_lib.h" - -/* ------------------------------------------------------------------------ */ - -#define LJLIB_MODULE_bit - -#if LJ_HASFFI -static int bit_result64(lua_State *L, CTypeID id, uint64_t x) -{ - GCcdata *cd = lj_cdata_new_(L, id, 8); - *(uint64_t *)cdataptr(cd) = x; - setcdataV(L, L->base-1-LJ_FR2, cd); - return FFH_RES(1); -} -#else -static int32_t bit_checkbit(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && lj_strscan_numberobj(o))) - lj_err_argt(L, narg, LUA_TNUMBER); - if (LJ_LIKELY(tvisint(o))) { - return intV(o); - } else { - int32_t i = lj_num2bit(numV(o)); - if (LJ_DUALNUM) setintV(o, i); - return i; - } -} -#endif - -LJLIB_ASM(bit_tobit) LJLIB_REC(bit_tobit) -{ -#if LJ_HASFFI - CTypeID id = 0; - setintV(L->base-1-LJ_FR2, (int32_t)lj_carith_check64(L, 1, &id)); - return FFH_RES(1); -#else - lj_lib_checknumber(L, 1); - return FFH_RETRY; -#endif -} - -LJLIB_ASM(bit_bnot) LJLIB_REC(bit_unary IR_BNOT) -{ -#if LJ_HASFFI - CTypeID id = 0; - uint64_t x = lj_carith_check64(L, 1, &id); - return id ? bit_result64(L, id, ~x) : FFH_RETRY; -#else - lj_lib_checknumber(L, 1); - return FFH_RETRY; -#endif -} - -LJLIB_ASM(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP) -{ -#if LJ_HASFFI - CTypeID id = 0; - uint64_t x = lj_carith_check64(L, 1, &id); - return id ? 
bit_result64(L, id, lj_bswap64(x)) : FFH_RETRY; -#else - lj_lib_checknumber(L, 1); - return FFH_RETRY; -#endif -} - -LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL) -{ -#if LJ_HASFFI - CTypeID id = 0, id2 = 0; - uint64_t x = lj_carith_check64(L, 1, &id); - int32_t sh = (int32_t)lj_carith_check64(L, 2, &id2); - if (id) { - x = lj_carith_shift64(x, sh, curr_func(L)->c.ffid - (int)FF_bit_lshift); - return bit_result64(L, id, x); - } - if (id2) setintV(L->base+1, sh); - return FFH_RETRY; -#else - lj_lib_checknumber(L, 1); - bit_checkbit(L, 2); - return FFH_RETRY; -#endif -} -LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR) -LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR) -LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL) -LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR) - -LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND) -{ -#if LJ_HASFFI - CTypeID id = 0; - TValue *o = L->base, *top = L->top; - int i = 0; - do { lj_carith_check64(L, ++i, &id); } while (++o < top); - if (id) { - CTState *cts = ctype_cts(L); - CType *ct = ctype_get(cts, id); - int op = curr_func(L)->c.ffid - (int)FF_bit_bor; - uint64_t x, y = op >= 0 ? 0 : ~(uint64_t)0; - o = L->base; - do { - lj_cconv_ct_tv(cts, ct, (uint8_t *)&x, o, 0); - if (op < 0) y &= x; else if (op == 0) y |= x; else y ^= x; - } while (++o < top); - return bit_result64(L, id, y); - } - return FFH_RETRY; -#else - int i = 0; - do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); - return FFH_RETRY; -#endif -} -LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR) -LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR) - -/* ------------------------------------------------------------------------ */ - -LJLIB_CF(bit_tohex) LJLIB_REC(.) -{ -#if LJ_HASFFI - CTypeID id = 0, id2 = 0; - uint64_t b = lj_carith_check64(L, 1, &id); - int32_t n = L->base+1>=L->top ? (id ? 16 : 8) : - (int32_t)lj_carith_check64(L, 2, &id2); -#else - uint32_t b = (uint32_t)bit_checkbit(L, 1); - int32_t n = L->base+1>=L->top ? 8 : bit_checkbit(L, 2); -#endif - SBuf *sb = lj_buf_tmp_(L); - SFormat sf = (STRFMT_UINT|STRFMT_T_HEX); - if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; } - sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC); -#if LJ_HASFFI - if (n < 16) b &= ((uint64_t)1 << 4*n)-1; -#else - if (n < 8) b &= (1u << 4*n)-1; -#endif - sb = lj_strfmt_putfxint(sb, sf, b); - setstrV(L, L->top-1, lj_buf_str(L, sb)); - lj_gc_check(L); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -#include "lj_libdef.h" - -LUALIB_API int luaopen_bit(lua_State *L) -{ - LJ_LIB_REG(L, LUA_BITLIBNAME, bit); - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_debug.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_debug.c deleted file mode 100644 index b610fb4dcdd..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_debug.c +++ /dev/null @@ -1,405 +0,0 @@ -/* -** Debug library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h -*/ - -#define lib_debug_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_lib.h" - -/* ------------------------------------------------------------------------ */ - -#define LJLIB_MODULE_debug - -LJLIB_CF(debug_getregistry) -{ - copyTV(L, L->top++, registry(L)); - return 1; -} - -LJLIB_CF(debug_getmetatable) LJLIB_REC(.) -{ - lj_lib_checkany(L, 1); - if (!lua_getmetatable(L, 1)) { - setnilV(L->top-1); - } - return 1; -} - -LJLIB_CF(debug_setmetatable) -{ - lj_lib_checktabornil(L, 2); - L->top = L->base+2; - lua_setmetatable(L, 1); -#if !LJ_52 - setboolV(L->top-1, 1); -#endif - return 1; -} - -LJLIB_CF(debug_getfenv) -{ - lj_lib_checkany(L, 1); - lua_getfenv(L, 1); - return 1; -} - -LJLIB_CF(debug_setfenv) -{ - lj_lib_checktab(L, 2); - L->top = L->base+2; - if (!lua_setfenv(L, 1)) - lj_err_caller(L, LJ_ERR_SETFENV); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -static void settabss(lua_State *L, const char *i, const char *v) -{ - lua_pushstring(L, v); - lua_setfield(L, -2, i); -} - -static void settabsi(lua_State *L, const char *i, int v) -{ - lua_pushinteger(L, v); - lua_setfield(L, -2, i); -} - -static void settabsb(lua_State *L, const char *i, int v) -{ - lua_pushboolean(L, v); - lua_setfield(L, -2, i); -} - -static lua_State *getthread(lua_State *L, int *arg) -{ - if (L->base < L->top && tvisthread(L->base)) { - *arg = 1; - return threadV(L->base); - } else { - *arg = 0; - return L; - } -} - -static void treatstackoption(lua_State *L, lua_State *L1, const char *fname) -{ - if (L == L1) { - lua_pushvalue(L, -2); - lua_remove(L, -3); - } - else - lua_xmove(L1, L, 1); - lua_setfield(L, -2, fname); -} - -LJLIB_CF(debug_getinfo) -{ - lj_Debug ar; - int arg, opt_f = 0, opt_L = 0; - lua_State *L1 = getthread(L, &arg); - const char *options = luaL_optstring(L, arg+2, "flnSu"); - if (lua_isnumber(L, arg+1)) { - if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), (lua_Debug *)&ar)) { - setnilV(L->top-1); - return 1; - } - } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) { - options = lua_pushfstring(L, ">%s", options); - setfuncV(L1, L1->top++, funcV(L->base+arg)); - } else { - lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL); - } - if (!lj_debug_getinfo(L1, options, &ar, 1)) - lj_err_arg(L, arg+2, LJ_ERR_INVOPT); - lua_createtable(L, 0, 16); /* Create result table. */ - for (; *options; options++) { - switch (*options) { - case 'S': - settabss(L, "source", ar.source); - settabss(L, "short_src", ar.short_src); - settabsi(L, "linedefined", ar.linedefined); - settabsi(L, "lastlinedefined", ar.lastlinedefined); - settabss(L, "what", ar.what); - break; - case 'l': - settabsi(L, "currentline", ar.currentline); - break; - case 'u': - settabsi(L, "nups", ar.nups); - settabsi(L, "nparams", ar.nparams); - settabsb(L, "isvararg", ar.isvararg); - break; - case 'n': - settabss(L, "name", ar.name); - settabss(L, "namewhat", ar.namewhat); - break; - case 'f': opt_f = 1; break; - case 'L': opt_L = 1; break; - default: break; - } - } - if (opt_L) treatstackoption(L, L1, "activelines"); - if (opt_f) treatstackoption(L, L1, "func"); - return 1; /* Return result table. 
*/ -} - -LJLIB_CF(debug_getlocal) -{ - int arg; - lua_State *L1 = getthread(L, &arg); - lua_Debug ar; - const char *name; - int slot = lj_lib_checkint(L, arg+2); - if (tvisfunc(L->base+arg)) { - L->top = L->base+arg+1; - lua_pushstring(L, lua_getlocal(L, NULL, slot)); - return 1; - } - if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar)) - lj_err_arg(L, arg+1, LJ_ERR_LVLRNG); - name = lua_getlocal(L1, &ar, slot); - if (name) { - lua_xmove(L1, L, 1); - lua_pushstring(L, name); - lua_pushvalue(L, -2); - return 2; - } else { - setnilV(L->top-1); - return 1; - } -} - -LJLIB_CF(debug_setlocal) -{ - int arg; - lua_State *L1 = getthread(L, &arg); - lua_Debug ar; - TValue *tv; - if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar)) - lj_err_arg(L, arg+1, LJ_ERR_LVLRNG); - tv = lj_lib_checkany(L, arg+3); - copyTV(L1, L1->top++, tv); - lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2))); - return 1; -} - -static int debug_getupvalue(lua_State *L, int get) -{ - int32_t n = lj_lib_checkint(L, 2); - const char *name; - lj_lib_checkfunc(L, 1); - name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n); - if (name) { - lua_pushstring(L, name); - if (!get) return 1; - copyTV(L, L->top, L->top-2); - L->top++; - return 2; - } - return 0; -} - -LJLIB_CF(debug_getupvalue) -{ - return debug_getupvalue(L, 1); -} - -LJLIB_CF(debug_setupvalue) -{ - lj_lib_checkany(L, 3); - return debug_getupvalue(L, 0); -} - -LJLIB_CF(debug_upvalueid) -{ - GCfunc *fn = lj_lib_checkfunc(L, 1); - int32_t n = lj_lib_checkint(L, 2) - 1; - if ((uint32_t)n >= fn->l.nupvalues) - lj_err_arg(L, 2, LJ_ERR_IDXRNG); - setlightudV(L->top-1, isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) : - (void *)&fn->c.upvalue[n]); - return 1; -} - -LJLIB_CF(debug_upvaluejoin) -{ - GCfunc *fn[2]; - GCRef *p[2]; - int i; - for (i = 0; i < 2; i++) { - int32_t n; - fn[i] = lj_lib_checkfunc(L, 2*i+1); - if (!isluafunc(fn[i])) - lj_err_arg(L, 2*i+1, LJ_ERR_NOLFUNC); - n = lj_lib_checkint(L, 2*i+2) - 1; - if ((uint32_t)n >= fn[i]->l.nupvalues) - lj_err_arg(L, 2*i+2, LJ_ERR_IDXRNG); - p[i] = &fn[i]->l.uvptr[n]; - } - setgcrefr(*p[0], *p[1]); - lj_gc_objbarrier(L, fn[0], gcref(*p[1])); - return 0; -} - -#if LJ_52 -LJLIB_CF(debug_getuservalue) -{ - TValue *o = L->base; - if (o < L->top && tvisudata(o)) - settabV(L, o, tabref(udataV(o)->env)); - else - setnilV(o); - L->top = o+1; - return 1; -} - -LJLIB_CF(debug_setuservalue) -{ - TValue *o = L->base; - if (!(o < L->top && tvisudata(o))) - lj_err_argt(L, 1, LUA_TUSERDATA); - if (!(o+1 < L->top && tvistab(o+1))) - lj_err_argt(L, 2, LUA_TTABLE); - L->top = o+2; - lua_setfenv(L, 1); - return 1; -} -#endif - -/* ------------------------------------------------------------------------ */ - -static const char KEY_HOOK = 'h'; - -static void hookf(lua_State *L, lua_Debug *ar) -{ - static const char *const hooknames[] = - {"call", "return", "line", "count", "tail return"}; - lua_pushlightuserdata(L, (void *)&KEY_HOOK); - lua_rawget(L, LUA_REGISTRYINDEX); - if (lua_isfunction(L, -1)) { - lua_pushstring(L, hooknames[(int)ar->event]); - if (ar->currentline >= 0) - lua_pushinteger(L, ar->currentline); - else lua_pushnil(L); - lua_call(L, 2, 0); - } -} - -static int makemask(const char *smask, int count) -{ - int mask = 0; - if (strchr(smask, 'c')) mask |= LUA_MASKCALL; - if (strchr(smask, 'r')) mask |= LUA_MASKRET; - if (strchr(smask, 'l')) mask |= LUA_MASKLINE; - if (count > 0) mask |= LUA_MASKCOUNT; - return mask; -} - -static char *unmakemask(int mask, char *smask) -{ - int i = 0; - if (mask & 
LUA_MASKCALL) smask[i++] = 'c'; - if (mask & LUA_MASKRET) smask[i++] = 'r'; - if (mask & LUA_MASKLINE) smask[i++] = 'l'; - smask[i] = '\0'; - return smask; -} - -LJLIB_CF(debug_sethook) -{ - int arg, mask, count; - lua_Hook func; - (void)getthread(L, &arg); - if (lua_isnoneornil(L, arg+1)) { - lua_settop(L, arg+1); - func = NULL; mask = 0; count = 0; /* turn off hooks */ - } else { - const char *smask = luaL_checkstring(L, arg+2); - luaL_checktype(L, arg+1, LUA_TFUNCTION); - count = luaL_optint(L, arg+3, 0); - func = hookf; mask = makemask(smask, count); - } - lua_pushlightuserdata(L, (void *)&KEY_HOOK); - lua_pushvalue(L, arg+1); - lua_rawset(L, LUA_REGISTRYINDEX); - lua_sethook(L, func, mask, count); - return 0; -} - -LJLIB_CF(debug_gethook) -{ - char buff[5]; - int mask = lua_gethookmask(L); - lua_Hook hook = lua_gethook(L); - if (hook != NULL && hook != hookf) { /* external hook? */ - lua_pushliteral(L, "external hook"); - } else { - lua_pushlightuserdata(L, (void *)&KEY_HOOK); - lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */ - } - lua_pushstring(L, unmakemask(mask, buff)); - lua_pushinteger(L, lua_gethookcount(L)); - return 3; -} - -/* ------------------------------------------------------------------------ */ - -LJLIB_CF(debug_debug) -{ - for (;;) { - char buffer[250]; - fputs("lua_debug> ", stderr); - if (fgets(buffer, sizeof(buffer), stdin) == 0 || - strcmp(buffer, "cont\n") == 0) - return 0; - if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") || - lua_pcall(L, 0, 0, 0)) { - fputs(lua_tostring(L, -1), stderr); - fputs("\n", stderr); - } - lua_settop(L, 0); /* remove eventual returns */ - } -} - -/* ------------------------------------------------------------------------ */ - -#define LEVELS1 12 /* size of the first part of the stack */ -#define LEVELS2 10 /* size of the second part of the stack */ - -LJLIB_CF(debug_traceback) -{ - int arg; - lua_State *L1 = getthread(L, &arg); - const char *msg = lua_tostring(L, arg+1); - if (msg == NULL && L->top > L->base+arg) - L->top = L->base+arg+1; - else - luaL_traceback(L, L1, msg, lj_lib_optint(L, arg+2, (L == L1))); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -#include "lj_libdef.h" - -LUALIB_API int luaopen_debug(lua_State *L) -{ - LJ_LIB_REG(L, LUA_DBLIBNAME, debug); - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_ffi.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_ffi.c deleted file mode 100644 index b2b2d37ff7b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_ffi.c +++ /dev/null @@ -1,872 +0,0 @@ -/* -** FFI library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lib_ffi_c -#define LUA_LIB - -#include - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_meta.h" -#include "lj_ctype.h" -#include "lj_cparse.h" -#include "lj_cdata.h" -#include "lj_cconv.h" -#include "lj_carith.h" -#include "lj_ccall.h" -#include "lj_ccallback.h" -#include "lj_clib.h" -#include "lj_strfmt.h" -#include "lj_ff.h" -#include "lj_lib.h" - -/* -- C type checks ------------------------------------------------------- */ - -/* Check first argument for a C type and returns its ID. 
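
The debug library that closes just above (luaopen_debug) is driven from Lua as in this hedged sketch; the hook mask and getinfo option string are arbitrary choices from the sets parsed by makemask() and lj_debug_getinfo():

  local info = debug.getinfo(print, "Sn")    -- options are a subset of "flnSu"
  print(info.what, info.source)

  debug.sethook(function(event, line)
    io.write(event, " ", tostring(line), "\n")
  end, "crl")                                -- 'c' = call, 'r' = return, 'l' = line
  -- ... run some code under the hook ...
  debug.sethook()                            -- no arguments turns the hook off

  print(debug.traceback("where am I", 2))
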
*/ -static CTypeID ffi_checkctype(lua_State *L, CTState *cts, TValue *param) -{ - TValue *o = L->base; - if (!(o < L->top)) { - err_argtype: - lj_err_argtype(L, 1, "C type"); - } - if (tvisstr(o)) { /* Parse an abstract C type declaration. */ - GCstr *s = strV(o); - CPState cp; - int errcode; - cp.L = L; - cp.cts = cts; - cp.srcname = strdata(s); - cp.p = strdata(s); - cp.param = param; - cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT; - errcode = lj_cparse(&cp); - if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */ - return cp.val.id; - } else { - GCcdata *cd; - if (!tviscdata(o)) goto err_argtype; - if (param && param < L->top) lj_err_arg(L, 1, LJ_ERR_FFI_NUMPARAM); - cd = cdataV(o); - return cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->ctypeid; - } -} - -/* Check argument for C data and return it. */ -static GCcdata *ffi_checkcdata(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && tviscdata(o))) - lj_err_argt(L, narg, LUA_TCDATA); - return cdataV(o); -} - -/* Convert argument to C pointer. */ -static void *ffi_checkptr(lua_State *L, int narg, CTypeID id) -{ - CTState *cts = ctype_cts(L); - TValue *o = L->base + narg-1; - void *p; - if (o >= L->top) - lj_err_arg(L, narg, LJ_ERR_NOVAL); - lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg)); - return p; -} - -/* Convert argument to int32_t. */ -static int32_t ffi_checkint(lua_State *L, int narg) -{ - CTState *cts = ctype_cts(L); - TValue *o = L->base + narg-1; - int32_t i; - if (o >= L->top) - lj_err_arg(L, narg, LJ_ERR_NOVAL); - lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, - CCF_ARG(narg)); - return i; -} - -/* -- C type metamethods -------------------------------------------------- */ - -#define LJLIB_MODULE_ffi_meta - -/* Handle ctype __index/__newindex metamethods. */ -static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm) -{ - CTypeID id = ctype_typeid(cts, ct); - cTValue *tv = lj_ctype_meta(cts, id, mm); - TValue *base = L->base; - if (!tv) { - const char *s; - err_index: - s = strdata(lj_ctype_repr(L, id, NULL)); - if (tvisstr(L->base+1)) { - lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1)); - } else { - const char *key = tviscdata(L->base+1) ? - strdata(lj_ctype_repr(L, cdataV(L->base+1)->ctypeid, NULL)) : - lj_typename(L->base+1); - lj_err_callerv(L, LJ_ERR_FFI_BADIDXW, s, key); - } - } - if (!tvisfunc(tv)) { - if (mm == MM_index) { - cTValue *o = lj_meta_tget(L, tv, base+1); - if (o) { - if (tvisnil(o)) goto err_index; - copyTV(L, L->top-1, o); - return 1; - } - } else { - TValue *o = lj_meta_tset(L, tv, base+1); - if (o) { - copyTV(L, o, base+2); - return 0; - } - } - copyTV(L, base, L->top); - tv = L->top-1-LJ_FR2; - } - return lj_meta_tailcall(L, tv); -} - -LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0) -{ - CTState *cts = ctype_cts(L); - CTInfo qual = 0; - CType *ct; - uint8_t *p; - TValue *o = L->base; - if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. */ - lj_err_argt(L, 1, LUA_TCDATA); - ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual); - if ((qual & 1)) - return ffi_index_meta(L, cts, ct, MM_index); - if (lj_cdata_get(cts, ct, L->top-1, p)) - lj_gc_check(L); - return 1; -} - -LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1) -{ - CTState *cts = ctype_cts(L); - CTInfo qual = 0; - CType *ct; - uint8_t *p; - TValue *o = L->base; - if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. 
*/ - lj_err_argt(L, 1, LUA_TCDATA); - ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual); - if ((qual & 1)) { - if ((qual & CTF_CONST)) - lj_err_caller(L, LJ_ERR_FFI_WRCONST); - return ffi_index_meta(L, cts, ct, MM_newindex); - } - lj_cdata_set(cts, ct, p, o+2, qual); - return 0; -} - -/* Common handler for cdata arithmetic. */ -static int ffi_arith(lua_State *L) -{ - MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq); - return lj_carith_op(L, mm); -} - -/* The following functions must be in contiguous ORDER MM. */ -LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat) -{ - return ffi_arith(L); -} - -/* Forward declaration. */ -static int lj_cf_ffi_new(lua_State *L); - -LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call) -{ - CTState *cts = ctype_cts(L); - GCcdata *cd = ffi_checkcdata(L, 1); - CTypeID id = cd->ctypeid; - CType *ct; - cTValue *tv; - MMS mm = MM_call; - if (cd->ctypeid == CTID_CTYPEID) { - id = *(CTypeID *)cdataptr(cd); - mm = MM_new; - } else { - int ret = lj_ccall_func(L, cd); - if (ret >= 0) - return ret; - } - /* Handle ctype __call/__new metamethod. */ - ct = ctype_raw(cts, id); - if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); - tv = lj_ctype_meta(cts, id, mm); - if (tv) - return lj_meta_tailcall(L, tv); - else if (mm == MM_call) - lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL))); - return lj_cf_ffi_new(L); -} - -LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow) -{ - return ffi_arith(L); -} - -LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm) -{ - return ffi_arith(L); -} -/* End of contiguous ORDER MM. */ - -LJLIB_CF(ffi_meta___tostring) -{ - GCcdata *cd = ffi_checkcdata(L, 1); - const char *msg = "cdata<%s>: %p"; - CTypeID id = cd->ctypeid; - void *p = cdataptr(cd); - if (id == CTID_CTYPEID) { - msg = "ctype<%s>"; - id = *(CTypeID *)p; - } else { - CTState *cts = ctype_cts(L); - CType *ct = ctype_raw(cts, id); - if (ctype_isref(ct->info)) { - p = *(void **)p; - ct = ctype_rawchild(cts, ct); - } - if (ctype_iscomplex(ct->info)) { - setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size)); - goto checkgc; - } else if (ct->size == 8 && ctype_isinteger(ct->info)) { - setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd), - (ct->info & CTF_UNSIGNED))); - goto checkgc; - } else if (ctype_isfunc(ct->info)) { - p = *(void **)p; - } else if (ctype_isenum(ct->info)) { - msg = "cdata<%s>: %d"; - p = (void *)(uintptr_t)*(uint32_t **)p; - } else { - if (ctype_isptr(ct->info)) { - p = cdata_getptr(p, ct->size); - ct = ctype_rawchild(cts, ct); - } - if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) { - /* Handle ctype __tostring metamethod. 
*/ - cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring); - if (tv) - return lj_meta_tailcall(L, tv); - } - } - } - lj_strfmt_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p); -checkgc: - lj_gc_check(L); - return 1; -} - -static int ffi_pairs(lua_State *L, MMS mm) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkcdata(L, 1)->ctypeid; - CType *ct = ctype_raw(cts, id); - cTValue *tv; - if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); - tv = lj_ctype_meta(cts, id, mm); - if (!tv) - lj_err_callerv(L, LJ_ERR_FFI_BADMM, strdata(lj_ctype_repr(L, id, NULL)), - strdata(mmname_str(G(L), mm))); - return lj_meta_tailcall(L, tv); -} - -LJLIB_CF(ffi_meta___pairs) -{ - return ffi_pairs(L, MM_pairs); -} - -LJLIB_CF(ffi_meta___ipairs) -{ - return ffi_pairs(L, MM_ipairs); -} - -LJLIB_PUSH("ffi") LJLIB_SET(__metatable) - -#include "lj_libdef.h" - -/* -- C library metamethods ----------------------------------------------- */ - -#define LJLIB_MODULE_ffi_clib - -/* Index C library by a name. */ -static TValue *ffi_clib_index(lua_State *L) -{ - TValue *o = L->base; - CLibrary *cl; - if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)) - lj_err_argt(L, 1, LUA_TUSERDATA); - cl = (CLibrary *)uddata(udataV(o)); - if (!(o+1 < L->top && tvisstr(o+1))) - lj_err_argt(L, 2, LUA_TSTRING); - return lj_clib_index(L, cl, strV(o+1)); -} - -LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index 1) -{ - TValue *tv = ffi_clib_index(L); - if (tviscdata(tv)) { - CTState *cts = ctype_cts(L); - GCcdata *cd = cdataV(tv); - CType *s = ctype_get(cts, cd->ctypeid); - if (ctype_isextern(s->info)) { - CTypeID sid = ctype_cid(s->info); - void *sp = *(void **)cdataptr(cd); - CType *ct = ctype_raw(cts, sid); - if (lj_cconv_tv_ct(cts, ct, sid, L->top-1, sp)) - lj_gc_check(L); - return 1; - } - } - copyTV(L, L->top-1, tv); - return 1; -} - -LJLIB_CF(ffi_clib___newindex) LJLIB_REC(clib_index 0) -{ - TValue *tv = ffi_clib_index(L); - TValue *o = L->base+2; - if (o < L->top && tviscdata(tv)) { - CTState *cts = ctype_cts(L); - GCcdata *cd = cdataV(tv); - CType *d = ctype_get(cts, cd->ctypeid); - if (ctype_isextern(d->info)) { - CTInfo qual = 0; - for (;;) { /* Skip attributes and collect qualifiers. */ - d = ctype_child(cts, d); - if (!ctype_isattrib(d->info)) break; - if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size; - } - if (!((d->info|qual) & CTF_CONST)) { - lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0); - return 0; - } - } - } - lj_err_caller(L, LJ_ERR_FFI_WRCONST); - return 0; /* unreachable */ -} - -LJLIB_CF(ffi_clib___gc) -{ - TValue *o = L->base; - if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB) - lj_clib_unload((CLibrary *)uddata(udataV(o))); - return 0; -} - -#include "lj_libdef.h" - -/* -- Callback function metamethods --------------------------------------- */ - -#define LJLIB_MODULE_ffi_callback - -static int ffi_callback_set(lua_State *L, GCfunc *fn) -{ - GCcdata *cd = ffi_checkcdata(L, 1); - CTState *cts = ctype_cts(L); - CType *ct = ctype_raw(cts, cd->ctypeid); - if (ctype_isptr(ct->info) && (LJ_32 || ct->size == 8)) { - MSize slot = lj_ccallback_ptr2slot(cts, *(void **)cdataptr(cd)); - if (slot < cts->cb.sizeid && cts->cb.cbid[slot] != 0) { - GCtab *t = cts->miscmap; - TValue *tv = lj_tab_setint(L, t, (int32_t)slot); - if (fn) { - setfuncV(L, tv, fn); - lj_gc_anybarriert(L, t); - } else { - setnilV(tv); - cts->cb.cbid[slot] = 0; - cts->cb.topid = slot < cts->cb.topid ? 
slot : cts->cb.topid; - } - return 0; - } - } - lj_err_caller(L, LJ_ERR_FFI_BADCBACK); - return 0; -} - -LJLIB_CF(ffi_callback_free) -{ - return ffi_callback_set(L, NULL); -} - -LJLIB_CF(ffi_callback_set) -{ - GCfunc *fn = lj_lib_checkfunc(L, 2); - return ffi_callback_set(L, fn); -} - -LJLIB_PUSH(top-1) LJLIB_SET(__index) - -#include "lj_libdef.h" - -/* -- FFI library functions ----------------------------------------------- */ - -#define LJLIB_MODULE_ffi - -LJLIB_CF(ffi_cdef) -{ - GCstr *s = lj_lib_checkstr(L, 1); - CPState cp; - int errcode; - cp.L = L; - cp.cts = ctype_cts(L); - cp.srcname = strdata(s); - cp.p = strdata(s); - cp.param = L->base+1; - cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT; - errcode = lj_cparse(&cp); - if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */ - lj_gc_check(L); - return 0; -} - -LJLIB_CF(ffi_new) LJLIB_REC(.) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, NULL); - CType *ct = ctype_raw(cts, id); - CTSize sz; - CTInfo info = lj_ctype_info(cts, id, &sz); - TValue *o = L->base+1; - GCcdata *cd; - if ((info & CTF_VLA)) { - o++; - sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2)); - } - if (sz == CTSIZE_INVALID) - lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE); - if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN) - cd = lj_cdata_new(cts, id, sz); - else - cd = lj_cdata_newv(L, id, sz, ctype_align(info)); - setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */ - lj_cconv_ct_init(cts, ct, sz, cdataptr(cd), - o, (MSize)(L->top - o)); /* Initialize cdata. */ - if (ctype_isstruct(ct->info)) { - /* Handle ctype __gc metamethod. Use the fast lookup here. */ - cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)id); - if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) { - GCtab *t = cts->finalizer; - if (gcref(t->metatable)) { - /* Add to finalizer table, if still enabled. */ - copyTV(L, lj_tab_set(L, t, o-1), tv); - lj_gc_anybarriert(L, t); - cd->marked |= LJ_GC_CDATA_FIN; - } - } - } - L->top = o; /* Only return the cdata itself. */ - lj_gc_check(L); - return 1; -} - -LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, NULL); - CType *d = ctype_raw(cts, id); - TValue *o = lj_lib_checkany(L, 2); - L->top = o+1; /* Make sure this is the last item on the stack. */ - if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info))) - lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); - if (!(tviscdata(o) && cdataV(o)->ctypeid == id)) { - GCcdata *cd = lj_cdata_new(cts, id, d->size); - lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST); - setcdataV(L, o, cd); - lj_gc_check(L); - } - return 1; -} - -LJLIB_CF(ffi_typeof) LJLIB_REC(.) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, L->base+1); - GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4); - *(CTypeID *)cdataptr(cd) = id; - setcdataV(L, L->top-1, cd); - lj_gc_check(L); - return 1; -} - -/* Internal and unsupported API. */ -LJLIB_CF(ffi_typeinfo) -{ - CTState *cts = ctype_cts(L); - CTypeID id = (CTypeID)ffi_checkint(L, 1); - if (id > 0 && id < cts->top) { - CType *ct = ctype_get(cts, id); - GCtab *t; - lua_createtable(L, 0, 4); /* Increment hash size if fields are added. 
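
ffi.cdef, ffi.new, ffi.cast and ffi.typeof defined above form the declaration and constructor half of the FFI. A minimal sketch of how they combine; the struct name, fields and values are invented for the example:

  local ffi = require("ffi")
  ffi.cdef[[
    typedef struct { int x, y; } point_t;    /* hypothetical example type */
  ]]
  local p = ffi.new("point_t", { 3, 4 })     -- table or positional initializers
  local point_t = ffi.typeof("point_t")      -- cache the parsed ctype
  local q = point_t(1, 2)
  local buf = ffi.new("int[4]", { 10, 20, 30, 40 })
  local ip  = ffi.cast("const int *", buf)   -- arrays convert to pointers
  print(p.x, q.y, ip[2])                     --> 3   2   30
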
*/ - t = tabV(L->top-1); - setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "info")), (int32_t)ct->info); - if (ct->size != CTSIZE_INVALID) - setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "size")), (int32_t)ct->size); - if (ct->sib) - setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "sib")), (int32_t)ct->sib); - if (gcref(ct->name)) { - GCstr *s = gco2str(gcref(ct->name)); - setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "name")), s); - } - lj_gc_check(L); - return 1; - } - return 0; -} - -LJLIB_CF(ffi_istype) LJLIB_REC(.) -{ - CTState *cts = ctype_cts(L); - CTypeID id1 = ffi_checkctype(L, cts, NULL); - TValue *o = lj_lib_checkany(L, 2); - int b = 0; - if (tviscdata(o)) { - GCcdata *cd = cdataV(o); - CTypeID id2 = cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : - cd->ctypeid; - CType *ct1 = lj_ctype_rawref(cts, id1); - CType *ct2 = lj_ctype_rawref(cts, id2); - if (ct1 == ct2) { - b = 1; - } else if (ctype_type(ct1->info) == ctype_type(ct2->info) && - ct1->size == ct2->size) { - if (ctype_ispointer(ct1->info)) - b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL); - else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info)) - b = (((ct1->info ^ ct2->info) & ~(CTF_QUAL|CTF_LONG)) == 0); - } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) && - ct1 == ctype_rawchild(cts, ct2)) { - b = 1; - } - } - setboolV(L->top-1, b); - setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ - return 1; -} - -LJLIB_CF(ffi_sizeof) LJLIB_REC(ffi_xof FF_ffi_sizeof) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, NULL); - CTSize sz; - if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) { - sz = cdatavlen(cdataV(L->base)); - } else { - CType *ct = lj_ctype_rawref(cts, id); - if (ctype_isvltype(ct->info)) - sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2)); - else - sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID; - if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) { - setnilV(L->top-1); - return 1; - } - } - setintV(L->top-1, (int32_t)sz); - return 1; -} - -LJLIB_CF(ffi_alignof) LJLIB_REC(ffi_xof FF_ffi_alignof) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, NULL); - CTSize sz = 0; - CTInfo info = lj_ctype_info(cts, id, &sz); - setintV(L->top-1, 1 << ctype_align(info)); - return 1; -} - -LJLIB_CF(ffi_offsetof) LJLIB_REC(ffi_xof FF_ffi_offsetof) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, NULL); - GCstr *name = lj_lib_checkstr(L, 2); - CType *ct = lj_ctype_rawref(cts, id); - CTSize ofs; - if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) { - CType *fct = lj_ctype_getfield(cts, ct, name, &ofs); - if (fct) { - setintV(L->top-1, ofs); - if (ctype_isfield(fct->info)) { - return 1; - } else if (ctype_isbitfield(fct->info)) { - setintV(L->top++, ctype_bitpos(fct->info)); - setintV(L->top++, ctype_bitbsz(fct->info)); - return 3; - } - } - } - return 0; -} - -LJLIB_CF(ffi_errno) LJLIB_REC(.) -{ - int err = errno; - if (L->top > L->base) - errno = ffi_checkint(L, 1); - setintV(L->top++, err); - return 1; -} - -LJLIB_CF(ffi_string) LJLIB_REC(.) -{ - CTState *cts = ctype_cts(L); - TValue *o = lj_lib_checkany(L, 1); - const char *p; - size_t len; - if (o+1 < L->top && !tvisnil(o+1)) { - len = (size_t)ffi_checkint(L, 2); - lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o, - CCF_ARG(1)); - } else { - lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o, - CCF_ARG(1)); - len = strlen(p); - } - L->top = o+1; /* Make sure this is the last item on the stack. 
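
The introspection helpers above (ffi.sizeof, ffi.alignof, ffi.offsetof, ffi.istype, ffi.string) are queried from Lua as follows, continuing the hypothetical point_t/p of the previous sketch; the printed sizes are typical values and depend on the target ABI:

  print(ffi.sizeof("point_t"), ffi.alignof("point_t"))   --> 8   4   (typical)
  print(ffi.offsetof("point_t", "y"))                    --> 4   (typical)
  print(ffi.istype("point_t", p), ffi.istype("int", p))  --> true   false
  local s = ffi.string(ffi.cast("const char *", "abc"))  -- length defaults to strlen()
  print(#s)                                              --> 3
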
*/ - setstrV(L, o, lj_str_new(L, p, len)); - lj_gc_check(L); - return 1; -} - -LJLIB_CF(ffi_copy) LJLIB_REC(.) -{ - void *dp = ffi_checkptr(L, 1, CTID_P_VOID); - void *sp = ffi_checkptr(L, 2, CTID_P_CVOID); - TValue *o = L->base+1; - CTSize len; - if (tvisstr(o) && o+1 >= L->top) - len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */ - else - len = (CTSize)ffi_checkint(L, 3); - memcpy(dp, sp, len); - return 0; -} - -LJLIB_CF(ffi_fill) LJLIB_REC(.) -{ - void *dp = ffi_checkptr(L, 1, CTID_P_VOID); - CTSize len = (CTSize)ffi_checkint(L, 2); - int32_t fill = 0; - if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3); - memset(dp, fill, len); - return 0; -} - -#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be) - -/* Test ABI string. */ -LJLIB_CF(ffi_abi) LJLIB_REC(.) -{ - GCstr *s = lj_lib_checkstr(L, 1); - int b = 0; - switch (s->hash) { -#if LJ_64 - case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */ -#else - case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */ -#endif -#if LJ_ARCH_HASFPU - case H_(e33ee463,e33ee463): b = 1; break; /* fpu */ -#endif -#if LJ_ABI_SOFTFP - case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */ -#else - case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */ -#endif -#if LJ_ABI_EABI - case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */ -#endif -#if LJ_ABI_WIN - case H_(4ab624a8,4ab624a8): b = 1; break; /* win */ -#endif - case H_(3af93066,1f001464): b = 1; break; /* le/be */ -#if LJ_GC64 - case H_(9e89d2c9,13c83c92): b = 1; break; /* gc64 */ -#endif - default: - break; - } - setboolV(L->top-1, b); - setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ - return 1; -} - -#undef H_ - -LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */ - -LJLIB_CF(ffi_metatype) -{ - CTState *cts = ctype_cts(L); - CTypeID id = ffi_checkctype(L, cts, NULL); - GCtab *mt = lj_lib_checktab(L, 2); - GCtab *t = cts->miscmap; - CType *ct = ctype_get(cts, id); /* Only allow raw types. */ - TValue *tv; - GCcdata *cd; - if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) || - ctype_isvector(ct->info))) - lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); - tv = lj_tab_setinth(L, t, -(int32_t)id); - if (!tvisnil(tv)) - lj_err_caller(L, LJ_ERR_PROTMT); - settabV(L, tv, mt); - lj_gc_anybarriert(L, t); - cd = lj_cdata_new(cts, CTID_CTYPEID, 4); - *(CTypeID *)cdataptr(cd) = id; - setcdataV(L, L->top-1, cd); - lj_gc_check(L); - return 1; -} - -LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */ - -LJLIB_CF(ffi_gc) LJLIB_REC(.) -{ - GCcdata *cd = ffi_checkcdata(L, 1); - TValue *fin = lj_lib_checkany(L, 2); - CTState *cts = ctype_cts(L); - CType *ct = ctype_raw(cts, cd->ctypeid); - if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) || - ctype_isrefarray(ct->info))) - lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); - lj_cdata_setfin(L, cd, gcval(fin), itype(fin)); - L->top = L->base+1; /* Pass through the cdata object. */ - return 1; -} - -LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */ - -LJLIB_CF(ffi_load) -{ - GCstr *name = lj_lib_checkstr(L, 1); - int global = (L->base+1 < L->top && tvistruecond(L->base+1)); - lj_clib_load(L, tabref(curr_func(L)->c.env), name, global); - return 1; -} - -LJLIB_PUSH(top-4) LJLIB_SET(C) -LJLIB_PUSH(top-3) LJLIB_SET(os) -LJLIB_PUSH(top-2) LJLIB_SET(arch) - -#include "lj_libdef.h" - -/* ------------------------------------------------------------------------ */ - -/* Create special weak-keyed finalizer table. 
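
ffi.metatype, ffi.gc and ffi.load shown above connect cdata to metamethods, finalizers and shared libraries. A hedged sketch, still using the invented point_t; the malloc/free declarations assume the C runtime exposes them through the default ffi.C namespace, and "z" is a hypothetical library name:

  local pt = ffi.metatype("point_t", {
    __add = function(a, b) return ffi.new("point_t", a.x + b.x, a.y + b.y) end,
    __tostring = function(a) return "(" .. a.x .. "," .. a.y .. ")" end,
  })
  print(tostring(pt(1, 2) + pt(3, 4)))                --> (4,6)

  ffi.cdef[[ void *malloc(size_t size); void free(void *ptr); ]]
  local buf = ffi.gc(ffi.C.malloc(64), ffi.C.free)    -- finalizer runs when buf is collected
  local z = ffi.load("z")                             -- hypothetical: opens libz if installed
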
*/ -static GCtab *ffi_finalizer(lua_State *L) -{ - /* NOBARRIER: The table is new (marked white). */ - GCtab *t = lj_tab_new(L, 0, 1); - settabV(L, L->top++, t); - setgcref(t->metatable, obj2gco(t)); - setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")), - lj_str_newlit(L, "K")); - t->nomm = (uint8_t)(~(1u<top-1); - lj_gc_anybarriert(L, t); - } -} - -LUALIB_API int luaopen_ffi(lua_State *L) -{ - CTState *cts = lj_ctype_init(L); - settabV(L, L->top++, (cts->miscmap = lj_tab_new(L, 0, 1))); - cts->finalizer = ffi_finalizer(L); - LJ_LIB_REG(L, NULL, ffi_meta); - /* NOBARRIER: basemt is a GC root. */ - setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1))); - LJ_LIB_REG(L, NULL, ffi_clib); - LJ_LIB_REG(L, NULL, ffi_callback); - /* NOBARRIER: the key is new and lj_tab_newkey() handles the barrier. */ - settabV(L, lj_tab_setstr(L, cts->miscmap, &cts->g->strempty), tabV(L->top-1)); - L->top--; - lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */ - lua_pushliteral(L, LJ_OS_NAME); - lua_pushliteral(L, LJ_ARCH_NAME); - LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */ - ffi_register_module(L); - return 1; -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_init.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_init.c deleted file mode 100644 index 8e0d4428fd3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_init.c +++ /dev/null @@ -1,56 +0,0 @@ -/* -** Library initialization. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major parts taken verbatim from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lib_init_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_arch.h" - -static const luaL_Reg lj_lib_load[] = { - { "", luaopen_base }, - { LUA_LOADLIBNAME, luaopen_package }, - { LUA_TABLIBNAME, luaopen_table }, - { LUA_IOLIBNAME, luaopen_io }, - { LUA_OSLIBNAME, luaopen_os }, - { LUA_STRLIBNAME, luaopen_string }, - { LUA_MATHLIBNAME, luaopen_math }, - { LUA_DBLIBNAME, luaopen_debug }, - { LUA_BITLIBNAME, luaopen_bit }, - { LUA_JITLIBNAME, luaopen_jit }, - { LUA_BITLIBNAME_32, luaopen_bit32 }, - { NULL, NULL } -}; - -static const luaL_Reg lj_lib_preload[] = { -#if LJ_HASFFI - { LUA_FFILIBNAME, luaopen_ffi }, -#endif - { NULL, NULL } -}; - -LUALIB_API void luaL_openlibs(lua_State *L) -{ - const luaL_Reg *lib; - for (lib = lj_lib_load; lib->func; lib++) { - lua_pushcfunction(L, lib->func); - lua_pushstring(L, lib->name); - lua_call(L, 1, 0); - } - luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", - sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1); - for (lib = lj_lib_preload; lib->func; lib++) { - lua_pushcfunction(L, lib->func); - lua_setfield(L, -2, lib->name); - } - lua_pop(L, 1); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_io.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_io.c deleted file mode 100644 index 2aa834746e1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_io.c +++ /dev/null @@ -1,541 +0,0 @@ -/* -** I/O library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2011 Lua.org, PUC-Rio. 
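
lib_init.c, deleted just above, wires everything into luaL_openlibs(): base, package, table, io, os, string, math, debug, bit and jit (plus the name registered as LUA_BITLIBNAME_32) are opened eagerly, while ffi is only put into the preload table. From Lua that means:

  print(type(bit), type(jit))          --> table   table    (opened eagerly)
  print(type(package.preload.ffi))     --> function          (registered, not yet loaded)
  local ffi = require("ffi")           -- loads the FFI on first use
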
See Copyright Notice in lua.h -*/ - -#include -#include - -#define lib_io_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_state.h" -#include "lj_strfmt.h" -#include "lj_ff.h" -#include "lj_lib.h" - -/* Userdata payload for I/O file. */ -typedef struct IOFileUD { - FILE *fp; /* File handle. */ - uint32_t type; /* File type. */ -} IOFileUD; - -#define IOFILE_TYPE_FILE 0 /* Regular file. */ -#define IOFILE_TYPE_PIPE 1 /* Pipe. */ -#define IOFILE_TYPE_STDF 2 /* Standard file handle. */ -#define IOFILE_TYPE_MASK 3 - -#define IOFILE_FLAG_CLOSE 4 /* Close after io.lines() iterator. */ - -#define IOSTDF_UD(L, id) (&gcref(G(L)->gcroot[(id)])->ud) -#define IOSTDF_IOF(L, id) ((IOFileUD *)uddata(IOSTDF_UD(L, (id)))) - -/* -- Open/close helpers -------------------------------------------------- */ - -static IOFileUD *io_tofilep(lua_State *L) -{ - if (!(L->base < L->top && tvisudata(L->base) && - udataV(L->base)->udtype == UDTYPE_IO_FILE)) - lj_err_argtype(L, 1, "FILE*"); - return (IOFileUD *)uddata(udataV(L->base)); -} - -static IOFileUD *io_tofile(lua_State *L) -{ - IOFileUD *iof = io_tofilep(L); - if (iof->fp == NULL) - lj_err_caller(L, LJ_ERR_IOCLFL); - return iof; -} - -static FILE *io_stdfile(lua_State *L, ptrdiff_t id) -{ - IOFileUD *iof = IOSTDF_IOF(L, id); - if (iof->fp == NULL) - lj_err_caller(L, LJ_ERR_IOSTDCL); - return iof->fp; -} - -static IOFileUD *io_file_new(lua_State *L) -{ - IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); - GCudata *ud = udataV(L->top-1); - ud->udtype = UDTYPE_IO_FILE; - /* NOBARRIER: The GCudata is new (marked white). */ - setgcrefr(ud->metatable, curr_func(L)->c.env); - iof->fp = NULL; - iof->type = IOFILE_TYPE_FILE; - return iof; -} - -static IOFileUD *io_file_open(lua_State *L, const char *mode) -{ - const char *fname = strdata(lj_lib_checkstr(L, 1)); - IOFileUD *iof = io_file_new(L); - iof->fp = fopen(fname, mode); - if (iof->fp == NULL) - luaL_argerror(L, 1, lj_strfmt_pushf(L, "%s: %s", fname, strerror(errno))); - return iof; -} - -static int io_file_close(lua_State *L, IOFileUD *iof) -{ - int ok; - if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_FILE) { - ok = (fclose(iof->fp) == 0); - } else if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_PIPE) { - int stat = -1; -#if LJ_TARGET_POSIX - stat = pclose(iof->fp); -#elif LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE - stat = _pclose(iof->fp); -#else - lua_assert(0); - return 0; -#endif -#if LJ_52 - iof->fp = NULL; - return luaL_execresult(L, stat); -#else - ok = (stat != -1); -#endif - } else { - lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF); - setnilV(L->top++); - lua_pushliteral(L, "cannot close standard file"); - return 2; - } - iof->fp = NULL; - return luaL_fileresult(L, ok, NULL); -} - -/* -- Read/write helpers -------------------------------------------------- */ - -static int io_file_readnum(lua_State *L, FILE *fp) -{ - lua_Number d; - if (fscanf(fp, LUA_NUMBER_SCAN, &d) == 1) { - if (LJ_DUALNUM) { - int32_t i = lj_num2int(d); - if (d == (lua_Number)i && !tvismzero((cTValue *)&d)) { - setintV(L->top++, i); - return 1; - } - } - setnumV(L->top++, d); - return 1; - } else { - setnilV(L->top++); - return 0; - } -} - -static int io_file_readline(lua_State *L, FILE *fp, MSize chop) -{ - MSize m = LUAL_BUFFERSIZE, n = 0, ok = 0; - char *buf; - for (;;) { - buf = lj_buf_tmp(L, m); - if (fgets(buf+n, m-n, fp) == NULL) break; - n += 
(MSize)strlen(buf+n); - ok |= n; - if (n && buf[n-1] == '\n') { n -= chop; break; } - if (n >= m - 64) m += m; - } - setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); - lj_gc_check(L); - return (int)ok; -} - -static void io_file_readall(lua_State *L, FILE *fp) -{ - MSize m, n; - for (m = LUAL_BUFFERSIZE, n = 0; ; m += m) { - char *buf = lj_buf_tmp(L, m); - n += (MSize)fread(buf+n, 1, m-n, fp); - if (n != m) { - setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); - lj_gc_check(L); - return; - } - } -} - -static int io_file_readlen(lua_State *L, FILE *fp, MSize m) -{ - if (m) { - char *buf = lj_buf_tmp(L, m); - MSize n = (MSize)fread(buf, 1, m, fp); - setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); - lj_gc_check(L); - return (n > 0 || m == 0); - } else { - int c = getc(fp); - ungetc(c, fp); - setstrV(L, L->top++, &G(L)->strempty); - return (c != EOF); - } -} - -static int io_file_read(lua_State *L, FILE *fp, int start) -{ - int ok, n, nargs = (int)(L->top - L->base) - start; - clearerr(fp); - if (nargs == 0) { - ok = io_file_readline(L, fp, 1); - n = start+1; /* Return 1 result. */ - } else { - /* The results plus the buffers go on top of the args. */ - luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments"); - ok = 1; - for (n = start; nargs-- && ok; n++) { - if (tvisstr(L->base+n)) { - const char *p = strVdata(L->base+n); - if (p[0] != '*') - lj_err_arg(L, n+1, LJ_ERR_INVOPT); - if (p[1] == 'n') - ok = io_file_readnum(L, fp); - else if ((p[1] & ~0x20) == 'L') - ok = io_file_readline(L, fp, (p[1] == 'l')); - else if (p[1] == 'a') - io_file_readall(L, fp); - else - lj_err_arg(L, n+1, LJ_ERR_INVFMT); - } else if (tvisnumber(L->base+n)) { - ok = io_file_readlen(L, fp, (MSize)lj_lib_checkint(L, n+1)); - } else { - lj_err_arg(L, n+1, LJ_ERR_INVOPT); - } - } - } - if (ferror(fp)) - return luaL_fileresult(L, 0, NULL); - if (!ok) - setnilV(L->top-1); /* Replace last result with nil. */ - return n - start; -} - -static int io_file_write(lua_State *L, FILE *fp, int start) -{ - cTValue *tv; - int status = 1; - for (tv = L->base+start; tv < L->top; tv++) { - char buf[STRFMT_MAXBUF_NUM]; - MSize len; - const char *p = lj_strfmt_wstrnum(buf, tv, &len); - if (!p) - lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING); - status = status && (fwrite(p, 1, len, fp) == len); - } - if (LJ_52 && status) { - L->top = L->base+1; - if (start == 0) - setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_OUTPUT)); - return 1; - } - return luaL_fileresult(L, status, NULL); -} - -static int io_file_iter(lua_State *L) -{ - GCfunc *fn = curr_func(L); - IOFileUD *iof = uddata(udataV(&fn->c.upvalue[0])); - int n = fn->c.nupvalues - 1; - if (iof->fp == NULL) - lj_err_caller(L, LJ_ERR_IOCLFL); - L->top = L->base; - if (n) { /* Copy upvalues with options to stack. */ - if (n > LUAI_MAXCSTACK) - lj_err_caller(L, LJ_ERR_STKOV); - lj_state_checkstack(L, (MSize)n); - memcpy(L->top, &fn->c.upvalue[1], n*sizeof(TValue)); - L->top += n; - } - n = io_file_read(L, iof->fp, 0); - if (ferror(iof->fp)) - lj_err_callermsg(L, strVdata(L->top-2)); - if (tvisnil(L->base) && (iof->type & IOFILE_FLAG_CLOSE)) { - io_file_close(L, iof); /* Return values are ignored. 
*/ - return 0; - } - return n; -} - -static int io_file_lines(lua_State *L) -{ - int n = (int)(L->top - L->base); - if (n > LJ_MAX_UPVAL) - lj_err_caller(L, LJ_ERR_UNPACK); - lua_pushcclosure(L, io_file_iter, n); - return 1; -} - -/* -- I/O file methods ---------------------------------------------------- */ - -#define LJLIB_MODULE_io_method - -LJLIB_CF(io_method_close) -{ - IOFileUD *iof = L->base < L->top ? io_tofile(L) : - IOSTDF_IOF(L, GCROOT_IO_OUTPUT); - return io_file_close(L, iof); -} - -LJLIB_CF(io_method_read) -{ - return io_file_read(L, io_tofile(L)->fp, 1); -} - -LJLIB_CF(io_method_write) LJLIB_REC(io_write 0) -{ - return io_file_write(L, io_tofile(L)->fp, 1); -} - -LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0) -{ - return luaL_fileresult(L, fflush(io_tofile(L)->fp) == 0, NULL); -} - -LJLIB_CF(io_method_seek) -{ - FILE *fp = io_tofile(L)->fp; - int opt = lj_lib_checkopt(L, 2, 1, "\3set\3cur\3end"); - int64_t ofs = 0; - cTValue *o; - int res; - if (opt == 0) opt = SEEK_SET; - else if (opt == 1) opt = SEEK_CUR; - else if (opt == 2) opt = SEEK_END; - o = L->base+2; - if (o < L->top) { - if (tvisint(o)) - ofs = (int64_t)intV(o); - else if (tvisnum(o)) - ofs = (int64_t)numV(o); - else if (!tvisnil(o)) - lj_err_argt(L, 3, LUA_TNUMBER); - } -#if LJ_TARGET_POSIX - res = fseeko(fp, ofs, opt); -#elif _MSC_VER >= 1400 - res = _fseeki64(fp, ofs, opt); -#elif defined(__MINGW32__) - res = fseeko64(fp, ofs, opt); -#else - res = fseek(fp, (long)ofs, opt); -#endif - if (res) - return luaL_fileresult(L, 0, NULL); -#if LJ_TARGET_POSIX - ofs = ftello(fp); -#elif _MSC_VER >= 1400 - ofs = _ftelli64(fp); -#elif defined(__MINGW32__) - ofs = ftello64(fp); -#else - ofs = (int64_t)ftell(fp); -#endif - setint64V(L->top-1, ofs); - return 1; -} - -LJLIB_CF(io_method_setvbuf) -{ - FILE *fp = io_tofile(L)->fp; - int opt = lj_lib_checkopt(L, 2, -1, "\4full\4line\2no"); - size_t sz = (size_t)lj_lib_optint(L, 3, LUAL_BUFFERSIZE); - if (opt == 0) opt = _IOFBF; - else if (opt == 1) opt = _IOLBF; - else if (opt == 2) opt = _IONBF; - return luaL_fileresult(L, setvbuf(fp, NULL, opt, sz) == 0, NULL); -} - -LJLIB_CF(io_method_lines) -{ - io_tofile(L); - return io_file_lines(L); -} - -LJLIB_CF(io_method___gc) -{ - IOFileUD *iof = io_tofilep(L); - if (iof->fp != NULL && (iof->type & IOFILE_TYPE_MASK) != IOFILE_TYPE_STDF) - io_file_close(L, iof); - return 0; -} - -LJLIB_CF(io_method___tostring) -{ - IOFileUD *iof = io_tofilep(L); - if (iof->fp != NULL) - lua_pushfstring(L, "file (%p)", iof->fp); - else - lua_pushliteral(L, "file (closed)"); - return 1; -} - -LJLIB_PUSH(top-1) LJLIB_SET(__index) - -#include "lj_libdef.h" - -/* -- I/O library functions ----------------------------------------------- */ - -#define LJLIB_MODULE_io - -LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */ - -LJLIB_CF(io_open) -{ - const char *fname = strdata(lj_lib_checkstr(L, 1)); - GCstr *s = lj_lib_optstr(L, 2); - const char *mode = s ? strdata(s) : "r"; - IOFileUD *iof = io_file_new(L); - iof->fp = fopen(fname, mode); - return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, fname); -} - -LJLIB_CF(io_popen) -{ -#if LJ_TARGET_POSIX || (LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE) - const char *fname = strdata(lj_lib_checkstr(L, 1)); - GCstr *s = lj_lib_optstr(L, 2); - const char *mode = s ? strdata(s) : "r"; - IOFileUD *iof = io_file_new(L); - iof->type = IOFILE_TYPE_PIPE; -#if LJ_TARGET_POSIX - fflush(NULL); - iof->fp = popen(fname, mode); -#else - iof->fp = _popen(fname, mode); -#endif - return iof->fp != NULL ? 
1 : luaL_fileresult(L, 0, fname); -#else - return luaL_error(L, LUA_QL("popen") " not supported"); -#endif -} - -LJLIB_CF(io_tmpfile) -{ - IOFileUD *iof = io_file_new(L); -#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA - iof->fp = NULL; errno = ENOSYS; -#else - iof->fp = tmpfile(); -#endif - return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, NULL); -} - -LJLIB_CF(io_close) -{ - return lj_cf_io_method_close(L); -} - -LJLIB_CF(io_read) -{ - return io_file_read(L, io_stdfile(L, GCROOT_IO_INPUT), 0); -} - -LJLIB_CF(io_write) LJLIB_REC(io_write GCROOT_IO_OUTPUT) -{ - return io_file_write(L, io_stdfile(L, GCROOT_IO_OUTPUT), 0); -} - -LJLIB_CF(io_flush) LJLIB_REC(io_flush GCROOT_IO_OUTPUT) -{ - return luaL_fileresult(L, fflush(io_stdfile(L, GCROOT_IO_OUTPUT)) == 0, NULL); -} - -static int io_std_getset(lua_State *L, ptrdiff_t id, const char *mode) -{ - if (L->base < L->top && !tvisnil(L->base)) { - if (tvisudata(L->base)) { - io_tofile(L); - L->top = L->base+1; - } else { - io_file_open(L, mode); - } - /* NOBARRIER: The standard I/O handles are GC roots. */ - setgcref(G(L)->gcroot[id], gcV(L->top-1)); - } else { - setudataV(L, L->top++, IOSTDF_UD(L, id)); - } - return 1; -} - -LJLIB_CF(io_input) -{ - return io_std_getset(L, GCROOT_IO_INPUT, "r"); -} - -LJLIB_CF(io_output) -{ - return io_std_getset(L, GCROOT_IO_OUTPUT, "w"); -} - -LJLIB_CF(io_lines) -{ - if (L->base == L->top) setnilV(L->top++); - if (!tvisnil(L->base)) { /* io.lines(fname) */ - IOFileUD *iof = io_file_open(L, "r"); - iof->type = IOFILE_TYPE_FILE|IOFILE_FLAG_CLOSE; - L->top--; - setudataV(L, L->base, udataV(L->top)); - } else { /* io.lines() iterates over stdin. */ - setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_INPUT)); - } - return io_file_lines(L); -} - -LJLIB_CF(io_type) -{ - cTValue *o = lj_lib_checkany(L, 1); - if (!(tvisudata(o) && udataV(o)->udtype == UDTYPE_IO_FILE)) - setnilV(L->top++); - else if (((IOFileUD *)uddata(udataV(o)))->fp != NULL) - lua_pushliteral(L, "file"); - else - lua_pushliteral(L, "closed file"); - return 1; -} - -#include "lj_libdef.h" - -/* ------------------------------------------------------------------------ */ - -static GCobj *io_std_new(lua_State *L, FILE *fp, const char *name) -{ - IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); - GCudata *ud = udataV(L->top-1); - ud->udtype = UDTYPE_IO_FILE; - /* NOBARRIER: The GCudata is new (marked white). */ - setgcref(ud->metatable, gcV(L->top-3)); - iof->fp = fp; - iof->type = IOFILE_TYPE_STDF; - lua_setfield(L, -2, name); - return obj2gco(ud); -} - -LUALIB_API int luaopen_io(lua_State *L) -{ - LJ_LIB_REG(L, NULL, io_method); - copyTV(L, L->top, L->top-1); L->top++; - lua_setfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE); - LJ_LIB_REG(L, LUA_IOLIBNAME, io); - setgcref(G(L)->gcroot[GCROOT_IO_INPUT], io_std_new(L, stdin, "stdin")); - setgcref(G(L)->gcroot[GCROOT_IO_OUTPUT], io_std_new(L, stdout, "stdout")); - io_std_new(L, stderr, "stderr"); - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_jit.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_jit.c deleted file mode 100644 index 178ef249df3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_jit.c +++ /dev/null @@ -1,767 +0,0 @@ -/* -** JIT library. -** Copyright (C) 2005-2015 Mike Pall. 
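
lib_io.c, which ends just above (luaopen_io), implements the io library on top of stdio. The read formats ("*n", "*l"/"*L", "*a" or a byte count), the seek whence strings and the line iterator are used from Lua like this; the file name is a placeholder:

  local f = assert(io.open("example.txt", "r"))   -- placeholder file name
  local first = f:read("*l")             -- one line, newline chopped
  local rest  = f:read("*a")             -- the remainder of the file
  f:seek("set", 0)                       -- whence is "set", "cur" or "end"
  for line in f:lines() do               -- io_file_lines/io_file_iter above
    -- process line
  end
  f:close()

  for line in io.lines("example.txt") do -- opens the file itself; IOFILE_FLAG_CLOSE
    -- ...                               -- makes the iterator close it at EOF
  end
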
See Copyright Notice in luajit.h -*/ - -#define lib_jit_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_state.h" -#include "lj_bc.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#endif -#if LJ_HASJIT -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_target.h" -#endif -#include "lj_trace.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_vmevent.h" -#include "lj_lib.h" - -#include "luajit.h" - -/* -- jit.* functions ----------------------------------------------------- */ - -#define LJLIB_MODULE_jit - -static int setjitmode(lua_State *L, int mode) -{ - int idx = 0; - if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */ - mode |= LUAJIT_MODE_ENGINE; - } else { - /* jit.on/off/flush(func|proto, nil|true|false) */ - if (tvisfunc(L->base) || tvisproto(L->base)) - idx = 1; - else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */ - goto err; - if (L->base+1 < L->top && tvisbool(L->base+1)) - mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC; - else - mode |= LUAJIT_MODE_FUNC; - } - if (luaJIT_setmode(L, idx, mode) != 1) { - if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE) - lj_err_caller(L, LJ_ERR_NOJIT); - err: - lj_err_argt(L, 1, LUA_TFUNCTION); - } - return 0; -} - -LJLIB_CF(jit_on) -{ - return setjitmode(L, LUAJIT_MODE_ON); -} - -LJLIB_CF(jit_off) -{ - return setjitmode(L, LUAJIT_MODE_OFF); -} - -LJLIB_CF(jit_flush) -{ -#if LJ_HASJIT - if (L->base < L->top && tvisnumber(L->base)) { - int traceno = lj_lib_checkint(L, 1); - luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE); - return 0; - } -#endif - return setjitmode(L, LUAJIT_MODE_FLUSH); -} - -#if LJ_HASJIT -/* Push a string for every flag bit that is set. */ -static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base, - const char *str) -{ - for (; *str; base <<= 1, str += 1+*str) - if (flags & base) - setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str)); -} -#endif - -LJLIB_CF(jit_status) -{ -#if LJ_HASJIT - jit_State *J = L2J(L); - L->top = L->base; - setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0); - flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING); - flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING); - return (int)(L->top - L->base); -#else - setboolV(L->top++, 0); - return 1; -#endif -} - -LJLIB_CF(jit_attach) -{ -#ifdef LUAJIT_DISABLE_VMEVENT - luaL_error(L, "vmevent API disabled"); -#else - GCfunc *fn = lj_lib_checkfunc(L, 1); - GCstr *s = lj_lib_optstr(L, 2); - luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE); - if (s) { /* Attach to given event. */ - const uint8_t *p = (const uint8_t *)strdata(s); - uint32_t h = s->len; - while (*p) h = h ^ (lj_rol(h, 6) + *p++); - lua_pushvalue(L, 1); - lua_rawseti(L, -2, VMEVENT_HASHIDX(h)); - G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */ - } else { /* Detach if no event given. 
*/ - setnilV(L->top++); - while (lua_next(L, -2)) { - L->top--; - if (tvisfunc(L->top) && funcV(L->top) == fn) { - setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1)); - } - } - } -#endif - return 0; -} - -LJLIB_PUSH(top-5) LJLIB_SET(os) -LJLIB_PUSH(top-4) LJLIB_SET(arch) -LJLIB_PUSH(top-3) LJLIB_SET(version_num) -LJLIB_PUSH(top-2) LJLIB_SET(version) - -#include "lj_libdef.h" - -/* -- jit.util.* functions ------------------------------------------------ */ - -#define LJLIB_MODULE_jit_util - -/* -- Reflection API for Lua functions ------------------------------------ */ - -/* Return prototype of first argument (Lua function or prototype object) */ -static GCproto *check_Lproto(lua_State *L, int nolua) -{ - TValue *o = L->base; - if (L->top > o) { - if (tvisproto(o)) { - return protoV(o); - } else if (tvisfunc(o)) { - if (isluafunc(funcV(o))) - return funcproto(funcV(o)); - else if (nolua) - return NULL; - } - } - lj_err_argt(L, 1, LUA_TFUNCTION); - return NULL; /* unreachable */ -} - -static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val) -{ - setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val); -} - -/* local info = jit.util.funcinfo(func [,pc]) */ -LJLIB_CF(jit_util_funcinfo) -{ - GCproto *pt = check_Lproto(L, 1); - if (pt) { - BCPos pc = (BCPos)lj_lib_optint(L, 2, 0); - GCtab *t; - lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */ - t = tabV(L->top-1); - setintfield(L, t, "linedefined", pt->firstline); - setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline); - setintfield(L, t, "stackslots", pt->framesize); - setintfield(L, t, "params", pt->numparams); - setintfield(L, t, "bytecodes", (int32_t)pt->sizebc); - setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc); - setintfield(L, t, "nconsts", (int32_t)pt->sizekn); - setintfield(L, t, "upvalues", (int32_t)pt->sizeuv); - if (pc < pt->sizebc) - setintfield(L, t, "currentline", lj_debug_line(pt, pc)); - lua_pushboolean(L, (pt->flags & PROTO_VARARG)); - lua_setfield(L, -2, "isvararg"); - lua_pushboolean(L, (pt->flags & PROTO_CHILD)); - lua_setfield(L, -2, "children"); - setstrV(L, L->top++, proto_chunkname(pt)); - lua_setfield(L, -2, "source"); - lj_debug_pushloc(L, pt, pc); - lua_setfield(L, -2, "loc"); - } else { - GCfunc *fn = funcV(L->base); - GCtab *t; - lua_createtable(L, 0, 4); /* Increment hash size if fields are added. 
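
The jit.* control functions registered above are driven from Lua; jit.status() returns the engine state followed by the CPU and optimization flag strings pushed by flagbits_to_strings(). The sample function below is only an illustration:

  print(jit.version, jit.arch)       -- values pushed into the module above
  print(jit.status())                --> true plus a list of flag strings

  local function hot(n) local s = 0; for i = 1, n do s = s + i end; return s end
  jit.off(hot)                       -- exclude this function from compilation
  jit.on(hot)                        -- re-enable it
  jit.flush()                        -- discard all compiled traces
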
*/ - t = tabV(L->top-1); - if (!iscfunc(fn)) - setintfield(L, t, "ffid", fn->c.ffid); - setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")), - (intptr_t)(void *)fn->c.f); - setintfield(L, t, "upvalues", fn->c.nupvalues); - } - return 1; -} - -/* local ins, m = jit.util.funcbc(func, pc) */ -LJLIB_CF(jit_util_funcbc) -{ - GCproto *pt = check_Lproto(L, 0); - BCPos pc = (BCPos)lj_lib_checkint(L, 2); - if (pc < pt->sizebc) { - BCIns ins = proto_bc(pt)[pc]; - BCOp op = bc_op(ins); - lua_assert(op < BC__MAX); - setintV(L->top, ins); - setintV(L->top+1, lj_bc_mode[op]); - L->top += 2; - return 2; - } - return 0; -} - -/* local k = jit.util.funck(func, idx) */ -LJLIB_CF(jit_util_funck) -{ - GCproto *pt = check_Lproto(L, 0); - ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2); - if (idx >= 0) { - if (idx < (ptrdiff_t)pt->sizekn) { - copyTV(L, L->top-1, proto_knumtv(pt, idx)); - return 1; - } - } else { - if (~idx < (ptrdiff_t)pt->sizekgc) { - GCobj *gc = proto_kgc(pt, idx); - setgcV(L, L->top-1, gc, ~gc->gch.gct); - return 1; - } - } - return 0; -} - -/* local name = jit.util.funcuvname(func, idx) */ -LJLIB_CF(jit_util_funcuvname) -{ - GCproto *pt = check_Lproto(L, 0); - uint32_t idx = (uint32_t)lj_lib_checkint(L, 2); - if (idx < pt->sizeuv) { - setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx))); - return 1; - } - return 0; -} - -/* -- Reflection API for traces ------------------------------------------- */ - -#if LJ_HASJIT - -/* Check trace argument. Must not throw for non-existent trace numbers. */ -static GCtrace *jit_checktrace(lua_State *L) -{ - TraceNo tr = (TraceNo)lj_lib_checkint(L, 1); - jit_State *J = L2J(L); - if (tr > 0 && tr < J->sizetrace) - return traceref(J, tr); - return NULL; -} - -/* Names of link types. ORDER LJ_TRLINK */ -static const char *const jit_trlinkname[] = { - "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion", - "interpreter", "return", "stitch" -}; - -/* local info = jit.util.traceinfo(tr) */ -LJLIB_CF(jit_util_traceinfo) -{ - GCtrace *T = jit_checktrace(L); - if (T) { - GCtab *t; - lua_createtable(L, 0, 8); /* Increment hash size if fields are added. */ - t = tabV(L->top-1); - setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1); - setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk); - setintfield(L, t, "link", T->link); - setintfield(L, t, "nexit", T->nsnap); - setstrV(L, L->top++, lj_str_newz(L, jit_trlinkname[T->linktype])); - lua_setfield(L, -2, "linktype"); - /* There are many more fields. Add them only when needed. */ - return 1; - } - return 0; -} - -/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */ -LJLIB_CF(jit_util_traceir) -{ - GCtrace *T = jit_checktrace(L); - IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS; - if (T && ref >= REF_BIAS && ref < T->nins) { - IRIns *ir = &T->ir[ref]; - int32_t m = lj_ir_mode[ir->o]; - setintV(L->top-2, m); - setintV(L->top-1, ir->ot); - setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0)); - setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? 
REF_BIAS : 0)); - setintV(L->top++, ir->prev); - return 5; - } - return 0; -} - -/* local k, t [, slot] = jit.util.tracek(tr, idx) */ -LJLIB_CF(jit_util_tracek) -{ - GCtrace *T = jit_checktrace(L); - IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS; - if (T && ref >= T->nk && ref < REF_BIAS) { - IRIns *ir = &T->ir[ref]; - int32_t slot = -1; - if (ir->o == IR_KSLOT) { - slot = ir->op2; - ir = &T->ir[ir->op1]; - } -#if LJ_HASFFI - if (ir->o == IR_KINT64 && !ctype_ctsG(G(L))) { - ptrdiff_t oldtop = savestack(L, L->top); - luaopen_ffi(L); /* Load FFI library on-demand. */ - L->top = restorestack(L, oldtop); - } -#endif - lj_ir_kvalue(L, L->top-2, ir); - setintV(L->top-1, (int32_t)irt_type(ir->t)); - if (slot == -1) - return 2; - setintV(L->top++, slot); - return 3; - } - return 0; -} - -/* local snap = jit.util.tracesnap(tr, sn) */ -LJLIB_CF(jit_util_tracesnap) -{ - GCtrace *T = jit_checktrace(L); - SnapNo sn = (SnapNo)lj_lib_checkint(L, 2); - if (T && sn < T->nsnap) { - SnapShot *snap = &T->snap[sn]; - SnapEntry *map = &T->snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - GCtab *t; - lua_createtable(L, nent+2, 0); - t = tabV(L->top-1); - setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS); - setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots); - for (n = 0; n < nent; n++) - setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]); - setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0)); - return 1; - } - return 0; -} - -/* local mcode, addr, loop = jit.util.tracemc(tr) */ -LJLIB_CF(jit_util_tracemc) -{ - GCtrace *T = jit_checktrace(L); - if (T && T->mcode != NULL) { - setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode)); - setintptrV(L->top++, (intptr_t)(void *)T->mcode); - setintV(L->top++, T->mcloop); - return 3; - } - return 0; -} - -/* local addr = jit.util.traceexitstub([tr,] exitno) */ -LJLIB_CF(jit_util_traceexitstub) -{ -#ifdef EXITSTUBS_PER_GROUP - ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1); - jit_State *J = L2J(L); - if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) { - setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno)); - return 1; - } -#else - if (L->top > L->base+1) { /* Don't throw for one-argument variant. */ - GCtrace *T = jit_checktrace(L); - ExitNo exitno = (ExitNo)lj_lib_checkint(L, 2); - ExitNo maxexit = T->root ? T->nsnap+1 : T->nsnap; - if (T && T->mcode != NULL && exitno < maxexit) { - setintptrV(L->top-1, (intptr_t)(void *)exitstub_trace_addr(T, exitno)); - return 1; - } - } -#endif - return 0; -} - -/* local addr = jit.util.ircalladdr(idx) */ -LJLIB_CF(jit_util_ircalladdr) -{ - uint32_t idx = (uint32_t)lj_lib_checkint(L, 1); - if (idx < IRCALL__MAX) { - setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func); - return 1; - } - return 0; -} - -#endif - -#include "lj_libdef.h" - -static int luaopen_jit_util(lua_State *L) -{ - LJ_LIB_REG(L, NULL, jit_util); - return 1; -} - -/* -- jit.opt module ------------------------------------------------------ */ - -#if LJ_HASJIT - -#define LJLIB_MODULE_jit_opt - -/* Parse optimization level. */ -static int jitopt_level(jit_State *J, const char *str) -{ - if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') { - uint32_t flags; - if (str[0] == '0') flags = JIT_F_OPT_0; - else if (str[0] == '1') flags = JIT_F_OPT_1; - else if (str[0] == '2') flags = JIT_F_OPT_2; - else flags = JIT_F_OPT_3; - J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags; - return 1; /* Ok. */ - } - return 0; /* No match. */ -} - -/* Parse optimization flag. 
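
jit.util, whose registration ends just above, is the low-level reflection API and is not loaded by default. A hedged sketch of funcinfo/funcbc on an arbitrary Lua function:

  local jutil = require("jit.util")
  local function f(a, b) return a + b end
  local info = jutil.funcinfo(f)        -- table built by jit_util_funcinfo above
  print(info.params, info.bytecodes, info.source)
  print(jutil.funcbc(f, 0))             -- first bytecode instruction and its mode
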
*/ -static int jitopt_flag(jit_State *J, const char *str) -{ - const char *lst = JIT_F_OPTSTRING; - uint32_t opt; - int set = 1; - if (str[0] == '+') { - str++; - } else if (str[0] == '-') { - str++; - set = 0; - } else if (str[0] == 'n' && str[1] == 'o') { - str += str[2] == '-' ? 3 : 2; - set = 0; - } - for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) { - size_t len = *(const uint8_t *)lst; - if (len == 0) - break; - if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') { - if (set) J->flags |= opt; else J->flags &= ~opt; - return 1; /* Ok. */ - } - lst += 1+len; - } - return 0; /* No match. */ -} - -/* Parse optimization parameter. */ -static int jitopt_param(jit_State *J, const char *str) -{ - const char *lst = JIT_P_STRING; - int i; - for (i = 0; i < JIT_P__MAX; i++) { - size_t len = *(const uint8_t *)lst; - lua_assert(len != 0); - if (strncmp(str, lst+1, len) == 0 && str[len] == '=') { - int32_t n = 0; - const char *p = &str[len+1]; - while (*p >= '0' && *p <= '9') - n = n*10 + (*p++ - '0'); - if (*p) return 0; /* Malformed number. */ - J->param[i] = n; - if (i == JIT_P_hotloop) - lj_dispatch_init_hotcount(J2G(J)); - return 1; /* Ok. */ - } - lst += 1+len; - } - return 0; /* No match. */ -} - -/* jit.opt.start(flags...) */ -LJLIB_CF(jit_opt_start) -{ - jit_State *J = L2J(L); - int nargs = (int)(L->top - L->base); - if (nargs == 0) { - J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT; - } else { - int i; - for (i = 1; i <= nargs; i++) { - const char *str = strdata(lj_lib_checkstr(L, i)); - if (!jitopt_level(J, str) && - !jitopt_flag(J, str) && - !jitopt_param(J, str)) - lj_err_callerv(L, LJ_ERR_JITOPT, str); - } - } - return 0; -} - -#include "lj_libdef.h" - -#endif - -/* -- jit.profile module -------------------------------------------------- */ - -#if LJ_HASPROFILE - -#define LJLIB_MODULE_jit_profile - -/* Not loaded by default, use: local profile = require("jit.profile") */ - -static const char KEY_PROFILE_THREAD = 't'; -static const char KEY_PROFILE_FUNC = 'f'; - -static void jit_profile_callback(lua_State *L2, lua_State *L, int samples, - int vmstate) -{ - TValue key; - cTValue *tv; - setlightudV(&key, (void *)&KEY_PROFILE_FUNC); - tv = lj_tab_get(L, tabV(registry(L)), &key); - if (tvisfunc(tv)) { - char vmst = (char)vmstate; - int status; - setfuncV(L2, L2->top++, funcV(tv)); - setthreadV(L2, L2->top++, L); - setintV(L2->top++, samples); - setstrV(L2, L2->top++, lj_str_new(L2, &vmst, 1)); - status = lua_pcall(L2, 3, 0, 0); /* callback(thread, samples, vmstate) */ - if (status) { - if (G(L2)->panic) G(L2)->panic(L2); - exit(EXIT_FAILURE); - } - lj_trace_abort(G(L2)); - } -} - -/* profile.start(mode, cb) */ -LJLIB_CF(jit_profile_start) -{ - GCtab *registry = tabV(registry(L)); - GCstr *mode = lj_lib_optstr(L, 1); - GCfunc *func = lj_lib_checkfunc(L, 2); - lua_State *L2 = lua_newthread(L); /* Thread that runs profiler callback. */ - TValue key; - /* Anchor thread and function in registry. */ - setlightudV(&key, (void *)&KEY_PROFILE_THREAD); - setthreadV(L, lj_tab_set(L, registry, &key), L2); - setlightudV(&key, (void *)&KEY_PROFILE_FUNC); - setfuncV(L, lj_tab_set(L, registry, &key), func); - lj_gc_anybarriert(L, registry); - luaJIT_profile_start(L, mode ? 
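[Editorial aside: as the three parsers above (jitopt_level, jitopt_flag, jitopt_param) show, jit.opt.start accepts a single digit for the optimization level, an optimization name optionally prefixed with '+', '-' or 'no', and name=value for numeric parameters; anything else is rejected via LJ_ERR_JITOPT. A hedged usage sketch — the flag and parameter names are the standard -O ones, the values are chosen only for illustration.]

  -- Roughly equivalent to: luajit -O3 -O+fold -Ono-sink -Ohotloop=10
  jit.opt.start("3", "+fold", "nosink", "hotloop=10")

  -- With no arguments the flags are reset to JIT_F_OPT_DEFAULT:
  jit.opt.start()

  -- An unrecognized string raises an error through lj_err_callerv(L, LJ_ERR_JITOPT, str):
  -- jit.opt.start("bogus")   --> error: unknown or malformed optimization flag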
strdata(mode) : "", - (luaJIT_profile_callback)jit_profile_callback, L2); - return 0; -} - -/* profile.stop() */ -LJLIB_CF(jit_profile_stop) -{ - GCtab *registry; - TValue key; - luaJIT_profile_stop(L); - registry = tabV(registry(L)); - setlightudV(&key, (void *)&KEY_PROFILE_THREAD); - setnilV(lj_tab_set(L, registry, &key)); - setlightudV(&key, (void *)&KEY_PROFILE_FUNC); - setnilV(lj_tab_set(L, registry, &key)); - lj_gc_anybarriert(L, registry); - return 0; -} - -/* dump = profile.dumpstack([thread,] fmt, depth) */ -LJLIB_CF(jit_profile_dumpstack) -{ - lua_State *L2 = L; - int arg = 0; - size_t len; - int depth; - GCstr *fmt; - const char *p; - if (L->top > L->base && tvisthread(L->base)) { - L2 = threadV(L->base); - arg = 1; - } - fmt = lj_lib_checkstr(L, arg+1); - depth = lj_lib_checkint(L, arg+2); - p = luaJIT_profile_dumpstack(L2, strdata(fmt), depth, &len); - lua_pushlstring(L, p, len); - return 1; -} - -#include "lj_libdef.h" - -static int luaopen_jit_profile(lua_State *L) -{ - LJ_LIB_REG(L, NULL, jit_profile); - return 1; -} - -#endif - -/* -- JIT compiler initialization ----------------------------------------- */ - -#if LJ_HASJIT -/* Default values for JIT parameters. */ -static const int32_t jit_param_default[JIT_P__MAX+1] = { -#define JIT_PARAMINIT(len, name, value) (value), -JIT_PARAMDEF(JIT_PARAMINIT) -#undef JIT_PARAMINIT - 0 -}; -#endif - -#if LJ_TARGET_ARM && LJ_TARGET_LINUX -#include -#endif - -/* Arch-dependent CPU detection. */ -static uint32_t jit_cpudetect(lua_State *L) -{ - uint32_t flags = 0; -#if LJ_TARGET_X86ORX64 - uint32_t vendor[4]; - uint32_t features[4]; - if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) { -#if !LJ_HASJIT -#define JIT_F_SSE2 2 -#endif - flags |= ((features[3] >> 26)&1) * JIT_F_SSE2; -#if LJ_HASJIT - flags |= ((features[2] >> 0)&1) * JIT_F_SSE3; - flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1; - if (vendor[2] == 0x6c65746e) { /* Intel. */ - if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */ - flags |= JIT_F_LEA_AGU; - } else if (vendor[2] == 0x444d4163) { /* AMD. */ - uint32_t fam = (features[0] & 0x0ff00f00); - if (fam >= 0x00000f00) /* K8, K10. */ - flags |= JIT_F_PREFER_IMUL; - } -#endif - } - /* Check for required instruction set support on x86 (unnecessary on x64). */ -#if LJ_TARGET_X86 - if (!(flags & JIT_F_SSE2)) - luaL_error(L, "CPU with SSE2 required"); -#endif -#elif LJ_TARGET_ARM -#if LJ_HASJIT - int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */ -#if LJ_TARGET_LINUX - if (ver < 70) { /* Runtime ARM CPU detection. */ - struct utsname ut; - uname(&ut); - if (strncmp(ut.machine, "armv", 4) == 0) { - if (ut.machine[4] >= '7') - ver = 70; - else if (ut.machine[4] == '6') - ver = 60; - } - } -#endif - flags |= ver >= 70 ? JIT_F_ARMV7 : - ver >= 61 ? JIT_F_ARMV6T2_ : - ver >= 60 ? JIT_F_ARMV6_ : 0; - flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2; -#endif -#elif LJ_TARGET_ARM64 - /* No optional CPU features to detect (for now). */ -#elif LJ_TARGET_PPC -#if LJ_HASJIT -#if LJ_ARCH_SQRT - flags |= JIT_F_SQRT; -#endif -#if LJ_ARCH_ROUND - flags |= JIT_F_ROUND; -#endif -#endif -#elif LJ_TARGET_MIPS -#if LJ_HASJIT - /* Compile-time MIPS CPU detection. */ -#if LJ_ARCH_VERSION >= 20 - flags |= JIT_F_MIPS32R2; -#endif - /* Runtime MIPS CPU detection. */ -#if defined(__GNUC__) - if (!(flags & JIT_F_MIPS32R2)) { - int x; - /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. 
*/ - __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2"); - if (x) flags |= JIT_F_MIPS32R2; /* Either 0x80000000 (R2) or 0 (R1). */ - } -#endif -#endif -#else -#error "Missing CPU detection for this architecture" -#endif - UNUSED(L); - return flags; -} - -/* Initialize JIT compiler. */ -static void jit_init(lua_State *L) -{ - uint32_t flags = jit_cpudetect(L); -#if LJ_HASJIT - jit_State *J = L2J(L); - J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT; - memcpy(J->param, jit_param_default, sizeof(J->param)); - lj_dispatch_update(G(L)); -#else - UNUSED(flags); -#endif -} - -LUALIB_API int luaopen_jit(lua_State *L) -{ - jit_init(L); - lua_pushliteral(L, LJ_OS_NAME); - lua_pushliteral(L, LJ_ARCH_NAME); - lua_pushinteger(L, LUAJIT_VERSION_NUM); - lua_pushliteral(L, LUAJIT_VERSION); - LJ_LIB_REG(L, LUA_JITLIBNAME, jit); -#if LJ_HASPROFILE - lj_lib_prereg(L, LUA_JITLIBNAME ".profile", luaopen_jit_profile, - tabref(L->env)); -#endif -#ifndef LUAJIT_DISABLE_JITUTIL - lj_lib_prereg(L, LUA_JITLIBNAME ".util", luaopen_jit_util, tabref(L->env)); -#endif -#if LJ_HASJIT - LJ_LIB_REG(L, "jit.opt", jit_opt); -#endif - L->top -= 2; - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_math.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_math.c deleted file mode 100644 index 78838fcd6c0..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_math.c +++ /dev/null @@ -1,230 +0,0 @@ -/* -** Math library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include - -#define lib_math_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_lib.h" -#include "lj_vm.h" - -/* ------------------------------------------------------------------------ */ - -#define LJLIB_MODULE_math - -LJLIB_ASM(math_abs) LJLIB_REC(.) -{ - lj_lib_checknumber(L, 1); - return FFH_RETRY; -} -LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR) -LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL) - -LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT) -{ - lj_lib_checknum(L, 1); - return FFH_RETRY; -} -LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10) -LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP) -LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN) -LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS) -LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN) -LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin) -LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos) -LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan) -LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh) -LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh) -LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh) -LJLIB_ASM_(math_frexp) -LJLIB_ASM_(math_modf) LJLIB_REC(.) - -LJLIB_ASM(math_log) LJLIB_REC(math_log) -{ - double x = lj_lib_checknum(L, 1); - if (L->base+1 < L->top) { - double y = lj_lib_checknum(L, 2); -#ifdef LUAJIT_NO_LOG2 - x = log(x); y = 1.0 / log(y); -#else - x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y); -#endif - setnumV(L->base-1-LJ_FR2, x*y); /* Do NOT join the expression to x / y. */ - return FFH_RES(1); - } - return FFH_RETRY; -} - -LJLIB_LUA(math_deg) /* function(x) return x * 57.29577951308232 end */ -LJLIB_LUA(math_rad) /* function(x) return x * 0.017453292519943295 end */ - -LJLIB_ASM(math_atan2) LJLIB_REC(.) -{ - lj_lib_checknum(L, 1); - lj_lib_checknum(L, 2); - return FFH_RETRY; -} -LJLIB_ASM_(math_pow) LJLIB_REC(.) 
-LJLIB_ASM_(math_fmod) - -LJLIB_ASM(math_ldexp) LJLIB_REC(.) -{ - lj_lib_checknum(L, 1); -#if LJ_DUALNUM && !LJ_TARGET_X86ORX64 - lj_lib_checkint(L, 2); -#else - lj_lib_checknum(L, 2); -#endif - return FFH_RETRY; -} - -LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN) -{ - int i = 0; - do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); - return FFH_RETRY; -} -LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX) - -LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi) -LJLIB_PUSH(1e310) LJLIB_SET(huge) - -/* ------------------------------------------------------------------------ */ - -/* This implements a Tausworthe PRNG with period 2^223. Based on: -** Tables of maximally-equidistributed combined LFSR generators, -** Pierre L'Ecuyer, 1991, table 3, 1st entry. -** Full-period ME-CF generator with L=64, J=4, k=223, N1=49. -*/ - -/* PRNG state. */ -struct RandomState { - uint64_t gen[4]; /* State of the 4 LFSR generators. */ - int valid; /* State is valid. */ -}; - -/* Union needed for bit-pattern conversion between uint64_t and double. */ -typedef union { uint64_t u64; double d; } U64double; - -/* Update generator i and compute a running xor of all states. */ -#define TW223_GEN(i, k, q, s) \ - z = rs->gen[i]; \ - z = (((z<> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<gen[i] = z; - -/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */ -LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs) -{ - uint64_t z, r = 0; - TW223_GEN(0, 63, 31, 18) - TW223_GEN(1, 58, 19, 28) - TW223_GEN(2, 55, 24, 7) - TW223_GEN(3, 47, 21, 8) - return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000); -} - -/* PRNG initialization function. */ -static void random_init(RandomState *rs, double d) -{ - uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */ - int i; - for (i = 0; i < 4; i++) { - U64double u; - uint32_t m = 1u << (r&255); - r >>= 8; - u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354; - if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */ - rs->gen[i] = u.u64; - } - rs->valid = 1; - for (i = 0; i < 10; i++) - lj_math_random_step(rs); -} - -/* PRNG extract function. */ -LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */ -LJLIB_CF(math_random) LJLIB_REC(.) -{ - int n = (int)(L->top - L->base); - RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); - U64double u; - double d; - if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0); - u.u64 = lj_math_random_step(rs); - d = u.d - 1.0; - if (n > 0) { -#if LJ_DUALNUM - int isint = 1; - double r1; - lj_lib_checknumber(L, 1); - if (tvisint(L->base)) { - r1 = (lua_Number)intV(L->base); - } else { - isint = 0; - r1 = numV(L->base); - } -#else - double r1 = lj_lib_checknum(L, 1); -#endif - if (n == 1) { - d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */ - } else { -#if LJ_DUALNUM - double r2; - lj_lib_checknumber(L, 2); - if (tvisint(L->base+1)) { - r2 = (lua_Number)intV(L->base+1); - } else { - isint = 0; - r2 = numV(L->base+1); - } -#else - double r2 = lj_lib_checknum(L, 2); -#endif - d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */ - } -#if LJ_DUALNUM - if (isint) { - setintV(L->top-1, lj_num2int(d)); - return 1; - } -#endif - } /* else: d is a double in range [0, 1] */ - setnumV(L->top++, d); - return 1; -} - -/* PRNG seed function. */ -LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. 
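[Editorial aside: lj_math_random_step above yields a double in [1,2) built from the xor of the four LFSR states, and math.random maps d = u - 1.0 onto the three documented ranges. A quick sketch of the resulting contract; the seed value is arbitrary.]

  math.randomseed(42)             -- reinitializes the four LFSR generators
  local r0 = math.random()        -- real in [0, 1)
  local r1 = math.random(6)       -- integer in [1, 6]
  local r2 = math.random(10, 20)  -- integer in [10, 20]
  assert(r0 >= 0 and r0 < 1)
  assert(r1 >= 1 and r1 <= 6 and r1 % 1 == 0)
  assert(r2 >= 10 and r2 <= 20 and r2 % 1 == 0)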
*/ -LJLIB_CF(math_randomseed) -{ - RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); - random_init(rs, lj_lib_checknum(L, 1)); - return 0; -} - -/* ------------------------------------------------------------------------ */ - -#include "lj_libdef.h" - -LUALIB_API int luaopen_math(lua_State *L) -{ - RandomState *rs; - rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState)); - rs->valid = 0; /* Use lazy initialization to save some time on startup. */ - LJ_LIB_REG(L, LUA_MATHLIBNAME, math); -#if defined(LUA_COMPAT_MOD) && !LJ_52 - lua_getfield(L, -1, "fmod"); - lua_setfield(L, -2, "mod"); -#endif - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_os.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_os.c deleted file mode 100644 index 7b5873a518a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_os.c +++ /dev/null @@ -1,292 +0,0 @@ -/* -** OS library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#include -#include - -#define lib_os_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_lib.h" - -#if LJ_TARGET_POSIX -#include -#else -#include -#endif - -#if !LJ_TARGET_PSVITA -#include -#endif - -/* ------------------------------------------------------------------------ */ - -#define LJLIB_MODULE_os - -LJLIB_CF(os_execute) -{ -#if LJ_TARGET_CONSOLE -#if LJ_52 - errno = ENOSYS; - return luaL_fileresult(L, 0, NULL); -#else - lua_pushinteger(L, -1); - return 1; -#endif -#else - const char *cmd = luaL_optstring(L, 1, NULL); - int stat = system(cmd); -#if LJ_52 - if (cmd) - return luaL_execresult(L, stat); - setboolV(L->top++, 1); -#else - setintV(L->top++, stat); -#endif - return 1; -#endif -} - -LJLIB_CF(os_remove) -{ - const char *filename = luaL_checkstring(L, 1); - return luaL_fileresult(L, remove(filename) == 0, filename); -} - -LJLIB_CF(os_rename) -{ - const char *fromname = luaL_checkstring(L, 1); - const char *toname = luaL_checkstring(L, 2); - return luaL_fileresult(L, rename(fromname, toname) == 0, fromname); -} - -LJLIB_CF(os_tmpname) -{ -#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA - lj_err_caller(L, LJ_ERR_OSUNIQF); - return 0; -#else -#if LJ_TARGET_POSIX - char buf[15+1]; - int fp; - strcpy(buf, "/tmp/lua_XXXXXX"); - fp = mkstemp(buf); - if (fp != -1) - close(fp); - else - lj_err_caller(L, LJ_ERR_OSUNIQF); -#else - char buf[L_tmpnam]; - if (tmpnam(buf) == NULL) - lj_err_caller(L, LJ_ERR_OSUNIQF); -#endif - lua_pushstring(L, buf); - return 1; -#endif -} - -LJLIB_CF(os_getenv) -{ -#if LJ_TARGET_CONSOLE - lua_pushnil(L); -#else - lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */ -#endif - return 1; -} - -LJLIB_CF(os_exit) -{ - int status; - if (L->base < L->top && tvisbool(L->base)) - status = boolV(L->base) ? EXIT_SUCCESS : EXIT_FAILURE; - else - status = lj_lib_optint(L, 1, EXIT_SUCCESS); - if (L->base+1 < L->top && tvistruecond(L->base+1)) - lua_close(L); - exit(status); - return 0; /* Unreachable. 
*/ -} - -LJLIB_CF(os_clock) -{ - setnumV(L->top++, ((lua_Number)clock())*(1.0/(lua_Number)CLOCKS_PER_SEC)); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -static void setfield(lua_State *L, const char *key, int value) -{ - lua_pushinteger(L, value); - lua_setfield(L, -2, key); -} - -static void setboolfield(lua_State *L, const char *key, int value) -{ - if (value < 0) /* undefined? */ - return; /* does not set field */ - lua_pushboolean(L, value); - lua_setfield(L, -2, key); -} - -static int getboolfield(lua_State *L, const char *key) -{ - int res; - lua_getfield(L, -1, key); - res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1); - lua_pop(L, 1); - return res; -} - -static int getfield(lua_State *L, const char *key, int d) -{ - int res; - lua_getfield(L, -1, key); - if (lua_isnumber(L, -1)) { - res = (int)lua_tointeger(L, -1); - } else { - if (d < 0) - lj_err_callerv(L, LJ_ERR_OSDATEF, key); - res = d; - } - lua_pop(L, 1); - return res; -} - -LJLIB_CF(os_date) -{ - const char *s = luaL_optstring(L, 1, "%c"); - time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL)); - struct tm *stm; -#if LJ_TARGET_POSIX - struct tm rtm; -#endif - if (*s == '!') { /* UTC? */ - s++; /* Skip '!' */ -#if LJ_TARGET_POSIX - stm = gmtime_r(&t, &rtm); -#else - stm = gmtime(&t); -#endif - } else { -#if LJ_TARGET_POSIX - stm = localtime_r(&t, &rtm); -#else - stm = localtime(&t); -#endif - } - if (stm == NULL) { /* Invalid date? */ - setnilV(L->top++); - } else if (strcmp(s, "*t") == 0) { - lua_createtable(L, 0, 9); /* 9 = number of fields */ - setfield(L, "sec", stm->tm_sec); - setfield(L, "min", stm->tm_min); - setfield(L, "hour", stm->tm_hour); - setfield(L, "day", stm->tm_mday); - setfield(L, "month", stm->tm_mon+1); - setfield(L, "year", stm->tm_year+1900); - setfield(L, "wday", stm->tm_wday+1); - setfield(L, "yday", stm->tm_yday+1); - setboolfield(L, "isdst", stm->tm_isdst); - } else if (*s) { - SBuf *sb = &G(L)->tmpbuf; - MSize sz = 0; - const char *q; - for (q = s; *q; q++) - sz += (*q == '%') ? 30 : 1; /* Overflow doesn't matter. */ - setsbufL(sb, L); - for (;;) { - char *buf = lj_buf_need(sb, sz); - size_t len = strftime(buf, sbufsz(sb), s, stm); - if (len) { - setstrV(L, L->top++, lj_str_new(L, buf, len)); - lj_gc_check(L); - break; - } - sz += (sz|1); - } - } else { - setstrV(L, L->top++, &G(L)->strempty); - } - return 1; -} - -LJLIB_CF(os_time) -{ - time_t t; - if (lua_isnoneornil(L, 1)) { /* called without args? */ - t = time(NULL); /* get current time */ - } else { - struct tm ts; - luaL_checktype(L, 1, LUA_TTABLE); - lua_settop(L, 1); /* make sure table is at the top */ - ts.tm_sec = getfield(L, "sec", 0); - ts.tm_min = getfield(L, "min", 0); - ts.tm_hour = getfield(L, "hour", 12); - ts.tm_mday = getfield(L, "day", -1); - ts.tm_mon = getfield(L, "month", -1) - 1; - ts.tm_year = getfield(L, "year", -1) - 1900; - ts.tm_isdst = getboolfield(L, "isdst"); - t = mktime(&ts); - } - if (t == (time_t)(-1)) - lua_pushnil(L); - else - lua_pushnumber(L, (lua_Number)t); - return 1; -} - -LJLIB_CF(os_difftime) -{ - lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)), - (time_t)(luaL_optnumber(L, 2, (lua_Number)0)))); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -LJLIB_CF(os_setlocale) -{ -#if LJ_TARGET_PSVITA - lua_pushliteral(L, "C"); -#else - GCstr *s = lj_lib_optstr(L, 1); - const char *str = s ? 
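[Editorial aside: os.date and os.time above are written to round-trip through the "*t" table form — the nine fields emitted for "*t" are exactly the ones getfield/getboolfield read back. For instance (the round-trip assumes mktime and localtime agree on the DST setting):]

  local now = os.time()
  local t = os.date("*t", now)   -- sec, min, hour, day, month, year, wday, yday, isdst
  assert(os.time(t) == now)
  print(os.date("!%Y-%m-%dT%H:%M:%SZ", now))   -- leading '!' selects UTC (gmtime)
  print(os.difftime(now, now - 3600))          --> 3600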
strdata(s) : NULL; - int opt = lj_lib_checkopt(L, 2, 6, - "\5ctype\7numeric\4time\7collate\10monetary\1\377\3all"); - if (opt == 0) opt = LC_CTYPE; - else if (opt == 1) opt = LC_NUMERIC; - else if (opt == 2) opt = LC_TIME; - else if (opt == 3) opt = LC_COLLATE; - else if (opt == 4) opt = LC_MONETARY; - else if (opt == 6) opt = LC_ALL; - lua_pushstring(L, setlocale(opt, str)); -#endif - return 1; -} - -/* ------------------------------------------------------------------------ */ - -#include "lj_libdef.h" - -LUALIB_API int luaopen_os(lua_State *L) -{ - LJ_LIB_REG(L, LUA_OSLIBNAME, os); - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_package.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_package.c deleted file mode 100644 index f8d69bfea82..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_package.c +++ /dev/null @@ -1,648 +0,0 @@ -/* -** Package library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2012 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lib_package_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_err.h" -#include "lj_lib.h" - -/* ------------------------------------------------------------------------ */ - -/* Error codes for ll_loadfunc. */ -#define PACKAGE_ERR_LIB 1 -#define PACKAGE_ERR_FUNC 2 -#define PACKAGE_ERR_LOAD 3 - -/* Redefined in platform specific part. */ -#define PACKAGE_LIB_FAIL "open" -#define setprogdir(L) ((void)0) - -/* Symbol name prefixes. */ -#define SYMPREFIX_CF "luaopen_%s" -#define SYMPREFIX_BC "luaJIT_BC_%s" - -#if LJ_TARGET_DLOPEN - -#include - -static void ll_unloadlib(void *lib) -{ - dlclose(lib); -} - -static void *ll_load(lua_State *L, const char *path, int gl) -{ - void *lib = dlopen(path, RTLD_NOW | (gl ? 
RTLD_GLOBAL : RTLD_LOCAL)); - if (lib == NULL) lua_pushstring(L, dlerror()); - return lib; -} - -static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) -{ - lua_CFunction f = (lua_CFunction)dlsym(lib, sym); - if (f == NULL) lua_pushstring(L, dlerror()); - return f; -} - -static const char *ll_bcsym(void *lib, const char *sym) -{ -#if defined(RTLD_DEFAULT) - if (lib == NULL) lib = RTLD_DEFAULT; -#elif LJ_TARGET_OSX || LJ_TARGET_BSD - if (lib == NULL) lib = (void *)(intptr_t)-2; -#endif - return (const char *)dlsym(lib, sym); -} - -#elif LJ_TARGET_WINDOWS - -#define WIN32_LEAN_AND_MEAN -#include - -#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS -#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4 -#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2 -BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*); -#endif - -#undef setprogdir - -static void setprogdir(lua_State *L) -{ - char buff[MAX_PATH + 1]; - char *lb; - DWORD nsize = sizeof(buff); - DWORD n = GetModuleFileNameA(NULL, buff, nsize); - if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) { - luaL_error(L, "unable to get ModuleFileName"); - } else { - *lb = '\0'; - luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff); - lua_remove(L, -2); /* remove original string */ - } -} - -static void pusherror(lua_State *L) -{ - DWORD error = GetLastError(); -#if LJ_TARGET_XBOXONE - wchar_t wbuffer[128]; - char buffer[128*2]; - if (FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, - NULL, error, 0, wbuffer, sizeof(wbuffer)/sizeof(wchar_t), NULL) && - WideCharToMultiByte(CP_ACP, 0, wbuffer, 128, buffer, 128*2, NULL, NULL)) -#else - char buffer[128]; - if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, - NULL, error, 0, buffer, sizeof(buffer), NULL)) -#endif - lua_pushstring(L, buffer); - else - lua_pushfstring(L, "system error %d\n", error); -} - -static void ll_unloadlib(void *lib) -{ - FreeLibrary((HINSTANCE)lib); -} - -static void *ll_load(lua_State *L, const char *path, int gl) -{ - HINSTANCE lib = LoadLibraryExA(path, NULL, 0); - if (lib == NULL) pusherror(L); - UNUSED(gl); - return lib; -} - -static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) -{ - lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym); - if (f == NULL) pusherror(L); - return f; -} - -static const char *ll_bcsym(void *lib, const char *sym) -{ - if (lib) { - return (const char *)GetProcAddress((HINSTANCE)lib, sym); - } else { - HINSTANCE h = GetModuleHandleA(NULL); - const char *p = (const char *)GetProcAddress(h, sym); - if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - (const char *)ll_bcsym, &h)) - p = (const char *)GetProcAddress(h, sym); - return p; - } -} - -#else - -#undef PACKAGE_LIB_FAIL -#define PACKAGE_LIB_FAIL "absent" - -#define DLMSG "dynamic libraries not enabled; no support for target OS" - -static void ll_unloadlib(void *lib) -{ - UNUSED(lib); -} - -static void *ll_load(lua_State *L, const char *path, int gl) -{ - UNUSED(path); UNUSED(gl); - lua_pushliteral(L, DLMSG); - return NULL; -} - -static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) -{ - UNUSED(lib); UNUSED(sym); - lua_pushliteral(L, DLMSG); - return NULL; -} - -static const char *ll_bcsym(void *lib, const char *sym) -{ - UNUSED(lib); UNUSED(sym); - return NULL; -} - -#endif - -/* ------------------------------------------------------------------------ */ - -static void **ll_register(lua_State *L, 
const char *path) -{ - void **plib; - lua_pushfstring(L, "LOADLIB: %s", path); - lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */ - if (!lua_isnil(L, -1)) { /* is there an entry? */ - plib = (void **)lua_touserdata(L, -1); - } else { /* no entry yet; create one */ - lua_pop(L, 1); - plib = (void **)lua_newuserdata(L, sizeof(void *)); - *plib = NULL; - luaL_getmetatable(L, "_LOADLIB"); - lua_setmetatable(L, -2); - lua_pushfstring(L, "LOADLIB: %s", path); - lua_pushvalue(L, -2); - lua_settable(L, LUA_REGISTRYINDEX); - } - return plib; -} - -static const char *mksymname(lua_State *L, const char *modname, - const char *prefix) -{ - const char *funcname; - const char *mark = strchr(modname, *LUA_IGMARK); - if (mark) modname = mark + 1; - funcname = luaL_gsub(L, modname, ".", "_"); - funcname = lua_pushfstring(L, prefix, funcname); - lua_remove(L, -2); /* remove 'gsub' result */ - return funcname; -} - -static int ll_loadfunc(lua_State *L, const char *path, const char *name, int r) -{ - void **reg = ll_register(L, path); - if (*reg == NULL) *reg = ll_load(L, path, (*name == '*')); - if (*reg == NULL) { - return PACKAGE_ERR_LIB; /* Unable to load library. */ - } else if (*name == '*') { /* Only load library into global namespace. */ - lua_pushboolean(L, 1); - return 0; - } else { - const char *sym = r ? name : mksymname(L, name, SYMPREFIX_CF); - lua_CFunction f = ll_sym(L, *reg, sym); - if (f) { - lua_pushcfunction(L, f); - return 0; - } - if (!r) { - const char *bcdata = ll_bcsym(*reg, mksymname(L, name, SYMPREFIX_BC)); - lua_pop(L, 1); - if (bcdata) { - if (luaL_loadbuffer(L, bcdata, LJ_MAX_BUF, name) != 0) - return PACKAGE_ERR_LOAD; - return 0; - } - } - return PACKAGE_ERR_FUNC; /* Unable to find function. */ - } -} - -static int lj_cf_package_loadlib(lua_State *L) -{ - const char *path = luaL_checkstring(L, 1); - const char *init = luaL_checkstring(L, 2); - int st = ll_loadfunc(L, path, init, 1); - if (st == 0) { /* no errors? */ - return 1; /* return the loaded function */ - } else { /* error; error message is on stack top */ - lua_pushnil(L); - lua_insert(L, -2); - lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init"); - return 3; /* return nil, error message, and where */ - } -} - -static int lj_cf_package_unloadlib(lua_State *L) -{ - void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB"); - if (*lib) ll_unloadlib(*lib); - *lib = NULL; /* mark library as closed */ - return 0; -} - -/* ------------------------------------------------------------------------ */ - -static int readable(const char *filename) -{ - FILE *f = fopen(filename, "r"); /* try to open file */ - if (f == NULL) return 0; /* open failed */ - fclose(f); - return 1; -} - -static const char *pushnexttemplate(lua_State *L, const char *path) -{ - const char *l; - while (*path == *LUA_PATHSEP) path++; /* skip separators */ - if (*path == '\0') return NULL; /* no more templates */ - l = strchr(path, *LUA_PATHSEP); /* find next separator */ - if (l == NULL) l = path + strlen(path); - lua_pushlstring(L, path, (size_t)(l - path)); /* template */ - return l; -} - -static const char *searchpath (lua_State *L, const char *name, - const char *path, const char *sep, - const char *dirsep) -{ - luaL_Buffer msg; /* to build error message */ - luaL_buffinit(L, &msg); - if (*sep != '\0') /* non-empty separator? 
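[Editorial aside: ll_loadfunc above fixes the loader conventions — an init name of '*' only loads the library into the global namespace, otherwise the module name is mangled through SYMPREFIX_CF ("luaopen_%s", dots replaced by underscores) and, if that symbol is missing, SYMPREFIX_BC for embedded bytecode. Seen from Lua via package.loadlib; the library path and symbol below are hypothetical.]

  -- Hypothetical shared object exporting luaopen_mymod().
  local init, err, where = package.loadlib("./mymod.so", "luaopen_mymod")
  if init then
    local mymod = init()   -- call the C initializer we got back
  else
    -- 'where' is "open" (PACKAGE_ERR_LIB) or "init" (symbol/loader failure)
    print("loadlib failed (" .. where .. "): " .. err)
  end

  -- Load for side effects only; '*' skips the symbol lookup entirely.
  package.loadlib("./libfoo.so", "*")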
*/ - name = luaL_gsub(L, name, sep, dirsep); /* replace it by 'dirsep' */ - while ((path = pushnexttemplate(L, path)) != NULL) { - const char *filename = luaL_gsub(L, lua_tostring(L, -1), - LUA_PATH_MARK, name); - lua_remove(L, -2); /* remove path template */ - if (readable(filename)) /* does file exist and is readable? */ - return filename; /* return that file name */ - lua_pushfstring(L, "\n\tno file " LUA_QS, filename); - lua_remove(L, -2); /* remove file name */ - luaL_addvalue(&msg); /* concatenate error msg. entry */ - } - luaL_pushresult(&msg); /* create error message */ - return NULL; /* not found */ -} - -static int lj_cf_package_searchpath(lua_State *L) -{ - const char *f = searchpath(L, luaL_checkstring(L, 1), - luaL_checkstring(L, 2), - luaL_optstring(L, 3, "."), - luaL_optstring(L, 4, LUA_DIRSEP)); - if (f != NULL) { - return 1; - } else { /* error message is on top of the stack */ - lua_pushnil(L); - lua_insert(L, -2); - return 2; /* return nil + error message */ - } -} - -static const char *findfile(lua_State *L, const char *name, - const char *pname) -{ - const char *path; - lua_getfield(L, LUA_ENVIRONINDEX, pname); - path = lua_tostring(L, -1); - if (path == NULL) - luaL_error(L, LUA_QL("package.%s") " must be a string", pname); - return searchpath(L, name, path, ".", LUA_DIRSEP); -} - -static void loaderror(lua_State *L, const char *filename) -{ - luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s", - lua_tostring(L, 1), filename, lua_tostring(L, -1)); -} - -static int lj_cf_package_loader_lua(lua_State *L) -{ - const char *filename; - const char *name = luaL_checkstring(L, 1); - filename = findfile(L, name, "path"); - if (filename == NULL) return 1; /* library not found in this path */ - if (luaL_loadfile(L, filename) != 0) - loaderror(L, filename); - return 1; /* library loaded successfully */ -} - -static int lj_cf_package_loader_c(lua_State *L) -{ - const char *name = luaL_checkstring(L, 1); - const char *filename = findfile(L, name, "cpath"); - if (filename == NULL) return 1; /* library not found in this path */ - if (ll_loadfunc(L, filename, name, 0) != 0) - loaderror(L, filename); - return 1; /* library loaded successfully */ -} - -#define LUA_POF "luaopen_" -#define LUA_OFSEP "_" -#define POF LUA_POF - -static const char *mkfuncname (lua_State *L, const char *modname) { - const char *funcname; - const char *mark = strchr(modname, *LUA_IGMARK); - if (mark) modname = mark + 1; - funcname = luaL_gsub(L, modname, ".", LUA_OFSEP); - funcname = lua_pushfstring(L, POF"%s", funcname); - lua_remove(L, -2); /* remove 'gsub' result */ - return funcname; -} - - -int loader_C_luatex (lua_State *L, const char *name, const char *filename) { - const char *funcname; - funcname = mkfuncname(L, name); - if (ll_loadfunc(L, filename, funcname,0) != 0) - loaderror(L, filename); - return 1; /* library loaded successfully */ -} - -static int lj_cf_package_loader_croot(lua_State *L) -{ - const char *filename; - const char *name = luaL_checkstring(L, 1); - const char *p = strchr(name, '.'); - int st; - if (p == NULL) return 0; /* is root */ - lua_pushlstring(L, name, (size_t)(p - name)); - filename = findfile(L, lua_tostring(L, -1), "cpath"); - if (filename == NULL) return 1; /* root not found */ - if ((st = ll_loadfunc(L, filename, name, 0)) != 0) { - if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ - lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, - name, filename); - return 1; /* function not found */ - } - return 1; -} - -int 
loader_Call_luatex (lua_State *L, const char *name, const char *filename) { - const char *funcname; - int stat; - if (filename == NULL) return 1; /* root not found */ - funcname = mkfuncname(L, name); - if ((stat = ll_loadfunc(L, filename, funcname,0)) != 0) { - if (stat != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ - lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, - name, filename); - return 1; /* function not found */ - } - return 1; /* library loaded successfully */ -} - - -static int lj_cf_package_loader_preload(lua_State *L) -{ - const char *name = luaL_checkstring(L, 1); - lua_getfield(L, LUA_ENVIRONINDEX, "preload"); - if (!lua_istable(L, -1)) - luaL_error(L, LUA_QL("package.preload") " must be a table"); - lua_getfield(L, -1, name); - if (lua_isnil(L, -1)) { /* Not found? */ - const char *bcname = mksymname(L, name, SYMPREFIX_BC); - const char *bcdata = ll_bcsym(NULL, bcname); - if (bcdata == NULL || luaL_loadbuffer(L, bcdata, LJ_MAX_BUF, name) != 0) - lua_pushfstring(L, "\n\tno field package.preload['%s']", name); - } - return 1; -} - -/* ------------------------------------------------------------------------ */ - -static const int sentinel_ = 0; -#define sentinel ((void *)&sentinel_) - -static int lj_cf_package_require(lua_State *L) -{ - const char *name = luaL_checkstring(L, 1); - int i; - lua_settop(L, 1); /* _LOADED table will be at index 2 */ - lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); - lua_getfield(L, 2, name); - if (lua_toboolean(L, -1)) { /* is it there? */ - if (lua_touserdata(L, -1) == sentinel) /* check loops */ - luaL_error(L, "loop or previous error loading module " LUA_QS, name); - return 1; /* package is already loaded */ - } - /* else must load it; iterate over available loaders */ - lua_getfield(L, LUA_ENVIRONINDEX, "loaders"); - if (!lua_istable(L, -1)) - luaL_error(L, LUA_QL("package.loaders") " must be a table"); - lua_pushliteral(L, ""); /* error message accumulator */ - for (i = 1; ; i++) { - lua_rawgeti(L, -2, i); /* get a loader */ - if (lua_isnil(L, -1)) - luaL_error(L, "module " LUA_QS " not found:%s", - name, lua_tostring(L, -2)); - lua_pushstring(L, name); - lua_call(L, 1, 1); /* call it */ - if (lua_isfunction(L, -1)) /* did it find module? */ - break; /* module loaded successfully */ - else if (lua_isstring(L, -1)) /* loader returned error message? */ - lua_concat(L, 2); /* accumulate it */ - else - lua_pop(L, 1); - } - lua_pushlightuserdata(L, sentinel); - lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */ - lua_pushstring(L, name); /* pass name as argument to module */ - lua_call(L, 1, 1); /* run loaded module */ - if (!lua_isnil(L, -1)) /* non-nil return? */ - lua_setfield(L, 2, name); /* _LOADED[name] = returned value */ - lua_getfield(L, 2, name); - if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? 
*/ - lua_pushboolean(L, 1); /* use true as result */ - lua_pushvalue(L, -1); /* extra copy to be returned */ - lua_setfield(L, 2, name); /* _LOADED[name] = true */ - } - lj_lib_checkfpu(L); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -static void setfenv(lua_State *L) -{ - lua_Debug ar; - if (lua_getstack(L, 1, &ar) == 0 || - lua_getinfo(L, "f", &ar) == 0 || /* get calling function */ - lua_iscfunction(L, -1)) - luaL_error(L, LUA_QL("module") " not called from a Lua function"); - lua_pushvalue(L, -2); - lua_setfenv(L, -2); - lua_pop(L, 1); -} - -static void dooptions(lua_State *L, int n) -{ - int i; - for (i = 2; i <= n; i++) { - lua_pushvalue(L, i); /* get option (a function) */ - lua_pushvalue(L, -2); /* module */ - lua_call(L, 1, 0); - } -} - -static void modinit(lua_State *L, const char *modname) -{ - const char *dot; - lua_pushvalue(L, -1); - lua_setfield(L, -2, "_M"); /* module._M = module */ - lua_pushstring(L, modname); - lua_setfield(L, -2, "_NAME"); - dot = strrchr(modname, '.'); /* look for last dot in module name */ - if (dot == NULL) dot = modname; else dot++; - /* set _PACKAGE as package name (full module name minus last part) */ - lua_pushlstring(L, modname, (size_t)(dot - modname)); - lua_setfield(L, -2, "_PACKAGE"); -} - -static int lj_cf_package_module(lua_State *L) -{ - const char *modname = luaL_checkstring(L, 1); - int loaded = lua_gettop(L) + 1; /* index of _LOADED table */ - lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); - lua_getfield(L, loaded, modname); /* get _LOADED[modname] */ - if (!lua_istable(L, -1)) { /* not found? */ - lua_pop(L, 1); /* remove previous result */ - /* try global variable (and create one if it does not exist) */ - if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL) - lj_err_callerv(L, LJ_ERR_BADMODN, modname); - lua_pushvalue(L, -1); - lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */ - } - /* check whether table already has a _NAME field */ - lua_getfield(L, -1, "_NAME"); - if (!lua_isnil(L, -1)) { /* is table an initialized module? 
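[Editorial aside: lj_cf_package_module, begun above and continued in the next hunk, reuses or creates the table in _LOADED, lets modinit stamp _M, _NAME and _PACKAGE, switches the caller's environment to it via setfenv, and finally runs option functions such as package.seeall. The classic Lua 5.1 usage it exists to support, with a hypothetical module name:]

  -- mymod.lua somewhere on package.path (hypothetical)
  module("mymod", package.seeall)   -- environment becomes the module table;
                                    -- seeall keeps _G reachable through __index

  function hello()                  -- ends up as mymod.hello
    print("hello from " .. _NAME)   -- _NAME == "mymod", _M == the module table
  end

  -- a client then does: require("mymod").hello()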
*/ - lua_pop(L, 1); - } else { /* no; initialize it */ - lua_pop(L, 1); - modinit(L, modname); - } - lua_pushvalue(L, -1); - setfenv(L); - dooptions(L, loaded - 1); - return 0; -} - -static int lj_cf_package_seeall(lua_State *L) -{ - luaL_checktype(L, 1, LUA_TTABLE); - if (!lua_getmetatable(L, 1)) { - lua_createtable(L, 0, 1); /* create new metatable */ - lua_pushvalue(L, -1); - lua_setmetatable(L, 1); - } - lua_pushvalue(L, LUA_GLOBALSINDEX); - lua_setfield(L, -2, "__index"); /* mt.__index = _G */ - return 0; -} - -/* ------------------------------------------------------------------------ */ - -#define AUXMARK "\1" - -static void setpath(lua_State *L, const char *fieldname, const char *envname, - const char *def, int noenv) -{ -#if LJ_TARGET_CONSOLE - const char *path = NULL; - UNUSED(envname); -#else - const char *path = getenv(envname); -#endif - if (path == NULL || noenv) { - lua_pushstring(L, def); - } else { - path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP, - LUA_PATHSEP AUXMARK LUA_PATHSEP); - luaL_gsub(L, path, AUXMARK, def); - lua_remove(L, -2); - } - setprogdir(L); - lua_setfield(L, -2, fieldname); -} - -static const luaL_Reg package_lib[] = { - { "loadlib", lj_cf_package_loadlib }, - { "searchpath", lj_cf_package_searchpath }, - { "seeall", lj_cf_package_seeall }, - { NULL, NULL } -}; - -static const luaL_Reg package_global[] = { - { "module", lj_cf_package_module }, - { "require", lj_cf_package_require }, - { NULL, NULL } -}; - -static const lua_CFunction package_loaders[] = -{ - lj_cf_package_loader_preload, - lj_cf_package_loader_lua, - lj_cf_package_loader_c, - lj_cf_package_loader_croot, - NULL -}; - -LUALIB_API int luaopen_package(lua_State *L) -{ - int i; - int noenv; - luaL_newmetatable(L, "_LOADLIB"); - lj_lib_pushcf(L, lj_cf_package_unloadlib, 1); - lua_setfield(L, -2, "__gc"); - luaL_register(L, LUA_LOADLIBNAME, package_lib); - lua_pushvalue(L, -1); - lua_replace(L, LUA_ENVIRONINDEX); - lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0); - for (i = 0; package_loaders[i] != NULL; i++) { - lj_lib_pushcf(L, package_loaders[i], 1); - lua_rawseti(L, -2, i+1); - } - lua_setfield(L, -2, "loaders"); - lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV"); - noenv = lua_toboolean(L, -1); - lua_pop(L, 1); - setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT, noenv); - setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT, noenv); - lua_pushliteral(L, LUA_PATH_CONFIG); - lua_setfield(L, -2, "config"); - luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); - lua_setfield(L, -2, "loaded"); - luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4); - lua_setfield(L, -2, "preload"); - lua_pushvalue(L, LUA_GLOBALSINDEX); - luaL_register(L, NULL, package_global); - lua_pop(L, 1); - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_string.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_string.c deleted file mode 100644 index a6d9986ac63..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lib_string.c +++ /dev/null @@ -1,752 +0,0 @@ -/* -** String library. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h -*/ - -#define lib_string_c -#define LUA_LIB - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_meta.h" -#include "lj_state.h" -#include "lj_ff.h" -#include "lj_bcdump.h" -#include "lj_char.h" -#include "lj_strfmt.h" -#include "lj_lib.h" - -/* ------------------------------------------------------------------------ */ - -#define LJLIB_MODULE_string - -LJLIB_LUA(string_len) /* - function(s) - CHECK_str(s) - return #s - end -*/ - -LJLIB_ASM(string_byte) LJLIB_REC(string_range 0) -{ - GCstr *s = lj_lib_checkstr(L, 1); - int32_t len = (int32_t)s->len; - int32_t start = lj_lib_optint(L, 2, 1); - int32_t stop = lj_lib_optint(L, 3, start); - int32_t n, i; - const unsigned char *p; - if (stop < 0) stop += len+1; - if (start < 0) start += len+1; - if (start <= 0) start = 1; - if (stop > len) stop = len; - if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */ - start--; - n = stop - start; - if ((uint32_t)n > LUAI_MAXCSTACK) - lj_err_caller(L, LJ_ERR_STRSLC); - lj_state_checkstack(L, (MSize)n); - p = (const unsigned char *)strdata(s) + start; - for (i = 0; i < n; i++) - setintV(L->base + i-1-LJ_FR2, p[i]); - return FFH_RES(n); -} - -LJLIB_ASM(string_char) LJLIB_REC(.) -{ - int i, nargs = (int)(L->top - L->base); - char *buf = lj_buf_tmp(L, (MSize)nargs); - for (i = 1; i <= nargs; i++) { - int32_t k = lj_lib_checkint(L, i); - if (!checku8(k)) - lj_err_arg(L, i, LJ_ERR_BADVAL); - buf[i-1] = (char)k; - } - setstrV(L, L->base-1-LJ_FR2, lj_str_new(L, buf, (size_t)nargs)); - return FFH_RES(1); -} - -LJLIB_ASM(string_sub) LJLIB_REC(string_range 1) -{ - lj_lib_checkstr(L, 1); - lj_lib_checkint(L, 2); - setintV(L->base+2, lj_lib_optint(L, 3, -1)); - return FFH_RETRY; -} - -LJLIB_CF(string_rep) LJLIB_REC(.) -{ - GCstr *s = lj_lib_checkstr(L, 1); - int32_t rep = lj_lib_checkint(L, 2); - GCstr *sep = lj_lib_optstr(L, 3); - SBuf *sb = lj_buf_tmp_(L); - if (sep && rep > 1) { - GCstr *s2 = lj_buf_cat2str(L, sep, s); - lj_buf_reset(sb); - lj_buf_putstr(sb, s); - s = s2; - rep--; - } - sb = lj_buf_putstr_rep(sb, s, rep); - setstrV(L, L->top-1, lj_buf_str(L, sb)); - lj_gc_check(L); - return 1; -} - -LJLIB_ASM(string_reverse) LJLIB_REC(string_op IRCALL_lj_buf_putstr_reverse) -{ - lj_lib_checkstr(L, 1); - return FFH_RETRY; -} -LJLIB_ASM_(string_lower) LJLIB_REC(string_op IRCALL_lj_buf_putstr_lower) -LJLIB_ASM_(string_upper) LJLIB_REC(string_op IRCALL_lj_buf_putstr_upper) - -/* ------------------------------------------------------------------------ */ - -static int writer_buf(lua_State *L, const void *p, size_t size, void *sb) -{ - lj_buf_putmem((SBuf *)sb, p, (MSize)size); - UNUSED(L); - return 0; -} - -LJLIB_CF(string_dump) -{ - GCfunc *fn = lj_lib_checkfunc(L, 1); - int strip = L->base+1 < L->top && tvistruecond(L->base+1); - SBuf *sb = lj_buf_tmp_(L); /* Assumes lj_bcwrite() doesn't use tmpbuf. 
*/ - L->top = L->base+1; - if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, sb, strip)) - lj_err_caller(L, LJ_ERR_STRDUMP); - setstrV(L, L->top-1, lj_buf_str(L, sb)); - lj_gc_check(L); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -/* macro to `unsign' a character */ -#define uchar(c) ((unsigned char)(c)) - -#define CAP_UNFINISHED (-1) -#define CAP_POSITION (-2) - -typedef struct MatchState { - const char *src_init; /* init of source string */ - const char *src_end; /* end (`\0') of source string */ - lua_State *L; - int level; /* total number of captures (finished or unfinished) */ - int depth; - struct { - const char *init; - ptrdiff_t len; - } capture[LUA_MAXCAPTURES]; -} MatchState; - -#define L_ESC '%' - -static int check_capture(MatchState *ms, int l) -{ - l -= '1'; - if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED) - lj_err_caller(ms->L, LJ_ERR_STRCAPI); - return l; -} - -static int capture_to_close(MatchState *ms) -{ - int level = ms->level; - for (level--; level>=0; level--) - if (ms->capture[level].len == CAP_UNFINISHED) return level; - lj_err_caller(ms->L, LJ_ERR_STRPATC); - return 0; /* unreachable */ -} - -static const char *classend(MatchState *ms, const char *p) -{ - switch (*p++) { - case L_ESC: - if (*p == '\0') - lj_err_caller(ms->L, LJ_ERR_STRPATE); - return p+1; - case '[': - if (*p == '^') p++; - do { /* look for a `]' */ - if (*p == '\0') - lj_err_caller(ms->L, LJ_ERR_STRPATM); - if (*(p++) == L_ESC && *p != '\0') - p++; /* skip escapes (e.g. `%]') */ - } while (*p != ']'); - return p+1; - default: - return p; - } -} - -static const unsigned char match_class_map[32] = { - 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0, - LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0, - LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0 -}; - -static int match_class(int c, int cl) -{ - if ((cl & 0xc0) == 0x40) { - int t = match_class_map[(cl&0x1f)]; - if (t) { - t = lj_char_isa(c, t); - return (cl & 0x20) ? 
t : !t; - } - if (cl == 'z') return c == 0; - if (cl == 'Z') return c != 0; - } - return (cl == c); -} - -static int matchbracketclass(int c, const char *p, const char *ec) -{ - int sig = 1; - if (*(p+1) == '^') { - sig = 0; - p++; /* skip the `^' */ - } - while (++p < ec) { - if (*p == L_ESC) { - p++; - if (match_class(c, uchar(*p))) - return sig; - } - else if ((*(p+1) == '-') && (p+2 < ec)) { - p+=2; - if (uchar(*(p-2)) <= c && c <= uchar(*p)) - return sig; - } - else if (uchar(*p) == c) return sig; - } - return !sig; -} - -static int singlematch(int c, const char *p, const char *ep) -{ - switch (*p) { - case '.': return 1; /* matches any char */ - case L_ESC: return match_class(c, uchar(*(p+1))); - case '[': return matchbracketclass(c, p, ep-1); - default: return (uchar(*p) == c); - } -} - -static const char *match(MatchState *ms, const char *s, const char *p); - -static const char *matchbalance(MatchState *ms, const char *s, const char *p) -{ - if (*p == 0 || *(p+1) == 0) - lj_err_caller(ms->L, LJ_ERR_STRPATU); - if (*s != *p) { - return NULL; - } else { - int b = *p; - int e = *(p+1); - int cont = 1; - while (++s < ms->src_end) { - if (*s == e) { - if (--cont == 0) return s+1; - } else if (*s == b) { - cont++; - } - } - } - return NULL; /* string ends out of balance */ -} - -static const char *max_expand(MatchState *ms, const char *s, - const char *p, const char *ep) -{ - ptrdiff_t i = 0; /* counts maximum expand for item */ - while ((s+i)src_end && singlematch(uchar(*(s+i)), p, ep)) - i++; - /* keeps trying to match with the maximum repetitions */ - while (i>=0) { - const char *res = match(ms, (s+i), ep+1); - if (res) return res; - i--; /* else didn't match; reduce 1 repetition to try again */ - } - return NULL; -} - -static const char *min_expand(MatchState *ms, const char *s, - const char *p, const char *ep) -{ - for (;;) { - const char *res = match(ms, s, ep+1); - if (res != NULL) - return res; - else if (ssrc_end && singlematch(uchar(*s), p, ep)) - s++; /* try with one more repetition */ - else - return NULL; - } -} - -static const char *start_capture(MatchState *ms, const char *s, - const char *p, int what) -{ - const char *res; - int level = ms->level; - if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN); - ms->capture[level].init = s; - ms->capture[level].len = what; - ms->level = level+1; - if ((res=match(ms, s, p)) == NULL) /* match failed? */ - ms->level--; /* undo capture */ - return res; -} - -static const char *end_capture(MatchState *ms, const char *s, - const char *p) -{ - int l = capture_to_close(ms); - const char *res; - ms->capture[l].len = s - ms->capture[l].init; /* close capture */ - if ((res = match(ms, s, p)) == NULL) /* match failed? */ - ms->capture[l].len = CAP_UNFINISHED; /* undo capture */ - return res; -} - -static const char *match_capture(MatchState *ms, const char *s, int l) -{ - size_t len; - l = check_capture(ms, l); - len = (size_t)ms->capture[l].len; - if ((size_t)(ms->src_end-s) >= len && - memcmp(ms->capture[l].init, s, len) == 0) - return s+len; - else - return NULL; -} - -static const char *match(MatchState *ms, const char *s, const char *p) -{ - if (++ms->depth > LJ_MAX_XLEVEL) - lj_err_caller(ms->L, LJ_ERR_STRPATX); - init: /* using goto's to optimize tail recursion */ - switch (*p) { - case '(': /* start capture */ - if (*(p+1) == ')') /* position capture? 
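[Editorial aside: match_class above maps the %a/%d/%l/... class letters onto lj_char bit tests, with an uppercase letter selecting the complement (the (cl & 0x20) test), and matchbalance implements the %bxy balanced-delimiter item. Typical effects:]

  print(string.match("item42", "%a+"))         --> item
  print(string.match("item42", "%A+"))         --> 42        (uppercase = complement)
  print(string.match("f(a,(b,c)),d", "%b()"))  --> (a,(b,c)) (balanced parentheses)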
*/ - s = start_capture(ms, s, p+2, CAP_POSITION); - else - s = start_capture(ms, s, p+1, CAP_UNFINISHED); - break; - case ')': /* end capture */ - s = end_capture(ms, s, p+1); - break; - case L_ESC: - switch (*(p+1)) { - case 'b': /* balanced string? */ - s = matchbalance(ms, s, p+2); - if (s == NULL) break; - p+=4; - goto init; /* else s = match(ms, s, p+4); */ - case 'f': { /* frontier? */ - const char *ep; char previous; - p += 2; - if (*p != '[') - lj_err_caller(ms->L, LJ_ERR_STRPATB); - ep = classend(ms, p); /* points to what is next */ - previous = (s == ms->src_init) ? '\0' : *(s-1); - if (matchbracketclass(uchar(previous), p, ep-1) || - !matchbracketclass(uchar(*s), p, ep-1)) { s = NULL; break; } - p=ep; - goto init; /* else s = match(ms, s, ep); */ - } - default: - if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */ - s = match_capture(ms, s, uchar(*(p+1))); - if (s == NULL) break; - p+=2; - goto init; /* else s = match(ms, s, p+2) */ - } - goto dflt; /* case default */ - } - break; - case '\0': /* end of pattern */ - break; /* match succeeded */ - case '$': - /* is the `$' the last char in pattern? */ - if (*(p+1) != '\0') goto dflt; - if (s != ms->src_end) s = NULL; /* check end of string */ - break; - default: dflt: { /* it is a pattern item */ - const char *ep = classend(ms, p); /* points to what is next */ - int m = ssrc_end && singlematch(uchar(*s), p, ep); - switch (*ep) { - case '?': { /* optional */ - const char *res; - if (m && ((res=match(ms, s+1, ep+1)) != NULL)) { - s = res; - break; - } - p=ep+1; - goto init; /* else s = match(ms, s, ep+1); */ - } - case '*': /* 0 or more repetitions */ - s = max_expand(ms, s, p, ep); - break; - case '+': /* 1 or more repetitions */ - s = (m ? max_expand(ms, s+1, p, ep) : NULL); - break; - case '-': /* 0 or more repetitions (minimum) */ - s = min_expand(ms, s, p, ep); - break; - default: - if (m) { s++; p=ep; goto init; } /* else s = match(ms, s+1, ep); */ - s = NULL; - break; - } - break; - } - } - ms->depth--; - return s; -} - -static void push_onecapture(MatchState *ms, int i, const char *s, const char *e) -{ - if (i >= ms->level) { - if (i == 0) /* ms->level == 0, too */ - lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */ - else - lj_err_caller(ms->L, LJ_ERR_STRCAPI); - } else { - ptrdiff_t l = ms->capture[i].len; - if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU); - if (l == CAP_POSITION) - lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1); - else - lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l); - } -} - -static int push_captures(MatchState *ms, const char *s, const char *e) -{ - int i; - int nlevels = (ms->level == 0 && s) ? 1 : ms->level; - luaL_checkstack(ms->L, nlevels, "too many captures"); - for (i = 0; i < nlevels; i++) - push_onecapture(ms, i, s, e); - return nlevels; /* number of strings pushed */ -} - -static int str_find_aux(lua_State *L, int find) -{ - GCstr *s = lj_lib_checkstr(L, 1); - GCstr *p = lj_lib_checkstr(L, 2); - int32_t start = lj_lib_optint(L, 3, 1); - MSize st; - if (start < 0) start += (int32_t)s->len; else start--; - if (start < 0) start = 0; - st = (MSize)start; - if (st > s->len) { -#if LJ_52 - setnilV(L->top-1); - return 1; -#else - st = s->len; -#endif - } - if (find && ((L->base+3 < L->top && tvistruecond(L->base+3)) || - !lj_str_haspattern(p))) { /* Search for fixed string. 
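[Editorial aside: str_find_aux, begun above, backs both string.find and string.match — find returns the match boundaries plus any captures, match returns only the captures (or the whole match when there are none), and a truthy fourth argument to find, or a pattern without specials, takes the plain substring path. For example:]

  print(string.find("hello world", "o w"))              --> 5   7
  print(string.find("a.b", ".", 1, true))               --> 2   2   (plain search)
  print(string.match("key = 42", "(%a+)%s*=%s*(%d+)"))  --> key  42
  print(string.find("key = 42", "(%a+)%s*=%s*(%d+)"))   --> 1   8   key  42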
*/ - const char *q = lj_str_find(strdata(s)+st, strdata(p), s->len-st, p->len); - if (q) { - setintV(L->top-2, (int32_t)(q-strdata(s)) + 1); - setintV(L->top-1, (int32_t)(q-strdata(s)) + (int32_t)p->len); - return 2; - } - } else { /* Search for pattern. */ - MatchState ms; - const char *pstr = strdata(p); - const char *sstr = strdata(s) + st; - int anchor = 0; - if (*pstr == '^') { pstr++; anchor = 1; } - ms.L = L; - ms.src_init = strdata(s); - ms.src_end = strdata(s) + s->len; - do { /* Loop through string and try to match the pattern. */ - const char *q; - ms.level = ms.depth = 0; - q = match(&ms, sstr, pstr); - if (q) { - if (find) { - setintV(L->top++, (int32_t)(sstr-(strdata(s)-1))); - setintV(L->top++, (int32_t)(q-strdata(s))); - return push_captures(&ms, NULL, NULL) + 2; - } else { - return push_captures(&ms, sstr, q); - } - } - } while (sstr++ < ms.src_end && !anchor); - } - setnilV(L->top-1); /* Not found. */ - return 1; -} - -LJLIB_CF(string_find) LJLIB_REC(.) -{ - return str_find_aux(L, 1); -} - -LJLIB_CF(string_match) -{ - return str_find_aux(L, 0); -} - -LJLIB_NOREG LJLIB_CF(string_gmatch_aux) -{ - const char *p = strVdata(lj_lib_upvalue(L, 2)); - GCstr *str = strV(lj_lib_upvalue(L, 1)); - const char *s = strdata(str); - TValue *tvpos = lj_lib_upvalue(L, 3); - const char *src = s + tvpos->u32.lo; - MatchState ms; - ms.L = L; - ms.src_init = s; - ms.src_end = s + str->len; - for (; src <= ms.src_end; src++) { - const char *e; - ms.level = ms.depth = 0; - if ((e = match(&ms, src, p)) != NULL) { - int32_t pos = (int32_t)(e - s); - if (e == src) pos++; /* Ensure progress for empty match. */ - tvpos->u32.lo = (uint32_t)pos; - return push_captures(&ms, src, e); - } - } - return 0; /* not found */ -} - -LJLIB_CF(string_gmatch) -{ - lj_lib_checkstr(L, 1); - lj_lib_checkstr(L, 2); - L->top = L->base+3; - (L->top-1)->u64 = 0; - lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3); - return 1; -} - -static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e) -{ - size_t l, i; - const char *news = lua_tolstring(ms->L, 3, &l); - for (i = 0; i < l; i++) { - if (news[i] != L_ESC) { - luaL_addchar(b, news[i]); - } else { - i++; /* skip ESC */ - if (!lj_char_isdigit(uchar(news[i]))) { - luaL_addchar(b, news[i]); - } else if (news[i] == '0') { - luaL_addlstring(b, s, (size_t)(e - s)); - } else { - push_onecapture(ms, news[i] - '1', s, e); - luaL_addvalue(b); /* add capture to accumulated result */ - } - } - } -} - -static void add_value(MatchState *ms, luaL_Buffer *b, - const char *s, const char *e) -{ - lua_State *L = ms->L; - switch (lua_type(L, 3)) { - case LUA_TNUMBER: - case LUA_TSTRING: { - add_s(ms, b, s, e); - return; - } - case LUA_TFUNCTION: { - int n; - lua_pushvalue(L, 3); - n = push_captures(ms, s, e); - lua_call(L, n, 1); - break; - } - case LUA_TTABLE: { - push_onecapture(ms, 0, s, e); - lua_gettable(L, 3); - break; - } - } - if (!lua_toboolean(L, -1)) { /* nil or false? */ - lua_pop(L, 1); - lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */ - } else if (!lua_isstring(L, -1)) { - lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1)); - } - luaL_addvalue(b); /* add result to accumulator */ -} - -LJLIB_CF(string_gsub) -{ - size_t srcl; - const char *src = luaL_checklstring(L, 1, &srcl); - const char *p = luaL_checkstring(L, 2); - int tr = lua_type(L, 3); - int max_s = luaL_optint(L, 4, (int)(srcl+1)); - int anchor = (*p == '^') ? 
(p++, 1) : 0; - int n = 0; - MatchState ms; - luaL_Buffer b; - if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING || - tr == LUA_TFUNCTION || tr == LUA_TTABLE)) - lj_err_arg(L, 3, LJ_ERR_NOSFT); - luaL_buffinit(L, &b); - ms.L = L; - ms.src_init = src; - ms.src_end = src+srcl; - while (n < max_s) { - const char *e; - ms.level = ms.depth = 0; - e = match(&ms, src, p); - if (e) { - n++; - add_value(&ms, &b, src, e); - } - if (e && e>src) /* non empty match? */ - src = e; /* skip it */ - else if (src < ms.src_end) - luaL_addchar(&b, *src++); - else - break; - if (anchor) - break; - } - luaL_addlstring(&b, src, (size_t)(ms.src_end-src)); - luaL_pushresult(&b); - lua_pushinteger(L, n); /* number of substitutions */ - return 2; -} - -/* ------------------------------------------------------------------------ */ - -/* Emulate tostring() inline. */ -static GCstr *string_fmt_tostring(lua_State *L, int arg, int retry) -{ - TValue *o = L->base+arg-1; - cTValue *mo; - lua_assert(o < L->top); /* Caller already checks for existence. */ - if (LJ_LIKELY(tvisstr(o))) - return strV(o); - if (retry != 2 && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { - copyTV(L, L->top++, mo); - copyTV(L, L->top++, o); - lua_call(L, 1, 1); - copyTV(L, L->base+arg-1, --L->top); - return NULL; /* Buffer may be overwritten, retry. */ - } - return lj_strfmt_obj(L, o); -} - -LJLIB_CF(string_format) LJLIB_REC(.) -{ - int arg, top = (int)(L->top - L->base); - GCstr *fmt; - SBuf *sb; - FormatState fs; - SFormat sf; - int retry = 0; -again: - arg = 1; - sb = lj_buf_tmp_(L); - fmt = lj_lib_checkstr(L, arg); - lj_strfmt_init(&fs, strdata(fmt), fmt->len); - while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { - if (sf == STRFMT_LIT) { - lj_buf_putmem(sb, fs.str, fs.len); - } else if (sf == STRFMT_ERR) { - lj_err_callerv(L, LJ_ERR_STRFMT, strdata(lj_str_new(L, fs.str, fs.len))); - } else { - if (++arg > top) - luaL_argerror(L, arg, lj_obj_typename[0]); - switch (STRFMT_TYPE(sf)) { - case STRFMT_INT: - if (tvisint(L->base+arg-1)) { - int32_t k = intV(L->base+arg-1); - if (sf == STRFMT_INT) - lj_strfmt_putint(sb, k); /* Shortcut for plain %d. */ - else - lj_strfmt_putfxint(sb, sf, k); - } else { - lj_strfmt_putfnum_int(sb, sf, lj_lib_checknum(L, arg)); - } - break; - case STRFMT_UINT: - if (tvisint(L->base+arg-1)) - lj_strfmt_putfxint(sb, sf, intV(L->base+arg-1)); - else - lj_strfmt_putfnum_uint(sb, sf, lj_lib_checknum(L, arg)); - break; - case STRFMT_NUM: - lj_strfmt_putfnum(sb, sf, lj_lib_checknum(L, arg)); - break; - case STRFMT_STR: { - GCstr *str = string_fmt_tostring(L, arg, retry); - if (str == NULL) - retry = 1; - else if ((sf & STRFMT_T_QUOTED)) - lj_strfmt_putquoted(sb, str); /* No formatting. */ - else - lj_strfmt_putfstr(sb, sf, str); - break; - } - case STRFMT_CHAR: - lj_strfmt_putfchar(sb, sf, lj_lib_checkint(L, arg)); - break; - case STRFMT_PTR: /* No formatting. */ - lj_strfmt_putptr(sb, lj_obj_ptr(L->base+arg-1)); - break; - default: - lua_assert(0); - break; - } - } - } - if (retry++ == 1) goto again; - setstrV(L, L->top-1, lj_buf_str(L, sb)); - lj_gc_check(L); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -#include "lj_libdef.h" - -LUALIB_API int luaopen_string(lua_State *L) -{ - GCtab *mt; - global_State *g; - LJ_LIB_REG(L, LUA_STRLIBNAME, string); -#if defined(LUA_COMPAT_GFIND) && !LJ_52 - lua_getfield(L, -1, "gmatch"); - lua_setfield(L, -2, "gfind"); -#endif - mt = lj_tab_new(L, 0, 1); - /* NOBARRIER: basemt is a GC root. 
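[Editorial aside: string_fmt_tostring above is what lets %s pick up a __tostring metamethod; since the metamethod may clobber the shared temporary buffer, string.format restarts the whole formatting pass once (the retry/goto-again logic). Observable behaviour:]

  local obj = setmetatable({}, { __tostring = function() return "<obj>" end })
  print(string.format("%s and %d", obj, 7))   --> <obj> and 7
  print(string.format("%q", [[say "hi"]]))    --> "say \"hi\""   (quoted via lj_strfmt_putquoted)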
*/ - g = G(L); - setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt)); - settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1)); - mt->nomm = (uint8_t)(~(1u<array); - Node *node; - lua_Number m = 0; - ptrdiff_t i; - for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--) - if (!tvisnil(&array[i])) { - m = (lua_Number)(int32_t)i; - break; - } - node = noderef(t->node); - for (i = (ptrdiff_t)t->hmask; i >= 0; i--) - if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) { - lua_Number n = numberVnum(&node[i].key); - if (n > m) m = n; - } - setnumV(L->top-1, m); - return 1; -} - -LJLIB_CF(table_insert) LJLIB_REC(.) -{ - GCtab *t = lj_lib_checktab(L, 1); - int32_t n, i = (int32_t)lj_tab_len(t) + 1; - int nargs = (int)((char *)L->top - (char *)L->base); - if (nargs != 2*sizeof(TValue)) { - if (nargs != 3*sizeof(TValue)) - lj_err_caller(L, LJ_ERR_TABINS); - /* NOBARRIER: This just moves existing elements around. */ - for (n = lj_lib_checkint(L, 2); i > n; i--) { - /* The set may invalidate the get pointer, so need to do it first! */ - TValue *dst = lj_tab_setint(L, t, i); - cTValue *src = lj_tab_getint(t, i-1); - if (src) { - copyTV(L, dst, src); - } else { - setnilV(dst); - } - } - i = n; - } - { - TValue *dst = lj_tab_setint(L, t, i); - copyTV(L, dst, L->top-1); /* Set new value. */ - lj_gc_barriert(L, t, dst); - } - return 0; -} - -LJLIB_LUA(table_remove) /* - function(t, pos) - CHECK_tab(t) - local len = #t - if pos == nil then - if len ~= 0 then - local old = t[len] - t[len] = nil - return old - end - else - CHECK_int(pos) - if pos >= 1 and pos <= len then - local old = t[pos] - for i=pos+1,len do - t[i-1] = t[i] - end - t[len] = nil - return old - end - end - end -*/ - -LJLIB_CF(table_concat) LJLIB_REC(.) -{ - GCtab *t = lj_lib_checktab(L, 1); - GCstr *sep = lj_lib_optstr(L, 2); - int32_t i = lj_lib_optint(L, 3, 1); - int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ? - lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t); - SBuf *sb = lj_buf_tmp_(L); - SBuf *sbx = lj_buf_puttab(sb, t, sep, i, e); - if (LJ_UNLIKELY(!sbx)) { /* Error: bad element type. */ - int32_t idx = (int32_t)(intptr_t)sbufP(sb); - cTValue *o = lj_tab_getint(t, idx); - lj_err_callerv(L, LJ_ERR_TABCAT, - lj_obj_itypename[o ? itypemap(o) : ~LJ_TNIL], idx); - } - setstrV(L, L->top-1, lj_buf_str(L, sbx)); - lj_gc_check(L); - return 1; -} - -/* ------------------------------------------------------------------------ */ - -static void set2(lua_State *L, int i, int j) -{ - lua_rawseti(L, 1, i); - lua_rawseti(L, 1, j); -} - -static int sort_comp(lua_State *L, int a, int b) -{ - if (!lua_isnil(L, 2)) { /* function? */ - int res; - lua_pushvalue(L, 2); - lua_pushvalue(L, a-1); /* -1 to compensate function */ - lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */ - lua_call(L, 2, 1); - res = lua_toboolean(L, -1); - lua_pop(L, 1); - return res; - } else { /* a < b? */ - return lua_lessthan(L, a, b); - } -} - -static void auxsort(lua_State *L, int l, int u) -{ - while (l < u) { /* for tail recursion */ - int i, j; - /* sort elements a[l], a[(l+u)/2] and a[u] */ - lua_rawgeti(L, 1, l); - lua_rawgeti(L, 1, u); - if (sort_comp(L, -1, -2)) /* a[u] < a[l]? 
*/ - set2(L, l, u); /* swap a[l] - a[u] */ - else - lua_pop(L, 2); - if (u-l == 1) break; /* only 2 elements */ - i = (l+u)/2; - lua_rawgeti(L, 1, i); - lua_rawgeti(L, 1, l); - if (sort_comp(L, -2, -1)) { /* a[i]= P */ - while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) { - if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT); - lua_pop(L, 1); /* remove a[i] */ - } - /* repeat --j until a[j] <= P */ - while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) { - if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT); - lua_pop(L, 1); /* remove a[j] */ - } - if (jbase+1)) - lj_lib_checkfunc(L, 2); - auxsort(L, 1, n); - return 0; -} - -#if LJ_52 -LJLIB_PUSH("n") -LJLIB_CF(table_pack) -{ - TValue *array, *base = L->base; - MSize i, n = (uint32_t)(L->top - base); - GCtab *t = lj_tab_new(L, n ? n+1 : 0, 1); - /* NOBARRIER: The table is new (marked white). */ - setintV(lj_tab_setstr(L, t, strV(lj_lib_upvalue(L, 1))), (int32_t)n); - for (array = tvref(t->array) + 1, i = 0; i < n; i++) - copyTV(L, &array[i], &base[i]); - settabV(L, base, t); - L->top = base+1; - lj_gc_check(L); - return 1; -} -#endif - -LJLIB_NOREG LJLIB_CF(table_new) LJLIB_REC(.) -{ - int32_t a = lj_lib_checkint(L, 1); - int32_t h = lj_lib_checkint(L, 2); - lua_createtable(L, a, h); - return 1; -} - -LJLIB_NOREG LJLIB_CF(table_clear) LJLIB_REC(.) -{ - lj_tab_clear(lj_lib_checktab(L, 1)); - return 0; -} - -static int luaopen_table_new(lua_State *L) -{ - return lj_lib_postreg(L, lj_cf_table_new, FF_table_new, "new"); -} - -static int luaopen_table_clear(lua_State *L) -{ - return lj_lib_postreg(L, lj_cf_table_clear, FF_table_clear, "clear"); -} - -/* ------------------------------------------------------------------------ */ - -#include "lj_libdef.h" - -LUALIB_API int luaopen_table(lua_State *L) -{ - LJ_LIB_REG(L, LUA_TABLIBNAME, table); -#if LJ_52 - lua_getglobal(L, "unpack"); - lua_setfield(L, -2, "unpack"); -#endif - lj_lib_prereg(L, LUA_TABLIBNAME ".new", luaopen_table_new, tabV(L->top-1)); - lj_lib_prereg(L, LUA_TABLIBNAME ".clear", luaopen_table_clear, tabV(L->top-1)); - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj.supp b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj.supp deleted file mode 100644 index 411f2617001..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj.supp +++ /dev/null @@ -1,26 +0,0 @@ -# Valgrind suppression file for LuaJIT 2.0. -{ - Optimized string compare - Memcheck:Addr4 - fun:lj_str_cmp -} -{ - Optimized string compare - Memcheck:Addr1 - fun:lj_str_cmp -} -{ - Optimized string compare - Memcheck:Addr4 - fun:lj_str_new -} -{ - Optimized string compare - Memcheck:Addr1 - fun:lj_str_new -} -{ - Optimized string compare - Memcheck:Cond - fun:lj_str_new -} diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.c deleted file mode 100644 index ddd50cae4f1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.c +++ /dev/null @@ -1,1398 +0,0 @@ -/* -** Bundled memory allocator. -** -** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc. -** The original bears the following remark: -** -** This is a version (aka dlmalloc) of malloc/free/realloc written by -** Doug Lea and released to the public domain, as explained at -** http://creativecommons.org/licenses/publicdomain. -** -** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee) -** -** No additional copyright is claimed over the customizations. -** Please do NOT bother the original author about this version here! 
-** -** If you want to use dlmalloc in another project, you should get -** the original from: ftp://gee.cs.oswego.edu/pub/misc/ -** For thread-safe derivatives, take a look at: -** - ptmalloc: http://www.malloc.de/ -** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/ -*/ - -#define lj_alloc_c -#define LUA_CORE - -/* To get the mremap prototype. Must be defined before any system includes. */ -#if defined(__linux__) && !defined(_GNU_SOURCE) -#define _GNU_SOURCE -#endif - -#include "lj_def.h" -#include "lj_arch.h" -#include "lj_alloc.h" - -#ifndef LUAJIT_USE_SYSMALLOC - -#define MAX_SIZE_T (~(size_t)0) -#define MALLOC_ALIGNMENT ((size_t)8U) - -#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U) -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U) -#define MAX_RELEASE_CHECK_RATE 255 - -/* ------------------- size_t and alignment properties -------------------- */ - -/* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) - -/* Some constants coerced to size_t */ -/* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) - -/* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) - -/* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) - -/* -------------------------- MMAP support ------------------------------- */ - -#define MFAIL ((void *)(MAX_SIZE_T)) -#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ - -#define IS_DIRECT_BIT (SIZE_T_ONE) - -#if LJ_TARGET_WINDOWS - -#define WIN32_LEAN_AND_MEAN -#include - -#if LJ_64 && !LJ_GC64 - -/* Undocumented, but hey, that's what we all love so much about Windows. */ -typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits, - size_t *size, ULONG alloctype, ULONG prot); -static PNTAVM ntavm; - -/* Number of top bits of the lower 32 bits of an address that must be zero. -** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB. -*/ -#define NTAVM_ZEROBITS 1 - -static void INIT_MMAP(void) -{ - ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"), - "NtAllocateVirtualMemory"); -} - -/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */ -static LJ_AINLINE void *CALL_MMAP(size_t size) -{ - DWORD olderr = GetLastError(); - void *ptr = NULL; - long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size, - MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - SetLastError(olderr); - return st == 0 ? ptr : MFAIL; -} - -/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static LJ_AINLINE void *DIRECT_MMAP(size_t size) -{ - DWORD olderr = GetLastError(); - void *ptr = NULL; - long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size, - MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE); - SetLastError(olderr); - return st == 0 ? 
ptr : MFAIL; -} - -#else - -#define INIT_MMAP() ((void)0) - -/* Win32 MMAP via VirtualAlloc */ -static LJ_AINLINE void *CALL_MMAP(size_t size) -{ - DWORD olderr = GetLastError(); - void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - SetLastError(olderr); - return ptr ? ptr : MFAIL; -} - -/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static LJ_AINLINE void *DIRECT_MMAP(size_t size) -{ - DWORD olderr = GetLastError(); - void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - SetLastError(olderr); - return ptr ? ptr : MFAIL; -} - -#endif - -/* This function supports releasing coalesed segments */ -static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) -{ - DWORD olderr = GetLastError(); - MEMORY_BASIC_INFORMATION minfo; - char *cptr = (char *)ptr; - while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; - if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || - minfo.State != MEM_COMMIT || minfo.RegionSize > size) - return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; - cptr += minfo.RegionSize; - size -= minfo.RegionSize; - } - SetLastError(olderr); - return 0; -} - -#else - -#include -#include - -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) - -#if LJ_64 && !LJ_GC64 -/* 64 bit mode with 32 bit pointers needs special support for allocating -** memory in the lower 2GB. -*/ - -#if defined(MAP_32BIT) - -#if defined(__sun__) -#define MMAP_REGION_START ((uintptr_t)0x1000) -#else -/* Actually this only gives us max. 1GB in current Linux kernels. */ -#define MMAP_REGION_START ((uintptr_t)0) -#endif - -static LJ_AINLINE void *CALL_MMAP(size_t size) -{ - int olderr = errno; - void *ptr = mmap((void *)MMAP_REGION_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0); - errno = olderr; - return ptr; -} - -#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) || defined(__CYGWIN__) - -/* OSX and FreeBSD mmap() use a naive first-fit linear search. -** That's perfect for us. Except that -pagezero_size must be set for OSX, -** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs -** to be reduced to 250MB on FreeBSD. -*/ -#if LJ_TARGET_OSX || defined(__DragonFly__) -#define MMAP_REGION_START ((uintptr_t)0x10000) -#elif LJ_TARGET_PS4 -#define MMAP_REGION_START ((uintptr_t)0x4000) -#else -#define MMAP_REGION_START ((uintptr_t)0x10000000) -#endif -#define MMAP_REGION_END ((uintptr_t)0x80000000) - -#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 -#include -#endif - -static LJ_AINLINE void *CALL_MMAP(size_t size) -{ - int olderr = errno; - /* Hint for next allocation. Doesn't need to be thread-safe. */ - static uintptr_t alloc_hint = MMAP_REGION_START; - int retry = 0; -#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 - static int rlimit_modified = 0; - if (LJ_UNLIKELY(rlimit_modified == 0)) { - struct rlimit rlim; - rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START; - setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail below. 
*/ - rlimit_modified = 1; - } -#endif - for (;;) { - void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if ((uintptr_t)p >= MMAP_REGION_START && - (uintptr_t)p + size < MMAP_REGION_END) { - alloc_hint = (uintptr_t)p + size; - errno = olderr; - return p; - } - if (p != CMFAIL) munmap(p, size); -#if defined(__sun__) || defined(__DragonFly__) - alloc_hint += 0x1000000; /* Need near-exhaustive linear scan. */ - if (alloc_hint + size < MMAP_REGION_END) continue; -#endif - if (retry) break; - retry = 1; - alloc_hint = MMAP_REGION_START; - } - errno = olderr; - return CMFAIL; -} - -#else - -#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS" - -#endif - -#else - -/* 32 bit mode and GC64 mode is easy. */ -static LJ_AINLINE void *CALL_MMAP(size_t size) -{ - int olderr = errno; - void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - errno = olderr; - return ptr; -} - -#endif - -#define INIT_MMAP() ((void)0) -#define DIRECT_MMAP(s) CALL_MMAP(s) - -static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) -{ - int olderr = errno; - int ret = munmap(ptr, size); - errno = olderr; - return ret; -} - -#if LJ_TARGET_LINUX -/* Need to define _GNU_SOURCE to get the mremap prototype. */ -static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, - int flags) -{ - int olderr = errno; - ptr = mremap(ptr, osz, nsz, flags); - errno = olderr; - return ptr; -} - -#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv)) -#define CALL_MREMAP_NOMOVE 0 -#define CALL_MREMAP_MAYMOVE 1 -#if LJ_64 && !LJ_GC64 -#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE -#else -#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE -#endif -#endif - -#endif - -#ifndef CALL_MREMAP -#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL) -#endif - -/* ----------------------- Chunk representations ------------------------ */ - -struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk *fd; /* double links -- used only if free. */ - struct malloc_chunk *bk; -}; - -typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk *mchunkptr; -typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ -typedef size_t bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ - -/* ------------------- Chunks sizes and alignments ----------------------- */ - -#define MCHUNK_SIZE (sizeof(mchunk)) - -#define CHUNK_OVERHEAD (SIZE_T_SIZE) - -/* Direct chunks need a second word of overhead ... */ -#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -/* ... and additional padding for fake next-chunk at foot */ -#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES) - -/* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES)) -/* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) - -/* Bounds on request (not chunk) sizes. 
*/ -#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) - -/* pad request bytes into a usable size */ -#define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* pad request, checking for minimum (but not maximum) */ -#define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) - -/* ------------------ Operations on head and foot fields ----------------- */ - -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) - -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) - -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define chunksize(p) ((p)->head & ~(INUSE_BITS)) - -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) - -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) - -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS))) -#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) )) - -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) - -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) - -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) - -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) - -#define is_direct(p)\ - (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT)) - -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD) - -/* ---------------------- Overlaid data structures ----------------------- */ - -struct malloc_tree_chunk { - /* The first four fields must be compatible with malloc_chunk */ - size_t prev_foot; - size_t head; - struct malloc_tree_chunk *fd; - struct malloc_tree_chunk *bk; - - struct malloc_tree_chunk *child[2]; - struct malloc_tree_chunk *parent; - bindex_t index; -}; - -typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk *tchunkptr; -typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ - -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? 
(t)->child[0] : (t)->child[1]) - -/* ----------------------------- Segments -------------------------------- */ - -struct malloc_segment { - char *base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment *next; /* ptr to next segment */ -}; - -typedef struct malloc_segment msegment; -typedef struct malloc_segment *msegmentptr; - -/* ---------------------------- malloc_state ----------------------------- */ - -/* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) - -struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - msegment seg; -}; - -typedef struct malloc_state *mstate; - -#define is_initialized(M) ((M)->top != 0) - -/* -------------------------- system alloc setup ------------------------- */ - -/* page-align a size */ -#define page_align(S)\ - (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE)) - -/* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\ - & ~(DEFAULT_GRANULARITY - SIZE_T_ONE)) - -#if LJ_TARGET_WINDOWS -#define mmap_align(S) granularity_align(S) -#else -#define mmap_align(S) page_align(S) -#endif - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) - -/* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char *addr) -{ - msegmentptr sp = &m->seg; - for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; - } -} - -/* Return true if segment contains a segment link */ -static int has_segment_link(mstate m, msegmentptr ss) -{ - msegmentptr sp = &m->seg; - for (;;) { - if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; - } -} - -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) - -/* ---------------------------- Indexing Bins ---------------------------- */ - -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) ((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. 
See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I */ -#define compute_tree_index(S, I)\ -{\ - unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\ - if (X == 0) {\ - I = 0;\ - } else if (X > 0xFFFF) {\ - I = NTREEBINS-1;\ - } else {\ - unsigned int K = lj_fls(X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) - -/* Shift placing maximum resolved bit in a treebin at i as sign bit */ -#define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) - -/* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) - -/* ------------------------ Operations on bin maps ----------------------- */ - -/* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) - -/* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) - -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) - -/* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | (~(x<<1)+1)) - -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) - -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) - -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) - -/* ----------------------- Operations on smallbins ----------------------- */ - -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else\ - F = B->fd;\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} - -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - if (F == B) {\ - clear_smallmap(M, I);\ - } else {\ - F->bk = B;\ - B->fd = F;\ - }\ -} - -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - if (B == F) {\ - clear_smallmap(M, I);\ - } else {\ - B->fd = F;\ - F->bk = B;\ - }\ -} - -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} - -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert 
chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr *H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - } else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0) {\ - T = *C;\ - } else {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - } else {\ - tchunkptr F = T->fd;\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - }\ - }\ -} - -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - F->bk = R;\ - R->fd = F;\ - } else {\ - tchunkptr *RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr *CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - *RP = 0;\ - }\ - }\ - if (XP != 0) {\ - tbinptr *H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - } else {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - if (R != 0) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - if ((C1 = X->child[1]) != 0) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - }\ - }\ -} - -/* Relays to large vs small bin operations */ - -#define insert_chunk(M, P, S)\ - if (is_small(S)) { insert_small_chunk(M, P, S)\ - } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) { unlink_small_chunk(M, P, S)\ - } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } - -/* ----------------------- Direct-mmapping chunks ----------------------- */ - -static void *direct_alloc(size_t nb) -{ - size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */ - char *mm = (char *)(DIRECT_MMAP(mmsize)); - if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - DIRECT_FOOT_PAD; - mchunkptr p = (mchunkptr)(mm + offset); - p->prev_foot = offset | IS_DIRECT_BIT; - p->head = psize|CINUSE_BIT; - chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; - return chunk2mem(p); - } - } - return NULL; -} - -static mchunkptr direct_resize(mchunkptr oldp, size_t nb) -{ - size_t oldsize = chunksize(oldp); - if (is_small(nb)) /* Can't shrink direct regions below small size */ - return NULL; - /* Keep old chunk if big enough but not too big */ - if (oldsize >= nb + SIZE_T_SIZE && - (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) { - return oldp; - } else { - size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT; - size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD; - size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char *cp = (char *)CALL_MREMAP((char *)oldp - offset, - oldmmsize, newmmsize, CALL_MREMAP_MV); - if (cp != CMFAIL) { - mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - DIRECT_FOOT_PAD; - newp->head = psize|CINUSE_BIT; - chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - 
chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; - return newp; - } - } - return NULL; -} - -/* -------------------------- mspace management -------------------------- */ - -/* Initialize top chunk and its size */ -static void init_top(mstate m, mchunkptr p, size_t psize) -{ - /* Ensure alignment */ - size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char *)p + offset); - psize -= offset; - - m->top = p; - m->topsize = psize; - p->head = psize | PINUSE_BIT; - /* set size of fake trailing chunk holding overhead space only once */ - chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */ -} - -/* Initialize bins for a new mstate that is otherwise zeroed out */ -static void init_bins(mstate m) -{ - /* Establish circular links for smallbins */ - bindex_t i; - for (i = 0; i < NSMALLBINS; i++) { - sbinptr bin = smallbin_at(m,i); - bin->fd = bin->bk = bin; - } -} - -/* Allocate chunk and prepend remainder with chunk in successor base. */ -static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) -{ - mchunkptr p = align_as_chunk(newbase); - mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (size_t)((char *)oldfirst - (char *)p); - mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; - set_size_and_pinuse_of_inuse_chunk(m, p, nb); - - /* consolidate remainder with first chunk of old base */ - if (oldfirst == m->top) { - size_t tsize = m->topsize += qsize; - m->top = q; - q->head = tsize | PINUSE_BIT; - } else if (oldfirst == m->dv) { - size_t dsize = m->dvsize += qsize; - m->dv = q; - set_size_and_pinuse_of_free_chunk(q, dsize); - } else { - if (!cinuse(oldfirst)) { - size_t nsize = chunksize(oldfirst); - unlink_chunk(m, oldfirst, nsize); - oldfirst = chunk_plus_offset(oldfirst, nsize); - qsize += nsize; - } - set_free_with_pinuse(q, qsize, oldfirst); - insert_chunk(m, q, qsize); - } - - return chunk2mem(p); -} - -/* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char *tbase, size_t tsize) -{ - /* Determine locations and sizes of segment, fenceposts, old top */ - char *old_top = (char *)m->top; - msegmentptr oldsp = segment_holding(m, old_top); - char *old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char *asp = rawsp + offset; - char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; - mchunkptr sp = (mchunkptr)csp; - msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - - /* reset top to new space */ - init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); - - /* Set up segment record */ - set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ - m->seg.base = tbase; - m->seg.size = tsize; - m->seg.next = ss; - - /* Insert trailing fenceposts */ - for (;;) { - mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); - p->head = FENCEPOST_HEAD; - if ((char *)(&(nextp->head)) < old_end) - p = nextp; - else - break; - } - - /* Insert the rest of old top into a bin as an ordinary free chunk */ - if (csp != old_top) { - mchunkptr q = (mchunkptr)old_top; - size_t psize = (size_t)(csp - old_top); - mchunkptr tn = chunk_plus_offset(q, psize); - set_free_with_pinuse(q, psize, tn); - insert_chunk(m, q, psize); - } -} - -/* -------------------------- System allocation -------------------------- */ - -static void *alloc_sys(mstate m, size_t nb) -{ - char *tbase = CMFAIL; - size_t tsize = 0; - - /* Directly map large chunks */ - if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) { - void *mem = direct_alloc(nb); - if (mem != 0) - return mem; - } - - { - size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; - size_t rsize = granularity_align(req); - if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */ - char *mp = (char *)(CALL_MMAP(rsize)); - if (mp != CMFAIL) { - tbase = mp; - tsize = rsize; - } - } - } - - if (tbase != CMFAIL) { - msegmentptr sp = &m->seg; - /* Try to merge with an existing segment */ - while (sp != 0 && tbase != sp->base + sp->size) - sp = sp->next; - if (sp != 0 && segment_holds(sp, m->top)) { /* append */ - sp->size += tsize; - init_top(m, m->top, m->topsize + tsize); - } else { - sp = &m->seg; - while (sp != 0 && sp->base != tbase + tsize) - sp = sp->next; - if (sp != 0) { - char *oldbase = sp->base; - sp->base = tbase; - sp->size += tsize; - return prepend_alloc(m, tbase, oldbase, nb); - } else { - add_segment(m, tbase, tsize); - } - } - - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; - mchunkptr p = m->top; - mchunkptr r = m->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(m, p, nb); - return chunk2mem(p); - } - } - - return NULL; -} - -/* ----------------------- system deallocation -------------------------- */ - -/* Unmap and unlink any mmapped segments that don't contain used chunks */ -static size_t release_unused_segments(mstate m) -{ - size_t released = 0; - size_t nsegs = 0; - msegmentptr pred = &m->seg; - msegmentptr sp = pred->next; - while (sp != 0) { - char *base = sp->base; - size_t size = sp->size; - msegmentptr next = sp->next; - nsegs++; - { - mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); - /* Can unmap if first chunk holds entire segment and not pinned */ - if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { - tchunkptr tp = (tchunkptr)p; - if (p == m->dv) { - m->dv = 0; - m->dvsize = 0; - } else { - unlink_large_chunk(m, tp); - } - if (CALL_MUNMAP(base, size) == 0) { - released += size; - /* unlink obsoleted record */ - sp = pred; - sp->next = next; - } else { /* back out if cannot unmap */ - insert_large_chunk(m, tp, psize); - } - } - } - pred = sp; - sp = next; - } - /* Reset check counter */ - m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ? 
- nsegs : MAX_RELEASE_CHECK_RATE; - return released; -} - -static int alloc_trim(mstate m, size_t pad) -{ - size_t released = 0; - if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ - - if (m->topsize > pad) { - /* Shrink top space in granularity-size units, keeping at least one */ - size_t unit = DEFAULT_GRANULARITY; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char *)m->top); - - if (sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ - size_t newsize = sp->size - extra; - /* Prefer mremap, fall back to munmap */ - if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) || - (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { - released = extra; - } - } - - if (released != 0) { - sp->size -= released; - init_top(m, m->top, m->topsize - released); - } - } - - /* Unmap any unused mmapped segments */ - released += release_unused_segments(m); - - /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; - } - - return (released != 0)? 1 : 0; -} - -/* ---------------------------- malloc support --------------------------- */ - -/* allocate a large request from the best fitting chunk in a treebin */ -static void *tmalloc_large(mstate m, size_t nb) -{ - tchunkptr v = 0; - size_t rsize = ~nb+1; /* Unsigned negation */ - tchunkptr t; - bindex_t idx; - compute_tree_index(nb, idx); - - if ((t = *treebin_at(m, idx)) != 0) { - /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ - for (;;) { - tchunkptr rt; - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - v = t; - if ((rsize = trem) == 0) - break; - } - rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; - if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ - break; - } - sizebits <<= 1; - } - } - - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ - binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; - if (leftbits != 0) - t = *treebin_at(m, lj_ffs(leftbits)); - } - - while (t != 0) { /* find smallest of tree or subtree */ - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - rsize = trem; - v = t; - } - t = leftmost_child(t); - } - - /* If dv is a better fit, return NULL so malloc will use it */ - if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - mchunkptr r = chunk_plus_offset(v, nb); - unlink_large_chunk(m, v); - if (rsize < MIN_CHUNK_SIZE) { - set_inuse_and_pinuse(m, v, (rsize + nb)); - } else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - insert_chunk(m, r, rsize); - } - return chunk2mem(v); - } - return NULL; -} - -/* allocate a small request from the best fitting chunk in a treebin */ -static void *tmalloc_small(mstate m, size_t nb) -{ - tchunkptr t, v; - mchunkptr r; - size_t rsize; - bindex_t i = lj_ffs(m->treemap); - - v = t = *treebin_at(m, i); - rsize = chunksize(t) - nb; - - while ((t = leftmost_child(t)) != 0) { - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - rsize = trem; - v = t; - } - } - - r = chunk_plus_offset(v, nb); - unlink_large_chunk(m, v); - if (rsize < MIN_CHUNK_SIZE) { - set_inuse_and_pinuse(m, v, (rsize + nb)); - } 
else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(m, r, rsize); - } - return chunk2mem(v); -} - -/* ----------------------------------------------------------------------- */ - -void *lj_alloc_create(void) -{ - size_t tsize = DEFAULT_GRANULARITY; - char *tbase; - INIT_MMAP(); - tbase = (char *)(CALL_MMAP(tsize)); - if (tbase != CMFAIL) { - size_t msize = pad_request(sizeof(struct malloc_state)); - mchunkptr mn; - mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); - memset(m, 0, msize); - msp->head = (msize|PINUSE_BIT|CINUSE_BIT); - m->seg.base = tbase; - m->seg.size = tsize; - m->release_checks = MAX_RELEASE_CHECK_RATE; - init_bins(m); - mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); - return m; - } - return NULL; -} - -void lj_alloc_destroy(void *msp) -{ - mstate ms = (mstate)msp; - msegmentptr sp = &ms->seg; - while (sp != 0) { - char *base = sp->base; - size_t size = sp->size; - sp = sp->next; - CALL_MUNMAP(base, size); - } -} - -static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize) -{ - mstate ms = (mstate)msp; - void *mem; - size_t nb; - if (nsize <= MAX_SMALL_REQUEST) { - bindex_t idx; - binmap_t smallbits; - nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize); - idx = small_index(nb); - smallbits = ms->smallmap >> idx; - - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ - mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ - b = smallbin_at(ms, idx); - p = b->fd; - unlink_first_small_chunk(ms, b, p, idx); - set_inuse_and_pinuse(ms, p, small_index2size(idx)); - mem = chunk2mem(p); - return mem; - } else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ - mchunkptr b, p, r; - size_t rsize; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - bindex_t i = lj_ffs(leftbits); - b = smallbin_at(ms, i); - p = b->fd; - unlink_first_small_chunk(ms, b, p, i); - rsize = small_index2size(i) - nb; - /* Fit here cannot be remainderless if 4byte sizes */ - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) { - set_inuse_and_pinuse(ms, p, small_index2size(i)); - } else { - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - r = chunk_plus_offset(p, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(ms, r, rsize); - } - mem = chunk2mem(p); - return mem; - } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { - return mem; - } - } - } else if (nsize >= MAX_REQUEST) { - nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ - } else { - nb = pad_request(nsize); - if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { - return mem; - } - } - - if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; - mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ - mchunkptr r = ms->dv = chunk_plus_offset(p, nb); - ms->dvsize = rsize; - set_size_and_pinuse_of_free_chunk(r, rsize); - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } else { /* exhaust dv */ - size_t dvs = ms->dvsize; - ms->dvsize = 0; - ms->dv = 0; - set_inuse_and_pinuse(ms, p, dvs); - } - mem = chunk2mem(p); - return mem; - } else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; - mchunkptr p = ms->top; - mchunkptr r = ms->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - mem = chunk2mem(p); - return mem; - } - return alloc_sys(ms, nb); -} - -static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr) -{ - if (ptr != 0) { - mchunkptr p = mem2chunk(ptr); - mstate fm = (mstate)msp; - size_t psize = chunksize(p); - mchunkptr next = chunk_plus_offset(p, psize); - if (!pinuse(p)) { - size_t prevsize = p->prev_foot; - if ((prevsize & IS_DIRECT_BIT) != 0) { - prevsize &= ~IS_DIRECT_BIT; - psize += prevsize + DIRECT_FOOT_PAD; - CALL_MUNMAP((char *)p - prevsize, psize); - return NULL; - } else { - mchunkptr prev = chunk_minus_offset(p, prevsize); - psize += prevsize; - p = prev; - /* consolidate backward */ - if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - } else if ((next->head & INUSE_BITS) == INUSE_BITS) { - fm->dvsize = psize; - set_free_with_pinuse(p, psize, next); - return NULL; - } - } - } - if (!cinuse(next)) { /* consolidate forward */ - if (next == fm->top) { - size_t tsize = fm->topsize += psize; - fm->top = p; - p->head = tsize | PINUSE_BIT; - if (p == fm->dv) { - fm->dv = 0; - fm->dvsize = 0; - } - if (tsize > fm->trim_check) - alloc_trim(fm, 0); - return NULL; - } else if (next == fm->dv) { - size_t dsize = fm->dvsize += psize; - fm->dv = p; - set_size_and_pinuse_of_free_chunk(p, dsize); - return NULL; - } else { - size_t nsize = chunksize(next); - psize += nsize; - unlink_chunk(fm, next, nsize); - set_size_and_pinuse_of_free_chunk(p, psize); - if (p == fm->dv) { - fm->dvsize = psize; - return NULL; - } - } - } else { - set_free_with_pinuse(p, psize, next); - } - - if (is_small(psize)) { - insert_small_chunk(fm, p, psize); - } else { - tchunkptr tp = (tchunkptr)p; - insert_large_chunk(fm, tp, psize); - if (--fm->release_checks == 0) - release_unused_segments(fm); - } - } - return NULL; -} - -static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize) -{ - if (nsize >= MAX_REQUEST) { - return NULL; - } else { - mstate m = (mstate)msp; - mchunkptr oldp = mem2chunk(ptr); - size_t oldsize = chunksize(oldp); - mchunkptr next = chunk_plus_offset(oldp, oldsize); - mchunkptr newp = 0; - size_t nb = request2size(nsize); - - /* Try to either shrink or extend into top. Else malloc-copy-free */ - if (is_direct(oldp)) { - newp = direct_resize(oldp, nb); /* this may return NULL. 
*/ - } else if (oldsize >= nb) { /* already big enough */ - size_t rsize = oldsize - nb; - newp = oldp; - if (rsize >= MIN_CHUNK_SIZE) { - mchunkptr rem = chunk_plus_offset(newp, nb); - set_inuse(m, newp, nb); - set_inuse(m, rem, rsize); - lj_alloc_free(m, chunk2mem(rem)); - } - } else if (next == m->top && oldsize + m->topsize > nb) { - /* Expand into top */ - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; - mchunkptr newtop = chunk_plus_offset(oldp, nb); - set_inuse(m, oldp, nb); - newtop->head = newtopsize |PINUSE_BIT; - m->top = newtop; - m->topsize = newtopsize; - newp = oldp; - } - - if (newp != 0) { - return chunk2mem(newp); - } else { - void *newmem = lj_alloc_malloc(m, nsize); - if (newmem != 0) { - size_t oc = oldsize - overhead_for(oldp); - memcpy(newmem, ptr, oc < nsize ? oc : nsize); - lj_alloc_free(m, ptr); - } - return newmem; - } - } -} - -void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize) -{ - (void)osize; - if (nsize == 0) { - return lj_alloc_free(msp, ptr); - } else if (ptr == NULL) { - return lj_alloc_malloc(msp, nsize); - } else { - return lj_alloc_realloc(msp, ptr, nsize); - } -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.h deleted file mode 100644 index f87a7cf3422..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_alloc.h +++ /dev/null @@ -1,17 +0,0 @@ -/* -** Bundled memory allocator. -** Donated to the public domain. -*/ - -#ifndef _LJ_ALLOC_H -#define _LJ_ALLOC_H - -#include "lj_def.h" - -#ifndef LUAJIT_USE_SYSMALLOC -LJ_FUNC void *lj_alloc_create(void); -LJ_FUNC void lj_alloc_destroy(void *msp); -LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_api.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_api.c deleted file mode 100644 index 1f09284f991..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_api.c +++ /dev/null @@ -1,1210 +0,0 @@ -/* -** Public Lua/C API. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_api_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_func.h" -#include "lj_udata.h" -#include "lj_meta.h" -#include "lj_state.h" -#include "lj_bc.h" -#include "lj_frame.h" -#include "lj_trace.h" -#include "lj_vm.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" - -/* -- Common helper functions --------------------------------------------- */ - -#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) -#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L)) - -static TValue *index2adr(lua_State *L, int idx) -{ - if (idx > 0) { - TValue *o = L->base + (idx - 1); - return o < L->top ? 
o : niltv(L); - } else if (idx > LUA_REGISTRYINDEX) { - api_check(L, idx != 0 && -idx <= L->top - L->base); - return L->top + idx; - } else if (idx == LUA_GLOBALSINDEX) { - TValue *o = &G(L)->tmptv; - settabV(L, o, tabref(L->env)); - return o; - } else if (idx == LUA_REGISTRYINDEX) { - return registry(L); - } else { - GCfunc *fn = curr_func(L); - api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn)); - if (idx == LUA_ENVIRONINDEX) { - TValue *o = &G(L)->tmptv; - settabV(L, o, tabref(fn->c.env)); - return o; - } else { - idx = LUA_GLOBALSINDEX - idx; - return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L); - } - } -} - -static TValue *stkindex2adr(lua_State *L, int idx) -{ - if (idx > 0) { - TValue *o = L->base + (idx - 1); - return o < L->top ? o : niltv(L); - } else { - api_check(L, idx != 0 && -idx <= L->top - L->base); - return L->top + idx; - } -} - -static GCtab *getcurrenv(lua_State *L) -{ - GCfunc *fn = curr_func(L); - return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env); -} - -/* -- Miscellaneous API functions ----------------------------------------- */ - -LUA_API int lua_status(lua_State *L) -{ - return L->status; -} - -LUA_API int lua_checkstack(lua_State *L, int size) -{ - if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) { - return 0; /* Stack overflow. */ - } else if (size > 0) { - lj_state_checkstack(L, (MSize)size); - } - return 1; -} - -LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg) -{ - if (!lua_checkstack(L, size)) - lj_err_callerv(L, LJ_ERR_STKOVM, msg); -} - -LUA_API void lua_xmove(lua_State *from, lua_State *to, int n) -{ - TValue *f, *t; - if (from == to) return; - api_checknelems(from, n); - api_check(from, G(from) == G(to)); - lj_state_checkstack(to, (MSize)n); - f = from->top; - t = to->top = to->top + n; - while (--n >= 0) copyTV(to, --t, --f); - from->top = f; -} - -/* -- Stack manipulation -------------------------------------------------- */ - -LUA_API int lua_gettop(lua_State *L) -{ - return (int)(L->top - L->base); -} - -LUA_API void lua_settop(lua_State *L, int idx) -{ - if (idx >= 0) { - api_check(L, idx <= tvref(L->maxstack) - L->base); - if (L->base + idx > L->top) { - if (L->base + idx >= tvref(L->maxstack)) - lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base)); - do { setnilV(L->top++); } while (L->top < L->base + idx); - } else { - L->top = L->base + idx; - } - } else { - api_check(L, -(idx+1) <= (L->top - L->base)); - L->top += idx+1; /* Shrinks top (idx < 0). */ - } -} - -LUA_API void lua_remove(lua_State *L, int idx) -{ - TValue *p = stkindex2adr(L, idx); - api_checkvalidindex(L, p); - while (++p < L->top) copyTV(L, p-1, p); - L->top--; -} - -LUA_API void lua_insert(lua_State *L, int idx) -{ - TValue *q, *p = stkindex2adr(L, idx); - api_checkvalidindex(L, p); - for (q = L->top; q > p; q--) copyTV(L, q, q-1); - copyTV(L, p, L->top); -} - -LUA_API void lua_replace(lua_State *L, int idx) -{ - api_checknelems(L, 1); - if (idx == LUA_GLOBALSINDEX) { - api_check(L, tvistab(L->top-1)); - /* NOBARRIER: A thread (i.e. L) is never black. 
*/ - setgcref(L->env, obj2gco(tabV(L->top-1))); - } else if (idx == LUA_ENVIRONINDEX) { - GCfunc *fn = curr_func(L); - if (fn->c.gct != ~LJ_TFUNC) - lj_err_msg(L, LJ_ERR_NOENV); - api_check(L, tvistab(L->top-1)); - setgcref(fn->c.env, obj2gco(tabV(L->top-1))); - lj_gc_barrier(L, fn, L->top-1); - } else { - TValue *o = index2adr(L, idx); - api_checkvalidindex(L, o); - copyTV(L, o, L->top-1); - if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */ - lj_gc_barrier(L, curr_func(L), L->top-1); - } - L->top--; -} - -LUA_API void lua_pushvalue(lua_State *L, int idx) -{ - copyTV(L, L->top, index2adr(L, idx)); - incr_top(L); -} - -/* -- Stack getters ------------------------------------------------------- */ - -LUA_API int lua_type(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - if (tvisnumber(o)) { - return LUA_TNUMBER; -#if LJ_64 && !LJ_GC64 - } else if (tvislightud(o)) { - return LUA_TLIGHTUSERDATA; -#endif - } else if (o == niltv(L)) { - return LUA_TNONE; - } else { /* Magic internal/external tag conversion. ORDER LJ_T */ - uint32_t t = ~itype(o); -#if LJ_64 - int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u); -#else - int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u); -#endif - lua_assert(tt != LUA_TNIL || tvisnil(o)); - return tt; - } -} - -LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt) -{ - if (lua_type(L, idx) != tt) - lj_err_argt(L, idx, tt); -} - -LUALIB_API void luaL_checkany(lua_State *L, int idx) -{ - if (index2adr(L, idx) == niltv(L)) - lj_err_arg(L, idx, LJ_ERR_NOVAL); -} - -LUA_API const char *lua_typename(lua_State *L, int t) -{ - UNUSED(L); - return lj_obj_typename[t+1]; -} - -LUA_API int lua_iscfunction(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - return tvisfunc(o) && !isluafunc(funcV(o)); -} - -LUA_API int lua_isnumber(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - return (tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), &tmp))); -} - -LUA_API int lua_isstring(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - return (tvisstr(o) || tvisnumber(o)); -} - -LUA_API int lua_isuserdata(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - return (tvisudata(o) || tvislightud(o)); -} - -LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2) -{ - cTValue *o1 = index2adr(L, idx1); - cTValue *o2 = index2adr(L, idx2); - return (o1 == niltv(L) || o2 == niltv(L)) ? 
0 : lj_obj_equal(o1, o2); -} - -LUA_API int lua_equal(lua_State *L, int idx1, int idx2) -{ - cTValue *o1 = index2adr(L, idx1); - cTValue *o2 = index2adr(L, idx2); - if (tvisint(o1) && tvisint(o2)) { - return intV(o1) == intV(o2); - } else if (tvisnumber(o1) && tvisnumber(o2)) { - return numberVnum(o1) == numberVnum(o2); - } else if (itype(o1) != itype(o2)) { - return 0; - } else if (tvispri(o1)) { - return o1 != niltv(L) && o2 != niltv(L); -#if LJ_64 && !LJ_GC64 - } else if (tvislightud(o1)) { - return o1->u64 == o2->u64; -#endif - } else if (gcrefeq(o1->gcr, o2->gcr)) { - return 1; - } else if (!tvistabud(o1)) { - return 0; - } else { - TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0); - if ((uintptr_t)base <= 1) { - return (int)(uintptr_t)base; - } else { - L->top = base+2; - lj_vm_call(L, base, 1+1); - L->top -= 2+LJ_FR2; - return tvistruecond(L->top+1+LJ_FR2); - } - } -} - -LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2) -{ - cTValue *o1 = index2adr(L, idx1); - cTValue *o2 = index2adr(L, idx2); - if (o1 == niltv(L) || o2 == niltv(L)) { - return 0; - } else if (tvisint(o1) && tvisint(o2)) { - return intV(o1) < intV(o2); - } else if (tvisnumber(o1) && tvisnumber(o2)) { - return numberVnum(o1) < numberVnum(o2); - } else { - TValue *base = lj_meta_comp(L, o1, o2, 0); - if ((uintptr_t)base <= 1) { - return (int)(uintptr_t)base; - } else { - L->top = base+2; - lj_vm_call(L, base, 1+1); - L->top -= 2+LJ_FR2; - return tvistruecond(L->top+1+LJ_FR2); - } - } -} - -LUA_API lua_Number lua_tonumber(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - if (LJ_LIKELY(tvisnumber(o))) - return numberVnum(o); - else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp)) - return numV(&tmp); - else - return 0; -} - -LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - if (LJ_LIKELY(tvisnumber(o))) - return numberVnum(o); - else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp))) - lj_err_argt(L, idx, LUA_TNUMBER); - return numV(&tmp); -} - -LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - if (LJ_LIKELY(tvisnumber(o))) - return numberVnum(o); - else if (tvisnil(o)) - return def; - else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp))) - lj_err_argt(L, idx, LUA_TNUMBER); - return numV(&tmp); -} - -LUA_API lua_Integer lua_tointeger(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - lua_Number n; - if (LJ_LIKELY(tvisint(o))) { - return intV(o); - } else if (LJ_LIKELY(tvisnum(o))) { - n = numV(o); - } else { - if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) - return 0; - if (tvisint(&tmp)) - return (lua_Integer)intV(&tmp); - n = numV(&tmp); - } -#if LJ_64 - return (lua_Integer)n; -#else - return lj_num2int(n); -#endif -} - -LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - lua_Number n; - if (LJ_LIKELY(tvisint(o))) { - return intV(o); - } else if (LJ_LIKELY(tvisnum(o))) { - n = numV(o); - } else { - if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) - lj_err_argt(L, idx, LUA_TNUMBER); - if (tvisint(&tmp)) - return (lua_Integer)intV(&tmp); - n = numV(&tmp); - } -#if LJ_64 - return (lua_Integer)n; -#else - return lj_num2int(n); -#endif -} - -LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def) -{ - cTValue *o = index2adr(L, idx); - TValue tmp; - lua_Number n; - if (LJ_LIKELY(tvisint(o))) { - return intV(o); - } 
else if (LJ_LIKELY(tvisnum(o))) { - n = numV(o); - } else if (tvisnil(o)) { - return def; - } else { - if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) - lj_err_argt(L, idx, LUA_TNUMBER); - if (tvisint(&tmp)) - return (lua_Integer)intV(&tmp); - n = numV(&tmp); - } -#if LJ_64 - return (lua_Integer)n; -#else - return lj_num2int(n); -#endif -} - -LUA_API int lua_toboolean(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - return tvistruecond(o); -} - -LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len) -{ - TValue *o = index2adr(L, idx); - GCstr *s; - if (LJ_LIKELY(tvisstr(o))) { - s = strV(o); - } else if (tvisnumber(o)) { - lj_gc_check(L); - o = index2adr(L, idx); /* GC may move the stack. */ - s = lj_strfmt_number(L, o); - setstrV(L, o, s); - } else { - if (len != NULL) *len = 0; - return NULL; - } - if (len != NULL) *len = s->len; - return strdata(s); -} - -LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len) -{ - TValue *o = index2adr(L, idx); - GCstr *s; - if (LJ_LIKELY(tvisstr(o))) { - s = strV(o); - } else if (tvisnumber(o)) { - lj_gc_check(L); - o = index2adr(L, idx); /* GC may move the stack. */ - s = lj_strfmt_number(L, o); - setstrV(L, o, s); - } else { - lj_err_argt(L, idx, LUA_TSTRING); - } - if (len != NULL) *len = s->len; - return strdata(s); -} - -LUALIB_API const char *luaL_optlstring(lua_State *L, int idx, - const char *def, size_t *len) -{ - TValue *o = index2adr(L, idx); - GCstr *s; - if (LJ_LIKELY(tvisstr(o))) { - s = strV(o); - } else if (tvisnil(o)) { - if (len != NULL) *len = def ? strlen(def) : 0; - return def; - } else if (tvisnumber(o)) { - lj_gc_check(L); - o = index2adr(L, idx); /* GC may move the stack. */ - s = lj_strfmt_number(L, o); - setstrV(L, o, s); - } else { - lj_err_argt(L, idx, LUA_TSTRING); - } - if (len != NULL) *len = s->len; - return strdata(s); -} - -LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def, - const char *const lst[]) -{ - ptrdiff_t i; - const char *s = lua_tolstring(L, idx, NULL); - if (s == NULL && (s = def) == NULL) - lj_err_argt(L, idx, LUA_TSTRING); - for (i = 0; lst[i]; i++) - if (strcmp(lst[i], s) == 0) - return (int)i; - lj_err_argv(L, idx, LJ_ERR_INVOPTM, s); -} - -LUA_API size_t lua_objlen(lua_State *L, int idx) -{ - TValue *o = index2adr(L, idx); - if (tvisstr(o)) { - return strV(o)->len; - } else if (tvistab(o)) { - return (size_t)lj_tab_len(tabV(o)); - } else if (tvisudata(o)) { - return udataV(o)->len; - } else if (tvisnumber(o)) { - GCstr *s = lj_strfmt_number(L, o); - setstrV(L, o, s); - return s->len; - } else { - return 0; - } -} - -LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - if (tvisfunc(o)) { - BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns)); - if (op == BC_FUNCC || op == BC_FUNCCW) - return funcV(o)->c.f; - } - return NULL; -} - -LUA_API void *lua_touserdata(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - if (tvisudata(o)) - return uddata(udataV(o)); - else if (tvislightud(o)) - return lightudV(o); - else - return NULL; -} - -LUA_API lua_State *lua_tothread(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - return (!tvisthread(o)) ? 
NULL : threadV(o); -} - -LUA_API const void *lua_topointer(lua_State *L, int idx) -{ - return lj_obj_ptr(index2adr(L, idx)); -} - -/* -- Stack setters (object creation) ------------------------------------- */ - -LUA_API void lua_pushnil(lua_State *L) -{ - setnilV(L->top); - incr_top(L); -} - -LUA_API void lua_pushnumber(lua_State *L, lua_Number n) -{ - setnumV(L->top, n); - if (LJ_UNLIKELY(tvisnan(L->top))) - setnanV(L->top); /* Canonicalize injected NaNs. */ - incr_top(L); -} - -LUA_API void lua_pushinteger(lua_State *L, lua_Integer n) -{ - setintptrV(L->top, n); - incr_top(L); -} - -LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len) -{ - GCstr *s; - lj_gc_check(L); - s = lj_str_new(L, str, len); - setstrV(L, L->top, s); - incr_top(L); -} - -LUA_API void lua_pushstring(lua_State *L, const char *str) -{ - if (str == NULL) { - setnilV(L->top); - } else { - GCstr *s; - lj_gc_check(L); - s = lj_str_newz(L, str); - setstrV(L, L->top, s); - } - incr_top(L); -} - -LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt, - va_list argp) -{ - lj_gc_check(L); - return lj_strfmt_pushvf(L, fmt, argp); -} - -LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...) -{ - const char *ret; - va_list argp; - lj_gc_check(L); - va_start(argp, fmt); - ret = lj_strfmt_pushvf(L, fmt, argp); - va_end(argp); - return ret; -} - -LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n) -{ - GCfunc *fn; - lj_gc_check(L); - api_checknelems(L, n); - fn = lj_func_newC(L, (MSize)n, getcurrenv(L)); - fn->c.f = f; - L->top -= n; - while (n--) - copyTV(L, &fn->c.upvalue[n], L->top+n); - setfuncV(L, L->top, fn); - lua_assert(iswhite(obj2gco(fn))); - incr_top(L); -} - -LUA_API void lua_pushboolean(lua_State *L, int b) -{ - setboolV(L->top, (b != 0)); - incr_top(L); -} - -LUA_API void lua_pushlightuserdata(lua_State *L, void *p) -{ - setlightudV(L->top, checklightudptr(L, p)); - incr_top(L); -} - -LUA_API void lua_createtable(lua_State *L, int narray, int nrec) -{ - lj_gc_check(L); - settabV(L, L->top, lj_tab_new_ah(L, narray, nrec)); - incr_top(L); -} - -LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname) -{ - GCtab *regt = tabV(registry(L)); - TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname)); - if (tvisnil(tv)) { - GCtab *mt = lj_tab_new(L, 0, 1); - settabV(L, tv, mt); - settabV(L, L->top++, mt); - lj_gc_anybarriert(L, regt); - return 1; - } else { - copyTV(L, L->top++, tv); - return 0; - } -} - -LUA_API int lua_pushthread(lua_State *L) -{ - setthreadV(L, L->top, L); - incr_top(L); - return (mainthread(G(L)) == L); -} - -LUA_API lua_State *lua_newthread(lua_State *L) -{ - lua_State *L1; - lj_gc_check(L); - L1 = lj_state_new(L); - setthreadV(L, L->top, L1); - incr_top(L); - return L1; -} - -LUA_API void *lua_newuserdata(lua_State *L, size_t size) -{ - GCudata *ud; - lj_gc_check(L); - if (size > LJ_MAX_UDATA) - lj_err_msg(L, LJ_ERR_UDATAOV); - ud = lj_udata_new(L, (MSize)size, getcurrenv(L)); - setudataV(L, L->top, ud); - incr_top(L); - return uddata(ud); -} - -LUA_API void lua_concat(lua_State *L, int n) -{ - api_checknelems(L, n); - if (n >= 2) { - n--; - do { - TValue *top = lj_meta_cat(L, L->top-1, -n); - if (top == NULL) { - L->top -= n; - break; - } - n -= (int)(L->top - top); - L->top = top+2; - lj_vm_call(L, top, 1+1); - L->top -= 1+LJ_FR2; - copyTV(L, L->top-1, L->top+LJ_FR2); - } while (--n > 0); - } else if (n == 0) { /* Push empty string. */ - setstrV(L, L->top, &G(L)->strempty); - incr_top(L); - } - /* else n == 1: nothing to do. 
*/ -} - -/* -- Object getters ------------------------------------------------------ */ - -LUA_API void lua_gettable(lua_State *L, int idx) -{ - cTValue *v, *t = index2adr(L, idx); - api_checkvalidindex(L, t); - v = lj_meta_tget(L, t, L->top-1); - if (v == NULL) { - L->top += 2; - lj_vm_call(L, L->top-2, 1+1); - L->top -= 2+LJ_FR2; - v = L->top+1+LJ_FR2; - } - copyTV(L, L->top-1, v); -} - -LUA_API void lua_getfield(lua_State *L, int idx, const char *k) -{ - cTValue *v, *t = index2adr(L, idx); - TValue key; - api_checkvalidindex(L, t); - setstrV(L, &key, lj_str_newz(L, k)); - v = lj_meta_tget(L, t, &key); - if (v == NULL) { - L->top += 2; - lj_vm_call(L, L->top-2, 1+1); - L->top -= 2+LJ_FR2; - v = L->top+1+LJ_FR2; - } - copyTV(L, L->top, v); - incr_top(L); -} - -LUA_API void lua_rawget(lua_State *L, int idx) -{ - cTValue *t = index2adr(L, idx); - api_check(L, tvistab(t)); - copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1)); -} - -LUA_API void lua_rawgeti(lua_State *L, int idx, int n) -{ - cTValue *v, *t = index2adr(L, idx); - api_check(L, tvistab(t)); - v = lj_tab_getint(tabV(t), n); - if (v) { - copyTV(L, L->top, v); - } else { - setnilV(L->top); - } - incr_top(L); -} - -LUA_API int lua_getmetatable(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - GCtab *mt = NULL; - if (tvistab(o)) - mt = tabref(tabV(o)->metatable); - else if (tvisudata(o)) - mt = tabref(udataV(o)->metatable); - else - mt = tabref(basemt_obj(G(L), o)); - if (mt == NULL) - return 0; - settabV(L, L->top, mt); - incr_top(L); - return 1; -} - -LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field) -{ - if (lua_getmetatable(L, idx)) { - cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field)); - if (tv && !tvisnil(tv)) { - copyTV(L, L->top-1, tv); - return 1; - } - L->top--; - } - return 0; -} - -LUA_API void lua_getfenv(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - api_checkvalidindex(L, o); - if (tvisfunc(o)) { - settabV(L, L->top, tabref(funcV(o)->c.env)); - } else if (tvisudata(o)) { - settabV(L, L->top, tabref(udataV(o)->env)); - } else if (tvisthread(o)) { - settabV(L, L->top, tabref(threadV(o)->env)); - } else { - setnilV(L->top); - } - incr_top(L); -} - -LUA_API int lua_next(lua_State *L, int idx) -{ - cTValue *t = index2adr(L, idx); - int more; - api_check(L, tvistab(t)); - more = lj_tab_next(L, tabV(t), L->top-1); - if (more) { - incr_top(L); /* Return new key and value slot. */ - } else { /* End of traversal. */ - L->top--; /* Remove key slot. */ - } - return more; -} - -LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n) -{ - TValue *val; - const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val); - if (name) { - copyTV(L, L->top, val); - incr_top(L); - } - return name; -} - -LUA_API void *lua_upvalueid(lua_State *L, int idx, int n) -{ - GCfunc *fn = funcV(index2adr(L, idx)); - n--; - api_check(L, (uint32_t)n < fn->l.nupvalues); - return isluafunc(fn) ? 
(void *)gcref(fn->l.uvptr[n]) : - (void *)&fn->c.upvalue[n]; -} - -LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2) -{ - GCfunc *fn1 = funcV(index2adr(L, idx1)); - GCfunc *fn2 = funcV(index2adr(L, idx2)); - n1--; n2--; - api_check(L, isluafunc(fn1) && (uint32_t)n1 < fn1->l.nupvalues); - api_check(L, isluafunc(fn2) && (uint32_t)n2 < fn2->l.nupvalues); - setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]); - lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1])); -} - -LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname) -{ - cTValue *o = index2adr(L, idx); - if (tvisudata(o)) { - GCudata *ud = udataV(o); - cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname)); - if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable)) - return uddata(ud); - } - lj_err_argtype(L, idx, tname); - return NULL; /* unreachable */ -} - -/* -- Object setters ------------------------------------------------------ */ - -LUA_API void lua_settable(lua_State *L, int idx) -{ - TValue *o; - cTValue *t = index2adr(L, idx); - api_checknelems(L, 2); - api_checkvalidindex(L, t); - o = lj_meta_tset(L, t, L->top-2); - if (o) { - /* NOBARRIER: lj_meta_tset ensures the table is not black. */ - L->top -= 2; - copyTV(L, o, L->top+1); - } else { - TValue *base = L->top; - copyTV(L, base+2, base-3-2*LJ_FR2); - L->top = base+3; - lj_vm_call(L, base, 0+1); - L->top -= 3+LJ_FR2; - } -} - -LUA_API void lua_setfield(lua_State *L, int idx, const char *k) -{ - TValue *o; - TValue key; - cTValue *t = index2adr(L, idx); - api_checknelems(L, 1); - api_checkvalidindex(L, t); - setstrV(L, &key, lj_str_newz(L, k)); - o = lj_meta_tset(L, t, &key); - if (o) { - /* NOBARRIER: lj_meta_tset ensures the table is not black. */ - copyTV(L, o, --L->top); - } else { - TValue *base = L->top; - copyTV(L, base+2, base-3-2*LJ_FR2); - L->top = base+3; - lj_vm_call(L, base, 0+1); - L->top -= 2+LJ_FR2; - } -} - -LUA_API void lua_rawset(lua_State *L, int idx) -{ - GCtab *t = tabV(index2adr(L, idx)); - TValue *dst, *key; - api_checknelems(L, 2); - key = L->top-2; - dst = lj_tab_set(L, t, key); - copyTV(L, dst, key+1); - lj_gc_anybarriert(L, t); - L->top = key; -} - -LUA_API void lua_rawseti(lua_State *L, int idx, int n) -{ - GCtab *t = tabV(index2adr(L, idx)); - TValue *dst, *src; - api_checknelems(L, 1); - dst = lj_tab_setint(L, t, n); - src = L->top-1; - copyTV(L, dst, src); - lj_gc_barriert(L, t, dst); - L->top = src; -} - -LUA_API int lua_setmetatable(lua_State *L, int idx) -{ - global_State *g; - GCtab *mt; - cTValue *o = index2adr(L, idx); - api_checknelems(L, 1); - api_checkvalidindex(L, o); - if (tvisnil(L->top-1)) { - mt = NULL; - } else { - api_check(L, tvistab(L->top-1)); - mt = tabV(L->top-1); - } - g = G(L); - if (tvistab(o)) { - setgcref(tabV(o)->metatable, obj2gco(mt)); - if (mt) - lj_gc_objbarriert(L, tabV(o), mt); - } else if (tvisudata(o)) { - setgcref(udataV(o)->metatable, obj2gco(mt)); - if (mt) - lj_gc_objbarrier(L, udataV(o), mt); - } else { - /* Flush cache, since traces specialize to basemt. But not during __gc. */ - if (lj_trace_flushall(L)) - lj_err_caller(L, LJ_ERR_NOGCMM); - if (tvisbool(o)) { - /* NOBARRIER: basemt is a GC root. */ - setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt)); - setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt)); - } else { - /* NOBARRIER: basemt is a GC root. 
*/ - setgcref(basemt_obj(g, o), obj2gco(mt)); - } - } - L->top--; - return 1; -} - -LUA_API int lua_setfenv(lua_State *L, int idx) -{ - cTValue *o = index2adr(L, idx); - GCtab *t; - api_checknelems(L, 1); - api_checkvalidindex(L, o); - api_check(L, tvistab(L->top-1)); - t = tabV(L->top-1); - if (tvisfunc(o)) { - setgcref(funcV(o)->c.env, obj2gco(t)); - } else if (tvisudata(o)) { - setgcref(udataV(o)->env, obj2gco(t)); - } else if (tvisthread(o)) { - setgcref(threadV(o)->env, obj2gco(t)); - } else { - L->top--; - return 0; - } - lj_gc_objbarrier(L, gcV(o), t); - L->top--; - return 1; -} - -LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n) -{ - cTValue *f = index2adr(L, idx); - TValue *val; - const char *name; - api_checknelems(L, 1); - name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val); - if (name) { - L->top--; - copyTV(L, val, L->top); - lj_gc_barrier(L, funcV(f), L->top); - } - return name; -} - -/* -- Calls --------------------------------------------------------------- */ - -#if LJ_FR2 -static TValue *api_call_base(lua_State *L, int nargs) -{ - TValue *o = L->top, *base = o - nargs; - L->top = o+1; - for (; o > base; o--) copyTV(L, o, o-1); - setnilV(o); - return o+1; -} -#else -#define api_call_base(L, nargs) (L->top - (nargs)) -#endif - -LUA_API void lua_call(lua_State *L, int nargs, int nresults) -{ - api_check(L, L->status == 0 || L->status == LUA_ERRERR); - api_checknelems(L, nargs+1); - lj_vm_call(L, api_call_base(L, nargs), nresults+1); -} - -LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc) -{ - global_State *g = G(L); - uint8_t oldh = hook_save(g); - ptrdiff_t ef; - int status; - api_check(L, L->status == 0 || L->status == LUA_ERRERR); - api_checknelems(L, nargs+1); - if (errfunc == 0) { - ef = 0; - } else { - cTValue *o = stkindex2adr(L, errfunc); - api_checkvalidindex(L, o); - ef = savestack(L, o); - } - status = lj_vm_pcall(L, api_call_base(L, nargs), nresults+1, ef); - if (status) hook_restore(g, oldh); - return status; -} - -static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud) -{ - GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L)); - TValue *top = L->top; - fn->c.f = func; - setfuncV(L, top++, fn); - if (LJ_FR2) setnilV(top++); - setlightudV(top++, checklightudptr(L, ud)); - cframe_nres(L->cframe) = 1+0; /* Zero results. */ - L->top = top; - return top-1; /* Now call the newly allocated C function. */ -} - -LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud) -{ - global_State *g = G(L); - uint8_t oldh = hook_save(g); - int status; - api_check(L, L->status == 0 || L->status == LUA_ERRERR); - status = lj_vm_cpcall(L, func, ud, cpcall); - if (status) hook_restore(g, oldh); - return status; -} - -LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field) -{ - if (luaL_getmetafield(L, idx, field)) { - TValue *top = L->top--; - if (LJ_FR2) setnilV(top++); - copyTV(L, top++, index2adr(L, idx)); - L->top = top; - lj_vm_call(L, top-1, 1+1); - return 1; - } - return 0; -} - -/* -- Coroutine yield and resume ------------------------------------------ */ - -LUA_API int lua_yield(lua_State *L, int nresults) -{ - void *cf = L->cframe; - global_State *g = G(L); - if (cframe_canyield(cf)) { - cf = cframe_raw(cf); - if (!hook_active(g)) { /* Regular yield: move results down if needed. 
*/ - cTValue *f = L->top - nresults; - if (f > L->base) { - TValue *t = L->base; - while (--nresults >= 0) copyTV(L, t++, f++); - L->top = t; - } - L->cframe = NULL; - L->status = LUA_YIELD; - return -1; - } else { /* Yield from hook: add a pseudo-frame. */ - TValue *top = L->top; - hook_leave(g); - (top++)->u64 = cframe_multres(cf); - setcont(top, lj_cont_hook); - if (LJ_FR2) top++; - setframe_pc(top, cframe_pc(cf)-1); - if (LJ_FR2) top++; - setframe_gc(top, obj2gco(L), LJ_TTHREAD); - setframe_ftsz(top, ((char *)(top+1)-(char *)L->base)+FRAME_CONT); - L->top = L->base = top+1; -#if LJ_TARGET_X64 - lj_err_throw(L, LUA_YIELD); -#else - L->cframe = NULL; - L->status = LUA_YIELD; - lj_vm_unwind_c(cf, LUA_YIELD); -#endif - } - } - lj_err_msg(L, LJ_ERR_CYIELD); - return 0; /* unreachable */ -} - -LUA_API int lua_resume(lua_State *L, int nargs) -{ - if (L->cframe == NULL && L->status <= LUA_YIELD) - return lj_vm_resume(L, - L->status == 0 ? api_call_base(L, nargs) : L->top - nargs, - 0, 0); - L->top = L->base; - setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP)); - incr_top(L); - return LUA_ERRRUN; -} - -/* -- GC and memory management -------------------------------------------- */ - -LUA_API int lua_gc(lua_State *L, int what, int data) -{ - global_State *g = G(L); - int res = 0; - switch (what) { - case LUA_GCSTOP: - g->gc.threshold = LJ_MAX_MEM; - break; - case LUA_GCRESTART: - g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total; - break; - case LUA_GCCOLLECT: - lj_gc_fullgc(L); - break; - case LUA_GCCOUNT: - res = (int)(g->gc.total >> 10); - break; - case LUA_GCCOUNTB: - res = (int)(g->gc.total & 0x3ff); - break; - case LUA_GCSTEP: { - GCSize a = (GCSize)data << 10; - g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0; - while (g->gc.total >= g->gc.threshold) - if (lj_gc_step(L) > 0) { - res = 1; - break; - } - break; - } - case LUA_GCSETPAUSE: - res = (int)(g->gc.pause); - g->gc.pause = (MSize)data; - break; - case LUA_GCSETSTEPMUL: - res = (int)(g->gc.stepmul); - g->gc.stepmul = (MSize)data; - break; - default: - res = -1; /* Invalid option. */ - } - return res; -} - -LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud) -{ - global_State *g = G(L); - if (ud) *ud = g->allocd; - return g->allocf; -} - -LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud) -{ - global_State *g = G(L); - g->allocd = ud; - g->allocf = f; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_arch.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_arch.h deleted file mode 100644 index 6760111ef5e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_arch.h +++ /dev/null @@ -1,513 +0,0 @@ -/* -** Target architecture selection. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_ARCH_H -#define _LJ_ARCH_H - -#include "lua.h" - -/* Target endianess. */ -#define LUAJIT_LE 0 -#define LUAJIT_BE 1 - -/* Target architectures. */ -#define LUAJIT_ARCH_X86 1 -#define LUAJIT_ARCH_x86 1 -#define LUAJIT_ARCH_X64 2 -#define LUAJIT_ARCH_x64 2 -#define LUAJIT_ARCH_ARM 3 -#define LUAJIT_ARCH_arm 3 -#define LUAJIT_ARCH_ARM64 4 -#define LUAJIT_ARCH_arm64 4 -#define LUAJIT_ARCH_PPC 5 -#define LUAJIT_ARCH_ppc 5 -#define LUAJIT_ARCH_MIPS 6 -#define LUAJIT_ARCH_mips 6 - -/* Target OS. */ -#define LUAJIT_OS_OTHER 0 -#define LUAJIT_OS_WINDOWS 1 -#define LUAJIT_OS_LINUX 2 -#define LUAJIT_OS_OSX 3 -#define LUAJIT_OS_BSD 4 -#define LUAJIT_OS_POSIX 5 - -/* Select native target if no target defined. 
*/ -#ifndef LUAJIT_TARGET - -#if defined(__i386) || defined(__i386__) || defined(_M_IX86) -#define LUAJIT_TARGET LUAJIT_ARCH_X86 -#elif defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) -#define LUAJIT_TARGET LUAJIT_ARCH_X64 -#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM) -#define LUAJIT_TARGET LUAJIT_ARCH_ARM -#elif defined(__aarch64__) -#define LUAJIT_TARGET LUAJIT_ARCH_ARM64 -#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC) -#define LUAJIT_TARGET LUAJIT_ARCH_PPC -#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS) -#define LUAJIT_TARGET LUAJIT_ARCH_MIPS -#else -#error "No support for this architecture (yet)" -#endif - -#endif - -/* Select native OS if no target OS defined. */ -#ifndef LUAJIT_OS - -#if defined(_WIN32) && !defined(_XBOX_VER) -#define LUAJIT_OS LUAJIT_OS_WINDOWS -#elif defined(__linux__) -#define LUAJIT_OS LUAJIT_OS_LINUX -#elif defined(__MACH__) && defined(__APPLE__) -#define LUAJIT_OS LUAJIT_OS_OSX -#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ - defined(__NetBSD__) || defined(__OpenBSD__) || \ - defined(__DragonFly__)) && !defined(__ORBIS__) -#define LUAJIT_OS LUAJIT_OS_BSD -#elif (defined(__sun__) && defined(__svr4__)) || defined(__CYGWIN__) -#define LUAJIT_OS LUAJIT_OS_POSIX -#else -#define LUAJIT_OS LUAJIT_OS_OTHER -#endif - -#endif - -/* Set target OS properties. */ -#if LUAJIT_OS == LUAJIT_OS_WINDOWS -#define LJ_OS_NAME "Windows" -#elif LUAJIT_OS == LUAJIT_OS_LINUX -#define LJ_OS_NAME "Linux" -#elif LUAJIT_OS == LUAJIT_OS_OSX -#define LJ_OS_NAME "OSX" -#elif LUAJIT_OS == LUAJIT_OS_BSD -#define LJ_OS_NAME "BSD" -#elif LUAJIT_OS == LUAJIT_OS_POSIX -#define LJ_OS_NAME "POSIX" -#else -#define LJ_OS_NAME "Other" -#endif - -#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS) -#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX) -#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX) -#define LJ_TARGET_IOS (LJ_TARGET_OSX && (LUAJIT_TARGET == LUAJIT_ARCH_ARM || LUAJIT_TARGET == LUAJIT_ARCH_ARM64)) -#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS) -#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX - -#ifdef __CELLOS_LV2__ -#define LJ_TARGET_PS3 1 -#define LJ_TARGET_CONSOLE 1 -#endif - -#ifdef __ORBIS__ -#define LJ_TARGET_PS4 1 -#define LJ_TARGET_CONSOLE 1 -#undef NULL -#define NULL ((void*)0) -#endif - -#ifdef __psp2__ -#define LJ_TARGET_PSVITA 1 -#define LJ_TARGET_CONSOLE 1 -#endif - -#if _XBOX_VER >= 200 -#define LJ_TARGET_XBOX360 1 -#define LJ_TARGET_CONSOLE 1 -#endif - -#ifdef _DURANGO -#define LJ_TARGET_XBOXONE 1 -#define LJ_TARGET_CONSOLE 1 -#define LJ_TARGET_GC64 1 -#endif - -#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */ -#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */ -#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */ -#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */ - -/* Set target architecture properties. 
*/ -#if LUAJIT_TARGET == LUAJIT_ARCH_X86 - -#define LJ_ARCH_NAME "x86" -#define LJ_ARCH_BITS 32 -#define LJ_ARCH_ENDIAN LUAJIT_LE -#if LJ_TARGET_WINDOWS || __CYGWIN__ -#define LJ_ABI_WIN 1 -#else -#define LJ_ABI_WIN 0 -#endif -#define LJ_TARGET_X86 1 -#define LJ_TARGET_X86ORX64 1 -#define LJ_TARGET_EHRETREG 0 -#define LJ_TARGET_MASKSHIFT 1 -#define LJ_TARGET_MASKROT 1 -#define LJ_TARGET_UNALIGNED 1 -#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL - -#elif LUAJIT_TARGET == LUAJIT_ARCH_X64 - -#define LJ_ARCH_NAME "x64" -#define LJ_ARCH_BITS 64 -#define LJ_ARCH_ENDIAN LUAJIT_LE -#if LJ_TARGET_WINDOWS || __CYGWIN__ -#define LJ_ABI_WIN 1 -#else -#define LJ_ABI_WIN 0 -#endif -#define LJ_TARGET_X64 1 -#define LJ_TARGET_X86ORX64 1 -#define LJ_TARGET_EHRETREG 0 -#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */ -#define LJ_TARGET_MASKSHIFT 1 -#define LJ_TARGET_MASKROT 1 -#define LJ_TARGET_UNALIGNED 1 -#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL -#ifdef LUAJIT_ENABLE_GC64 -#define LJ_TARGET_GC64 1 -#endif - -#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM - -#define LJ_ARCH_NAME "arm" -#define LJ_ARCH_BITS 32 -#define LJ_ARCH_ENDIAN LUAJIT_LE -#if !defined(LJ_ARCH_HASFPU) && __SOFTFP__ -#define LJ_ARCH_HASFPU 0 -#endif -#if !defined(LJ_ABI_SOFTFP) && !__ARM_PCS_VFP -#define LJ_ABI_SOFTFP 1 -#endif -#define LJ_ABI_EABI 1 -#define LJ_TARGET_ARM 1 -#define LJ_TARGET_EHRETREG 0 -#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ -#define LJ_TARGET_MASKSHIFT 0 -#define LJ_TARGET_MASKROT 1 -#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ -#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL - -#if __ARM_ARCH_8__ || __ARM_ARCH_8A__ -#define LJ_ARCH_VERSION 80 -#elif __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__ -#define LJ_ARCH_VERSION 70 -#elif __ARM_ARCH_6T2__ -#define LJ_ARCH_VERSION 61 -#elif __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6K__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__ -#define LJ_ARCH_VERSION 60 -#else -#define LJ_ARCH_VERSION 50 -#endif - -#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM64 - -#define LJ_ARCH_NAME "arm64" -#define LJ_ARCH_BITS 64 -#define LJ_ARCH_ENDIAN LUAJIT_LE -#define LJ_TARGET_ARM64 1 -#define LJ_TARGET_EHRETREG 0 -#define LJ_TARGET_JUMPRANGE 27 /* +-2^27 = +-128MB */ -#define LJ_TARGET_MASKSHIFT 1 -#define LJ_TARGET_MASKROT 1 -#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ -#define LJ_TARGET_GC64 1 -#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL -#define LJ_ARCH_NOJIT 1 /* NYI */ - -#define LJ_ARCH_VERSION 80 - -#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC - -#ifndef LJ_ARCH_ENDIAN -#if __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ -#define LJ_ARCH_ENDIAN LUAJIT_LE -#else -#define LJ_ARCH_ENDIAN LUAJIT_BE -#endif -#endif - -#if _LP64 -#define LJ_ARCH_BITS 64 -#if LJ_ARCH_ENDIAN == LUAJIT_LE -#define LJ_ARCH_NAME "ppc64le" -#else -#define LJ_ARCH_NAME "ppc64" -#endif -#else -#define LJ_ARCH_BITS 32 -#define LJ_ARCH_NAME "ppc" -#endif - -#define LJ_TARGET_PPC 1 -#define LJ_TARGET_EHRETREG 3 -#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ -#define LJ_TARGET_MASKSHIFT 0 -#define LJ_TARGET_MASKROT 1 -#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL.
*/ -#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE - -#if LJ_TARGET_CONSOLE -#define LJ_ARCH_PPC32ON64 1 -#define LJ_ARCH_NOFFI 1 -#elif LJ_ARCH_BITS == 64 -#define LJ_ARCH_PPC64 1 -#define LJ_TARGET_GC64 1 -#define LJ_ARCH_NOJIT 1 /* NYI */ -#endif - -#if _ARCH_PWR7 -#define LJ_ARCH_VERSION 70 -#elif _ARCH_PWR6 -#define LJ_ARCH_VERSION 60 -#elif _ARCH_PWR5X -#define LJ_ARCH_VERSION 51 -#elif _ARCH_PWR5 -#define LJ_ARCH_VERSION 50 -#elif _ARCH_PWR4 -#define LJ_ARCH_VERSION 40 -#else -#define LJ_ARCH_VERSION 0 -#endif -#if _ARCH_PPCSQ -#define LJ_ARCH_SQRT 1 -#endif -#if _ARCH_PWR5X -#define LJ_ARCH_ROUND 1 -#endif -#if __PPU__ -#define LJ_ARCH_CELL 1 -#endif -#if LJ_TARGET_XBOX360 -#define LJ_ARCH_XENON 1 -#endif - -#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS - -#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) -#define LJ_ARCH_NAME "mipsel" -#define LJ_ARCH_ENDIAN LUAJIT_LE -#else -#define LJ_ARCH_NAME "mips" -#define LJ_ARCH_ENDIAN LUAJIT_BE -#endif -#define LJ_ARCH_BITS 32 -#define LJ_TARGET_MIPS 1 -#define LJ_TARGET_EHRETREG 4 -#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */ -#define LJ_TARGET_MASKSHIFT 1 -#define LJ_TARGET_MASKROT 1 -#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ -#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE - -#if _MIPS_ARCH_MIPS32R2 -#define LJ_ARCH_VERSION 20 -#else -#define LJ_ARCH_VERSION 10 -#endif - -#else -#error "No target architecture defined" -#endif - -#ifndef LJ_PAGESIZE -#define LJ_PAGESIZE 4096 -#endif - -/* Check for minimum required compiler versions. */ -#if defined(__GNUC__) -#if LJ_TARGET_X86 -#if (__GNUC__ < 3) || ((__GNUC__ == 3) && __GNUC_MINOR__ < 4) -#error "Need at least GCC 3.4 or newer" -#endif -#elif LJ_TARGET_X64 -#if __GNUC__ < 4 -#error "Need at least GCC 4.0 or newer" -#endif -#elif LJ_TARGET_ARM || LJ_TARGET_PPC -#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2) -#error "Need at least GCC 4.2 or newer" -#endif -#elif LJ_TARGET_ARM64 -#if __clang__ -#if (__clang_major__ < 3) || ((__clang_major__ == 3) && __clang_minor__ < 5) -#error "Need at least Clang 3.5 or newer" -#endif -#else -#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 8) -#error "Need at least GCC 4.8 or newer" -#endif -#endif -#elif !LJ_TARGET_PS3 -#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3) -#error "Need at least GCC 4.3 or newer" -#endif -#endif -#endif - -/* Check target-specific constraints. 
*/ -#ifndef _BUILDVM_H -#if LJ_TARGET_X64 -#if __USING_SJLJ_EXCEPTIONS__ -#error "Need a C compiler with native exception handling on x64" -#endif -#elif LJ_TARGET_ARM -#if defined(__ARMEB__) -#error "No support for big-endian ARM" -#endif -#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ -#error "No support for Cortex-M CPUs" -#endif -#if !(__ARM_EABI__ || LJ_TARGET_IOS) -#error "Only ARM EABI or iOS 3.0+ ABI is supported" -#endif -#elif LJ_TARGET_ARM64 -#if defined(__AARCH64EB__) -#error "No support for big-endian ARM64" -#endif -#if defined(_ILP32) -#error "No support for ILP32 model on ARM64" -#endif -#elif LJ_TARGET_PPC -#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE) -#error "No support for PowerPC CPUs without double-precision FPU" -#endif -#if !LJ_ARCH_PPC64 && LJ_ARCH_ENDIAN == LUAJIT_LE -#error "No support for little-endian PPC32" -#endif -#if LJ_ARCH_PPC64 -#error "No support for PowerPC 64 bit mode (yet)" -#endif -#ifdef __NO_FPRS__ -#error "No support for PPC/e500 anymore (use LuaJIT 2.0)" -#endif -#elif LJ_TARGET_MIPS -#if defined(__mips_soft_float) -#error "No support for MIPS CPUs without FPU" -#endif -#if defined(_LP64) -#error "No support for MIPS64" -#endif -#endif -#endif - -/* Enable or disable the dual-number mode for the VM. */ -#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \ - (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL && LUAJIT_NUMMODE == 1) -#error "No support for this number mode on this architecture" -#endif -#if LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL || \ - (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL_SINGLE && LUAJIT_NUMMODE != 1) || \ - (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE_DUAL && LUAJIT_NUMMODE == 2) -#define LJ_DUALNUM 1 -#else -#define LJ_DUALNUM 0 -#endif - -#if LJ_TARGET_IOS || LJ_TARGET_CONSOLE -/* Runtime code generation is restricted on iOS. Complain to Apple, not me. */ -/* Ditto for the consoles. Complain to Sony or MS, not me. */ -#ifndef LUAJIT_ENABLE_JIT -#define LJ_OS_NOJIT 1 -#endif -#endif - -/* 64 bit GC references. */ -#if LJ_TARGET_GC64 -#define LJ_GC64 1 -#else -#define LJ_GC64 0 -#endif - -/* 2-slot frame info. */ -#if LJ_GC64 -#define LJ_FR2 1 -#else -#define LJ_FR2 0 -#endif - -/* Disable or enable the JIT compiler. */ -#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) || defined(LJ_OS_NOJIT) || LJ_FR2 || LJ_GC64 -#define LJ_HASJIT 0 -#else -#define LJ_HASJIT 1 -#endif - -/* Disable or enable the FFI extension. 
*/ -#if defined(LUAJIT_DISABLE_FFI) || defined(LJ_ARCH_NOFFI) -#define LJ_HASFFI 0 -#else -#define LJ_HASFFI 1 -#endif - -#if defined(LUAJIT_DISABLE_PROFILE) -#define LJ_HASPROFILE 0 -#elif LJ_TARGET_POSIX -#define LJ_HASPROFILE 1 -#define LJ_PROFILE_SIGPROF 1 -#elif LJ_TARGET_PS3 -#define LJ_HASPROFILE 1 -#define LJ_PROFILE_PTHREAD 1 -#elif LJ_TARGET_WINDOWS || LJ_TARGET_XBOX360 -#define LJ_HASPROFILE 1 -#define LJ_PROFILE_WTHREAD 1 -#else -#define LJ_HASPROFILE 0 -#endif - -#ifndef LJ_ARCH_HASFPU -#define LJ_ARCH_HASFPU 1 -#endif -#ifndef LJ_ABI_SOFTFP -#define LJ_ABI_SOFTFP 0 -#endif -#define LJ_SOFTFP (!LJ_ARCH_HASFPU) - -#if LJ_ARCH_ENDIAN == LUAJIT_BE -#define LJ_LE 0 -#define LJ_BE 1 -#define LJ_ENDIAN_SELECT(le, be) be -#define LJ_ENDIAN_LOHI(lo, hi) hi lo -#else -#define LJ_LE 1 -#define LJ_BE 0 -#define LJ_ENDIAN_SELECT(le, be) le -#define LJ_ENDIAN_LOHI(lo, hi) lo hi -#endif - -#if LJ_ARCH_BITS == 32 -#define LJ_32 1 -#define LJ_64 0 -#else -#define LJ_32 0 -#define LJ_64 1 -#endif - -#ifndef LJ_TARGET_UNALIGNED -#define LJ_TARGET_UNALIGNED 0 -#endif - -/* Various workarounds for embedded operating systems or weak C runtimes. */ -#if (defined(__ANDROID__) && !defined(LJ_TARGET_X86ORX64)) || defined(__symbian__) || LJ_TARGET_XBOX360 || LJ_TARGET_WINDOWS -#define LUAJIT_NO_LOG2 -#endif -#if defined(__symbian__) || LJ_TARGET_WINDOWS -#define LUAJIT_NO_EXP2 -#endif - -#if defined(LUAJIT_NO_UNWIND) || defined(__symbian__) || LJ_TARGET_IOS || LJ_TARGET_PS3 || LJ_TARGET_PS4 -#define LJ_NO_UNWIND 1 -#endif - -/* Compatibility with Lua 5.1 vs. 5.2. */ -#ifdef LUAJIT_ENABLE_LUA52COMPAT -#define LJ_52 1 -#else -#define LJ_52 0 -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.c deleted file mode 100644 index 9db950a2bd1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.c +++ /dev/null @@ -1,2278 +0,0 @@ -/* -** IR assembler (SSA IR -> machine code). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_asm_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_gc.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_frame.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#endif -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_mcode.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_snap.h" -#include "lj_asm.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_target.h" - -#ifdef LUA_USE_ASSERT -#include <stdio.h> -#endif - -/* -- Assembler state and common macros ----------------------------------- */ - -/* Assembler state. */ -typedef struct ASMState { - RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */ - - MCode *mcp; /* Current MCode pointer (grows down). */ - MCode *mclim; /* Lower limit for MCode memory + red zone. */ -#ifdef LUA_USE_ASSERT - MCode *mcp_prev; /* Red zone overflow check. */ -#endif - - IRIns *ir; /* Copy of pointer to IR instructions/constants. */ - jit_State *J; /* JIT compiler state. */ - -#if LJ_TARGET_X86ORX64 - x86ModRM mrm; /* Fused x86 address operand. */ -#endif - - RegSet freeset; /* Set of free registers. */ - RegSet modset; /* Set of registers modified inside the loop. */ - RegSet weakset; /* Set of weakly referenced registers. */ - RegSet phiset; /* Set of PHI registers. */ - - uint32_t flags; /* Copy of JIT compiler flags. */ - int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P).
*/ - - int32_t evenspill; /* Next even spill slot. */ - int32_t oddspill; /* Next odd spill slot (or 0). */ - - IRRef curins; /* Reference of current instruction. */ - IRRef stopins; /* Stop assembly before hitting this instruction. */ - IRRef orignins; /* Original T->nins. */ - - IRRef snapref; /* Current snapshot is active after this reference. */ - IRRef snaprename; /* Rename highwater mark for snapshot check. */ - SnapNo snapno; /* Current snapshot number. */ - SnapNo loopsnapno; /* Loop snapshot number. */ - - IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */ - IRRef sectref; /* Section base reference (loopref or 0). */ - IRRef loopref; /* Reference of LOOP instruction (or 0). */ - - BCReg topslot; /* Number of slots for stack check (unless 0). */ - int32_t gcsteps; /* Accumulated number of GC steps (per section). */ - - GCtrace *T; /* Trace to assemble. */ - GCtrace *parent; /* Parent trace (or NULL). */ - - MCode *mcbot; /* Bottom of reserved MCode. */ - MCode *mctop; /* Top of generated MCode. */ - MCode *mcloop; /* Pointer to loop MCode (or NULL). */ - MCode *invmcp; /* Points to invertible loop branch (or NULL). */ - MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */ - MCode *realign; /* Realign loop if not NULL. */ - -#ifdef RID_NUM_KREF - int32_t krefk[RID_NUM_KREF]; -#endif - IRRef1 phireg[RID_MAX]; /* PHI register references. */ - uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */ -} ASMState; - -#define IR(ref) (&as->ir[(ref)]) - -#define ASMREF_TMP1 REF_TRUE /* Temp. register. */ -#define ASMREF_TMP2 REF_FALSE /* Temp. register. */ -#define ASMREF_L REF_NIL /* Stores register for L. */ - -/* Check for variant to invariant references. */ -#define iscrossref(as, ref) ((ref) < as->sectref) - -/* Inhibit memory op fusion from variant to invariant references. */ -#define FUSE_DISABLED (~(IRRef)0) -#define mayfuse(as, ref) ((ref) > as->fuseref) -#define neverfuse(as) (as->fuseref == FUSE_DISABLED) -#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t)) -#define opisfusableload(o) \ - ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \ - (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD) - -/* Sparse limit checks using a red zone before the actual limit. */ -#define MCLIM_REDZONE 64 - -static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as) -{ - lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE)); -} - -static LJ_AINLINE void checkmclim(ASMState *as) -{ -#ifdef LUA_USE_ASSERT - if (as->mcp + MCLIM_REDZONE < as->mcp_prev) { - IRIns *ir = IR(as->curins+1); - fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp, - as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS); - lua_assert(0); - } -#endif - if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as); -#ifdef LUA_USE_ASSERT - as->mcp_prev = as->mcp; -#endif -} - -#ifdef RID_NUM_KREF -#define ra_iskref(ref) ((ref) < RID_NUM_KREF) -#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref))) -#define ra_krefk(as, ref) (as->krefk[(ref)]) - -static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k) -{ - IRRef ref = (IRRef)(r - RID_MIN_KREF); - as->krefk[ref] = k; - as->cost[r] = REGCOST(ref, ref); -} - -#else -#define ra_iskref(ref) 0 -#define ra_krefreg(ref) RID_MIN_GPR -#define ra_krefk(as, ref) 0 -#endif - -/* Arch-specific field offsets. 
*/ -static const uint8_t field_ofs[IRFL__MAX+1] = { -#define FLOFS(name, ofs) (uint8_t)(ofs), -IRFLDEF(FLOFS) -#undef FLOFS - 0 -}; - -/* -- Target-specific instruction emitter --------------------------------- */ - -#if LJ_TARGET_X86ORX64 -#include "lj_emit_x86.h" -#elif LJ_TARGET_ARM -#include "lj_emit_arm.h" -#elif LJ_TARGET_PPC -#include "lj_emit_ppc.h" -#elif LJ_TARGET_MIPS -#include "lj_emit_mips.h" -#else -#error "Missing instruction emitter for target CPU" -#endif - -/* Generic load/store of register from/to stack slot. */ -#define emit_spload(as, ir, r, ofs) \ - emit_loadofs(as, ir, (r), RID_SP, (ofs)) -#define emit_spstore(as, ir, r, ofs) \ - emit_storeofs(as, ir, (r), RID_SP, (ofs)) - -/* -- Register allocator debugging ---------------------------------------- */ - -/* #define LUAJIT_DEBUG_RA */ - -#ifdef LUAJIT_DEBUG_RA - -#include <stdio.h> -#include <stdarg.h> - -#define RIDNAME(name) #name, -static const char *const ra_regname[] = { - GPRDEF(RIDNAME) - FPRDEF(RIDNAME) - VRIDDEF(RIDNAME) - NULL -}; -#undef RIDNAME - -static char ra_dbg_buf[65536]; -static char *ra_dbg_p; -static char *ra_dbg_merge; -static MCode *ra_dbg_mcp; - -static void ra_dstart(void) -{ - ra_dbg_p = ra_dbg_buf; - ra_dbg_merge = NULL; - ra_dbg_mcp = NULL; -} - -static void ra_dflush(void) -{ - fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout); - ra_dstart(); -} - -static void ra_dprintf(ASMState *as, const char *fmt, ...) -{ - char *p; - va_list argp; - va_start(argp, fmt); - p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p; - ra_dbg_mcp = NULL; - p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS); - for (;;) { - const char *e = strchr(fmt, '$'); - if (e == NULL) break; - memcpy(p, fmt, (size_t)(e-fmt)); - p += e-fmt; - if (e[1] == 'r') { - Reg r = va_arg(argp, Reg) & RID_MASK; - if (r <= RID_MAX) { - const char *q; - for (q = ra_regname[r]; *q; q++) - *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q; - } else { - *p++ = '?'; - lua_assert(0); - } - } else if (e[1] == 'f' || e[1] == 'i') { - IRRef ref; - if (e[1] == 'f') - ref = va_arg(argp, IRRef); - else - ref = va_arg(argp, IRIns *) - as->ir; - if (ref >= REF_BIAS) - p += sprintf(p, "%04d", ref - REF_BIAS); - else - p += sprintf(p, "K%03d", REF_BIAS - ref); - } else if (e[1] == 's') { - uint32_t slot = va_arg(argp, uint32_t); - p += sprintf(p, "[sp+0x%x]", sps_scale(slot)); - } else if (e[1] == 'x') { - p += sprintf(p, "%08x", va_arg(argp, int32_t)); - } else { - lua_assert(0); - } - fmt = e+2; - } - va_end(argp); - while (*fmt) - *p++ = *fmt++; - *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n'; - if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) { - fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout); - p = ra_dbg_buf; - } - ra_dbg_p = p; -} - -#define RA_DBG_START() ra_dstart() -#define RA_DBG_FLUSH() ra_dflush() -#define RA_DBG_REF() \ - do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \ - ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0) -#define RA_DBGX(x) ra_dprintf x - -#else -#define RA_DBG_START() ((void)0) -#define RA_DBG_FLUSH() ((void)0) -#define RA_DBG_REF() ((void)0) -#define RA_DBGX(x) ((void)0) -#endif - -/* -- Register allocator -------------------------------------------------- */ - -#define ra_free(as, r) rset_set(as->freeset, (r)) -#define ra_modified(as, r) rset_set(as->modset, (r)) -#define ra_weak(as, r) rset_set(as->weakset, (r)) -#define ra_noweak(as, r) rset_clear(as->weakset, (r)) - -#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s)) - -/* Setup register allocator.
*/ -static void ra_setup(ASMState *as) -{ - Reg r; - /* Initially all regs (except the stack pointer) are free for use. */ - as->freeset = RSET_INIT; - as->modset = RSET_EMPTY; - as->weakset = RSET_EMPTY; - as->phiset = RSET_EMPTY; - memset(as->phireg, 0, sizeof(as->phireg)); - for (r = RID_MIN_GPR; r < RID_MAX; r++) - as->cost[r] = REGCOST(~0u, 0u); -} - -/* Rematerialize constants. */ -static Reg ra_rematk(ASMState *as, IRRef ref) -{ - IRIns *ir; - Reg r; - if (ra_iskref(ref)) { - r = ra_krefreg(ref); - lua_assert(!rset_test(as->freeset, r)); - ra_free(as, r); - ra_modified(as, r); - emit_loadi(as, r, ra_krefk(as, ref)); - return r; - } - ir = IR(ref); - r = ir->r; - lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); - ra_free(as, r); - ra_modified(as, r); - ir->r = RID_INIT; /* Do not keep any hint. */ - RA_DBGX((as, "remat $i $r", ir, r)); -#if !LJ_SOFTFP - if (ir->o == IR_KNUM) { - emit_loadn(as, r, ir_knum(ir)); - } else -#endif - if (emit_canremat(REF_BASE) && ir->o == IR_BASE) { - ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */ - emit_getgl(as, r, jit_base); - } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) { - lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */ - emit_getgl(as, r, cur_L); -#if LJ_64 - } else if (ir->o == IR_KINT64) { - emit_loadu64(as, r, ir_kint64(ir)->u64); -#endif - } else { - lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || - ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); - emit_loadi(as, r, ir->i); - } - return r; -} - -/* Force a spill. Allocate a new spill slot if needed. */ -static int32_t ra_spill(ASMState *as, IRIns *ir) -{ - int32_t slot = ir->s; - lua_assert(ir >= as->ir + REF_TRUE); - if (!ra_hasspill(slot)) { - if (irt_is64(ir->t)) { - slot = as->evenspill; - as->evenspill += 2; - } else if (as->oddspill) { - slot = as->oddspill; - as->oddspill = 0; - } else { - slot = as->evenspill; - as->oddspill = slot+1; - as->evenspill += 2; - } - if (as->evenspill > 256) - lj_trace_err(as->J, LJ_TRERR_SPILLOV); - ir->s = (uint8_t)slot; - } - return sps_scale(slot); -} - -/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */ -static Reg ra_releasetmp(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - Reg r = ir->r; - lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); - ra_free(as, r); - ra_modified(as, r); - ir->r = RID_INIT; - return r; -} - -/* Restore a register (marked as free). Rematerialize or force a spill. */ -static Reg ra_restore(ASMState *as, IRRef ref) -{ - if (emit_canremat(ref)) { - return ra_rematk(as, ref); - } else { - IRIns *ir = IR(ref); - int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */ - Reg r = ir->r; - lua_assert(ra_hasreg(r)); - ra_sethint(ir->r, r); /* Keep hint. */ - ra_free(as, r); - if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */ - ra_modified(as, r); - RA_DBGX((as, "restore $i $r", ir, r)); - emit_spload(as, ir, r, ofs); - } - return r; - } -} - -/* Save a register to a spill slot. */ -static void ra_save(ASMState *as, IRIns *ir, Reg r) -{ - RA_DBGX((as, "save $i $r", ir, r)); - emit_spstore(as, ir, r, sps_scale(ir->s)); -} - -#define MINCOST(name) \ - if (rset_test(RSET_ALL, RID_##name) && \ - LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \ - cost = as->cost[RID_##name]; - -/* Evict the register with the lowest cost, forcing a restore. 
*/ -static Reg ra_evict(ASMState *as, RegSet allow) -{ - IRRef ref; - RegCost cost = ~(RegCost)0; - lua_assert(allow != RSET_EMPTY); - if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) { - GPRDEF(MINCOST) - } else { - FPRDEF(MINCOST) - } - ref = regcost_ref(cost); - lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins)); - /* Preferably pick any weak ref instead of a non-weak, non-const ref. */ - if (!irref_isk(ref) && (as->weakset & allow)) { - IRIns *ir = IR(ref); - if (!rset_test(as->weakset, ir->r)) - ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]); - } - return ra_restore(as, ref); -} - -/* Pick any register (marked as free). Evict on-demand. */ -static Reg ra_pick(ASMState *as, RegSet allow) -{ - RegSet pick = as->freeset & allow; - if (!pick) - return ra_evict(as, allow); - else - return rset_picktop(pick); -} - -/* Get a scratch register (marked as free). */ -static Reg ra_scratch(ASMState *as, RegSet allow) -{ - Reg r = ra_pick(as, allow); - ra_modified(as, r); - RA_DBGX((as, "scratch $r", r)); - return r; -} - -/* Evict all registers from a set (if not free). */ -static void ra_evictset(ASMState *as, RegSet drop) -{ - RegSet work; - as->modset |= drop; -#if !LJ_SOFTFP - work = (drop & ~as->freeset) & RSET_FPR; - while (work) { - Reg r = rset_pickbot(work); - ra_restore(as, regcost_ref(as->cost[r])); - rset_clear(work, r); - checkmclim(as); - } -#endif - work = (drop & ~as->freeset); - while (work) { - Reg r = rset_pickbot(work); - ra_restore(as, regcost_ref(as->cost[r])); - rset_clear(work, r); - checkmclim(as); - } -} - -/* Evict (rematerialize) all registers allocated to constants. */ -static void ra_evictk(ASMState *as) -{ - RegSet work; -#if !LJ_SOFTFP - work = ~as->freeset & RSET_FPR; - while (work) { - Reg r = rset_pickbot(work); - IRRef ref = regcost_ref(as->cost[r]); - if (emit_canremat(ref) && irref_isk(ref)) { - ra_rematk(as, ref); - checkmclim(as); - } - rset_clear(work, r); - } -#endif - work = ~as->freeset & RSET_GPR; - while (work) { - Reg r = rset_pickbot(work); - IRRef ref = regcost_ref(as->cost[r]); - if (emit_canremat(ref) && irref_isk(ref)) { - ra_rematk(as, ref); - checkmclim(as); - } - rset_clear(work, r); - } -} - -#ifdef RID_NUM_KREF -/* Allocate a register for a constant. */ -static Reg ra_allock(ASMState *as, int32_t k, RegSet allow) -{ - /* First try to find a register which already holds the same constant. */ - RegSet pick, work = ~as->freeset & RSET_GPR; - Reg r; - while (work) { - IRRef ref; - r = rset_pickbot(work); - ref = regcost_ref(as->cost[r]); - if (ref < ASMREF_L && - k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i)) - return r; - rset_clear(work, r); - } - pick = as->freeset & allow; - if (pick) { - /* Constants should preferably get unmodified registers. */ - if ((pick & ~as->modset)) - pick &= ~as->modset; - r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */ - } else { - r = ra_evict(as, allow); - } - RA_DBGX((as, "allock $x $r", k, r)); - ra_setkref(as, r, k); - rset_clear(as->freeset, r); - ra_noweak(as, r); - return r; -} - -/* Allocate a specific register for a constant. */ -static void ra_allockreg(ASMState *as, int32_t k, Reg r) -{ - Reg kr = ra_allock(as, k, RID2RSET(r)); - if (kr != r) { - IRIns irdummy; - irdummy.t.irt = IRT_INT; - ra_scratch(as, RID2RSET(r)); - emit_movrr(as, &irdummy, r, kr); - } -} -#else -#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k)) -#endif - -/* Allocate a register for ref from the allowed set of registers. 
-** Note: this function assumes the ref does NOT have a register yet! -** Picks an optimal register, sets the cost and marks the register as non-free. -*/ -static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow) -{ - IRIns *ir = IR(ref); - RegSet pick = as->freeset & allow; - Reg r; - lua_assert(ra_noreg(ir->r)); - if (pick) { - /* First check register hint from propagation or PHI. */ - if (ra_hashint(ir->r)) { - r = ra_gethint(ir->r); - if (rset_test(pick, r)) /* Use hint register if possible. */ - goto found; - /* Rematerialization is cheaper than missing a hint. */ - if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) { - ra_rematk(as, regcost_ref(as->cost[r])); - goto found; - } - RA_DBGX((as, "hintmiss $f $r", ref, r)); - } - /* Invariants should preferably get unmodified registers. */ - if (ref < as->loopref && !irt_isphi(ir->t)) { - if ((pick & ~as->modset)) - pick &= ~as->modset; - r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */ - } else { - /* We've got plenty of regs, so get callee-save regs if possible. */ - if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH)) - pick &= ~RSET_SCRATCH; - r = rset_picktop(pick); - } - } else { - r = ra_evict(as, allow); - } -found: - RA_DBGX((as, "alloc $f $r", ref, r)); - ir->r = (uint8_t)r; - rset_clear(as->freeset, r); - ra_noweak(as, r); - as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t)); - return r; -} - -/* Allocate a register on-demand. */ -static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow) -{ - Reg r = IR(ref)->r; - /* Note: allow is ignored if the register is already allocated. */ - if (ra_noreg(r)) r = ra_allocref(as, ref, allow); - ra_noweak(as, r); - return r; -} - -/* Rename register allocation and emit move. */ -static void ra_rename(ASMState *as, Reg down, Reg up) -{ - IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]); - IRIns *ir = IR(ref); - ir->r = (uint8_t)up; - as->cost[down] = 0; - lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR)); - lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up)); - ra_free(as, down); /* 'down' is free ... */ - ra_modified(as, down); - rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */ - ra_noweak(as, up); - RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up)); - emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */ - if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */ - lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno); - ren = tref_ref(lj_ir_emit(as->J)); - as->ir = as->T->ir; /* The IR may have been reallocated. */ - IR(ren)->r = (uint8_t)down; - IR(ren)->s = SPS_NONE; - } -} - -/* Pick a destination register (marked as free). -** Caveat: allow is ignored if there's already a destination register. -** Use ra_destreg() to get a specific register. -*/ -static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow) -{ - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - } else { - if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) { - dest = ra_gethint(dest); - ra_modified(as, dest); - RA_DBGX((as, "dest $r", dest)); - } else { - dest = ra_scratch(as, allow); - } - ir->r = dest; - } - if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest); - return dest; -} - -/* Force a specific destination register (marked as free). 
*/ -static void ra_destreg(ASMState *as, IRIns *ir, Reg r) -{ - Reg dest = ra_dest(as, ir, RID2RSET(r)); - if (dest != r) { - lua_assert(rset_test(as->freeset, r)); - ra_modified(as, r); - emit_movrr(as, ir, dest, r); - } -} - -#if LJ_TARGET_X86ORX64 -/* Propagate dest register to left reference. Emit moves as needed. -** This is a required fixup step for all 2-operand machine instructions. -*/ -static void ra_left(ASMState *as, Reg dest, IRRef lref) -{ - IRIns *ir = IR(lref); - Reg left = ir->r; - if (ra_noreg(left)) { - if (irref_isk(lref)) { - if (ir->o == IR_KNUM) { - cTValue *tv = ir_knum(ir); - /* FP remat needs a load except for +0. Still better than eviction. */ - if (tvispzero(tv) || !(as->freeset & RSET_FPR)) { - emit_loadn(as, dest, tv); - return; - } -#if LJ_64 - } else if (ir->o == IR_KINT64) { - emit_loadu64(as, dest, ir_kint64(ir)->u64); - return; -#endif - } else if (ir->o != IR_KPRI) { - lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || - ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); - emit_loadi(as, dest, ir->i); - return; - } - } - if (!ra_hashint(left) && !iscrossref(as, lref)) - ra_sethint(ir->r, dest); /* Propagate register hint. */ - left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR); - } - ra_noweak(as, left); - /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */ - if (dest != left) { - /* Use register renaming if dest is the PHI reg. */ - if (irt_isphi(ir->t) && as->phireg[dest] == lref) { - ra_modified(as, left); - ra_rename(as, left, dest); - } else { - emit_movrr(as, ir, dest, left); - } - } -} -#else -/* Similar to ra_left, except we override any hints. */ -static void ra_leftov(ASMState *as, Reg dest, IRRef lref) -{ - IRIns *ir = IR(lref); - Reg left = ir->r; - if (ra_noreg(left)) { - ra_sethint(ir->r, dest); /* Propagate register hint. */ - left = ra_allocref(as, lref, - (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR); - } - ra_noweak(as, left); - if (dest != left) { - /* Use register renaming if dest is the PHI reg. */ - if (irt_isphi(ir->t) && as->phireg[dest] == lref) { - ra_modified(as, left); - ra_rename(as, left, dest); - } else { - emit_movrr(as, ir, dest, left); - } - } -} -#endif - -#if !LJ_64 -/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */ -static void ra_destpair(ASMState *as, IRIns *ir) -{ - Reg destlo = ir->r, desthi = (ir+1)->r; - /* First spill unrelated refs blocking the destination registers. */ - if (!rset_test(as->freeset, RID_RETLO) && - destlo != RID_RETLO && desthi != RID_RETLO) - ra_restore(as, regcost_ref(as->cost[RID_RETLO])); - if (!rset_test(as->freeset, RID_RETHI) && - destlo != RID_RETHI && desthi != RID_RETHI) - ra_restore(as, regcost_ref(as->cost[RID_RETHI])); - /* Next free the destination registers (if any). */ - if (ra_hasreg(destlo)) { - ra_free(as, destlo); - ra_modified(as, destlo); - } else { - destlo = RID_RETLO; - } - if (ra_hasreg(desthi)) { - ra_free(as, desthi); - ra_modified(as, desthi); - } else { - desthi = RID_RETHI; - } - /* Check for conflicts and shuffle the registers as needed. 
*/ - if (destlo == RID_RETHI) { - if (desthi == RID_RETLO) { -#if LJ_TARGET_X86 - *--as->mcp = XI_XCHGa + RID_RETHI; -#else - emit_movrr(as, ir, RID_RETHI, RID_TMP); - emit_movrr(as, ir, RID_RETLO, RID_RETHI); - emit_movrr(as, ir, RID_TMP, RID_RETLO); -#endif - } else { - emit_movrr(as, ir, RID_RETHI, RID_RETLO); - if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI); - } - } else if (desthi == RID_RETLO) { - emit_movrr(as, ir, RID_RETLO, RID_RETHI); - if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO); - } else { - if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI); - if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO); - } - /* Restore spill slots (if any). */ - if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI); - if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO); -} -#endif - -/* -- Snapshot handling --------- ----------------------------------------- */ - -/* Can we rematerialize a KNUM instead of forcing a spill? */ -static int asm_snap_canremat(ASMState *as) -{ - Reg r; - for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++) - if (irref_isk(regcost_ref(as->cost[r]))) - return 1; - return 0; -} - -/* Check whether a sunk store corresponds to an allocation. */ -static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs) -{ - if (irs->s == 255) { - if (irs->o == IR_ASTORE || irs->o == IR_HSTORE || - irs->o == IR_FSTORE || irs->o == IR_XSTORE) { - IRIns *irk = IR(irs->op1); - if (irk->o == IR_AREF || irk->o == IR_HREFK) - irk = IR(irk->op1); - return (IR(irk->op1) == ira); - } - return 0; - } else { - return (ira + irs->s == irs); /* Quick check. */ - } -} - -/* Allocate register or spill slot for a ref that escapes to a snapshot. */ -static void asm_snap_alloc1(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) { - if (ir->r == RID_SINK) { - ir->r = RID_SUNK; -#if LJ_HASFFI - if (ir->o == IR_CNEWI) { /* Allocate CNEWI value. */ - asm_snap_alloc1(as, ir->op2); - if (LJ_32 && (ir+1)->o == IR_HIOP) - asm_snap_alloc1(as, (ir+1)->op2); - } else -#endif - { /* Allocate stored values for TNEW, TDUP and CNEW. */ - IRIns *irs; - lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW); - for (irs = IR(as->snapref-1); irs > ir; irs--) - if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) { - lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || - irs->o == IR_FSTORE || irs->o == IR_XSTORE); - asm_snap_alloc1(as, irs->op2); - if (LJ_32 && (irs+1)->o == IR_HIOP) - asm_snap_alloc1(as, (irs+1)->op2); - } - } - } else { - RegSet allow; - if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) { - IRIns *irc; - for (irc = IR(as->curins); irc > ir; irc--) - if ((irc->op1 == ref || irc->op2 == ref) && - !(irc->r == RID_SINK || irc->r == RID_SUNK)) - goto nosink; /* Don't sink conversion if result is used. */ - asm_snap_alloc1(as, ir->op1); - return; - } - nosink: - allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR; - if ((as->freeset & allow) || - (allow == RSET_FPR && asm_snap_canremat(as))) { - /* Get a weak register if we have a free one or can rematerialize. */ - Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */ - if (!irt_isphi(ir->t)) - ra_weak(as, r); /* But mark it as weakly referenced. */ - checkmclim(as); - RA_DBGX((as, "snapreg $f $r", ref, ir->r)); - } else { - ra_spill(as, ir); /* Otherwise force a spill slot. */ - RA_DBGX((as, "snapspill $f $s", ref, ir->s)); - } - } - } -} - -/* Allocate refs escaping to a snapshot. 
*/ -static void asm_snap_alloc(ASMState *as) -{ - SnapShot *snap = &as->T->snap[as->snapno]; - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - IRRef ref = snap_ref(sn); - if (!irref_isk(ref)) { - asm_snap_alloc1(as, ref); - if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { - lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP); - asm_snap_alloc1(as, ref+1); - } - } - } -} - -/* All guards for a snapshot use the same exitno. This is currently the -** same as the snapshot number. Since the exact origin of the exit cannot -** be determined, all guards for the same snapshot must exit with the same -** RegSP mapping. -** A renamed ref which has been used in a prior guard for the same snapshot -** would cause an inconsistency. The easy way out is to force a spill slot. -*/ -static int asm_snap_checkrename(ASMState *as, IRRef ren) -{ - SnapShot *snap = &as->T->snap[as->snapno]; - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - IRRef ref = snap_ref(sn); - if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) { - IRIns *ir = IR(ref); - ra_spill(as, ir); /* Register renamed, so force a spill slot. */ - RA_DBGX((as, "snaprensp $f $s", ref, ir->s)); - return 1; /* Found. */ - } - } - return 0; /* Not found. */ -} - -/* Prepare snapshot for next guard instruction. */ -static void asm_snap_prep(ASMState *as) -{ - if (as->curins < as->snapref) { - do { - if (as->snapno == 0) return; /* Called by sunk stores before snap #0. */ - as->snapno--; - as->snapref = as->T->snap[as->snapno].ref; - } while (as->curins < as->snapref); - asm_snap_alloc(as); - as->snaprename = as->T->nins; - } else { - /* Process any renames above the highwater mark. */ - for (; as->snaprename < as->T->nins; as->snaprename++) { - IRIns *ir = IR(as->snaprename); - if (asm_snap_checkrename(as, ir->op1)) - ir->op2 = REF_BIAS-1; /* Kill rename. */ - } - } -} - -/* -- Miscellaneous helpers ----------------------------------------------- */ - -/* Calculate stack adjustment. */ -static int32_t asm_stack_adjust(ASMState *as) -{ - if (as->evenspill <= SPS_FIXED) - return 0; - return sps_scale(sps_align(as->evenspill)); -} - -/* Must match with hash*() in lj_tab.c. 
*/ -static uint32_t ir_khash(IRIns *ir) -{ - uint32_t lo, hi; - if (irt_isstr(ir->t)) { - return ir_kstr(ir)->hash; - } else if (irt_isnum(ir->t)) { - lo = ir_knum(ir)->u32.lo; - hi = ir_knum(ir)->u32.hi << 1; - } else if (irt_ispri(ir->t)) { - lua_assert(!irt_isnil(ir->t)); - return irt_type(ir->t)-IRT_FALSE; - } else { - lua_assert(irt_isgcv(ir->t)); - lo = u32ptr(ir_kgc(ir)); - hi = lo + HASH_BIAS; - } - return hashrot(lo, hi); -} - -/* -- Allocations --------------------------------------------------------- */ - -static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args); -static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci); - -static void asm_snew(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new]; - IRRef args[3]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* const char *str */ - args[2] = ir->op2; /* size_t len */ - as->gcsteps++; - asm_setupresult(as, ir, ci); /* GCstr * */ - asm_gencall(as, ci, args); -} - -static void asm_tnew(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1]; - IRRef args[2]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ASMREF_TMP1; /* uint32_t ahsize */ - as->gcsteps++; - asm_setupresult(as, ir, ci); /* GCtab * */ - asm_gencall(as, ci, args); - ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1)); -} - -static void asm_tdup(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup]; - IRRef args[2]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* const GCtab *kt */ - as->gcsteps++; - asm_setupresult(as, ir, ci); /* GCtab * */ - asm_gencall(as, ci, args); -} - -static void asm_gc_check(ASMState *as); - -/* Explicit GC step. */ -static void asm_gcstep(ASMState *as, IRIns *ir) -{ - IRIns *ira; - for (ira = IR(as->stopins+1); ira < ir; ira++) - if ((ira->o == IR_TNEW || ira->o == IR_TDUP || - (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) && - ra_used(ira)) - as->gcsteps++; - if (as->gcsteps) - asm_gc_check(as); - as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */ -} - -/* -- Buffer operations --------------------------------------------------- */ - -static void asm_tvptr(ASMState *as, Reg dest, IRRef ref); - -static void asm_bufhdr(ASMState *as, IRIns *ir) -{ - Reg sb = ra_dest(as, ir, RSET_GPR); - if ((ir->op2 & IRBUFHDR_APPEND)) { - /* Rematerialize const buffer pointer instead of likely spill. */ - IRIns *irp = IR(ir->op1); - if (!(ra_hasreg(irp->r) || irp == ir-1 || - (irp == ir-2 && !ra_used(ir-1)))) { - while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND))) - irp = IR(irp->op1); - if (irref_isk(irp->op1)) { - ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR)); - ir = irp; - } - } - } else { - Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb)); - /* Passing ir isn't strictly correct, but it's an IRT_P32, too. */ - emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p)); - emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b)); - } -#if LJ_TARGET_X86ORX64 - ra_left(as, sb, ir->op1); -#else - ra_leftov(as, sb, ir->op1); -#endif -} - -static void asm_bufput(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr]; - IRRef args[3]; - IRIns *irs; - int kchar = -1; - args[0] = ir->op1; /* SBuf * */ - args[1] = ir->op2; /* GCstr * */ - irs = IR(ir->op2); - lua_assert(irt_isstr(irs->t)); - if (irs->o == IR_KGC) { - GCstr *s = ir_kstr(irs); - if (s->len == 1) { /* Optimize put of single-char string constant. 
*/ - kchar = strdata(s)[0]; - args[1] = ASMREF_TMP1; /* int, truncated to char */ - ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar]; - } - } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) { - if (irs->o == IR_TOSTR) { /* Fuse number to string conversions. */ - if (irs->op2 == IRTOSTR_NUM) { - args[1] = ASMREF_TMP1; /* TValue * */ - ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum]; - } else { - lua_assert(irt_isinteger(IR(irs->op1)->t)); - args[1] = irs->op1; /* int */ - if (irs->op2 == IRTOSTR_INT) - ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint]; - else - ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar]; - } - } else if (irs->o == IR_SNEW) { /* Fuse string allocation. */ - args[1] = irs->op1; /* const void * */ - args[2] = irs->op2; /* MSize */ - ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem]; - } - } - asm_setupresult(as, ir, ci); /* SBuf * */ - asm_gencall(as, ci, args); - if (args[1] == ASMREF_TMP1) { - Reg tmp = ra_releasetmp(as, ASMREF_TMP1); - if (kchar == -1) - asm_tvptr(as, tmp, irs->op1); - else - ra_allockreg(as, kchar, tmp); - } -} - -static void asm_bufstr(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr]; - IRRef args[1]; - args[0] = ir->op1; /* SBuf *sb */ - as->gcsteps++; - asm_setupresult(as, ir, ci); /* GCstr * */ - asm_gencall(as, ci, args); -} - -/* -- Type conversions ---------------------------------------------------- */ - -static void asm_tostr(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci; - IRRef args[2]; - args[0] = ASMREF_L; - as->gcsteps++; - if (ir->op2 == IRTOSTR_NUM) { - args[1] = ASMREF_TMP1; /* cTValue * */ - ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num]; - } else { - args[1] = ir->op1; /* int32_t k */ - if (ir->op2 == IRTOSTR_INT) - ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int]; - else - ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char]; - } - asm_setupresult(as, ir, ci); /* GCstr * */ - asm_gencall(as, ci, args); - if (ir->op2 == IRTOSTR_NUM) - asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1); -} - -#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86 -static void asm_conv64(ASMState *as, IRIns *ir) -{ - IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); - IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); - IRCallID id; - IRRef args[2]; - lua_assert((ir-1)->o == IR_CONV && ir->o == IR_HIOP); - args[LJ_BE] = (ir-1)->op1; - args[LJ_LE] = ir->op1; - if (st == IRT_NUM || st == IRT_FLOAT) { - id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64); - ir--; - } else { - id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64); - } - { -#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP - CCallInfo cim = lj_ir_callinfo[id], *ci = &cim; - cim.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! 
*/ -#else - const CCallInfo *ci = &lj_ir_callinfo[id]; -#endif - asm_setupresult(as, ir, ci); - asm_gencall(as, ci, args); - } -} -#endif - -/* -- Memory references --------------------------------------------------- */ - -static void asm_newref(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; - IRRef args[3]; - if (ir->r == RID_SINK) - return; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* GCtab *t */ - args[2] = ASMREF_TMP1; /* cTValue *key */ - asm_setupresult(as, ir, ci); /* TValue * */ - asm_gencall(as, ci, args); - asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2); -} - -static void asm_lref(ASMState *as, IRIns *ir) -{ - Reg r = ra_dest(as, ir, RSET_GPR); -#if LJ_TARGET_X86ORX64 - ra_left(as, r, ASMREF_L); -#else - ra_leftov(as, r, ASMREF_L); -#endif -} - -/* -- Calls --------------------------------------------------------------- */ - -/* Collect arguments from CALL* and CARG instructions. */ -static void asm_collectargs(ASMState *as, IRIns *ir, - const CCallInfo *ci, IRRef *args) -{ - uint32_t n = CCI_XNARGS(ci); - lua_assert(n <= CCI_NARGS_MAX*2); /* Account for split args. */ - if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; } - while (n-- > 1) { - ir = IR(ir->op1); - lua_assert(ir->o == IR_CARG); - args[n] = ir->op2 == REF_NIL ? 0 : ir->op2; - } - args[0] = ir->op1 == REF_NIL ? 0 : ir->op1; - lua_assert(IR(ir->op1)->o != IR_CARG); -} - -/* Reconstruct CCallInfo flags for CALLX*. */ -static uint32_t asm_callx_flags(ASMState *as, IRIns *ir) -{ - uint32_t nargs = 0; - if (ir->op1 != REF_NIL) { /* Count number of arguments first. */ - IRIns *ira = IR(ir->op1); - nargs++; - while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); } - } -#if LJ_HASFFI - if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */ - CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i; - CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id); - nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0); -#if LJ_TARGET_X86 - nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT); -#endif - } -#endif - return (nargs | (ir->t.irt << CCI_OTSHIFT)); -} - -static void asm_callid(ASMState *as, IRIns *ir, IRCallID id) -{ - const CCallInfo *ci = &lj_ir_callinfo[id]; - IRRef args[2]; - args[0] = ir->op1; - args[1] = ir->op2; - asm_setupresult(as, ir, ci); - asm_gencall(as, ci, args); -} - -static void asm_call(ASMState *as, IRIns *ir) -{ - IRRef args[CCI_NARGS_MAX]; - const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; - asm_collectargs(as, ir, ci, args); - asm_setupresult(as, ir, ci); - asm_gencall(as, ci, args); -} - -#if !LJ_SOFTFP -static void asm_fppow(ASMState *as, IRIns *ir, IRRef lref, IRRef rref) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow]; - IRRef args[2]; - args[0] = lref; - args[1] = rref; - asm_setupresult(as, ir, ci); - asm_gencall(as, ci, args); -} - -static int asm_fpjoin_pow(ASMState *as, IRIns *ir) -{ - IRIns *irp = IR(ir->op1); - if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { - IRIns *irpp = IR(irp->op1); - if (irpp == ir-2 && irpp->o == IR_FPMATH && - irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { - asm_fppow(as, ir, irpp->op1, irp->op2); - return 1; - } - } - return 0; -} -#endif - -/* -- PHI and loop handling ----------------------------------------------- */ - -/* Break a PHI cycle by renaming to a free register (evict if needed). */ -static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby, - RegSet allow) -{ - RegSet candidates = blocked & allow; - if (candidates) { /* If this register file has candidates. 
*/ - /* Note: the set for ra_pick cannot be empty, since each register file - ** has some registers never allocated to PHIs. - */ - Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */ - if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */ - candidates = candidates & ~blockedby; - down = rset_picktop(candidates); /* Pick candidate PHI register. */ - ra_rename(as, down, up); /* And rename it to the free register. */ - } -} - -/* PHI register shuffling. -** -** The allocator tries hard to preserve PHI register assignments across -** the loop body. Most of the time this loop does nothing, since there -** are no register mismatches. -** -** If a register mismatch is detected and ... -** - the register is currently free: rename it. -** - the register is blocked by an invariant: restore/remat and rename it. -** - Otherwise the register is used by another PHI, so mark it as blocked. -** -** The renames are order-sensitive, so just retry the loop if a register -** is marked as blocked, but has been freed in the meantime. A cycle is -** detected if all of the blocked registers are allocated. To break the -** cycle rename one of them to a free register and retry. -** -** Note that PHI spill slots are kept in sync and don't need to be shuffled. -*/ -static void asm_phi_shuffle(ASMState *as) -{ - RegSet work; - - /* Find and resolve PHI register mismatches. */ - for (;;) { - RegSet blocked = RSET_EMPTY; - RegSet blockedby = RSET_EMPTY; - RegSet phiset = as->phiset; - while (phiset) { /* Check all left PHI operand registers. */ - Reg r = rset_pickbot(phiset); - IRIns *irl = IR(as->phireg[r]); - Reg left = irl->r; - if (r != left) { /* Mismatch? */ - if (!rset_test(as->freeset, r)) { /* PHI register blocked? */ - IRRef ref = regcost_ref(as->cost[r]); - /* Blocked by other PHI (w/reg)? */ - if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) { - rset_set(blocked, r); - if (ra_hasreg(left)) - rset_set(blockedby, left); - left = RID_NONE; - } else { /* Otherwise grab register from invariant. */ - ra_restore(as, ref); - checkmclim(as); - } - } - if (ra_hasreg(left)) { - ra_rename(as, left, r); - checkmclim(as); - } - } - rset_clear(phiset, r); - } - if (!blocked) break; /* Finished. */ - if (!(as->freeset & blocked)) { /* Break cycles if none are free. */ - asm_phi_break(as, blocked, blockedby, RSET_GPR); - if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR); - checkmclim(as); - } /* Else retry some more renames. */ - } - - /* Restore/remat invariants whose registers are modified inside the loop. */ -#if !LJ_SOFTFP - work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR; - while (work) { - Reg r = rset_pickbot(work); - ra_restore(as, regcost_ref(as->cost[r])); - rset_clear(work, r); - checkmclim(as); - } -#endif - work = as->modset & ~(as->freeset | as->phiset); - while (work) { - Reg r = rset_pickbot(work); - ra_restore(as, regcost_ref(as->cost[r])); - rset_clear(work, r); - checkmclim(as); - } - - /* Allocate and save all unsaved PHI regs and clear marks. */ - work = as->phiset; - while (work) { - Reg r = rset_picktop(work); - IRRef lref = as->phireg[r]; - IRIns *ir = IR(lref); - if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */ - irt_clearmark(ir->t); /* Handled here, so clear marker now. */ - ra_alloc1(as, lref, RID2RSET(r)); - ra_save(as, ir, r); /* Save to spill slot inside the loop. */ - checkmclim(as); - } - rset_clear(work, r); - } -} - -/* Copy unsynced left/right PHI spill slots. Rarely needed. 
*/ -static void asm_phi_copyspill(ASMState *as) -{ - int need = 0; - IRIns *ir; - for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) - if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s)) - need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */ - if ((need & 1)) { /* Copy integer spill slots. */ -#if !LJ_TARGET_X86ORX64 - Reg r = RID_TMP; -#else - Reg r = RID_RET; - if ((as->freeset & RSET_GPR)) - r = rset_pickbot((as->freeset & RSET_GPR)); - else - emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); -#endif - for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) { - if (ra_hasspill(ir->s)) { - IRIns *irl = IR(ir->op1); - if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) { - emit_spstore(as, irl, r, sps_scale(irl->s)); - emit_spload(as, ir, r, sps_scale(ir->s)); - checkmclim(as); - } - } - } -#if LJ_TARGET_X86ORX64 - if (!rset_test(as->freeset, r)) - emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); -#endif - } -#if !LJ_SOFTFP - if ((need & 2)) { /* Copy FP spill slots. */ -#if LJ_TARGET_X86 - Reg r = RID_XMM0; -#else - Reg r = RID_FPRET; -#endif - if ((as->freeset & RSET_FPR)) - r = rset_pickbot((as->freeset & RSET_FPR)); - if (!rset_test(as->freeset, r)) - emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); - for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) { - if (ra_hasspill(ir->s)) { - IRIns *irl = IR(ir->op1); - if (ra_hasspill(irl->s) && irt_isfp(ir->t)) { - emit_spstore(as, irl, r, sps_scale(irl->s)); - emit_spload(as, ir, r, sps_scale(ir->s)); - checkmclim(as); - } - } - } - if (!rset_test(as->freeset, r)) - emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); - } -#endif -} - -/* Emit renames for left PHIs which are only spilled outside the loop. */ -static void asm_phi_fixup(ASMState *as) -{ - RegSet work = as->phiset; - while (work) { - Reg r = rset_picktop(work); - IRRef lref = as->phireg[r]; - IRIns *ir = IR(lref); - if (irt_ismarked(ir->t)) { - irt_clearmark(ir->t); - /* Left PHI gained a spill slot before the loop? */ - if (ra_hasspill(ir->s)) { - IRRef ren; - lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno); - ren = tref_ref(lj_ir_emit(as->J)); - as->ir = as->T->ir; /* The IR may have been reallocated. */ - IR(ren)->r = (uint8_t)r; - IR(ren)->s = SPS_NONE; - } - } - rset_clear(work, r); - } -} - -/* Setup right PHI reference. */ -static void asm_phi(ASMState *as, IRIns *ir) -{ - RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & - ~as->phiset; - RegSet afree = (as->freeset & allow); - IRIns *irl = IR(ir->op1); - IRIns *irr = IR(ir->op2); - if (ir->r == RID_SINK) /* Sink PHI. */ - return; - /* Spill slot shuffling is not implemented yet (but rarely needed). */ - if (ra_hasspill(irl->s) || ra_hasspill(irr->s)) - lj_trace_err(as->J, LJ_TRERR_NYIPHI); - /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */ - if ((afree & (afree-1))) { /* Two or more free registers? */ - Reg r; - if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */ - r = ra_allocref(as, ir->op2, allow); - } else { /* Duplicate right PHI, need a copy (rare). */ - r = ra_scratch(as, allow); - emit_movrr(as, irr, r, irr->r); - } - ir->r = (uint8_t)r; - rset_set(as->phiset, r); - as->phireg[r] = (IRRef1)ir->op1; - irt_setmark(irl->t); /* Marks left PHIs _with_ register. */ - if (ra_noreg(irl->r)) - ra_sethint(irl->r, r); /* Set register hint for left PHI. */ - } else { /* Otherwise allocate a spill slot. */ - /* This is overly restrictive, but it triggers only on synthetic code. 
*/ - if (ra_hasreg(irl->r) || ra_hasreg(irr->r)) - lj_trace_err(as->J, LJ_TRERR_NYIPHI); - ra_spill(as, ir); - irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */ - } -} - -static void asm_loop_fixup(ASMState *as); - -/* Middle part of a loop. */ -static void asm_loop(ASMState *as) -{ - MCode *mcspill; - /* LOOP is a guard, so the snapno is up to date. */ - as->loopsnapno = as->snapno; - if (as->gcsteps) - asm_gc_check(as); - /* LOOP marks the transition from the variant to the invariant part. */ - as->flagmcp = as->invmcp = NULL; - as->sectref = 0; - if (!neverfuse(as)) as->fuseref = 0; - asm_phi_shuffle(as); - mcspill = as->mcp; - asm_phi_copyspill(as); - asm_loop_fixup(as); - as->mcloop = as->mcp; - RA_DBGX((as, "===== LOOP =====")); - if (!as->realign) RA_DBG_FLUSH(); - if (as->mcp != mcspill) - emit_jmp(as, mcspill); -} - -/* -- Target-specific assembler ------------------------------------------- */ - -#if LJ_TARGET_X86ORX64 -#include "lj_asm_x86.h" -#elif LJ_TARGET_ARM -#include "lj_asm_arm.h" -#elif LJ_TARGET_PPC -#include "lj_asm_ppc.h" -#elif LJ_TARGET_MIPS -#include "lj_asm_mips.h" -#else -#error "Missing assembler for target CPU" -#endif - -/* -- Instruction dispatch ------------------------------------------------ */ - -/* Assemble a single instruction. */ -static void asm_ir(ASMState *as, IRIns *ir) -{ - switch ((IROp)ir->o) { - /* Miscellaneous ops. */ - case IR_LOOP: asm_loop(as); break; - case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; - case IR_USE: - ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; - case IR_PHI: asm_phi(as, ir); break; - case IR_HIOP: asm_hiop(as, ir); break; - case IR_GCSTEP: asm_gcstep(as, ir); break; - case IR_PROF: asm_prof(as, ir); break; - - /* Guarded assertions. */ - case IR_LT: case IR_GE: case IR_LE: case IR_GT: - case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: - case IR_ABC: - asm_comp(as, ir); - break; - case IR_EQ: case IR_NE: - if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) { - as->curins--; - asm_href(as, ir-1, (IROp)ir->o); - } else { - asm_equal(as, ir); - } - break; - - case IR_RETF: asm_retf(as, ir); break; - - /* Bit ops. */ - case IR_BNOT: asm_bnot(as, ir); break; - case IR_BSWAP: asm_bswap(as, ir); break; - case IR_BAND: asm_band(as, ir); break; - case IR_BOR: asm_bor(as, ir); break; - case IR_BXOR: asm_bxor(as, ir); break; - case IR_BSHL: asm_bshl(as, ir); break; - case IR_BSHR: asm_bshr(as, ir); break; - case IR_BSAR: asm_bsar(as, ir); break; - case IR_BROL: asm_brol(as, ir); break; - case IR_BROR: asm_bror(as, ir); break; - - /* Arithmetic ops. */ - case IR_ADD: asm_add(as, ir); break; - case IR_SUB: asm_sub(as, ir); break; - case IR_MUL: asm_mul(as, ir); break; - case IR_DIV: asm_div(as, ir); break; - case IR_MOD: asm_mod(as, ir); break; - case IR_POW: asm_pow(as, ir); break; - case IR_NEG: asm_neg(as, ir); break; - case IR_ABS: asm_abs(as, ir); break; - case IR_ATAN2: asm_atan2(as, ir); break; - case IR_LDEXP: asm_ldexp(as, ir); break; - case IR_MIN: asm_min(as, ir); break; - case IR_MAX: asm_max(as, ir); break; - case IR_FPMATH: asm_fpmath(as, ir); break; - - /* Overflow-checking arithmetic ops. */ - case IR_ADDOV: asm_addov(as, ir); break; - case IR_SUBOV: asm_subov(as, ir); break; - case IR_MULOV: asm_mulov(as, ir); break; - - /* Memory references. 
*/ - case IR_AREF: asm_aref(as, ir); break; - case IR_HREF: asm_href(as, ir, 0); break; - case IR_HREFK: asm_hrefk(as, ir); break; - case IR_NEWREF: asm_newref(as, ir); break; - case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; - case IR_FREF: asm_fref(as, ir); break; - case IR_STRREF: asm_strref(as, ir); break; - case IR_LREF: asm_lref(as, ir); break; - - /* Loads and stores. */ - case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: - asm_ahuvload(as, ir); - break; - case IR_FLOAD: asm_fload(as, ir); break; - case IR_XLOAD: asm_xload(as, ir); break; - case IR_SLOAD: asm_sload(as, ir); break; - - case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; - case IR_FSTORE: asm_fstore(as, ir); break; - case IR_XSTORE: asm_xstore(as, ir); break; - - /* Allocations. */ - case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; - case IR_TNEW: asm_tnew(as, ir); break; - case IR_TDUP: asm_tdup(as, ir); break; - case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; - - /* Buffer operations. */ - case IR_BUFHDR: asm_bufhdr(as, ir); break; - case IR_BUFPUT: asm_bufput(as, ir); break; - case IR_BUFSTR: asm_bufstr(as, ir); break; - - /* Write barriers. */ - case IR_TBAR: asm_tbar(as, ir); break; - case IR_OBAR: asm_obar(as, ir); break; - - /* Type conversions. */ - case IR_TOBIT: asm_tobit(as, ir); break; - case IR_CONV: asm_conv(as, ir); break; - case IR_TOSTR: asm_tostr(as, ir); break; - case IR_STRTO: asm_strto(as, ir); break; - - /* Calls. */ - case IR_CALLA: - as->gcsteps++; - /* fallthrough */ - case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; - case IR_CALLXS: asm_callx(as, ir); break; - case IR_CARG: break; - - default: - setintV(&as->J->errinfo, ir->o); - lj_trace_err_info(as->J, LJ_TRERR_NYIIR); - break; - } -} - -/* -- Head of trace ------------------------------------------------------- */ - -/* Head of a root trace. */ -static void asm_head_root(ASMState *as) -{ - int32_t spadj; - asm_head_root_base(as); - emit_setvmstate(as, (int32_t)as->T->traceno); - spadj = asm_stack_adjust(as); - as->T->spadjust = (uint16_t)spadj; - emit_spsub(as, spadj); - /* Root traces assume a checked stack for the starting proto. */ - as->T->topslot = gcref(as->T->startpt)->pt.framesize; -} - -/* Head of a side trace. -** -** The current simplistic algorithm requires that all slots inherited -** from the parent are live in a register between pass 2 and pass 3. This -** avoids the complexity of stack slot shuffling. But of course this may -** overflow the register set in some cases and cause the dreaded error: -** "NYI: register coalescing too complex". A refined algorithm is needed. -*/ -static void asm_head_side(ASMState *as) -{ - IRRef1 sloadins[RID_MAX]; - RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */ - RegSet live = RSET_EMPTY; /* Live parent registers. */ - IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */ - int32_t spadj, spdelta; - int pass2 = 0; - int pass3 = 0; - IRRef i; - - if (as->snapno && as->topslot > as->parent->topslot) { - /* Force snap #0 alloc to prevent register overwrite in stack check. */ - as->snapno = 0; - asm_snap_alloc(as); - } - allow = asm_head_side_base(as, irp, allow); - - /* Scan all parent SLOADs and collect register dependencies. 
*/ - for (i = as->stopins; i > REF_BASE; i--) { - IRIns *ir = IR(i); - RegSP rs; - lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) || - (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL); - rs = as->parentmap[i - REF_FIRST]; - if (ra_hasreg(ir->r)) { - rset_clear(allow, ir->r); - if (ra_hasspill(ir->s)) { - ra_save(as, ir, ir->r); - checkmclim(as); - } - } else if (ra_hasspill(ir->s)) { - irt_setmark(ir->t); - pass2 = 1; - } - if (ir->r == rs) { /* Coalesce matching registers right now. */ - ra_free(as, ir->r); - } else if (ra_hasspill(regsp_spill(rs))) { - if (ra_hasreg(ir->r)) - pass3 = 1; - } else if (ra_used(ir)) { - sloadins[rs] = (IRRef1)i; - rset_set(live, rs); /* Block live parent register. */ - } - } - - /* Calculate stack frame adjustment. */ - spadj = asm_stack_adjust(as); - spdelta = spadj - (int32_t)as->parent->spadjust; - if (spdelta < 0) { /* Don't shrink the stack frame. */ - spadj = (int32_t)as->parent->spadjust; - spdelta = 0; - } - as->T->spadjust = (uint16_t)spadj; - - /* Reload spilled target registers. */ - if (pass2) { - for (i = as->stopins; i > REF_BASE; i--) { - IRIns *ir = IR(i); - if (irt_ismarked(ir->t)) { - RegSet mask; - Reg r; - RegSP rs; - irt_clearmark(ir->t); - rs = as->parentmap[i - REF_FIRST]; - if (!ra_hasspill(regsp_spill(rs))) - ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */ - else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s)) - continue; /* Same spill slot, do nothing. */ - mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow; - if (mask == RSET_EMPTY) - lj_trace_err(as->J, LJ_TRERR_NYICOAL); - r = ra_allocref(as, i, mask); - ra_save(as, ir, r); - rset_clear(allow, r); - if (r == rs) { /* Coalesce matching registers right now. */ - ra_free(as, r); - rset_clear(live, r); - } else if (ra_hasspill(regsp_spill(rs))) { - pass3 = 1; - } - checkmclim(as); - } - } - } - - /* Store trace number and adjust stack frame relative to the parent. */ - emit_setvmstate(as, (int32_t)as->T->traceno); - emit_spsub(as, spdelta); - -#if !LJ_TARGET_X86ORX64 - /* Restore BASE register from parent spill slot. */ - if (ra_hasspill(irp->s)) - emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s)); -#endif - - /* Restore target registers from parent spill slots. */ - if (pass3) { - RegSet work = ~as->freeset & RSET_ALL; - while (work) { - Reg r = rset_pickbot(work); - IRRef ref = regcost_ref(as->cost[r]); - RegSP rs = as->parentmap[ref - REF_FIRST]; - rset_clear(work, r); - if (ra_hasspill(regsp_spill(rs))) { - int32_t ofs = sps_scale(regsp_spill(rs)); - ra_free(as, r); - emit_spload(as, IR(ref), r, ofs); - checkmclim(as); - } - } - } - - /* Shuffle registers to match up target regs with parent regs. */ - for (;;) { - RegSet work; - - /* Repeatedly coalesce free live registers by moving to their target. */ - while ((work = as->freeset & live) != RSET_EMPTY) { - Reg rp = rset_pickbot(work); - IRIns *ir = IR(sloadins[rp]); - rset_clear(live, rp); - rset_clear(allow, rp); - ra_free(as, ir->r); - emit_movrr(as, ir, ir->r, rp); - checkmclim(as); - } - - /* We're done if no live registers remain. */ - if (live == RSET_EMPTY) - break; - - /* Break cycles by renaming one target to a temp. register. 
*/ - if (live & RSET_GPR) { - RegSet tmpset = as->freeset & ~live & allow & RSET_GPR; - if (tmpset == RSET_EMPTY) - lj_trace_err(as->J, LJ_TRERR_NYICOAL); - ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset)); - } - if (!LJ_SOFTFP && (live & RSET_FPR)) { - RegSet tmpset = as->freeset & ~live & allow & RSET_FPR; - if (tmpset == RSET_EMPTY) - lj_trace_err(as->J, LJ_TRERR_NYICOAL); - ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset)); - } - checkmclim(as); - /* Continue with coalescing to fix up the broken cycle(s). */ - } - - /* Inherit top stack slot already checked by parent trace. */ - as->T->topslot = as->parent->topslot; - if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */ -#ifdef EXITSTATE_CHECKEXIT - /* Highest exit + 1 indicates stack check. */ - ExitNo exitno = as->T->nsnap; -#else - /* Reuse the parent exit in the context of the parent trace. */ - ExitNo exitno = as->J->exitno; -#endif - as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */ - asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno); - } -} - -/* -- Tail of trace ------------------------------------------------------- */ - -/* Get base slot for a snapshot. */ -static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe) -{ - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - MSize n; - for (n = snap->nent; n > 0; n--) { - SnapEntry sn = map[n-1]; - if ((sn & SNAP_FRAME)) { - *gotframe = 1; - return snap_slot(sn); - } - } - return 0; -} - -/* Link to another trace. */ -static void asm_tail_link(ASMState *as) -{ - SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */ - SnapShot *snap = &as->T->snap[snapno]; - int gotframe = 0; - BCReg baseslot = asm_baseslot(as, snap, &gotframe); - - as->topslot = snap->topslot; - checkmclim(as); - ra_allocref(as, REF_BASE, RID2RSET(RID_BASE)); - - if (as->T->link == 0) { - /* Setup fixed registers for exit to interpreter. */ - const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]); - int32_t mres; - if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */ - BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins; - if (bc_isret(bc_op(*retpc))) - pc = retpc; - } - ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH); - ra_allockreg(as, i32ptr(pc), RID_LPC); - mres = (int32_t)(snap->nslots - baseslot); - switch (bc_op(*pc)) { - case BC_CALLM: case BC_CALLMT: - mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break; - case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break; - case BC_TSETM: mres -= (int32_t)bc_a(*pc); break; - default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break; - } - ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */ - } else if (baseslot) { - /* Save modified BASE for linking to trace with higher start frame. */ - emit_setgl(as, RID_BASE, jit_base); - } - emit_addptr(as, RID_BASE, 8*(int32_t)baseslot); - - /* Sync the interpreter state with the on-trace state. */ - asm_stack_restore(as, snap); - - /* Root traces that add frames need to check the stack at the end. */ - if (!as->parent && gotframe) - asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno); -} - -/* -- Trace setup --------------------------------------------------------- */ - -/* Clear reg/sp for all instructions and add register hints. 
*/ -static void asm_setup_regsp(ASMState *as) -{ - GCtrace *T = as->T; - int sink = T->sinktags; - IRRef nins = T->nins; - IRIns *ir, *lastir; - int inloop; -#if LJ_TARGET_ARM - uint32_t rload = 0xa6402a64; -#endif - - ra_setup(as); - - /* Clear reg/sp for constants. */ - for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) - ir->prev = REGSP_INIT; - - /* REF_BASE is used for implicit references to the BASE register. */ - lastir->prev = REGSP_HINT(RID_BASE); - - ir = IR(nins-1); - if (ir->o == IR_RENAME) { - do { ir--; nins--; } while (ir->o == IR_RENAME); - T->nins = nins; /* Remove any renames left over from ASM restart. */ - } - as->snaprename = nins; - as->snapref = nins; - as->snapno = T->nsnap; - - as->stopins = REF_BASE; - as->orignins = nins; - as->curins = nins; - - /* Setup register hints for parent link instructions. */ - ir = IR(REF_FIRST); - if (as->parent) { - uint16_t *p; - lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir); - if (lastir - ir > LJ_MAX_JSLOTS) - lj_trace_err(as->J, LJ_TRERR_NYICOAL); - as->stopins = (IRRef)((lastir-1) - as->ir); - for (p = as->parentmap; ir < lastir; ir++) { - RegSP rs = ir->prev; - *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */ - if (!ra_hasspill(regsp_spill(rs))) - ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs)); - else - ir->prev = REGSP_INIT; - } - } - - inloop = 0; - as->evenspill = SPS_FIRST; - for (lastir = IR(nins); ir < lastir; ir++) { - if (sink) { - if (ir->r == RID_SINK) - continue; - if (ir->r == RID_SUNK) { /* Revert after ASM restart. */ - ir->r = RID_SINK; - continue; - } - } - switch (ir->o) { - case IR_LOOP: - inloop = 1; - break; -#if LJ_TARGET_ARM - case IR_SLOAD: - if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP)) - break; - /* fallthrough */ - case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: - if (!LJ_SOFTFP && irt_isnum(ir->t)) break; - ir->prev = (uint16_t)REGSP_HINT((rload & 15)); - rload = lj_ror(rload, 4); - continue; -#endif - case IR_CALLXS: { - CCallInfo ci; - ci.flags = asm_callx_flags(as, ir); - ir->prev = asm_setup_call_slots(as, ir, &ci); - if (inloop) - as->modset |= RSET_SCRATCH; - continue; - } - case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: { - const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; - ir->prev = asm_setup_call_slots(as, ir, ci); - if (inloop) - as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ? - (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH; - continue; - } -#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) - case IR_HIOP: - switch ((ir-1)->o) { -#if LJ_SOFTFP && LJ_TARGET_ARM - case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: - if (ra_hashint((ir-1)->r)) { - ir->prev = (ir-1)->prev + 1; - continue; - } - break; -#endif -#if !LJ_SOFTFP && LJ_NEED_FP64 - case IR_CONV: - if (irt_isfp((ir-1)->t)) { - ir->prev = REGSP_HINT(RID_FPRET); - continue; - } - /* fallthrough */ -#endif - case IR_CALLN: case IR_CALLXS: -#if LJ_SOFTFP - case IR_MIN: case IR_MAX: -#endif - (ir-1)->prev = REGSP_HINT(RID_RETLO); - ir->prev = REGSP_HINT(RID_RETHI); - continue; - default: - break; - } - break; -#endif -#if LJ_SOFTFP - case IR_MIN: case IR_MAX: - if ((ir+1)->o != IR_HIOP) break; - /* fallthrough */ -#endif - /* C calls evict all scratch regs and return results in RID_RET. */ - case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT: - if (REGARG_NUMGPR < 3 && as->evenspill < 3) - as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. 
*/ -#if LJ_TARGET_X86 && LJ_HASFFI - if (0) { - case IR_CNEW: - if (ir->op2 != REF_NIL && as->evenspill < 4) - as->evenspill = 4; /* lj_cdata_newv needs 4 args. */ - } -#else - case IR_CNEW: -#endif - case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR: - case IR_BUFSTR: - ir->prev = REGSP_HINT(RID_RET); - if (inloop) - as->modset = RSET_SCRATCH; - continue; - case IR_STRTO: case IR_OBAR: - if (inloop) - as->modset = RSET_SCRATCH; - break; -#if !LJ_SOFTFP - case IR_ATAN2: -#if LJ_TARGET_X86 - if (as->evenspill < 4) /* Leave room to call atan2(). */ - as->evenspill = 4; -#endif -#if !LJ_TARGET_X86ORX64 - case IR_LDEXP: -#endif -#endif - case IR_POW: - if (!LJ_SOFTFP && irt_isnum(ir->t)) { - if (inloop) - as->modset |= RSET_SCRATCH; -#if LJ_TARGET_X86 - break; -#else - ir->prev = REGSP_HINT(RID_FPRET); - continue; -#endif - } - /* fallthrough for integer POW */ - case IR_DIV: case IR_MOD: - if (!irt_isnum(ir->t)) { - ir->prev = REGSP_HINT(RID_RET); - if (inloop) - as->modset |= (RSET_SCRATCH & RSET_GPR); - continue; - } - break; - case IR_FPMATH: -#if LJ_TARGET_X86ORX64 - if (ir->op2 <= IRFPM_TRUNC) { - if (!(as->flags & JIT_F_SSE4_1)) { - ir->prev = REGSP_HINT(RID_XMM0); - if (inloop) - as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); - continue; - } - break; - } else if (ir->op2 == IRFPM_EXP2 && !LJ_64) { - if (as->evenspill < 4) /* Leave room to call pow(). */ - as->evenspill = 4; - } -#endif - if (inloop) - as->modset |= RSET_SCRATCH; -#if LJ_TARGET_X86 - break; -#else - ir->prev = REGSP_HINT(RID_FPRET); - continue; -#endif -#if LJ_TARGET_X86ORX64 - /* Non-constant shift counts need to be in RID_ECX on x86/x64. */ - case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR: - if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) { - IR(ir->op2)->r = REGSP_HINT(RID_ECX); - if (inloop) - rset_set(as->modset, RID_ECX); - } - break; -#endif - /* Do not propagate hints across type conversions or loads. */ - case IR_TOBIT: - case IR_XLOAD: -#if !LJ_TARGET_ARM - case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: -#endif - break; - case IR_CONV: - if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM || - (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT) - break; - /* fallthrough */ - default: - /* Propagate hints across likely 'op reg, imm' or 'op reg'. */ - if (irref_isk(ir->op2) && !irref_isk(ir->op1) && - ra_hashint(regsp_reg(IR(ir->op1)->prev))) { - ir->prev = IR(ir->op1)->prev; - continue; - } - break; - } - ir->prev = REGSP_INIT; - } - if ((as->evenspill & 1)) - as->oddspill = as->evenspill++; - else - as->oddspill = 0; -} - -/* -- Assembler core ------------------------------------------------------ */ - -/* Assemble a trace. */ -void lj_asm_trace(jit_State *J, GCtrace *T) -{ - ASMState as_; - ASMState *as = &as_; - MCode *origtop; - - /* Ensure an initialized instruction beyond the last one for HIOP checks. */ - J->cur.nins = lj_ir_nextins(J); - J->cur.ir[J->cur.nins].o = IR_NOP; - - /* Setup initial state. Copy some fields to reduce indirections. */ - as->J = J; - as->T = T; - as->ir = T->ir; - as->flags = J->flags; - as->loopref = J->loopref; - as->realign = NULL; - as->loopinv = 0; - as->parent = J->parent ? traceref(J, J->parent) : NULL; - - /* Reserve MCode memory. 
*/ - as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot); - as->mcp = as->mctop; - as->mclim = as->mcbot + MCLIM_REDZONE; - asm_setup_target(as); - - do { - as->mcp = as->mctop; -#ifdef LUA_USE_ASSERT - as->mcp_prev = as->mcp; -#endif - as->curins = T->nins; - RA_DBG_START(); - RA_DBGX((as, "===== STOP =====")); - - /* General trace setup. Emit tail of trace. */ - asm_tail_prep(as); - as->mcloop = NULL; - as->flagmcp = NULL; - as->topslot = 0; - as->gcsteps = 0; - as->sectref = as->loopref; - as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED; - asm_setup_regsp(as); - if (!as->loopref) - asm_tail_link(as); - - /* Assemble a trace in linear backwards order. */ - for (as->curins--; as->curins > as->stopins; as->curins--) { - IRIns *ir = IR(as->curins); - lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */ - if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE)) - continue; /* Dead-code elimination can be soooo easy. */ - if (irt_isguard(ir->t)) - asm_snap_prep(as); - RA_DBG_REF(); - checkmclim(as); - asm_ir(as, ir); - } - } while (as->realign); /* Retry in case the MCode needs to be realigned. */ - - /* Emit head of trace. */ - RA_DBG_REF(); - checkmclim(as); - if (as->gcsteps > 0) { - as->curins = as->T->snap[0].ref; - asm_snap_prep(as); /* The GC check is a guard. */ - asm_gc_check(as); - } - ra_evictk(as); - if (as->parent) - asm_head_side(as); - else - asm_head_root(as); - asm_phi_fixup(as); - - RA_DBGX((as, "===== START ====")); - RA_DBG_FLUSH(); - if (as->freeset != RSET_ALL) - lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */ - - /* Set trace entry point before fixing up tail to allow link to self. */ - T->mcode = as->mcp; - T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0; - if (!as->loopref) - asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */ - T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp); - lj_mcode_sync(T->mcode, origtop); -} - -#undef IR - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.h deleted file mode 100644 index 85f29763220..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm.h +++ /dev/null @@ -1,17 +0,0 @@ -/* -** IR assembler (SSA IR -> machine code). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_ASM_H -#define _LJ_ASM_H - -#include "lj_jit.h" - -#if LJ_HASJIT -LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T); -LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, - MCode *target); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_arm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_arm.h deleted file mode 100644 index 81843caf96a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_arm.h +++ /dev/null @@ -1,2217 +0,0 @@ -/* -** ARM IR assembler (SSA IR -> machine code). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* -- Register allocator extensions --------------------------------------- */ - -/* Allocate a register with a hint. */ -static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) -{ - Reg r = IR(ref)->r; - if (ra_noreg(r)) { - if (!ra_hashint(r) && !iscrossref(as, ref)) - ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ - r = ra_allocref(as, ref, allow); - } - ra_noweak(as, r); - return r; -} - -/* Allocate a scratch register pair. 
*/ -static Reg ra_scratchpair(ASMState *as, RegSet allow) -{ - RegSet pick1 = as->freeset & allow; - RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN; - Reg r; - if (pick2) { - r = rset_picktop(pick2); - } else { - RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN; - if (pick) { - r = rset_picktop(pick); - ra_restore(as, regcost_ref(as->cost[r+1])); - } else { - pick = pick1 & (allow << 1) & RSET_GPRODD; - if (pick) { - r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1])); - } else { - r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN); - ra_restore(as, regcost_ref(as->cost[r+1])); - } - } - } - lua_assert(rset_test(RSET_GPREVEN, r)); - ra_modified(as, r); - ra_modified(as, r+1); - RA_DBGX((as, "scratchpair $r $r", r, r+1)); - return r; -} - -#if !LJ_SOFTFP -/* Allocate two source registers for three-operand instructions. */ -static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) -{ - IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); - Reg left = irl->r, right = irr->r; - if (ra_hasreg(left)) { - ra_noweak(as, left); - if (ra_noreg(right)) - right = ra_allocref(as, ir->op2, rset_exclude(allow, left)); - else - ra_noweak(as, right); - } else if (ra_hasreg(right)) { - ra_noweak(as, right); - left = ra_allocref(as, ir->op1, rset_exclude(allow, right)); - } else if (ra_hashint(right)) { - right = ra_allocref(as, ir->op2, allow); - left = ra_alloc1(as, ir->op1, rset_exclude(allow, right)); - } else { - left = ra_allocref(as, ir->op1, allow); - right = ra_alloc1(as, ir->op2, rset_exclude(allow, left)); - } - return left | (right << 8); -} -#endif - -/* -- Guard handling ------------------------------------------------------ */ - -/* Generate an exit stub group at the bottom of the reserved MCode memory. */ -static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) -{ - MCode *mxp = as->mcbot; - int i; - if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop) - asm_mclimit(as); - /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */ - *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP); - *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu); - mxp++; - *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */ - *mxp++ = group*EXITSTUBS_PER_GROUP; - for (i = 0; i < EXITSTUBS_PER_GROUP; i++) - *mxp++ = ARMI_B|((-6-i)&0x00ffffffu); - lj_mcode_sync(as->mcbot, mxp); - lj_mcode_commitbot(as->J, mxp); - as->mcbot = mxp; - as->mclim = as->mcbot + MCLIM_REDZONE; - return mxp - EXITSTUBS_PER_GROUP; -} - -/* Setup all needed exit stubs. */ -static void asm_exitstub_setup(ASMState *as, ExitNo nexits) -{ - ExitNo i; - if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) - lj_trace_err(as->J, LJ_TRERR_SNAPOV); - for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) - if (as->J->exitstubgroup[i] == NULL) - as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); -} - -/* Emit conditional branch to exit for guard. */ -static void asm_guardcc(ASMState *as, ARMCC cc) -{ - MCode *target = exitstub_addr(as->J, as->snapno); - MCode *p = as->mcp; - if (LJ_UNLIKELY(p == as->invmcp)) { - as->loopinv = 1; - *p = ARMI_BL | ((target-p-2) & 0x00ffffffu); - emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1); - return; - } - emit_branch(as, ARMF_CC(ARMI_BL, cc), target); -} - -/* -- Operand fusion ------------------------------------------------------ */ - -/* Limit linear search to this distance. Avoids O(n^2) behavior. */ -#define CONFLICT_SEARCH_LIM 31 - -/* Check if there's no conflicting instruction between curins and ref. 
*/ -static int noconflict(ASMState *as, IRRef ref, IROp conflict) -{ - IRIns *ir = as->ir; - IRRef i = as->curins; - if (i > ref + CONFLICT_SEARCH_LIM) - return 0; /* Give up, ref is too far away. */ - while (--i > ref) - if (ir[i].o == conflict) - return 0; /* Conflict found. */ - return 1; /* Ok, no conflict. */ -} - -/* Fuse the array base of colocated arrays. */ -static int32_t asm_fuseabase(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && - !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) - return (int32_t)sizeof(GCtab); - return 0; -} - -/* Fuse array/hash/upvalue reference into register+offset operand. */ -static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow, - int lim) -{ - IRIns *ir = IR(ref); - if (ra_noreg(ir->r)) { - if (ir->o == IR_AREF) { - if (mayfuse(as, ref)) { - if (irref_isk(ir->op2)) { - IRRef tab = IR(ir->op1)->op1; - int32_t ofs = asm_fuseabase(as, tab); - IRRef refa = ofs ? tab : ir->op1; - ofs += 8*IR(ir->op2)->i; - if (ofs > -lim && ofs < lim) { - *ofsp = ofs; - return ra_alloc1(as, refa, allow); - } - } - } - } else if (ir->o == IR_HREFK) { - if (mayfuse(as, ref)) { - int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); - if (ofs < lim) { - *ofsp = ofs; - return ra_alloc1(as, ir->op1, allow); - } - } - } else if (ir->o == IR_UREFC) { - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); - *ofsp = (ofs & 255); /* Mask out less bits to allow LDRD. */ - return ra_allock(as, (ofs & ~255), allow); - } - } - } - *ofsp = 0; - return ra_alloc1(as, ref, allow); -} - -/* Fuse m operand into arithmetic/logic instructions. */ -static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow) -{ - IRIns *ir = IR(ref); - if (ra_hasreg(ir->r)) { - ra_noweak(as, ir->r); - return ARMF_M(ir->r); - } else if (irref_isk(ref)) { - uint32_t k = emit_isk12(ai, ir->i); - if (k) - return k; - } else if (mayfuse(as, ref)) { - if (ir->o >= IR_BSHL && ir->o <= IR_BROR) { - Reg m = ra_alloc1(as, ir->op1, allow); - ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL : - ir->o == IR_BSHR ? ARMSH_LSR : - ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR; - if (irref_isk(ir->op2)) { - return m | ARMF_SH(sh, (IR(ir->op2)->i & 31)); - } else { - Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m)); - return m | ARMF_RSH(sh, s); - } - } else if (ir->o == IR_ADD && ir->op1 == ir->op2) { - Reg m = ra_alloc1(as, ir->op1, allow); - return m | ARMF_SH(ARMSH_LSL, 1); - } - } - return ra_allocref(as, ref, allow); -} - -/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */ -static IRRef asm_fuselsl2(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL && - irref_isk(ir->op2) && IR(ir->op2)->i == 2) - return ir->op1; - return 0; /* No fusion. */ -} - -/* Fuse XLOAD/XSTORE reference into load/store operand. */ -static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref, - RegSet allow, int32_t ofs) -{ - IRIns *ir = IR(ref); - Reg base; - if (ra_noreg(ir->r) && canfuse(as, ir)) { - int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 : - (ai & 0x04000000) ? 
4096 : 256; - if (ir->o == IR_ADD) { - int32_t ofs2; - if (irref_isk(ir->op2) && - (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim && - (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) { - ofs = ofs2; - ref = ir->op1; - } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) { - IRRef lref = ir->op1, rref = ir->op2; - Reg rn, rm; - if ((ai & 0x04000000)) { - IRRef sref = asm_fuselsl2(as, rref); - if (sref) { - rref = sref; - ai |= ARMF_SH(ARMSH_LSL, 2); - } else if ((sref = asm_fuselsl2(as, lref)) != 0) { - lref = rref; - rref = sref; - ai |= ARMF_SH(ARMSH_LSL, 2); - } - } - rn = ra_alloc1(as, lref, allow); - rm = ra_alloc1(as, rref, rset_exclude(allow, rn)); - if ((ai & 0x04000000)) ai |= ARMI_LS_R; - emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm); - return; - } - } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) { - lua_assert(ofs == 0); - ofs = (int32_t)sizeof(GCstr); - if (irref_isk(ir->op2)) { - ofs += IR(ir->op2)->i; - ref = ir->op1; - } else if (irref_isk(ir->op1)) { - ofs += IR(ir->op1)->i; - ref = ir->op2; - } else { - /* NYI: Fuse ADD with constant. */ - Reg rn = ra_alloc1(as, ir->op1, allow); - uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn)); - if ((ai & 0x04000000)) - emit_lso(as, ai, rd, rd, ofs); - else - emit_lsox(as, ai, rd, rd, ofs); - emit_dn(as, ARMI_ADD^m, rd, rn); - return; - } - if (ofs <= -lim || ofs >= lim) { - Reg rn = ra_alloc1(as, ref, allow); - Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn)); - if ((ai & 0x04000000)) ai |= ARMI_LS_R; - emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm); - return; - } - } - } - base = ra_alloc1(as, ref, allow); -#if !LJ_SOFTFP - if ((ai & 0x08000000)) - emit_vlso(as, ai, rd, base, ofs); - else -#endif - if ((ai & 0x04000000)) - emit_lso(as, ai, rd, base, ofs); - else - emit_lsox(as, ai, rd, base, ofs); -} - -#if !LJ_SOFTFP -/* Fuse to multiply-add/sub instruction. */ -static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air) -{ - IRRef lref = ir->op1, rref = ir->op2; - IRIns *irm; - if (lref != rref && - ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) && - ra_noreg(irm->r)) || - (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) && - (rref = lref, ai = air, ra_noreg(irm->r))))) { - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg add = ra_hintalloc(as, rref, dest, RSET_FPR); - Reg right, left = ra_alloc2(as, irm, - rset_exclude(rset_exclude(RSET_FPR, dest), add)); - right = (left >> 8); left &= 255; - emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15)); - if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15)); - return 1; - } - return 0; -} -#endif - -/* -- Calls --------------------------------------------------------------- */ - -/* Generate a call to a C function. */ -static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) -{ - uint32_t n, nargs = CCI_XNARGS(ci); - int32_t ofs = 0; -#if LJ_SOFTFP - Reg gpr = REGARG_FIRSTGPR; -#else - Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0; -#endif - if ((void *)ci->func) - emit_call(as, (void *)ci->func); -#if !LJ_SOFTFP - for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++) - as->cost[gpr] = REGCOST(~0u, ASMREF_L); - gpr = REGARG_FIRSTGPR; -#endif - for (n = 0; n < nargs; n++) { /* Setup args. 
*/ - IRRef ref = args[n]; - IRIns *ir = IR(ref); -#if !LJ_SOFTFP - if (ref && irt_isfp(ir->t)) { - RegSet of = as->freeset; - Reg src; - if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) { - if (irt_isnum(ir->t)) { - if (fpr <= REGARG_LASTFPR) { - ra_leftov(as, fpr, ref); - fpr++; - continue; - } - } else if (fprodd) { /* Ick. */ - src = ra_alloc1(as, ref, RSET_FPR); - emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000); - fprodd = 0; - continue; - } else if (fpr <= REGARG_LASTFPR) { - ra_leftov(as, fpr, ref); - fprodd = fpr++; - continue; - } - /* Workaround to protect argument GPRs from being used for remat. */ - as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); - src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */ - as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); - fprodd = 0; - goto stackfp; - } - /* Workaround to protect argument GPRs from being used for remat. */ - as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); - src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */ - as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); - if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u; - if (gpr <= REGARG_LASTGPR) { - lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ - if (irt_isnum(ir->t)) { - lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */ - emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15)); - gpr += 2; - } else { - emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15)); - gpr++; - } - } else { - stackfp: - if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; - emit_spstore(as, ir, src, ofs); - ofs += irt_isnum(ir->t) ? 8 : 4; - } - } else -#endif - { - if (gpr <= REGARG_LASTGPR) { - lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ - if (ref) ra_leftov(as, gpr, ref); - gpr++; - } else { - if (ref) { - Reg r = ra_alloc1(as, ref, RSET_GPR); - emit_spstore(as, ir, r, ofs); - } - ofs += 4; - } - } - } -} - -/* Setup result reg/sp for call. Evict scratch regs. */ -static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - RegSet drop = RSET_SCRATCH; - int hiop = ((ir+1)->o == IR_HIOP); - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - if (hiop && ra_hasreg((ir+1)->r)) - rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ - ra_evictset(as, drop); /* Evictions must be performed first. */ - if (ra_used(ir)) { - lua_assert(!irt_ispri(ir->t)); - if (!LJ_SOFTFP && irt_isfp(ir->t)) { - if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) { - Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); - if (irt_isnum(ir->t)) - emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest); - else - emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest); - } else { - ra_destreg(as, ir, RID_FPRET); - } - } else if (hiop) { - ra_destpair(as, ir); - } else { - ra_destreg(as, ir, RID_RET); - } - } - UNUSED(ci); -} - -static void asm_callx(ASMState *as, IRIns *ir) -{ - IRRef args[CCI_NARGS_MAX*2]; - CCallInfo ci; - IRRef func; - IRIns *irf; - ci.flags = asm_callx_flags(as, ir); - asm_collectargs(as, ir, &ci, args); - asm_setupresult(as, ir, &ci); - func = ir->op2; irf = IR(func); - if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } - if (irref_isk(func)) { /* Call to constant address. */ - ci.func = (ASMFunction)(void *)(irf->i); - } else { /* Need a non-argument register for indirect calls. 
*/ - Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1)); - emit_m(as, ARMI_BLXr, freg); - ci.func = (ASMFunction)(void *)0; - } - asm_gencall(as, &ci, args); -} - -/* -- Returns ------------------------------------------------------------- */ - -/* Return to lower frame. Guard that it goes to the right spot. */ -static void asm_retf(ASMState *as, IRIns *ir) -{ - Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); - void *pc = ir_kptr(IR(ir->op2)); - int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); - as->topslot -= (BCReg)delta; - if ((int32_t)as->topslot < 0) as->topslot = 0; - irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ - /* Need to force a spill on REF_BASE now to update the stack slot. */ - emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE))); - emit_setgl(as, base, jit_base); - emit_addptr(as, base, -8*delta); - asm_guardcc(as, CC_NE); - emit_nm(as, ARMI_CMP, RID_TMP, - ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); - emit_lso(as, ARMI_LDR, RID_TMP, base, -4); -} - -/* -- Type conversions ---------------------------------------------------- */ - -#if !LJ_SOFTFP -static void asm_tointg(ASMState *as, IRIns *ir, Reg left) -{ - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); - Reg dest = ra_dest(as, ir, RSET_GPR); - asm_guardcc(as, CC_NE); - emit_d(as, ARMI_VMRS, 0); - emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15)); - emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15)); - emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); - emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15)); -} - -static void asm_tobit(ASMState *as, IRIns *ir) -{ - RegSet allow = RSET_FPR; - Reg left = ra_alloc1(as, ir->op1, allow); - Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); - Reg tmp = ra_scratch(as, rset_clear(allow, right)); - Reg dest = ra_dest(as, ir, RSET_GPR); - emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); - emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15)); -} -#else -#define asm_tobit(as, ir) lua_assert(0) -#endif - -static void asm_conv(ASMState *as, IRIns *ir) -{ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); -#if !LJ_SOFTFP - int stfp = (st == IRT_NUM || st == IRT_FLOAT); -#endif - IRRef lref = ir->op1; - /* 64 bit integer conversions are handled by SPLIT. */ - lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64)); -#if LJ_SOFTFP - /* FP conversions are handled by SPLIT. */ - lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); - /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ -#else - lua_assert(irt_type(ir->t) != st); - if (irt_isfp(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_FPR); - if (stfp) { /* FP to FP conversion. */ - emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32, - (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15)); - } else { /* Integer to FP conversion. */ - Reg left = ra_alloc1(as, lref, RSET_GPR); - ARMIns ai = irt_isfloat(ir->t) ? - (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) : - (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32); - emit_dm(as, ai, (dest & 15), (dest & 15)); - emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15)); - } - } else if (stfp) { /* FP to integer conversion. */ - if (irt_isguard(ir->t)) { - /* Checked conversions are only supported from number to int. 
*/ - lua_assert(irt_isint(ir->t) && st == IRT_NUM); - asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); - } else { - Reg left = ra_alloc1(as, lref, RSET_FPR); - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); - Reg dest = ra_dest(as, ir, RSET_GPR); - ARMIns ai; - emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); - ai = irt_isint(ir->t) ? - (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) : - (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32); - emit_dm(as, ai, (tmp & 15), (left & 15)); - } - } else -#endif - { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ - Reg left = ra_alloc1(as, lref, RSET_GPR); - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); - if ((as->flags & JIT_F_ARMV6)) { - ARMIns ai = st == IRT_I8 ? ARMI_SXTB : - st == IRT_U8 ? ARMI_UXTB : - st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH; - emit_dm(as, ai, dest, left); - } else if (st == IRT_U8) { - emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left); - } else { - uint32_t shift = st == IRT_I8 ? 24 : 16; - ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR; - emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP); - emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left); - } - } else { /* Handle 32/32 bit no-op (cast). */ - ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ - } - } -} - -static void asm_strto(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; - IRRef args[2]; - Reg rlo = 0, rhi = 0, tmp; - int destused = ra_used(ir); - int32_t ofs = 0; - ra_evictset(as, RSET_SCRATCH); -#if LJ_SOFTFP - if (destused) { - if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) && - (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) { - int i; - for (i = 0; i < 2; i++) { - Reg r = (ir+i)->r; - if (ra_hasreg(r)) { - ra_free(as, r); - ra_modified(as, r); - emit_spload(as, ir+i, r, sps_scale((ir+i)->s)); - } - } - ofs = sps_scale(ir->s); - destused = 0; - } else { - rhi = ra_dest(as, ir+1, RSET_GPR); - rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi)); - } - } - asm_guardcc(as, CC_EQ); - if (destused) { - emit_lso(as, ARMI_LDR, rhi, RID_SP, 4); - emit_lso(as, ARMI_LDR, rlo, RID_SP, 0); - } -#else - UNUSED(rhi); - if (destused) { - if (ra_hasspill(ir->s)) { - ofs = sps_scale(ir->s); - destused = 0; - if (ra_hasreg(ir->r)) { - ra_free(as, ir->r); - ra_modified(as, ir->r); - emit_spload(as, ir, ir->r, ofs); - } - } else { - rlo = ra_dest(as, ir, RSET_FPR); - } - } - asm_guardcc(as, CC_EQ); - if (destused) - emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0); -#endif - emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */ - args[0] = ir->op1; /* GCstr *str */ - args[1] = ASMREF_TMP1; /* TValue *n */ - asm_gencall(as, ci, args); - tmp = ra_releasetmp(as, ASMREF_TMP1); - if (ofs == 0) - emit_dm(as, ARMI_MOV, tmp, RID_SP); - else - emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR); -} - -/* -- Memory references --------------------------------------------------- */ - -/* Get pointer to TValue. */ -static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) -{ - IRIns *ir = IR(ref); - if (irt_isnum(ir->t)) { - if (irref_isk(ref)) { - /* Use the number constant itself as a TValue. */ - ra_allockreg(as, i32ptr(ir_knum(ir)), dest); - } else { -#if LJ_SOFTFP - lua_assert(0); -#else - /* Otherwise force a spill and use the spill slot. */ - emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR); -#endif - } - } else { - /* Otherwise use [sp] and [sp+4] to hold the TValue. 
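** A minimal sketch of the non-constant case handled here, assuming the
** 32-bit TValue layout used below (value word at +0, type tag at +4):
**   if (!pri) *(int32_t *)(sp+0) = value;
**   *(int32_t *)(sp+4) = itype;      /* or the HIOP word for LJ_SOFTFP */
**   dest = sp;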
*/ - RegSet allow = rset_exclude(RSET_GPR, dest); - Reg type; - emit_dm(as, ARMI_MOV, dest, RID_SP); - if (!irt_ispri(ir->t)) { - Reg src = ra_alloc1(as, ref, allow); - emit_lso(as, ARMI_STR, src, RID_SP, 0); - } - if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) - type = ra_alloc1(as, ref+1, allow); - else - type = ra_allock(as, irt_toitype(ir->t), allow); - emit_lso(as, ARMI_STR, type, RID_SP, 4); - } -} - -static void asm_aref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg idx, base; - if (irref_isk(ir->op2)) { - IRRef tab = IR(ir->op1)->op1; - int32_t ofs = asm_fuseabase(as, tab); - IRRef refa = ofs ? tab : ir->op1; - uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i); - if (k) { - base = ra_alloc1(as, refa, RSET_GPR); - emit_dn(as, ARMI_ADD^k, dest, base); - return; - } - } - base = ra_alloc1(as, ir->op1, RSET_GPR); - idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); - emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx); -} - -/* Inlined hash lookup. Specialized for key type and for const keys. -** The equivalent C code is: -** Node *n = hashkey(t, key); -** do { -** if (lj_obj_equal(&n->key, key)) return &n->val; -** } while ((n = nextnode(n))); -** return niltv(L); -*/ -static void asm_href(ASMState *as, IRIns *ir, IROp merge) -{ - RegSet allow = RSET_GPR; - int destused = ra_used(ir); - Reg dest = ra_dest(as, ir, allow); - Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); - Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP; - IRRef refkey = ir->op2; - IRIns *irkey = IR(refkey); - IRType1 kt = irkey->t; - int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt)); - uint32_t khash; - MCLabel l_end, l_loop; - rset_clear(allow, tab); - if (!irref_isk(refkey) || irt_isstr(kt)) { -#if LJ_SOFTFP - key = ra_alloc1(as, refkey, allow); - rset_clear(allow, key); - if (irkey[1].o == IR_HIOP) { - if (ra_hasreg((irkey+1)->r)) { - keynumhi = (irkey+1)->r; - keyhi = RID_TMP; - ra_noweak(as, keynumhi); - } else { - keyhi = keynumhi = ra_allocref(as, refkey+1, allow); - } - rset_clear(allow, keynumhi); - khi = 0; - } -#else - if (irt_isnum(kt)) { - key = ra_scratch(as, allow); - rset_clear(allow, key); - keyhi = keynumhi = ra_scratch(as, allow); - rset_clear(allow, keyhi); - khi = 0; - } else { - key = ra_alloc1(as, refkey, allow); - rset_clear(allow, key); - } -#endif - } else if (irt_isnum(kt)) { - int32_t val = (int32_t)ir_knum(irkey)->u32.lo; - k = emit_isk12(ARMI_CMP, val); - if (!k) { - key = ra_allock(as, val, allow); - rset_clear(allow, key); - } - val = (int32_t)ir_knum(irkey)->u32.hi; - khi = emit_isk12(ARMI_CMP, val); - if (!khi) { - keyhi = ra_allock(as, val, allow); - rset_clear(allow, keyhi); - } - } else if (!irt_ispri(kt)) { - k = emit_isk12(ARMI_CMP, irkey->i); - if (!k) { - key = ra_alloc1(as, refkey, allow); - rset_clear(allow, key); - } - } - if (!irt_ispri(kt)) - tmp = ra_scratchpair(as, allow); - - /* Key not found in chain: jump to exit (if merged) or load niltv. */ - l_end = emit_label(as); - as->invmcp = NULL; - if (merge == IR_NE) - asm_guardcc(as, CC_AL); - else if (destused) - emit_loada(as, dest, niltvg(J2G(as->J))); - - /* Follow hash chain until the end. */ - l_loop = --as->mcp; - emit_n(as, ARMI_CMP|ARMI_K12|0, dest); - emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next)); - - /* Type and value comparison. 
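** For non-primitive keys this compiles roughly to (execution order):
**   ldrd   tmp, tmp+1, [dest, #offsetof(Node, key)]
**   cmp    tmp+1, keyhi              type word (or hiword of a number)
**   cmpeq  tmp, key                  value word, only if the type matched
** Primitive keys compare just the type word loaded from key.it.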
*/ - if (merge == IR_EQ) - asm_guardcc(as, CC_EQ); - else - emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); - if (!irt_ispri(kt)) { - emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key); - emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi); - emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key)); - } else { - emit_n(as, ARMI_CMP^khi, tmp); - emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it)); - } - *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu); - - /* Load main position relative to tab->node into dest. */ - khash = irref_isk(refkey) ? ir_khash(irkey) : 1; - if (khash == 0) { - emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); - } else { - emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp); - emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp); - if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */ - emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP); - emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); - emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash)); - emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); - } else if (irref_isk(refkey)) { - emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash, - rset_exclude(rset_exclude(RSET_GPR, tab), dest)); - emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); - emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); - } else { /* Must match with hash*() in lj_tab.c. */ - if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */ - if (keyhi == RID_TMP) - emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi); - emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi); - } - emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP); - emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1); - emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); - emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)), - tmp, tmp+1, tmp); - emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); - emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp); - if (ra_hasreg(keynumhi)) { - emit_dnm(as, ARMI_EOR, tmp+1, tmp, key); - emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */ - emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi); -#if !LJ_SOFTFP - emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi, - (ra_alloc1(as, refkey, RSET_FPR) & 15)); -#endif - } else { - emit_dnm(as, ARMI_EOR, tmp+1, tmp, key); - emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS, - rset_exclude(rset_exclude(RSET_GPR, tab), key)); - } - } - } -} - -static void asm_hrefk(ASMState *as, IRIns *ir) -{ - IRIns *kslot = IR(ir->op2); - IRIns *irkey = IR(kslot->op1); - int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); - int32_t kofs = ofs + (int32_t)offsetof(Node, key); - Reg dest = (ra_used(ir) || ofs > 4095) ? 
ra_dest(as, ir, RSET_GPR) : RID_NONE; - Reg node = ra_alloc1(as, ir->op1, RSET_GPR); - Reg key = RID_NONE, type = RID_TMP, idx = node; - RegSet allow = rset_exclude(RSET_GPR, node); - lua_assert(ofs % sizeof(Node) == 0); - if (ofs > 4095) { - idx = dest; - rset_clear(allow, dest); - kofs = (int32_t)offsetof(Node, key); - } else if (ra_hasreg(dest)) { - emit_opk(as, ARMI_ADD, dest, node, ofs, allow); - } - asm_guardcc(as, CC_NE); - if (!irt_ispri(irkey->t)) { - RegSet even = (as->freeset & allow); - even = even & (even >> 1) & RSET_GPREVEN; - if (even) { - key = ra_scratch(as, even); - if (rset_test(as->freeset, key+1)) { - type = key+1; - ra_modified(as, type); - } - } else { - key = ra_scratch(as, allow); - } - rset_clear(allow, key); - } - rset_clear(allow, type); - if (irt_isnum(irkey->t)) { - emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type, - (int32_t)ir_knum(irkey)->u32.hi, allow); - emit_opk(as, ARMI_CMP, 0, key, - (int32_t)ir_knum(irkey)->u32.lo, allow); - } else { - if (ra_hasreg(key)) - emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow); - emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type); - } - emit_lso(as, ARMI_LDR, type, idx, kofs+4); - if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs); - if (ofs > 4095) - emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR); -} - -static void asm_uref(ASMState *as, IRIns *ir) -{ - /* NYI: Check that UREFO is still open and not aliasing a slot. */ - Reg dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; - emit_lsptr(as, ARMI_LDR, dest, v); - } else { - Reg uv = ra_scratch(as, RSET_GPR); - Reg func = ra_alloc1(as, ir->op1, RSET_GPR); - if (ir->o == IR_UREFC) { - asm_guardcc(as, CC_NE); - emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP); - emit_opk(as, ARMI_ADD, dest, uv, - (int32_t)offsetof(GCupval, tv), RSET_GPR); - emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); - } else { - emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v)); - } - emit_lso(as, ARMI_LDR, uv, func, - (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); - } -} - -static void asm_fref(ASMState *as, IRIns *ir) -{ - UNUSED(as); UNUSED(ir); - lua_assert(!ra_used(ir)); -} - -static void asm_strref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - IRRef ref = ir->op2, refk = ir->op1; - Reg r; - if (irref_isk(ref)) { - IRRef tmp = refk; refk = ref; ref = tmp; - } else if (!irref_isk(refk)) { - uint32_t k, m = ARMI_K12|sizeof(GCstr); - Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); - IRIns *irr = IR(ir->op2); - if (ra_hasreg(irr->r)) { - ra_noweak(as, irr->r); - right = irr->r; - } else if (mayfuse(as, irr->op2) && - irr->o == IR_ADD && irref_isk(irr->op2) && - (k = emit_isk12(ARMI_ADD, - (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) { - m = k; - right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); - } else { - right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); - } - emit_dn(as, ARMI_ADD^m, dest, dest); - emit_dnm(as, ARMI_ADD, dest, left, right); - return; - } - r = ra_alloc1(as, ref, RSET_GPR); - emit_opk(as, ARMI_ADD, dest, r, - sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r)); -} - -/* -- Loads and stores ---------------------------------------------------- */ - -static ARMIns asm_fxloadins(IRIns *ir) -{ - switch (irt_type(ir->t)) { - case IRT_I8: return ARMI_LDRSB; - case IRT_U8: return ARMI_LDRB; - case IRT_I16: return ARMI_LDRSH; - case IRT_U16: return 
ARMI_LDRH; - case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D; - case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; - default: return ARMI_LDR; - } -} - -static ARMIns asm_fxstoreins(IRIns *ir) -{ - switch (irt_type(ir->t)) { - case IRT_I8: case IRT_U8: return ARMI_STRB; - case IRT_I16: case IRT_U16: return ARMI_STRH; - case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D; - case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; - default: return ARMI_STR; - } -} - -static void asm_fload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); - ARMIns ai = asm_fxloadins(ir); - int32_t ofs; - if (ir->op2 == IRFL_TAB_ARRAY) { - ofs = asm_fuseabase(as, ir->op1); - if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ - emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx); - return; - } - } - ofs = field_ofs[ir->op2]; - if ((ai & 0x04000000)) - emit_lso(as, ai, dest, idx, ofs); - else - emit_lsox(as, ai, dest, idx, ofs); -} - -static void asm_fstore(ASMState *as, IRIns *ir) -{ - if (ir->r != RID_SINK) { - Reg src = ra_alloc1(as, ir->op2, RSET_GPR); - IRIns *irf = IR(ir->op1); - Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); - int32_t ofs = field_ofs[irf->op2]; - ARMIns ai = asm_fxstoreins(ir); - if ((ai & 0x04000000)) - emit_lso(as, ai, src, idx, ofs); - else - emit_lsox(as, ai, src, idx, ofs); - } -} - -static void asm_xload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, - (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); - lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); - asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); -} - -static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) -{ - if (ir->r != RID_SINK) { - Reg src = ra_alloc1(as, ir->op2, - (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); - asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, - rset_exclude(RSET_GPR, src), ofs); - } -} - -#define asm_xstore(as, ir) asm_xstore_(as, ir, 0) - -static void asm_ahuvload(ASMState *as, IRIns *ir) -{ - int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); - IRType t = hiop ? IRT_NUM : irt_type(ir->t); - Reg dest = RID_NONE, type = RID_NONE, idx; - RegSet allow = RSET_GPR; - int32_t ofs = 0; - if (hiop && ra_used(ir+1)) { - type = ra_dest(as, ir+1, allow); - rset_clear(allow, type); - } - if (ra_used(ir)) { - lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || - irt_isint(ir->t) || irt_isaddr(ir->t)); - dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); - rset_clear(allow, dest); - } - idx = asm_fuseahuref(as, ir->op1, &ofs, allow, - (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096); - if (!hiop || type == RID_NONE) { - rset_clear(allow, idx); - if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 && - rset_test((as->freeset & allow), dest+1)) { - type = dest+1; - ra_modified(as, type); - } else { - type = RID_TMP; - } - } - asm_guardcc(as, t == IRT_NUM ? 
CC_HS : CC_NE); - emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type); - if (ra_hasreg(dest)) { -#if !LJ_SOFTFP - if (t == IRT_NUM) - emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs); - else -#endif - emit_lso(as, ARMI_LDR, dest, idx, ofs); - } - emit_lso(as, ARMI_LDR, type, idx, ofs+4); -} - -static void asm_ahustore(ASMState *as, IRIns *ir) -{ - if (ir->r != RID_SINK) { - RegSet allow = RSET_GPR; - Reg idx, src = RID_NONE, type = RID_NONE; - int32_t ofs = 0; -#if !LJ_SOFTFP - if (irt_isnum(ir->t)) { - src = ra_alloc1(as, ir->op2, RSET_FPR); - idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024); - emit_vlso(as, ARMI_VSTR_D, src, idx, ofs); - } else -#endif - { - int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); - if (!irt_ispri(ir->t)) { - src = ra_alloc1(as, ir->op2, allow); - rset_clear(allow, src); - } - if (hiop) - type = ra_alloc1(as, (ir+1)->op2, allow); - else - type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); - idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096); - if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs); - emit_lso(as, ARMI_STR, type, idx, ofs+4); - } - } -} - -static void asm_sload(ASMState *as, IRIns *ir) -{ - int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); - int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); - IRType t = hiop ? IRT_NUM : irt_type(ir->t); - Reg dest = RID_NONE, type = RID_NONE, base; - RegSet allow = RSET_GPR; - lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ - lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); -#if LJ_SOFTFP - lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ - if (hiop && ra_used(ir+1)) { - type = ra_dest(as, ir+1, allow); - rset_clear(allow, type); - } -#else - if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) { - dest = ra_scratch(as, RSET_FPR); - asm_tointg(as, ir, dest); - t = IRT_NUM; /* Continue with a regular number type check. */ - } else -#endif - if (ra_used(ir)) { - Reg tmp = RID_NONE; - if ((ir->op2 & IRSLOAD_CONVERT)) - tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR); - lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || - irt_isint(ir->t) || irt_isaddr(ir->t)); - dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); - rset_clear(allow, dest); - base = ra_alloc1(as, REF_BASE, allow); - if ((ir->op2 & IRSLOAD_CONVERT)) { - if (t == IRT_INT) { - emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); - emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15)); - t = IRT_NUM; /* Check for original type. */ - } else { - emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15)); - emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15)); - t = IRT_INT; /* Check for original type. */ - } - dest = tmp; - } - goto dotypecheck; - } - base = ra_alloc1(as, REF_BASE, allow); -dotypecheck: - rset_clear(allow, base); - if ((ir->op2 & IRSLOAD_TYPECHECK)) { - if (ra_noreg(type)) { - if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 && - rset_test((as->freeset & allow), dest+1)) { - type = dest+1; - ra_modified(as, type); - } else { - type = RID_TMP; - } - } - asm_guardcc(as, t == IRT_NUM ? 
CC_HS : CC_NE); - emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type); - } - if (ra_hasreg(dest)) { -#if !LJ_SOFTFP - if (t == IRT_NUM) { - if (ofs < 1024) { - emit_vlso(as, ARMI_VLDR_D, dest, base, ofs); - } else { - if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4); - emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0); - emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow); - return; - } - } else -#endif - emit_lso(as, ARMI_LDR, dest, base, ofs); - } - if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4); -} - -/* -- Allocations --------------------------------------------------------- */ - -#if LJ_HASFFI -static void asm_cnew(ASMState *as, IRIns *ir) -{ - CTState *cts = ctype_ctsG(J2G(as->J)); - CTypeID id = (CTypeID)IR(ir->op1)->i; - CTSize sz; - CTInfo info = lj_ctype_info(cts, id, &sz); - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; - IRRef args[4]; - RegSet allow = (RSET_GPR & ~RSET_SCRATCH); - RegSet drop = RSET_SCRATCH; - lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); - - as->gcsteps++; - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - ra_evictset(as, drop); - if (ra_used(ir)) - ra_destreg(as, ir, RID_RET); /* GCcdata * */ - - /* Initialize immutable cdata object. */ - if (ir->o == IR_CNEWI) { - int32_t ofs = sizeof(GCcdata); - lua_assert(sz == 4 || sz == 8); - if (sz == 8) { - ofs += 4; ir++; - lua_assert(ir->o == IR_HIOP); - } - for (;;) { - Reg r = ra_alloc1(as, ir->op2, allow); - emit_lso(as, ARMI_STR, r, RID_RET, ofs); - rset_clear(allow, r); - if (ofs == sizeof(GCcdata)) break; - ofs -= 4; ir--; - } - } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ - ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* CTypeID id */ - args[2] = ir->op2; /* CTSize sz */ - args[3] = ASMREF_TMP1; /* CTSize align */ - asm_gencall(as, ci, args); - emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); - return; - } - - /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ - { - uint32_t k = emit_isk12(ARMI_MOV, id); - Reg r = k ? 
RID_R1 : ra_allock(as, id, allow); - emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct)); - emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid)); - emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP); - if (k) emit_d(as, ARMI_MOV^k, RID_R1); - } - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ASMREF_TMP1; /* MSize size */ - asm_gencall(as, ci, args); - ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), - ra_releasetmp(as, ASMREF_TMP1)); -} -#else -#define asm_cnew(as, ir) ((void)0) -#endif - -/* -- Write barriers ------------------------------------------------------ */ - -static void asm_tbar(ASMState *as, IRIns *ir) -{ - Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); - Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab)); - Reg gr = ra_allock(as, i32ptr(J2G(as->J)), - rset_exclude(rset_exclude(RSET_GPR, tab), link)); - Reg mark = RID_TMP; - MCLabel l_end = emit_label(as); - emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist)); - emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked)); - emit_lso(as, ARMI_STR, tab, gr, - (int32_t)offsetof(global_State, gc.grayagain)); - emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark); - emit_lso(as, ARMI_LDR, link, gr, - (int32_t)offsetof(global_State, gc.grayagain)); - emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); - emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark); - emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked)); -} - -static void asm_obar(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; - IRRef args[2]; - MCLabel l_end; - Reg obj, val, tmp; - /* No need for other object barriers (yet). */ - lua_assert(IR(ir->op1)->o == IR_UREFC); - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ir->op1; /* TValue *tv */ - asm_gencall(as, ci, args); - if ((l_end[-1] >> 28) == CC_AL) - l_end[-1] = ARMF_CC(l_end[-1], CC_NE); - else - emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); - ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1)); - obj = IR(ir->op1)->r; - tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); - emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp); - emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP); - val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); - emit_lso(as, ARMI_LDRB, tmp, obj, - (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); - emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked)); -} - -/* -- Arithmetic and logic operations ------------------------------------- */ - -#if !LJ_SOFTFP -static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai) -{ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15)); -} - -static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai) -{ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); - emit_dm(as, ai, (dest & 15), (left & 15)); -} - -static void asm_callround(ASMState *as, IRIns *ir, int id) -{ - /* The modified regs must match with the *.dasc implementation. */ - RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)| - RID2RSET(RID_R3)|RID2RSET(RID_R12); - RegSet of; - Reg dest, src; - ra_evictset(as, drop); - dest = ra_dest(as, ir, RSET_FPR); - emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15)); - emit_call(as, id == IRFPM_FLOOR ? 
(void *)lj_vm_floor_sf : - id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf : - (void *)lj_vm_trunc_sf); - /* Workaround to protect argument GPRs from being used for remat. */ - of = as->freeset; - as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1); - as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L); - src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */ - as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1)); - emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15)); -} - -static void asm_fpmath(ASMState *as, IRIns *ir) -{ - if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) - return; - if (ir->op2 <= IRFPM_TRUNC) - asm_callround(as, ir, ir->op2); - else if (ir->op2 == IRFPM_SQRT) - asm_fpunary(as, ir, ARMI_VSQRT_D); - else - asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); -} -#else -#define asm_fpmath(as, ir) lua_assert(0) -#endif - -static int asm_swapops(ASMState *as, IRRef lref, IRRef rref) -{ - IRIns *ir; - if (irref_isk(rref)) - return 0; /* Don't swap constants to the left. */ - if (irref_isk(lref)) - return 1; /* But swap constants to the right. */ - ir = IR(rref); - if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) || - (ir->o == IR_ADD && ir->op1 == ir->op2)) - return 0; /* Don't swap fusable operands to the left. */ - ir = IR(lref); - if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) || - (ir->o == IR_ADD && ir->op1 == ir->op2)) - return 1; /* But swap fusable operands to the right. */ - return 0; /* Otherwise don't swap. */ -} - -static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai) -{ - IRRef lref = ir->op1, rref = ir->op2; - Reg left, dest = ra_dest(as, ir, RSET_GPR); - uint32_t m; - if (asm_swapops(as, lref, rref)) { - IRRef tmp = lref; lref = rref; rref = tmp; - if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC) - ai ^= (ARMI_SUB^ARMI_RSB); - } - left = ra_hintalloc(as, lref, dest, RSET_GPR); - m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left)); - if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */ - asm_guardcc(as, CC_VS); - ai |= ARMI_S; - } - emit_dn(as, ai^m, dest, left); -} - -static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai) -{ - if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */ - as->flagmcp = NULL; - as->mcp++; - ai |= ARMI_S; - } - asm_intop(as, ir, ai); -} - -static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - emit_dn(as, ai|ARMI_K12|0, dest, left); -} - -/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */ -static void asm_intmul(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest)); - Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - Reg tmp = RID_NONE; - /* ARMv5 restriction: dest != left and dest_hi != left. */ - if (dest == left && left != right) { left = right; right = dest; } - if (irt_isguard(ir->t)) { /* IR_MULOV */ - if (!(as->flags & JIT_F_ARMV6) && dest == left) - tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left)); - asm_guardcc(as, CC_NE); - emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest); - emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left); - } else { - if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP; - emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left); - } - /* Only need this for the dest == left == right case. 
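** A rough sketch of that case: on pre-ARMv6 MUL requires Rd != Rm, so
** dest == left == right is routed through a temporary, roughly
** (execution order; the MOV is emitted last but runs first):
**   mov  tmp, right
**   mul  dest, tmp, right
** where tmp is RID_TMP or the scratch register picked above.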
*/ - if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right); -} - -static void asm_add(ASMState *as, IRIns *ir) -{ -#if !LJ_SOFTFP - if (irt_isnum(ir->t)) { - if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D)) - asm_fparith(as, ir, ARMI_VADD_D); - return; - } -#endif - asm_intop_s(as, ir, ARMI_ADD); -} - -static void asm_sub(ASMState *as, IRIns *ir) -{ -#if !LJ_SOFTFP - if (irt_isnum(ir->t)) { - if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D)) - asm_fparith(as, ir, ARMI_VSUB_D); - return; - } -#endif - asm_intop_s(as, ir, ARMI_SUB); -} - -static void asm_mul(ASMState *as, IRIns *ir) -{ -#if !LJ_SOFTFP - if (irt_isnum(ir->t)) { - asm_fparith(as, ir, ARMI_VMUL_D); - return; - } -#endif - asm_intmul(as, ir); -} - -#define asm_addov(as, ir) asm_add(as, ir) -#define asm_subov(as, ir) asm_sub(as, ir) -#define asm_mulov(as, ir) asm_mul(as, ir) - -#if LJ_SOFTFP -#define asm_div(as, ir) lua_assert(0) -#define asm_pow(as, ir) lua_assert(0) -#define asm_abs(as, ir) lua_assert(0) -#define asm_atan2(as, ir) lua_assert(0) -#define asm_ldexp(as, ir) lua_assert(0) -#else -#define asm_div(as, ir) asm_fparith(as, ir, ARMI_VDIV_D) -#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi) -#define asm_abs(as, ir) asm_fpunary(as, ir, ARMI_VABS_D) -#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) -#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) -#endif - -#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi) - -static void asm_neg(ASMState *as, IRIns *ir) -{ -#if !LJ_SOFTFP - if (irt_isnum(ir->t)) { - asm_fpunary(as, ir, ARMI_VNEG_D); - return; - } -#endif - asm_intneg(as, ir, ARMI_RSB); -} - -static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai) -{ - if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */ - uint32_t cc = (as->mcp[1] >> 28); - as->flagmcp = NULL; - if (cc <= CC_NE) { - as->mcp++; - ai |= ARMI_S; - } else if (cc == CC_GE) { - *++as->mcp ^= ((CC_GE^CC_PL) << 28); - ai |= ARMI_S; - } else if (cc == CC_LT) { - *++as->mcp ^= ((CC_LT^CC_MI) << 28); - ai |= ARMI_S; - } /* else: other conds don't work with bit ops. */ - } - if (ir->op2 == 0) { - Reg dest = ra_dest(as, ir, RSET_GPR); - uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR); - emit_d(as, ai^m, dest); - } else { - /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */ - asm_intop(as, ir, ai); - } -} - -#define asm_bnot(as, ir) asm_bitop(as, ir, ARMI_MVN) - -static void asm_bswap(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - if ((as->flags & JIT_F_ARMV6)) { - emit_dm(as, ARMI_REV, dest, left); - } else { - Reg tmp2 = dest; - if (tmp2 == left) - tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left)); - emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP); - emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left); - emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP); - emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left); - } -} - -#define asm_band(as, ir) asm_bitop(as, ir, ARMI_AND) -#define asm_bor(as, ir) asm_bitop(as, ir, ARMI_ORR) -#define asm_bxor(as, ir) asm_bitop(as, ir, ARMI_EOR) - -static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh) -{ - if (irref_isk(ir->op2)) { /* Constant shifts. */ - /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */ - /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. 
*/ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - int32_t shift = (IR(ir->op2)->i & 31); - emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left); - } -} - -#define asm_bshl(as, ir) asm_bitshift(as, ir, ARMSH_LSL) -#define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR) -#define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR) -#define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR) -#define asm_brol(as, ir) lua_assert(0) - -static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) -{ - uint32_t kcmp = 0, kmov = 0; - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - Reg right = 0; - if (irref_isk(ir->op2)) { - kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i); - if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i); - } - if (!kmov) { - kcmp = 0; - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - } - if (kmov || dest != right) { - emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right); - cc ^= 1; /* Must use opposite conditions for paired moves. */ - } else { - cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */ - } - if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left); - emit_nm(as, ARMI_CMP^kcmp, left, right); -} - -#if LJ_SOFTFP -static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; - RegSet drop = RSET_SCRATCH; - Reg r; - IRRef args[4]; - args[0] = ir->op1; args[1] = (ir+1)->op1; - args[2] = ir->op2; args[3] = (ir+1)->op2; - /* __aeabi_cdcmple preserves r0-r3. */ - if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); - if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r); - if (!rset_test(as->freeset, RID_R2) && - regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2); - if (!rset_test(as->freeset, RID_R3) && - regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3); - ra_evictset(as, drop); - ra_destpair(as, ir); - emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3); - emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2); - emit_call(as, (void *)ci->func); - for (r = RID_R0; r <= RID_R3; r++) - ra_leftov(as, r, args[r-RID_R0]); -} -#else -static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc) -{ - Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = ((left >> 8) & 15); left &= 15; - if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left); - if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right); - emit_d(as, ARMI_VMRS, 0); - emit_dm(as, ARMI_VCMP_D, left, right); -} -#endif - -static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc) -{ -#if LJ_SOFTFP - UNUSED(fcc); -#else - if (irt_isnum(ir->t)) - asm_fpmin_max(as, ir, fcc); - else -#endif - asm_intmin_max(as, ir, cc); -} - -#define asm_min(as, ir) asm_min_max(as, ir, CC_GT, CC_HI) -#define asm_max(as, ir) asm_min_max(as, ir, CC_LT, CC_LO) - -/* -- Comparisons --------------------------------------------------------- */ - -/* Map of comparisons to flags. ORDER IR. 
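** Each entry of the table below packs two ARM condition codes: the low
** nibble is the integer condition, the high nibble the FP condition,
** consumed roughly as
**   cc  = asm_compmap[ir->o] & 15;     /* asm_intcomp() */
**   fcc = asm_compmap[ir->o] >> 4;     /* asm_fpcomp()/asm_sfpcomp() */
** Both are negated conditions, since they guard the side exit that is
** taken when the comparison fails.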
*/ -static const uint8_t asm_compmap[IR_ABC+1] = { - /* op FP swp int cc FP cc */ - /* LT */ CC_GE + (CC_HS << 4), - /* GE x */ CC_LT + (CC_HI << 4), - /* LE */ CC_GT + (CC_HI << 4), - /* GT x */ CC_LE + (CC_HS << 4), - /* ULT x */ CC_HS + (CC_LS << 4), - /* UGE */ CC_LO + (CC_LO << 4), - /* ULE x */ CC_HI + (CC_LO << 4), - /* UGT */ CC_LS + (CC_LS << 4), - /* EQ */ CC_NE + (CC_NE << 4), - /* NE */ CC_EQ + (CC_EQ << 4), - /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */ -}; - -#if LJ_SOFTFP -/* FP comparisons. */ -static void asm_sfpcomp(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; - RegSet drop = RSET_SCRATCH; - Reg r; - IRRef args[4]; - int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1); - args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1; - args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2; - /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */ - for (r = RID_R0; r <= RID_R3; r++) - if (!rset_test(as->freeset, r) && - regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r); - ra_evictset(as, drop); - asm_guardcc(as, (asm_compmap[ir->o] >> 4)); - emit_call(as, (void *)ci->func); - for (r = RID_R0; r <= RID_R3; r++) - ra_leftov(as, r, args[r-RID_R0]); -} -#else -/* FP comparisons. */ -static void asm_fpcomp(ASMState *as, IRIns *ir) -{ - Reg left, right; - ARMIns ai; - int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1); - if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) { - left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15); - right = 0; - ai = ARMI_VCMPZ_D; - } else { - left = ra_alloc2(as, ir, RSET_FPR); - if (swp) { - right = (left & 15); left = ((left >> 8) & 15); - } else { - right = ((left >> 8) & 15); left &= 15; - } - ai = ARMI_VCMP_D; - } - asm_guardcc(as, (asm_compmap[ir->o] >> 4)); - emit_d(as, ARMI_VMRS, 0); - emit_dm(as, ai, left, right); -} -#endif - -/* Integer comparisons. */ -static void asm_intcomp(ASMState *as, IRIns *ir) -{ - ARMCC cc = (asm_compmap[ir->o] & 15); - IRRef lref = ir->op1, rref = ir->op2; - Reg left; - uint32_t m; - int cmpprev0 = 0; - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); - if (asm_swapops(as, lref, rref)) { - Reg tmp = lref; lref = rref; rref = tmp; - if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ - else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */ - } - if (irref_isk(rref) && IR(rref)->i == 0) { - IRIns *irl = IR(lref); - cmpprev0 = (irl+1 == ir); - /* Combine comp(BAND(left, right), 0) into tst left, right. */ - if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) { - IRRef blref = irl->op1, brref = irl->op2; - uint32_t m2 = 0; - Reg bleft; - if (asm_swapops(as, blref, brref)) { - Reg tmp = blref; blref = brref; brref = tmp; - } - if (irref_isk(brref)) { - m2 = emit_isk12(ARMI_AND, IR(brref)->i); - if ((m2 & (ARMI_AND^ARMI_BIC))) - goto notst; /* Not beneficial if we miss a constant operand. */ - } - if (cc == CC_GE) cc = CC_PL; - else if (cc == CC_LT) cc = CC_MI; - else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */ - bleft = ra_alloc1(as, blref, RSET_GPR); - if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft)); - asm_guardcc(as, cc); - emit_n(as, ARMI_TST^m2, bleft); - return; - } - } -notst: - left = ra_alloc1(as, lref, RSET_GPR); - m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left)); - asm_guardcc(as, cc); - emit_n(as, ARMI_CMP^m, left); - /* Signed comparison with zero and referencing previous ins? 
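** If so, remember the position in as->flagmcp: asm_intop_s() and
** asm_bitop() may later drop this 'cmp left, #0' by setting the S flag
** on the preceding ALU instruction instead.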
*/ - if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE)) - as->flagmcp = as->mcp; /* Allow elimination of the compare. */ -} - -static void asm_comp(ASMState *as, IRIns *ir) -{ -#if !LJ_SOFTFP - if (irt_isnum(ir->t)) - asm_fpcomp(as, ir); - else -#endif - asm_intcomp(as, ir); -} - -#define asm_equal(as, ir) asm_comp(as, ir) - -#if LJ_HASFFI -/* 64 bit integer comparisons. */ -static void asm_int64comp(ASMState *as, IRIns *ir) -{ - int signedcomp = (ir->o <= IR_GT); - ARMCC cclo, cchi; - Reg leftlo, lefthi; - uint32_t mlo, mhi; - RegSet allow = RSET_GPR, oldfree; - - /* Always use unsigned comparison for loword. */ - cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15; - leftlo = ra_alloc1(as, ir->op1, allow); - oldfree = as->freeset; - mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo)); - allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */ - - /* Use signed or unsigned comparison for hiword. */ - cchi = asm_compmap[ir->o] & 15; - lefthi = ra_alloc1(as, (ir+1)->op1, allow); - mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi)); - - /* All register allocations must be performed _before_ this point. */ - if (signedcomp) { - MCLabel l_around = emit_label(as); - asm_guardcc(as, cclo); - emit_n(as, ARMI_CMP^mlo, leftlo); - emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around); - if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */ - asm_guardcc(as, cchi); - } else { - asm_guardcc(as, cclo); - emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo); - } - emit_n(as, ARMI_CMP^mhi, lefthi); -} -#endif - -/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ - -/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ -static void asm_hiop(ASMState *as, IRIns *ir) -{ -#if LJ_HASFFI || LJ_SOFTFP - /* HIOP is marked as a store because it needs its own DCE logic. */ - int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ - if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; - if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */ - as->curins--; /* Always skip the loword comparison. */ -#if LJ_SOFTFP - if (!irt_isint(ir->t)) { - asm_sfpcomp(as, ir-1); - return; - } -#endif -#if LJ_HASFFI - asm_int64comp(as, ir-1); -#endif - return; -#if LJ_SOFTFP - } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) { - as->curins--; /* Always skip the loword min/max. */ - if (uselo || usehi) - asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO); - return; -#elif LJ_HASFFI - } else if ((ir-1)->o == IR_CONV) { - as->curins--; /* Always skip the CONV. */ - if (usehi || uselo) - asm_conv64(as, ir); - return; -#endif - } else if ((ir-1)->o == IR_XSTORE) { - if ((ir-1)->r != RID_SINK) - asm_xstore_(as, ir, 4); - return; - } - if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ - switch ((ir-1)->o) { -#if LJ_HASFFI - case IR_ADD: - as->curins--; - asm_intop(as, ir, ARMI_ADC); - asm_intop(as, ir-1, ARMI_ADD|ARMI_S); - break; - case IR_SUB: - as->curins--; - asm_intop(as, ir, ARMI_SBC); - asm_intop(as, ir-1, ARMI_SUB|ARMI_S); - break; - case IR_NEG: - as->curins--; - asm_intneg(as, ir, ARMI_RSC); - asm_intneg(as, ir-1, ARMI_RSB|ARMI_S); - break; -#endif -#if LJ_SOFTFP - case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: - case IR_STRTO: - if (!uselo) - ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. 
*/ - break; -#endif - case IR_CALLN: - case IR_CALLS: - case IR_CALLXS: - if (!uselo) - ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ - break; -#if LJ_SOFTFP - case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: -#endif - case IR_CNEWI: - /* Nothing to do here. Handled by lo op itself. */ - break; - default: lua_assert(0); break; - } -#else - UNUSED(as); UNUSED(ir); lua_assert(0); -#endif -} - -/* -- Profiling ----------------------------------------------------------- */ - -static void asm_prof(ASMState *as, IRIns *ir) -{ - UNUSED(ir); - asm_guardcc(as, CC_NE); - emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP); - emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask); -} - -/* -- Stack handling ------------------------------------------------------ */ - -/* Check Lua stack size for overflow. Use exit handler as fallback. */ -static void asm_stack_check(ASMState *as, BCReg topslot, - IRIns *irp, RegSet allow, ExitNo exitno) -{ - Reg pbase; - uint32_t k; - if (irp) { - if (!ra_hasspill(irp->s)) { - pbase = irp->r; - lua_assert(ra_hasreg(pbase)); - } else if (allow) { - pbase = rset_pickbot(allow); - } else { - pbase = RID_RET; - emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */ - } - } else { - pbase = RID_BASE; - } - emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno)); - k = emit_isk12(0, (int32_t)(8*topslot)); - lua_assert(k); - emit_n(as, ARMI_CMP^k, RID_TMP); - emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase); - emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, - (int32_t)offsetof(lua_State, maxstack)); - if (irp) { /* Must not spill arbitrary registers in head of side trace. */ - int32_t i = i32ptr(&J2G(as->J)->cur_L); - if (ra_hasspill(irp->s)) - emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s)); - emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095)); - if (ra_hasspill(irp->s) && !allow) - emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */ - emit_loadi(as, RID_TMP, (i & ~4095)); - } else { - emit_getgl(as, RID_TMP, cur_L); - } -} - -/* Restore Lua stack from on-trace state. */ -static void asm_stack_restore(ASMState *as, SnapShot *snap) -{ - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; - MSize n, nent = snap->nent; - /* Store the value of all modified slots to the Lua stack. */ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - BCReg s = snap_slot(sn); - int32_t ofs = 8*((int32_t)s-1); - IRRef ref = snap_ref(sn); - IRIns *ir = IR(ref); - if ((sn & SNAP_NORESTORE)) - continue; - if (irt_isnum(ir->t)) { -#if LJ_SOFTFP - RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); - Reg tmp; - lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. 
*/ - tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, - rset_exclude(RSET_GPREVEN, RID_BASE)); - emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs); - if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1); - tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd); - emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4); -#else - Reg src = ra_alloc1(as, ref, RSET_FPR); - emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs); -#endif - } else { - RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); - Reg type; - lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); - if (!irt_ispri(ir->t)) { - Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE)); - emit_lso(as, ARMI_STR, src, RID_BASE, ofs); - if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1); - } - if ((sn & (SNAP_CONT|SNAP_FRAME))) { - if (s == 0) continue; /* Do not overwrite link to previous frame. */ - type = ra_allock(as, (int32_t)(*flinks--), odd); -#if LJ_SOFTFP - } else if ((sn & SNAP_SOFTFPNUM)) { - type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE)); -#endif - } else { - type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd); - } - emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4); - } - checkmclim(as); - } - lua_assert(map + nent == flinks); -} - -/* -- GC handling --------------------------------------------------------- */ - -/* Check GC threshold and do one or more GC steps. */ -static void asm_gc_check(ASMState *as) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; - IRRef args[2]; - MCLabel l_end; - Reg tmp1, tmp2; - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ - asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ - emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ASMREF_TMP2; /* MSize steps */ - asm_gencall(as, ci, args); - tmp1 = ra_releasetmp(as, ASMREF_TMP1); - tmp2 = ra_releasetmp(as, ASMREF_TMP2); - emit_loadi(as, tmp2, as->gcsteps); - /* Jump around GC step if GC total < GC threshold. */ - emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end); - emit_nm(as, ARMI_CMP, RID_TMP, tmp2); - emit_lso(as, ARMI_LDR, tmp2, tmp1, - (int32_t)offsetof(global_State, gc.threshold)); - emit_lso(as, ARMI_LDR, RID_TMP, tmp1, - (int32_t)offsetof(global_State, gc.total)); - ra_allockreg(as, i32ptr(J2G(as->J)), tmp1); - as->gcsteps = 0; - checkmclim(as); -} - -/* -- Loop handling ------------------------------------------------------- */ - -/* Fixup the loop branch. */ -static void asm_loop_fixup(ASMState *as) -{ - MCode *p = as->mctop; - MCode *target = as->mcp; - if (as->loopinv) { /* Inverted loop branch? */ - /* asm_guardcc already inverted the bcc and patched the final bl. */ - p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu); - } else { - p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu); - } -} - -/* -- Head of trace ------------------------------------------------------- */ - -/* Reload L register from g->cur_L. */ -static void asm_head_lreg(ASMState *as) -{ - IRIns *ir = IR(ASMREF_L); - if (ra_used(ir)) { - Reg r = ra_dest(as, ir, RSET_GPR); - emit_getgl(as, r, cur_L); - ra_evictk(as); - } -} - -/* Coalesce BASE register for a root trace. */ -static void asm_head_root_base(ASMState *as) -{ - IRIns *ir; - asm_head_lreg(as); - ir = IR(REF_BASE); - if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t))) - ra_spill(as, ir); - ra_destreg(as, ir, RID_BASE); -} - -/* Coalesce BASE register for a side trace. 
*/ -static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) -{ - IRIns *ir; - asm_head_lreg(as); - ir = IR(REF_BASE); - if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t))) - ra_spill(as, ir); - if (ra_hasspill(irp->s)) { - rset_clear(allow, ra_dest(as, ir, allow)); - } else { - Reg r = irp->r; - lua_assert(ra_hasreg(r)); - rset_clear(allow, r); - if (r != ir->r && !rset_test(as->freeset, r)) - ra_restore(as, regcost_ref(as->cost[r])); - ra_destreg(as, ir, r); - } - return allow; -} - -/* -- Tail of trace ------------------------------------------------------- */ - -/* Fixup the tail code. */ -static void asm_tail_fixup(ASMState *as, TraceNo lnk) -{ - MCode *p = as->mctop; - MCode *target; - int32_t spadj = as->T->spadjust; - if (spadj == 0) { - as->mctop = --p; - } else { - /* Patch stack adjustment. */ - uint32_t k = emit_isk12(ARMI_ADD, spadj); - lua_assert(k); - p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP); - } - /* Patch exit branch. */ - target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; - p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu); -} - -/* Prepare tail of code. */ -static void asm_tail_prep(ASMState *as) -{ - MCode *p = as->mctop - 1; /* Leave room for exit branch. */ - if (as->loopref) { - as->invmcp = as->mcp = p; - } else { - as->mcp = p-1; /* Leave room for stack pointer adjustment. */ - as->invmcp = NULL; - } - *p = 0; /* Prevent load/store merging. */ -} - -/* -- Trace setup --------------------------------------------------------- */ - -/* Ensure there are enough stack slots for call arguments. */ -static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - IRRef args[CCI_NARGS_MAX*2]; - uint32_t i, nargs = CCI_XNARGS(ci); - int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0; - asm_collectargs(as, ir, ci, args); - for (i = 0; i < nargs; i++) { - if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) { - if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) { - if (irt_isnum(IR(args[i])->t)) { - if (nfpr > 0) nfpr--; - else fprodd = 0, nslots = (nslots + 3) & ~1; - } else { - if (fprodd) fprodd--; - else if (nfpr > 0) fprodd = 1, nfpr--; - else nslots++; - } - } else if (irt_isnum(IR(args[i])->t)) { - ngpr &= ~1; - if (ngpr > 0) ngpr -= 2; else nslots += 2; - } else { - if (ngpr > 0) ngpr--; else nslots++; - } - } else { - if (ngpr > 0) ngpr--; else nslots++; - } - } - if (nslots > as->evenspill) /* Leave room for args in stack slots. */ - as->evenspill = nslots; - return REGSP_HINT(RID_RET); -} - -static void asm_setup_target(ASMState *as) -{ - /* May need extra exit for asm_stack_check on side traces. */ - asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0)); -} - -/* -- Trace patching ------------------------------------------------------ */ - -/* Patch exit jumps of existing machine code to a new target. */ -void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) -{ - MCode *p = T->mcode; - MCode *pe = (MCode *)((char *)p + T->szmcode); - MCode *cstart = NULL, *cend = p; - MCode *mcarea = lj_mcode_patch(J, p, 0); - MCode *px = exitstub_addr(J, exitno) - 2; - for (; p < pe; p++) { - /* Look for bl_cc exitstub, replace with b_cc target. 
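** The test below decodes the instruction roughly as follows:
**   (ins & 0x0f000000u) == 0x0b000000u    opcode field is BL
**   ins < 0xf0000000u                     condition field is not NV
**   ((ins ^ (px-p)) & 0x00ffffffu) == 0   24-bit offset hits this exitstub
** The rewrite keeps the condition bits and drops the link bit, turning
** the conditional BL into a conditional B to the new target.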
*/ - uint32_t ins = *p; - if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u && - ((ins ^ (px-p)) & 0x00ffffffu) == 0) { - *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu); - cend = p+1; - if (!cstart) cstart = p; - } - } - lua_assert(cstart != NULL); - lj_mcode_sync(cstart, cend); - lj_mcode_patch(J, mcarea, 1); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_mips.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_mips.h deleted file mode 100644 index adea0e32367..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_mips.h +++ /dev/null @@ -1,1833 +0,0 @@ -/* -** MIPS IR assembler (SSA IR -> machine code). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* -- Register allocator extensions --------------------------------------- */ - -/* Allocate a register with a hint. */ -static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) -{ - Reg r = IR(ref)->r; - if (ra_noreg(r)) { - if (!ra_hashint(r) && !iscrossref(as, ref)) - ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ - r = ra_allocref(as, ref, allow); - } - ra_noweak(as, r); - return r; -} - -/* Allocate a register or RID_ZERO. */ -static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow) -{ - Reg r = IR(ref)->r; - if (ra_noreg(r)) { - if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0) - return RID_ZERO; - r = ra_allocref(as, ref, allow); - } else { - ra_noweak(as, r); - } - return r; -} - -/* Allocate two source registers for three-operand instructions. */ -static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) -{ - IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); - Reg left = irl->r, right = irr->r; - if (ra_hasreg(left)) { - ra_noweak(as, left); - if (ra_noreg(right)) - right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left)); - else - ra_noweak(as, right); - } else if (ra_hasreg(right)) { - ra_noweak(as, right); - left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right)); - } else if (ra_hashint(right)) { - right = ra_alloc1z(as, ir->op2, allow); - left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right)); - } else { - left = ra_alloc1z(as, ir->op1, allow); - right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left)); - } - return left | (right << 8); -} - -/* -- Guard handling ------------------------------------------------------ */ - -/* Need some spare long-range jump slots, for out-of-range branches. */ -#define MIPS_SPAREJUMP 4 - -/* Setup spare long-range jump slots per mcarea. */ -static void asm_sparejump_setup(ASMState *as) -{ - MCode *mxp = as->mcbot; - /* Assumes sizeof(MCLink) == 8. */ - if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) { - lua_assert(MIPSI_NOP == 0); - memset(mxp+2, 0, MIPS_SPAREJUMP*8); - mxp += MIPS_SPAREJUMP*2; - lua_assert(mxp < as->mctop); - lj_mcode_sync(as->mcbot, mxp); - lj_mcode_commitbot(as->J, mxp); - as->mcbot = mxp; - as->mclim = as->mcbot + MCLIM_REDZONE; - } -} - -/* Setup exit stub after the end of each trace. */ -static void asm_exitstub_setup(ASMState *as) -{ - MCode *mxp = as->mctop; - /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */ - *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno; - *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu); - lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0); - *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0; - as->mctop = mxp; -} - -/* Keep this in-sync with exitstub_trace_addr(). 
*/ -#define asm_exitstub_addr(as) ((as)->mctop) - -/* Emit conditional branch to exit for guard. */ -static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt) -{ - MCode *target = asm_exitstub_addr(as); - MCode *p = as->mcp; - if (LJ_UNLIKELY(p == as->invmcp)) { - as->invmcp = NULL; - as->loopinv = 1; - as->mcp = p+1; - mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */ - target = p; /* Patch target later in asm_loop_fixup. */ - } - emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); - emit_branch(as, mi, rs, rt, target); -} - -/* -- Operand fusion ------------------------------------------------------ */ - -/* Limit linear search to this distance. Avoids O(n^2) behavior. */ -#define CONFLICT_SEARCH_LIM 31 - -/* Check if there's no conflicting instruction between curins and ref. */ -static int noconflict(ASMState *as, IRRef ref, IROp conflict) -{ - IRIns *ir = as->ir; - IRRef i = as->curins; - if (i > ref + CONFLICT_SEARCH_LIM) - return 0; /* Give up, ref is too far away. */ - while (--i > ref) - if (ir[i].o == conflict) - return 0; /* Conflict found. */ - return 1; /* Ok, no conflict. */ -} - -/* Fuse the array base of colocated arrays. */ -static int32_t asm_fuseabase(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && - !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) - return (int32_t)sizeof(GCtab); - return 0; -} - -/* Fuse array/hash/upvalue reference into register+offset operand. */ -static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow) -{ - IRIns *ir = IR(ref); - if (ra_noreg(ir->r)) { - if (ir->o == IR_AREF) { - if (mayfuse(as, ref)) { - if (irref_isk(ir->op2)) { - IRRef tab = IR(ir->op1)->op1; - int32_t ofs = asm_fuseabase(as, tab); - IRRef refa = ofs ? tab : ir->op1; - ofs += 8*IR(ir->op2)->i; - if (checki16(ofs)) { - *ofsp = ofs; - return ra_alloc1(as, refa, allow); - } - } - } - } else if (ir->o == IR_HREFK) { - if (mayfuse(as, ref)) { - int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); - if (checki16(ofs)) { - *ofsp = ofs; - return ra_alloc1(as, ir->op1, allow); - } - } - } else if (ir->o == IR_UREFC) { - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); - int32_t jgl = (intptr_t)J2G(as->J); - if ((uint32_t)(ofs-jgl) < 65536) { - *ofsp = ofs-jgl-32768; - return RID_JGL; - } else { - *ofsp = (int16_t)ofs; - return ra_allock(as, ofs-(int16_t)ofs, allow); - } - } - } - } - *ofsp = 0; - return ra_alloc1(as, ref, allow); -} - -/* Fuse XLOAD/XSTORE reference into load/store operand. */ -static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref, - RegSet allow, int32_t ofs) -{ - IRIns *ir = IR(ref); - Reg base; - if (ra_noreg(ir->r) && canfuse(as, ir)) { - if (ir->o == IR_ADD) { - int32_t ofs2; - if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) { - ref = ir->op1; - ofs = ofs2; - } - } else if (ir->o == IR_STRREF) { - int32_t ofs2 = 65536; - lua_assert(ofs == 0); - ofs = (int32_t)sizeof(GCstr); - if (irref_isk(ir->op2)) { - ofs2 = ofs + IR(ir->op2)->i; - ref = ir->op1; - } else if (irref_isk(ir->op1)) { - ofs2 = ofs + IR(ir->op1)->i; - ref = ir->op2; - } - if (!checki16(ofs2)) { - /* NYI: Fuse ADD with constant. 
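** Fallback sketch for a combined offset that does not fit into a signed
** 16-bit immediate (execution order; code is emitted backwards):
**   addu  TMP, left, right
**   (mi)  rt, sizeof(GCstr)(TMP)      mi is the fused load/store opcode
** i.e. string base and index are added explicitly and only the small
** GCstr header offset is folded into the memory operand.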
*/ - Reg right, left = ra_alloc2(as, ir, allow); - right = (left >> 8); left &= 255; - emit_hsi(as, mi, rt, RID_TMP, ofs); - emit_dst(as, MIPSI_ADDU, RID_TMP, left, right); - return; - } - ofs = ofs2; - } - } - base = ra_alloc1(as, ref, allow); - emit_hsi(as, mi, rt, base, ofs); -} - -/* -- Calls --------------------------------------------------------------- */ - -/* Generate a call to a C function. */ -static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) -{ - uint32_t n, nargs = CCI_XNARGS(ci); - int32_t ofs = 16; - Reg gpr, fpr = REGARG_FIRSTFPR; - if ((void *)ci->func) - emit_call(as, (void *)ci->func); - for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++) - as->cost[gpr] = REGCOST(~0u, ASMREF_L); - gpr = REGARG_FIRSTGPR; - for (n = 0; n < nargs; n++) { /* Setup args. */ - IRRef ref = args[n]; - if (ref) { - IRIns *ir = IR(ref); - if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR && - !(ci->flags & CCI_VARARG)) { - lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ - ra_leftov(as, fpr, ref); - fpr += 2; - gpr += irt_isnum(ir->t) ? 2 : 1; - } else { - fpr = REGARG_LASTFPR+1; - if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1; - if (gpr <= REGARG_LASTGPR) { - lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ - if (irt_isfp(ir->t)) { - RegSet of = as->freeset; - Reg r; - /* Workaround to protect argument GPRs from being used for remat. */ - as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); - r = ra_alloc1(as, ref, RSET_FPR); - as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); - if (irt_isnum(ir->t)) { - emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1); - emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r); - lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */ - gpr += 2; - } else if (irt_isfloat(ir->t)) { - emit_tg(as, MIPSI_MFC1, gpr, r); - gpr++; - } - } else { - ra_leftov(as, gpr, ref); - gpr++; - } - } else { - Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); - if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; - emit_spstore(as, ir, r, ofs); - ofs += irt_isnum(ir->t) ? 8 : 4; - } - } - } else { - fpr = REGARG_LASTFPR+1; - if (gpr <= REGARG_LASTGPR) - gpr++; - else - ofs += 4; - } - checkmclim(as); - } -} - -/* Setup result reg/sp for call. Evict scratch regs. */ -static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - RegSet drop = RSET_SCRATCH; - int hiop = ((ir+1)->o == IR_HIOP); - if ((ci->flags & CCI_NOFPRCLOBBER)) - drop &= ~RSET_FPR; - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - if (hiop && ra_hasreg((ir+1)->r)) - rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ - ra_evictset(as, drop); /* Evictions must be performed first. 
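A rough model of the argument placement asm_gencall() performs for the o32-style convention follows; the register names and simplifications are spelled out in the comment, so treat it as an illustration of the bookkeeping rather than the exact rule set:

  #include <stdio.h>

  /*
  ** Simplified model of the argument placement asm_gencall() walks through:
  ** leading FP args take $f12/$f14, integer args take $a0..$a3, doubles use
  ** an aligned register pair, and the rest spills to the stack area starting
  ** at offset 16.  Vararg calls and the case where later doubles are moved
  ** into GPR pairs with MFC1 are ignored here.
  */
  enum { ARG_INT, ARG_DOUBLE };

  int main(void)
  {
    int args[] = { ARG_DOUBLE, ARG_INT, ARG_DOUBLE, ARG_INT };
    int nargs = (int)(sizeof(args)/sizeof(args[0]));
    int gpr = 0, fpr = 0, ofs = 16, fpr_ok = 1, i;
    for (i = 0; i < nargs; i++) {
      int isnum = (args[i] == ARG_DOUBLE);
      if (isnum && fpr_ok && fpr < 2) {
        printf("arg %d -> $f%d\n", i, 12 + 2*fpr);
        fpr++; gpr += 2;                   /* doubles also reserve a GPR pair */
      } else {
        fpr_ok = 0;                        /* no FPR use after a GPR/stack arg */
        if (isnum) gpr = (gpr + 1) & ~1;   /* align to an even register */
        if (gpr + (isnum ? 2 : 1) <= 4) {
          printf("arg %d -> $a%d%s\n", i, gpr, isnum ? " (pair)" : "");
          gpr += isnum ? 2 : 1;
        } else {
          if (isnum) ofs = (ofs + 4) & ~4; /* 8-align doubles on the stack */
          printf("arg %d -> stack+%d\n", i, ofs);
          ofs += isnum ? 8 : 4;
        }
      }
    }
    return 0;
  }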
*/ - if (ra_used(ir)) { - lua_assert(!irt_ispri(ir->t)); - if (irt_isfp(ir->t)) { - if ((ci->flags & CCI_CASTU64)) { - int32_t ofs = sps_scale(ir->s); - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1); - emit_tg(as, MIPSI_MTC1, RID_RETLO, dest); - } - if (ofs) { - emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0)); - emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4)); - } - } else { - ra_destreg(as, ir, RID_FPRET); - } - } else if (hiop) { - ra_destpair(as, ir); - } else { - ra_destreg(as, ir, RID_RET); - } - } -} - -static void asm_callx(ASMState *as, IRIns *ir) -{ - IRRef args[CCI_NARGS_MAX*2]; - CCallInfo ci; - IRRef func; - IRIns *irf; - ci.flags = asm_callx_flags(as, ir); - asm_collectargs(as, ir, &ci, args); - asm_setupresult(as, ir, &ci); - func = ir->op2; irf = IR(func); - if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } - if (irref_isk(func)) { /* Call to constant address. */ - ci.func = (ASMFunction)(void *)(irf->i); - } else { /* Need specific register for indirect calls. */ - Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR)); - MCode *p = as->mcp; - if (r == RID_CFUNCADDR) - *--p = MIPSI_NOP; - else - *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r); - *--p = MIPSI_JALR | MIPSF_S(r); - as->mcp = p; - ci.func = (ASMFunction)(void *)0; - } - asm_gencall(as, &ci, args); -} - -static void asm_callround(ASMState *as, IRIns *ir, IRCallID id) -{ - /* The modified regs must match with the *.dasc implementation. */ - RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)| - RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR); - if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); - ra_evictset(as, drop); - ra_destreg(as, ir, RID_FPRET); - emit_call(as, (void *)lj_ir_callinfo[id].func); - ra_leftov(as, REGARG_FIRSTFPR, ir->op1); -} - -/* -- Returns ------------------------------------------------------------- */ - -/* Return to lower frame. Guard that it goes to the right spot. */ -static void asm_retf(ASMState *as, IRIns *ir) -{ - Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); - void *pc = ir_kptr(IR(ir->op2)); - int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); - as->topslot -= (BCReg)delta; - if ((int32_t)as->topslot < 0) as->topslot = 0; - irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. 
*/ - emit_setgl(as, base, jit_base); - emit_addptr(as, base, -8*delta); - asm_guard(as, MIPSI_BNE, RID_TMP, - ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); - emit_tsi(as, MIPSI_LW, RID_TMP, base, -8); -} - -/* -- Type conversions ---------------------------------------------------- */ - -static void asm_tointg(ASMState *as, IRIns *ir, Reg left) -{ - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); - Reg dest = ra_dest(as, ir, RSET_GPR); - asm_guard(as, MIPSI_BC1F, 0, 0); - emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left); - emit_fg(as, MIPSI_CVT_D_W, tmp, tmp); - emit_tg(as, MIPSI_MFC1, dest, tmp); - emit_fg(as, MIPSI_CVT_W_D, tmp, left); -} - -static void asm_tobit(ASMState *as, IRIns *ir) -{ - RegSet allow = RSET_FPR; - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, allow); - Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); - Reg tmp = ra_scratch(as, rset_clear(allow, right)); - emit_tg(as, MIPSI_MFC1, dest, tmp); - emit_fgh(as, MIPSI_ADD_D, tmp, left, right); -} - -static void asm_conv(ASMState *as, IRIns *ir) -{ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); - int stfp = (st == IRT_NUM || st == IRT_FLOAT); - IRRef lref = ir->op1; - lua_assert(irt_type(ir->t) != st); - lua_assert(!(irt_isint64(ir->t) || - (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ - if (irt_isfp(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_FPR); - if (stfp) { /* FP to FP conversion. */ - emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S, - dest, ra_alloc1(as, lref, RSET_FPR)); - } else if (st == IRT_U32) { /* U32 to FP conversion. */ - /* y = (x ^ 0x8000000) + 2147483648.0 */ - Reg left = ra_alloc1(as, lref, RSET_GPR); - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest)); - emit_fgh(as, irt_isfloat(ir->t) ? MIPSI_ADD_S : MIPSI_ADD_D, - dest, dest, tmp); - emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W, - dest, dest); - if (irt_isfloat(ir->t)) - emit_lsptr(as, MIPSI_LWC1, (tmp & 31), - (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)), - RSET_GPR); - else - emit_lsptr(as, MIPSI_LDC1, (tmp & 31), - (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)), - RSET_GPR); - emit_tg(as, MIPSI_MTC1, RID_TMP, dest); - emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left); - emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); - } else { /* Integer to FP conversion. */ - Reg left = ra_alloc1(as, lref, RSET_GPR); - emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W, - dest, dest); - emit_tg(as, MIPSI_MTC1, left, dest); - } - } else if (stfp) { /* FP to integer conversion. */ - if (irt_isguard(ir->t)) { - /* Checked conversions are only supported from number to int. */ - lua_assert(irt_isint(ir->t) && st == IRT_NUM); - asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, lref, RSET_FPR); - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); - if (irt_isu32(ir->t)) { - /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */ - emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP); - emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); - emit_tg(as, MIPSI_MFC1, dest, tmp); - emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D, - tmp, tmp); - emit_fgh(as, st == IRT_FLOAT ? 
MIPSI_SUB_S : MIPSI_SUB_D, - tmp, left, tmp); - if (st == IRT_FLOAT) - emit_lsptr(as, MIPSI_LWC1, (tmp & 31), - (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)), - RSET_GPR); - else - emit_lsptr(as, MIPSI_LDC1, (tmp & 31), - (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)), - RSET_GPR); - } else { - emit_tg(as, MIPSI_MFC1, dest, tmp); - emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D, - tmp, left); - } - } - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); - if ((ir->op2 & IRCONV_SEXT)) { - if ((as->flags & JIT_F_MIPS32R2)) { - emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left); - } else { - uint32_t shift = st == IRT_I8 ? 24 : 16; - emit_dta(as, MIPSI_SRA, dest, dest, shift); - emit_dta(as, MIPSI_SLL, dest, left, shift); - } - } else { - emit_tsi(as, MIPSI_ANDI, dest, left, - (int32_t)(st == IRT_U8 ? 0xff : 0xffff)); - } - } else { /* 32/64 bit integer conversions. */ - /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */ - ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ - } - } -} - -static void asm_strto(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; - IRRef args[2]; - RegSet drop = RSET_SCRATCH; - if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ - ra_evictset(as, drop); - asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */ - args[0] = ir->op1; /* GCstr *str */ - args[1] = ASMREF_TMP1; /* TValue *n */ - asm_gencall(as, ci, args); - /* Store the result to the spill slot or temp slots. */ - emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), - RID_SP, sps_scale(ir->s)); -} - -/* -- Memory references --------------------------------------------------- */ - -/* Get pointer to TValue. */ -static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) -{ - IRIns *ir = IR(ref); - if (irt_isnum(ir->t)) { - if (irref_isk(ref)) /* Use the number constant itself as a TValue. */ - ra_allockreg(as, i32ptr(ir_knum(ir)), dest); - else /* Otherwise force a spill and use the spill slot. */ - emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir)); - } else { - /* Otherwise use g->tmptv to hold the TValue. */ - RegSet allow = rset_exclude(RSET_GPR, dest); - Reg type; - emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768); - if (!irt_ispri(ir->t)) { - Reg src = ra_alloc1(as, ref, allow); - emit_setgl(as, src, tmptv.gcr); - } - type = ra_allock(as, irt_toitype(ir->t), allow); - emit_setgl(as, type, tmptv.it); - } -} - -static void asm_aref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg idx, base; - if (irref_isk(ir->op2)) { - IRRef tab = IR(ir->op1)->op1; - int32_t ofs = asm_fuseabase(as, tab); - IRRef refa = ofs ? tab : ir->op1; - ofs += 8*IR(ir->op2)->i; - if (checki16(ofs)) { - base = ra_alloc1(as, refa, RSET_GPR); - emit_tsi(as, MIPSI_ADDIU, dest, base, ofs); - return; - } - } - base = ra_alloc1(as, ir->op1, RSET_GPR); - idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); - emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base); - emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3); -} - -/* Inlined hash lookup. Specialized for key type and for const keys. 
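The two unsigned/FP conversion tricks in asm_conv() can be checked in plain C: for u32 to double the value is XORed with 0x80000000 (built by the LUI 0x8000; the source comment drops a zero from the constant) and 2^31 is added back, and for double to u32 the emitted FLOOR_W is mirrored by floor() before the final XOR. A standalone check of both identities (link with -lm):

  #include <stdint.h>
  #include <stdio.h>
  #include <math.h>

  /* u32 -> double, the way the emitted code does it: reinterpret
  ** (x ^ 0x80000000) as a signed 32-bit value, convert, add 2^31. */
  static double u32_to_num(uint32_t x)
  {
    return (double)(int32_t)(x ^ 0x80000000u) + 2147483648.0;
  }

  /* double -> u32 for integral 0 <= x < 2^32, mirroring the FLOOR_W/XOR pair. */
  static uint32_t num_to_u32(double x)
  {
    return (uint32_t)(int32_t)floor(x - 2147483648.0) ^ 0x80000000u;
  }

  int main(void)
  {
    uint32_t tests[] = { 0u, 5u, 0x7fffffffu, 0x80000000u, 0xfffffffeu };
    int i;
    for (i = 0; i < 5; i++) {
      double d = u32_to_num(tests[i]);
      printf("%u -> %.1f -> %u\n", (unsigned)tests[i], d, (unsigned)num_to_u32(d));
    }
    return 0;
  }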
-** The equivalent C code is: -** Node *n = hashkey(t, key); -** do { -** if (lj_obj_equal(&n->key, key)) return &n->val; -** } while ((n = nextnode(n))); -** return niltv(L); -*/ -static void asm_href(ASMState *as, IRIns *ir, IROp merge) -{ - RegSet allow = RSET_GPR; - int destused = ra_used(ir); - Reg dest = ra_dest(as, ir, allow); - Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); - Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2; - IRRef refkey = ir->op2; - IRIns *irkey = IR(refkey); - IRType1 kt = irkey->t; - uint32_t khash; - MCLabel l_end, l_loop, l_next; - - rset_clear(allow, tab); - if (irt_isnum(kt)) { - key = ra_alloc1(as, refkey, RSET_FPR); - tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); - } else if (!irt_ispri(kt)) { - key = ra_alloc1(as, refkey, allow); - rset_clear(allow, key); - type = ra_allock(as, irt_toitype(irkey->t), allow); - rset_clear(allow, type); - } - tmp2 = ra_scratch(as, allow); - rset_clear(allow, tmp2); - - /* Key not found in chain: jump to exit (if merged) or load niltv. */ - l_end = emit_label(as); - as->invmcp = NULL; - if (merge == IR_NE) - asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO); - else if (destused) - emit_loada(as, dest, niltvg(J2G(as->J))); - /* Follow hash chain until the end. */ - emit_move(as, dest, tmp2); - l_loop = --as->mcp; - emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, next)); - l_next = emit_label(as); - - /* Type and value comparison. */ - if (merge == IR_EQ) { /* Must match asm_guard(). */ - emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); - l_end = asm_exitstub_addr(as); - } - if (irt_isnum(kt)) { - emit_branch(as, MIPSI_BC1T, 0, 0, l_end); - emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key); - *--as->mcp = MIPSI_NOP; /* Avoid NaN comparison overhead. */ - emit_branch(as, MIPSI_BEQ, tmp2, RID_ZERO, l_next); - emit_tsi(as, MIPSI_SLTIU, tmp2, tmp2, (int32_t)LJ_TISNUM); - emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n)); - } else { - if (irt_ispri(kt)) { - emit_branch(as, MIPSI_BEQ, tmp2, type, l_end); - } else { - emit_branch(as, MIPSI_BEQ, tmp1, key, l_end); - emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.gcr)); - emit_branch(as, MIPSI_BNE, tmp2, type, l_next); - } - } - emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.it)); - *l_loop = MIPSI_BNE | MIPSF_S(tmp2) | ((as->mcp-l_loop-1) & 0xffffu); - - /* Load main position relative to tab->node into dest. */ - khash = irref_isk(refkey) ? ir_khash(irkey) : 1; - if (khash == 0) { - emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node)); - } else { - Reg tmphash = tmp1; - if (irref_isk(refkey)) - tmphash = ra_allock(as, khash, allow); - emit_dst(as, MIPSI_ADDU, dest, dest, tmp1); - lua_assert(sizeof(Node) == 24); - emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1); - emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3); - emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5); - emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash); - emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node)); - emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); - if (irref_isk(refkey)) { - /* Nothing to do. */ - } else if (irt_isstr(kt)) { - emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash)); - } else { /* Must match with hash*() in lj_tab.c. 
*/ - emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2); - emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31); - emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2); - emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31); - emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest); - if (irt_isnum(kt)) { - emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1); - if ((as->flags & JIT_F_MIPS32R2)) { - emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31); - } else { - emit_dst(as, MIPSI_OR, dest, dest, tmp1); - emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1); - emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31); - } - emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1); - emit_tg(as, MIPSI_MFC1, tmp2, key); - emit_tg(as, MIPSI_MFC1, tmp1, key+1); - } else { - emit_dst(as, MIPSI_XOR, tmp2, key, tmp1); - emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31); - emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow)); - } - } - } -} - -static void asm_hrefk(ASMState *as, IRIns *ir) -{ - IRIns *kslot = IR(ir->op2); - IRIns *irkey = IR(kslot->op1); - int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); - int32_t kofs = ofs + (int32_t)offsetof(Node, key); - Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; - Reg node = ra_alloc1(as, ir->op1, RSET_GPR); - Reg key = RID_NONE, type = RID_TMP, idx = node; - RegSet allow = rset_exclude(RSET_GPR, node); - int32_t lo, hi; - lua_assert(ofs % sizeof(Node) == 0); - if (ofs > 32736) { - idx = dest; - rset_clear(allow, dest); - kofs = (int32_t)offsetof(Node, key); - } else if (ra_hasreg(dest)) { - emit_tsi(as, MIPSI_ADDIU, dest, node, ofs); - } - if (!irt_ispri(irkey->t)) { - key = ra_scratch(as, allow); - rset_clear(allow, key); - } - if (irt_isnum(irkey->t)) { - lo = (int32_t)ir_knum(irkey)->u32.lo; - hi = (int32_t)ir_knum(irkey)->u32.hi; - } else { - lo = irkey->i; - hi = irt_toitype(irkey->t); - if (!ra_hasreg(key)) - goto nolo; - } - asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO); -nolo: - asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO); - if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0)); - emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4)); - if (ofs > 32736) - emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow)); -} - -static void asm_uref(ASMState *as, IRIns *ir) -{ - /* NYI: Check that UREFO is still open and not aliasing a slot. 
*/ - Reg dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; - emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR); - } else { - Reg uv = ra_scratch(as, RSET_GPR); - Reg func = ra_alloc1(as, ir->op1, RSET_GPR); - if (ir->o == IR_UREFC) { - asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); - emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv)); - emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); - } else { - emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v)); - } - emit_tsi(as, MIPSI_LW, uv, func, - (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); - } -} - -static void asm_fref(ASMState *as, IRIns *ir) -{ - UNUSED(as); UNUSED(ir); - lua_assert(!ra_used(ir)); -} - -static void asm_strref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - IRRef ref = ir->op2, refk = ir->op1; - int32_t ofs = (int32_t)sizeof(GCstr); - Reg r; - if (irref_isk(ref)) { - IRRef tmp = refk; refk = ref; ref = tmp; - } else if (!irref_isk(refk)) { - Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); - IRIns *irr = IR(ir->op2); - if (ra_hasreg(irr->r)) { - ra_noweak(as, irr->r); - right = irr->r; - } else if (mayfuse(as, irr->op2) && - irr->o == IR_ADD && irref_isk(irr->op2) && - checki16(ofs + IR(irr->op2)->i)) { - ofs += IR(irr->op2)->i; - right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); - } else { - right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); - } - emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs); - emit_dst(as, MIPSI_ADDU, dest, left, right); - return; - } - r = ra_alloc1(as, ref, RSET_GPR); - ofs += IR(refk)->i; - if (checki16(ofs)) - emit_tsi(as, MIPSI_ADDIU, dest, r, ofs); - else - emit_dst(as, MIPSI_ADDU, dest, r, - ra_allock(as, ofs, rset_exclude(RSET_GPR, r))); -} - -/* -- Loads and stores ---------------------------------------------------- */ - -static MIPSIns asm_fxloadins(IRIns *ir) -{ - switch (irt_type(ir->t)) { - case IRT_I8: return MIPSI_LB; - case IRT_U8: return MIPSI_LBU; - case IRT_I16: return MIPSI_LH; - case IRT_U16: return MIPSI_LHU; - case IRT_NUM: return MIPSI_LDC1; - case IRT_FLOAT: return MIPSI_LWC1; - default: return MIPSI_LW; - } -} - -static MIPSIns asm_fxstoreins(IRIns *ir) -{ - switch (irt_type(ir->t)) { - case IRT_I8: case IRT_U8: return MIPSI_SB; - case IRT_I16: case IRT_U16: return MIPSI_SH; - case IRT_NUM: return MIPSI_SDC1; - case IRT_FLOAT: return MIPSI_SWC1; - default: return MIPSI_SW; - } -} - -static void asm_fload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); - MIPSIns mi = asm_fxloadins(ir); - int32_t ofs; - if (ir->op2 == IRFL_TAB_ARRAY) { - ofs = asm_fuseabase(as, ir->op1); - if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ - emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs); - return; - } - } - ofs = field_ofs[ir->op2]; - lua_assert(!irt_isfp(ir->t)); - emit_tsi(as, mi, dest, idx, ofs); -} - -static void asm_fstore(ASMState *as, IRIns *ir) -{ - if (ir->r != RID_SINK) { - Reg src = ra_alloc1z(as, ir->op2, RSET_GPR); - IRIns *irf = IR(ir->op1); - Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); - int32_t ofs = field_ofs[irf->op2]; - MIPSIns mi = asm_fxstoreins(ir); - lua_assert(!irt_isfp(ir->t)); - emit_tsi(as, mi, src, idx, ofs); - } -} - -static void asm_xload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? 
RSET_FPR : RSET_GPR); - lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); - asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); -} - -static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) -{ - if (ir->r != RID_SINK) { - Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); - asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, - rset_exclude(RSET_GPR, src), ofs); - } -} - -#define asm_xstore(as, ir) asm_xstore_(as, ir, 0) - -static void asm_ahuvload(ASMState *as, IRIns *ir) -{ - IRType1 t = ir->t; - Reg dest = RID_NONE, type = RID_TMP, idx; - RegSet allow = RSET_GPR; - int32_t ofs = 0; - if (ra_used(ir)) { - lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); - dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); - rset_clear(allow, dest); - } - idx = asm_fuseahuref(as, ir->op1, &ofs, allow); - rset_clear(allow, idx); - if (irt_isnum(t)) { - asm_guard(as, MIPSI_BEQ, type, RID_ZERO); - emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM); - if (ra_hasreg(dest)) - emit_hsi(as, MIPSI_LDC1, dest, idx, ofs); - } else { - asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow)); - if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0)); - } - emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4)); -} - -static void asm_ahustore(ASMState *as, IRIns *ir) -{ - RegSet allow = RSET_GPR; - Reg idx, src = RID_NONE, type = RID_NONE; - int32_t ofs = 0; - if (ir->r == RID_SINK) - return; - if (irt_isnum(ir->t)) { - src = ra_alloc1(as, ir->op2, RSET_FPR); - } else { - if (!irt_ispri(ir->t)) { - src = ra_alloc1(as, ir->op2, allow); - rset_clear(allow, src); - } - type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); - rset_clear(allow, type); - } - idx = asm_fuseahuref(as, ir->op1, &ofs, allow); - if (irt_isnum(ir->t)) { - emit_hsi(as, MIPSI_SDC1, src, idx, ofs); - } else { - if (ra_hasreg(src)) - emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0)); - emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4)); - } -} - -static void asm_sload(ASMState *as, IRIns *ir) -{ - int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); - IRType1 t = ir->t; - Reg dest = RID_NONE, type = RID_NONE, base; - RegSet allow = RSET_GPR; - lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ - lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); - lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); - if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { - dest = ra_scratch(as, RSET_FPR); - asm_tointg(as, ir, dest); - t.irt = IRT_NUM; /* Continue with a regular number type check. */ - } else if (ra_used(ir)) { - lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); - dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); - rset_clear(allow, dest); - base = ra_alloc1(as, REF_BASE, allow); - rset_clear(allow, base); - if ((ir->op2 & IRSLOAD_CONVERT)) { - if (irt_isint(t)) { - Reg tmp = ra_scratch(as, RSET_FPR); - emit_tg(as, MIPSI_MFC1, dest, tmp); - emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp); - dest = tmp; - t.irt = IRT_NUM; /* Check for original type. */ - } else { - Reg tmp = ra_scratch(as, RSET_GPR); - emit_fg(as, MIPSI_CVT_D_W, dest, dest); - emit_tg(as, MIPSI_MTC1, tmp, dest); - dest = tmp; - t.irt = IRT_INT; /* Check for original type. 
*/ - } - } - goto dotypecheck; - } - base = ra_alloc1(as, REF_BASE, allow); - rset_clear(allow, base); -dotypecheck: - if (irt_isnum(t)) { - if ((ir->op2 & IRSLOAD_TYPECHECK)) { - asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); - emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM); - type = RID_TMP; - } - if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs); - } else { - if ((ir->op2 & IRSLOAD_TYPECHECK)) { - Reg ktype = ra_allock(as, irt_toitype(t), allow); - asm_guard(as, MIPSI_BNE, RID_TMP, ktype); - type = RID_TMP; - } - if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0)); - } - if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4)); -} - -/* -- Allocations --------------------------------------------------------- */ - -#if LJ_HASFFI -static void asm_cnew(ASMState *as, IRIns *ir) -{ - CTState *cts = ctype_ctsG(J2G(as->J)); - CTypeID id = (CTypeID)IR(ir->op1)->i; - CTSize sz; - CTInfo info = lj_ctype_info(cts, id, &sz); - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; - IRRef args[4]; - RegSet drop = RSET_SCRATCH; - lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); - - as->gcsteps++; - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - ra_evictset(as, drop); - if (ra_used(ir)) - ra_destreg(as, ir, RID_RET); /* GCcdata * */ - - /* Initialize immutable cdata object. */ - if (ir->o == IR_CNEWI) { - RegSet allow = (RSET_GPR & ~RSET_SCRATCH); - int32_t ofs = sizeof(GCcdata); - lua_assert(sz == 4 || sz == 8); - if (sz == 8) { - ofs += 4; - lua_assert((ir+1)->o == IR_HIOP); - if (LJ_LE) ir++; - } - for (;;) { - Reg r = ra_alloc1z(as, ir->op2, allow); - emit_tsi(as, MIPSI_SW, r, RID_RET, ofs); - rset_clear(allow, r); - if (ofs == sizeof(GCcdata)) break; - ofs -= 4; if (LJ_BE) ir++; else ir--; - } - } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ - ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* CTypeID id */ - args[2] = ir->op2; /* CTSize sz */ - args[3] = ASMREF_TMP1; /* CTSize align */ - asm_gencall(as, ci, args); - emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); - return; - } - - /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ - emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); - emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); - emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA); - emit_ti(as, MIPSI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */ - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ASMREF_TMP1; /* MSize size */ - asm_gencall(as, ci, args); - ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), - ra_releasetmp(as, ASMREF_TMP1)); -} -#else -#define asm_cnew(as, ir) ((void)0) -#endif - -/* -- Write barriers ------------------------------------------------------ */ - -static void asm_tbar(ASMState *as, IRIns *ir) -{ - Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); - Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab)); - Reg link = RID_TMP; - MCLabel l_end = emit_label(as); - emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist)); - emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked)); - emit_setgl(as, tab, gc.grayagain); - emit_getgl(as, link, gc.grayagain); - emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. 
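asm_tbar() re-creates the backward write barrier for table stores: a black table that just received a new reference is turned gray again and linked onto the gray-again list instead of being traced immediately. A rough standalone model of that logic with mocked-up types (the bit value and field names are placeholders, not the real lj_gc.c layout):

  #include <stdio.h>

  #define GC_BLACK 0x04   /* assumed bit value, for illustration only */

  typedef struct GCtab {
    unsigned char marked;
    struct GCtab *gclist;
  } GCtab;

  typedef struct {
    GCtab *grayagain;
  } GCState;

  /* Backward barrier: a black table that just got a new (possibly white)
  ** key or value is made gray again so the GC revisits it later. */
  static void barrierback(GCState *g, GCtab *t)
  {
    if (t->marked & GC_BLACK) {
      t->marked &= (unsigned char)~GC_BLACK;  /* clear black bit */
      t->gclist = g->grayagain;               /* link into gray-again list */
      g->grayagain = t;
    }
  }

  int main(void)
  {
    GCState g = { 0 };
    GCtab t = { GC_BLACK, 0 };
    barrierback(&g, &t);
    printf("marked=%d, on grayagain list: %d\n", (int)t.marked, g.grayagain == &t);
    return 0;
  }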
*/ - emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); - emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK); - emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked)); -} - -static void asm_obar(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; - IRRef args[2]; - MCLabel l_end; - Reg obj, val, tmp; - /* No need for other object barriers (yet). */ - lua_assert(IR(ir->op1)->o == IR_UREFC); - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ir->op1; /* TValue *tv */ - asm_gencall(as, ci, args); - emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); - obj = IR(ir->op1)->r; - tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); - emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); - emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK); - emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); - emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES); - val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); - emit_tsi(as, MIPSI_LBU, tmp, obj, - (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); - emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked)); -} - -/* -- Arithmetic and logic operations ------------------------------------- */ - -static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi) -{ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - emit_fgh(as, mi, dest, left, right); -} - -static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi) -{ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); - emit_fg(as, mi, dest, left); -} - -static void asm_fpmath(ASMState *as, IRIns *ir) -{ - if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) - return; - if (ir->op2 <= IRFPM_TRUNC) - asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2); - else if (ir->op2 == IRFPM_SQRT) - asm_fpunary(as, ir, MIPSI_SQRT_D); - else - asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); -} - -static void asm_add(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - asm_fparith(as, ir, MIPSI_ADD_D); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (checki16(k)) { - emit_tsi(as, MIPSI_ADDIU, dest, left, k); - return; - } - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_dst(as, MIPSI_ADDU, dest, left, right); - } -} - -static void asm_sub(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - asm_fparith(as, ir, MIPSI_SUB_D); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - emit_dst(as, MIPSI_SUBU, dest, left, right); - } -} - -static void asm_mul(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - asm_fparith(as, ir, MIPSI_MUL_D); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - emit_dst(as, MIPSI_MUL, dest, left, right); - } -} - -#define asm_div(as, ir) asm_fparith(as, ir, MIPSI_DIV_D) -#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi) -#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi) - -static void asm_neg(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - asm_fpunary(as, ir, MIPSI_NEG_D); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = 
ra_hintalloc(as, ir->op1, dest, RSET_GPR); - emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); - } -} - -#define asm_abs(as, ir) asm_fpunary(as, ir, MIPSI_ABS_D) -#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) -#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) - -static void asm_arithov(ASMState *as, IRIns *ir) -{ - Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op2)) { - int k = IR(ir->op2)->i; - if (ir->o == IR_SUBOV) k = -k; - if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */ - left = ra_alloc1(as, ir->op1, RSET_GPR); - asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); - emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left); - emit_tsi(as, MIPSI_ADDIU, dest, left, k); - if (dest == left) emit_move(as, RID_TMP, left); - return; - } - } - left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left), - right), dest)); - asm_guard(as, MIPSI_BLTZ, RID_TMP, 0); - emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp); - if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */ - emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right); - } else { /* ((dest^left) & (dest^~right)) < 0 */ - emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest); - emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO); - } - emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left); - emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right); - if (dest == left || dest == right) - emit_move(as, RID_TMP, dest == left ? left : right); -} - -#define asm_addov(as, ir) asm_arithov(as, ir) -#define asm_subov(as, ir) asm_arithov(as, ir) - -static void asm_mulov(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left), - right), dest)); - asm_guard(as, MIPSI_BNE, RID_TMP, tmp); - emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31); - emit_dst(as, MIPSI_MFHI, tmp, 0, 0); - emit_dst(as, MIPSI_MFLO, dest, 0, 0); - emit_dst(as, MIPSI_MULT, 0, left, right); -} - -#if LJ_HASFFI -static void asm_add64(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (k == 0) { - emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP); - goto loarith; - } else if (checki16(k)) { - emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP); - emit_tsi(as, MIPSI_ADDIU, dest, left, k); - goto loarith; - } - } - emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP); - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_dst(as, MIPSI_ADDU, dest, left, right); -loarith: - ir--; - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc1(as, ir->op1, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (k == 0) { - if (dest != left) - emit_move(as, dest, left); - return; - } else if (checki16(k)) { - if (dest == left) { - Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left)); - emit_move(as, dest, tmp); - dest = tmp; - } - emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left); - emit_tsi(as, MIPSI_ADDIU, dest, left, k); - return; - } - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - if (dest == left && dest == right) { - Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); - 
emit_move(as, dest, tmp); - dest = tmp; - } - emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left); - emit_dst(as, MIPSI_ADDU, dest, left, right); -} - -static void asm_sub64(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP); - emit_dst(as, MIPSI_SUBU, dest, left, right); - ir--; - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - if (dest == left) { - Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); - emit_move(as, dest, tmp); - dest = tmp; - } - emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest); - emit_dst(as, MIPSI_SUBU, dest, left, right); -} - -static void asm_neg64(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP); - emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); - ir--; - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc1(as, ir->op1, RSET_GPR); - emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest); - emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); -} -#endif - -static void asm_bnot(ASMState *as, IRIns *ir) -{ - Reg left, right, dest = ra_dest(as, ir, RSET_GPR); - IRIns *irl = IR(ir->op1); - if (mayfuse(as, ir->op1) && irl->o == IR_BOR) { - left = ra_alloc2(as, irl, RSET_GPR); - right = (left >> 8); left &= 255; - } else { - left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - right = RID_ZERO; - } - emit_dst(as, MIPSI_NOR, dest, left, right); -} - -static void asm_bswap(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - if ((as->flags & JIT_F_MIPS32R2)) { - emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16); - emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left); - } else { - Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest)); - emit_dst(as, MIPSI_OR, dest, dest, tmp); - emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); - emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00); - emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8); - emit_dta(as, MIPSI_SRL, dest, left, 8); - emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00); - emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP); - emit_dta(as, MIPSI_SRL, tmp, left, 24); - emit_dta(as, MIPSI_SLL, RID_TMP, left, 24); - } -} - -static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (checku16(k)) { - emit_tsi(as, mik, dest, left, k); - return; - } - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_dst(as, mi, dest, left, right); -} - -#define asm_band(as, ir) asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI) -#define asm_bor(as, ir) asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI) -#define asm_bxor(as, ir) asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI) - -static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op2)) { /* Constant shifts. */ - uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); - emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift); - } else { - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. 
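On pre-R2 MIPS cores asm_bswap() has no WSBH/ROTR, so it assembles the byte swap from shifts, masks and ORs. The fallback sequence computes the usual expression:

  #include <stdint.h>
  #include <stdio.h>

  /* What the non-MIPS32R2 path in asm_bswap() computes with SLL/SRL/ANDI/OR
  ** (R2 parts can use WSBH plus a 16-bit rotate instead). */
  static uint32_t bswap32(uint32_t x)
  {
    return (x << 24) | ((x & 0xff00u) << 8) | ((x >> 8) & 0xff00u) | (x >> 24);
  }

  int main(void)
  {
    printf("%08x -> %08x\n", 0x12345678u, bswap32(0x12345678u));  /* 78563412 */
    return 0;
  }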
*/ - } -} - -#define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL) -#define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL) -#define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA) -#define asm_brol(as, ir) lua_assert(0) - -static void asm_bror(ASMState *as, IRIns *ir) -{ - if ((as->flags & JIT_F_MIPS32R2)) { - asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op2)) { /* Constant shifts. */ - uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); - Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - emit_rotr(as, dest, left, RID_TMP, shift); - } else { - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); - emit_dst(as, MIPSI_SRLV, dest, right, left); - emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left); - emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right); - } - } -} - -static void asm_min_max(ASMState *as, IRIns *ir, int ismax) -{ - if (irt_isnum(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - if (dest == left) { - emit_fg(as, MIPSI_MOVT_D, dest, right); - } else { - emit_fg(as, MIPSI_MOVF_D, dest, left); - if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right); - } - emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - if (dest == left) { - emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP); - } else { - emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP); - if (dest != right) emit_move(as, dest, right); - } - emit_dst(as, MIPSI_SLT, RID_TMP, - ismax ? left : right, ismax ? right : left); - } -} - -#define asm_min(as, ir) asm_min_max(as, ir, 0) -#define asm_max(as, ir) asm_min_max(as, ir, 1) - -/* -- Comparisons --------------------------------------------------------- */ - -static void asm_comp(ASMState *as, IRIns *ir) -{ - /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */ - IROp op = ir->o; - if (irt_isnum(ir->t)) { - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0); - emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right); - } else { - Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); - if (op == IR_ABC) op = IR_UGT; - if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) { - MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) : - ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ); - asm_guard(as, mi, left, 0); - } else { - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if ((op&2)) k++; - if (checki16(k)) { - asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); - emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI, - RID_TMP, left, k); - return; - } - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); - emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, - RID_TMP, (op&2) ? right : left, (op&2) ? left : right); - } - } -} - -static void asm_equal(ASMState *as, IRIns *ir) -{ - Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR); - right = (left >> 8); left &= 255; - if (irt_isnum(ir->t)) { - asm_guard(as, (ir->o & 1) ? 
MIPSI_BC1T : MIPSI_BC1F, 0, 0); - emit_fgh(as, MIPSI_C_EQ_D, 0, left, right); - } else { - asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right); - } -} - -#if LJ_HASFFI -/* 64 bit integer comparisons. */ -static void asm_comp64(ASMState *as, IRIns *ir) -{ - /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */ - IROp op = (ir-1)->o; - MCLabel l_end; - Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR); - righthi = (lefthi >> 8); lefthi &= 255; - leftlo = ra_alloc2(as, ir-1, - rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi)); - rightlo = (leftlo >> 8); leftlo &= 255; - asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); - l_end = emit_label(as); - if (lefthi != righthi) - emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP, - (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi); - emit_dst(as, MIPSI_SLTU, RID_TMP, - (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo); - if (lefthi != righthi) - emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end); -} - -static void asm_comp64eq(ASMState *as, IRIns *ir) -{ - Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO); - tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); - emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp); - emit_dst(as, MIPSI_XOR, tmp, left, right); - left = ra_alloc2(as, ir-1, RSET_GPR); - right = (left >> 8); left &= 255; - emit_dst(as, MIPSI_XOR, RID_TMP, left, right); -} -#endif - -/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ - -/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ -static void asm_hiop(ASMState *as, IRIns *ir) -{ -#if LJ_HASFFI - /* HIOP is marked as a store because it needs its own DCE logic. */ - int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ - if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; - if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ - as->curins--; /* Always skip the CONV. */ - if (usehi || uselo) - asm_conv64(as, ir); - return; - } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */ - as->curins--; /* Always skip the loword comparison. */ - asm_comp64(as, ir); - return; - } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ - as->curins--; /* Always skip the loword comparison. */ - asm_comp64eq(as, ir); - return; - } else if ((ir-1)->o == IR_XSTORE) { - as->curins--; /* Handle both stores here. */ - if ((ir-1)->r != RID_SINK) { - asm_xstore_(as, ir, LJ_LE ? 4 : 0); - asm_xstore_(as, ir-1, LJ_LE ? 0 : 4); - } - return; - } - if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ - switch ((ir-1)->o) { - case IR_ADD: as->curins--; asm_add64(as, ir); break; - case IR_SUB: as->curins--; asm_sub64(as, ir); break; - case IR_NEG: as->curins--; asm_neg64(as, ir); break; - case IR_CALLN: - case IR_CALLXS: - if (!uselo) - ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ - break; - case IR_CNEWI: - /* Nothing to do here. Handled by lo op itself. */ - break; - default: lua_assert(0); break; - } -#else - UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. 
*/ -#endif -} - -/* -- Profiling ----------------------------------------------------------- */ - -static void asm_prof(ASMState *as, IRIns *ir) -{ - UNUSED(ir); - asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); - emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE); - emit_lsglptr(as, MIPSI_LBU, RID_TMP, - (int32_t)offsetof(global_State, hookmask)); -} - -/* -- Stack handling ------------------------------------------------------ */ - -/* Check Lua stack size for overflow. Use exit handler as fallback. */ -static void asm_stack_check(ASMState *as, BCReg topslot, - IRIns *irp, RegSet allow, ExitNo exitno) -{ - /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */ - Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE; - ExitNo oldsnap = as->snapno; - rset_clear(allow, pbase); - tmp = allow ? rset_pickbot(allow) : - (pbase == RID_RETHI ? RID_RETLO : RID_RETHI); - as->snapno = exitno; - asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); - as->snapno = oldsnap; - if (allow == RSET_EMPTY) /* Restore temp. register. */ - emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0); - else - ra_modified(as, tmp); - emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot)); - emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase); - emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack)); - if (pbase == RID_TMP) - emit_getgl(as, RID_TMP, jit_base); - emit_getgl(as, tmp, cur_L); - if (allow == RSET_EMPTY) /* Spill temp. register. */ - emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0); -} - -/* Restore Lua stack from on-trace state. */ -static void asm_stack_restore(ASMState *as, SnapShot *snap) -{ - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; - MSize n, nent = snap->nent; - /* Store the value of all modified slots to the Lua stack. */ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - BCReg s = snap_slot(sn); - int32_t ofs = 8*((int32_t)s-1); - IRRef ref = snap_ref(sn); - IRIns *ir = IR(ref); - if ((sn & SNAP_NORESTORE)) - continue; - if (irt_isnum(ir->t)) { - Reg src = ra_alloc1(as, ref, RSET_FPR); - emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs); - } else { - Reg type; - RegSet allow = rset_exclude(RSET_GPR, RID_BASE); - lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); - if (!irt_ispri(ir->t)) { - Reg src = ra_alloc1(as, ref, allow); - rset_clear(allow, src); - emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0)); - } - if ((sn & (SNAP_CONT|SNAP_FRAME))) { - if (s == 0) continue; /* Do not overwrite link to previous frame. */ - type = ra_allock(as, (int32_t)(*flinks--), allow); - } else { - type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); - } - emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4)); - } - checkmclim(as); - } - lua_assert(map + nent == flinks); -} - -/* -- GC handling --------------------------------------------------------- */ - -/* Check GC threshold and do one or more GC steps. */ -static void asm_gc_check(ASMState *as) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; - IRRef args[2]; - MCLabel l_end; - Reg tmp; - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ - /* Assumes asm_snap_prep() already done. 
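asm_gc_check() emits the usual threshold test: skip the GC step entirely while gc.total is still below gc.threshold, and leave the trace when lj_gc_step_jit() reports that the GC is in its atomic or finalize phase. A C-level sketch of that control flow, with mocked types and a stand-in step function:

  #include <stdint.h>
  #include <stdio.h>

  typedef struct { uint32_t total, threshold; } MockGC;   /* mocked-up state */

  static int mock_gc_step_jit(MockGC *gc, uint32_t steps)
  {
    (void)steps;
    gc->threshold = gc->total + 1000;   /* pretend a step ran */
    return 0;                           /* nonzero would mean: exit the trace */
  }

  static void gc_check(MockGC *gc, uint32_t steps)
  {
    if (gc->total >= gc->threshold) {   /* emitted as SLTU + BNE around the call */
      if (mock_gc_step_jit(gc, steps))
        printf("exit trace\n");         /* asm_guard(BNE, RID_RET, RID_ZERO) */
    }
  }

  int main(void)
  {
    MockGC gc = { 2048, 1024 };
    gc_check(&gc, 1);
    printf("new threshold: %u\n", (unsigned)gc.threshold);
    return 0;
  }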
*/ - asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ASMREF_TMP2; /* MSize steps */ - asm_gencall(as, ci, args); - emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); - tmp = ra_releasetmp(as, ASMREF_TMP2); - emit_loadi(as, tmp, as->gcsteps); - /* Jump around GC step if GC total < GC threshold. */ - emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end); - emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp); - emit_getgl(as, tmp, gc.threshold); - emit_getgl(as, RID_TMP, gc.total); - as->gcsteps = 0; - checkmclim(as); -} - -/* -- Loop handling ------------------------------------------------------- */ - -/* Fixup the loop branch. */ -static void asm_loop_fixup(ASMState *as) -{ - MCode *p = as->mctop; - MCode *target = as->mcp; - p[-1] = MIPSI_NOP; - if (as->loopinv) { /* Inverted loop branch? */ - /* asm_guard already inverted the cond branch. Only patch the target. */ - p[-3] |= ((target-p+2) & 0x0000ffffu); - } else { - p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); - } -} - -/* -- Head of trace ------------------------------------------------------- */ - -/* Coalesce BASE register for a root trace. */ -static void asm_head_root_base(ASMState *as) -{ - IRIns *ir = IR(REF_BASE); - Reg r = ir->r; - if (as->loopinv) as->mctop--; - if (ra_hasreg(r)) { - ra_free(as, r); - if (rset_test(as->modset, r) || irt_ismarked(ir->t)) - ir->r = RID_INIT; /* No inheritance for modified BASE register. */ - if (r != RID_BASE) - emit_move(as, r, RID_BASE); - } -} - -/* Coalesce BASE register for a side trace. */ -static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) -{ - IRIns *ir = IR(REF_BASE); - Reg r = ir->r; - if (as->loopinv) as->mctop--; - if (ra_hasreg(r)) { - ra_free(as, r); - if (rset_test(as->modset, r) || irt_ismarked(ir->t)) - ir->r = RID_INIT; /* No inheritance for modified BASE register. */ - if (irp->r == r) { - rset_clear(allow, r); /* Mark same BASE register as coalesced. */ - } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { - rset_clear(allow, irp->r); - emit_move(as, r, irp->r); /* Move from coalesced parent reg. */ - } else { - emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ - } - } - return allow; -} - -/* -- Tail of trace ------------------------------------------------------- */ - -/* Fixup the tail code. */ -static void asm_tail_fixup(ASMState *as, TraceNo lnk) -{ - MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp; - int32_t spadj = as->T->spadjust; - MCode *p = as->mctop-1; - *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP; - p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); -} - -/* Prepare tail of code. */ -static void asm_tail_prep(ASMState *as) -{ - as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */ - as->invmcp = as->loopref ? as->mcp : NULL; -} - -/* -- Trace setup --------------------------------------------------------- */ - -/* Ensure there are enough stack slots for call arguments. */ -static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - IRRef args[CCI_NARGS_MAX*2]; - uint32_t i, nargs = CCI_XNARGS(ci); - int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; - asm_collectargs(as, ir, ci, args); - for (i = 0; i < nargs; i++) { - if (args[i] && irt_isfp(IR(args[i])->t) && - nfpr > 0 && !(ci->flags & CCI_VARARG)) { - nfpr--; - ngpr -= irt_isnum(IR(args[i])->t) ? 
2 : 1; - } else if (args[i] && irt_isnum(IR(args[i])->t)) { - nfpr = 0; - ngpr = ngpr & ~1; - if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1; - } else { - nfpr = 0; - if (ngpr > 0) ngpr--; else nslots++; - } - } - if (nslots > as->evenspill) /* Leave room for args in stack slots. */ - as->evenspill = nslots; - return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); -} - -static void asm_setup_target(ASMState *as) -{ - asm_sparejump_setup(as); - asm_exitstub_setup(as); -} - -/* -- Trace patching ------------------------------------------------------ */ - -/* Patch exit jumps of existing machine code to a new target. */ -void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) -{ - MCode *p = T->mcode; - MCode *pe = (MCode *)((char *)p + T->szmcode); - MCode *px = exitstub_trace_addr(T, exitno); - MCode *cstart = NULL, *cstop = NULL; - MCode *mcarea = lj_mcode_patch(J, p, 0); - MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno; - MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); - for (p++; p < pe; p++) { - if (*p == exitload) { /* Look for load of exit number. */ - if (((p[-1] ^ (px-p)) & 0xffffu) == 0) { /* Look for exitstub branch. */ - ptrdiff_t delta = target - p; - if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */ - patchbranch: - p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu); - *p = MIPSI_NOP; /* Replace the load of the exit number. */ - cstop = p; - if (!cstart) cstart = p-1; - } else { /* Branch out of range. Use spare jump slot in mcarea. */ - int i; - for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) { - if (mcarea[i] == tjump) { - delta = mcarea+i - p; - goto patchbranch; - } else if (mcarea[i] == MIPSI_NOP) { - mcarea[i] = tjump; - cstart = mcarea+i; - delta = mcarea+i - p; - goto patchbranch; - } - } - /* Ignore jump slot overflow. Child trace is simply not attached. */ - } - } else if (p+1 == pe) { - /* Patch NOP after code for inverted loop branch. Use of J is ok. */ - lua_assert(p[1] == MIPSI_NOP); - p[1] = tjump; - *p = MIPSI_NOP; /* Replace the load of the exit number. */ - cstop = p+2; - if (!cstart) cstart = p+1; - } - } - } - if (cstart) lj_mcode_sync(cstart, cstop); - lj_mcode_patch(J, mcarea, 1); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_ppc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_ppc.h deleted file mode 100644 index 7deeb66e51f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_ppc.h +++ /dev/null @@ -1,2015 +0,0 @@ -/* -** PPC IR assembler (SSA IR -> machine code). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* -- Register allocator extensions --------------------------------------- */ - -/* Allocate a register with a hint. */ -static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) -{ - Reg r = IR(ref)->r; - if (ra_noreg(r)) { - if (!ra_hashint(r) && !iscrossref(as, ref)) - ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ - r = ra_allocref(as, ref, allow); - } - ra_noweak(as, r); - return r; -} - -/* Allocate two source registers for three-operand instructions. 
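lj_asm_patchexit() above retargets exit branches in place when the new target is close enough for the 16-bit branch displacement, and falls back to one of the spare jump slots otherwise. Its range test can be tried on its own (like the original, it relies on an arithmetic right shift of negative deltas):

  #include <stddef.h>
  #include <stdio.h>

  /* The in-range test used when patching a MIPS branch: the 16-bit branch
  ** field holds a signed displacement counted in 32-bit instructions, so the
  ** delta must fit in [-32768, 32767]. */
  static int branch_in_range(ptrdiff_t delta)
  {
    return ((delta + 0x8000) >> 16) == 0;
  }

  int main(void)
  {
    printf("delta 100: %d\n", branch_in_range(100));        /* 1: patch branch  */
    printf("delta -40000: %d\n", branch_in_range(-40000));  /* 0: need a J slot */
    return 0;
  }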
*/ -static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) -{ - IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); - Reg left = irl->r, right = irr->r; - if (ra_hasreg(left)) { - ra_noweak(as, left); - if (ra_noreg(right)) - right = ra_allocref(as, ir->op2, rset_exclude(allow, left)); - else - ra_noweak(as, right); - } else if (ra_hasreg(right)) { - ra_noweak(as, right); - left = ra_allocref(as, ir->op1, rset_exclude(allow, right)); - } else if (ra_hashint(right)) { - right = ra_allocref(as, ir->op2, allow); - left = ra_alloc1(as, ir->op1, rset_exclude(allow, right)); - } else { - left = ra_allocref(as, ir->op1, allow); - right = ra_alloc1(as, ir->op2, rset_exclude(allow, left)); - } - return left | (right << 8); -} - -/* -- Guard handling ------------------------------------------------------ */ - -/* Setup exit stubs after the end of each trace. */ -static void asm_exitstub_setup(ASMState *as, ExitNo nexits) -{ - ExitNo i; - MCode *mxp = as->mctop; - if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim) - asm_mclimit(as); - /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */ - for (i = nexits-1; (int32_t)i >= 0; i--) - *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2); - *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */ - mxp--; - *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2); - *--mxp = PPCI_MFLR|PPCF_T(RID_TMP); - as->mctop = mxp; -} - -static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno) -{ - /* Keep this in-sync with exitstub_trace_addr(). */ - return as->mctop + exitno + 3; -} - -/* Emit conditional branch to exit for guard. */ -static void asm_guardcc(ASMState *as, PPCCC cc) -{ - MCode *target = asm_exitstub_addr(as, as->snapno); - MCode *p = as->mcp; - if (LJ_UNLIKELY(p == as->invmcp)) { - as->loopinv = 1; - *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2); - emit_condbranch(as, PPCI_BC, cc^4, p); - return; - } - emit_condbranch(as, PPCI_BC, cc, target); -} - -/* -- Operand fusion ------------------------------------------------------ */ - -/* Limit linear search to this distance. Avoids O(n^2) behavior. */ -#define CONFLICT_SEARCH_LIM 31 - -/* Check if there's no conflicting instruction between curins and ref. */ -static int noconflict(ASMState *as, IRRef ref, IROp conflict) -{ - IRIns *ir = as->ir; - IRRef i = as->curins; - if (i > ref + CONFLICT_SEARCH_LIM) - return 0; /* Give up, ref is too far away. */ - while (--i > ref) - if (ir[i].o == conflict) - return 0; /* Conflict found. */ - return 1; /* Ok, no conflict. */ -} - -/* Fuse the array base of colocated arrays. */ -static int32_t asm_fuseabase(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && - !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) - return (int32_t)sizeof(GCtab); - return 0; -} - -/* Indicates load/store indexed is ok. */ -#define AHUREF_LSX ((int32_t)0x80000000) - -/* Fuse array/hash/upvalue reference into register+offset operand. */ -static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow) -{ - IRIns *ir = IR(ref); - if (ra_noreg(ir->r)) { - if (ir->o == IR_AREF) { - if (mayfuse(as, ref)) { - if (irref_isk(ir->op2)) { - IRRef tab = IR(ir->op1)->op1; - int32_t ofs = asm_fuseabase(as, tab); - IRRef refa = ofs ? 
tab : ir->op1; - ofs += 8*IR(ir->op2)->i; - if (checki16(ofs)) { - *ofsp = ofs; - return ra_alloc1(as, refa, allow); - } - } - if (*ofsp == AHUREF_LSX) { - Reg base = ra_alloc1(as, ir->op1, allow); - Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); - return base | (idx << 8); - } - } - } else if (ir->o == IR_HREFK) { - if (mayfuse(as, ref)) { - int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); - if (checki16(ofs)) { - *ofsp = ofs; - return ra_alloc1(as, ir->op1, allow); - } - } - } else if (ir->o == IR_UREFC) { - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); - int32_t jgl = (intptr_t)J2G(as->J); - if ((uint32_t)(ofs-jgl) < 65536) { - *ofsp = ofs-jgl-32768; - return RID_JGL; - } else { - *ofsp = (int16_t)ofs; - return ra_allock(as, ofs-(int16_t)ofs, allow); - } - } - } - } - *ofsp = 0; - return ra_alloc1(as, ref, allow); -} - -/* Fuse XLOAD/XSTORE reference into load/store operand. */ -static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref, - RegSet allow, int32_t ofs) -{ - IRIns *ir = IR(ref); - Reg base; - if (ra_noreg(ir->r) && canfuse(as, ir)) { - if (ir->o == IR_ADD) { - int32_t ofs2; - if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) { - ofs = ofs2; - ref = ir->op1; - } else if (ofs == 0) { - Reg right, left = ra_alloc2(as, ir, allow); - right = (left >> 8); left &= 255; - emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right); - return; - } - } else if (ir->o == IR_STRREF) { - lua_assert(ofs == 0); - ofs = (int32_t)sizeof(GCstr); - if (irref_isk(ir->op2)) { - ofs += IR(ir->op2)->i; - ref = ir->op1; - } else if (irref_isk(ir->op1)) { - ofs += IR(ir->op1)->i; - ref = ir->op2; - } else { - /* NYI: Fuse ADD with constant. */ - Reg tmp, right, left = ra_alloc2(as, ir, allow); - right = (left >> 8); left &= 255; - tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right)); - emit_fai(as, pi, rt, tmp, ofs); - emit_tab(as, PPCI_ADD, tmp, left, right); - return; - } - if (!checki16(ofs)) { - Reg left = ra_alloc1(as, ref, allow); - Reg right = ra_allock(as, ofs, rset_exclude(allow, left)); - emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right); - return; - } - } - } - base = ra_alloc1(as, ref, allow); - emit_fai(as, pi, rt, base, ofs); -} - -/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */ -static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref, - RegSet allow) -{ - IRIns *ira = IR(ref); - Reg right, left; - if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) { - left = ra_alloc2(as, ira, allow); - right = (left >> 8); left &= 255; - } else { - right = ra_alloc1(as, ref, allow); - left = RID_R0; - } - emit_tab(as, pi, rt, left, right); -} - -/* Fuse to multiply-add/sub instruction. 
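/*
** Illustrative sketch (not from the original file): ra_alloc2() above packs
** two register ids into one value, which callers unpack all over this file
** with "right = (left >> 8); left &= 255;".  Assuming <stdint.h> types and
** hypothetical helper names:
*/
static uint32_t example_pack_regpair(uint8_t left, uint8_t right)
{
  return (uint32_t)left | ((uint32_t)right << 8);  /* left in bits 0-7, right in bits 8-15. */
}
static void example_unpack_regpair(uint32_t pair, uint8_t *left, uint8_t *right)
{
  *right = (uint8_t)(pair >> 8);   /* Upper byte holds the second register. */
  *left = (uint8_t)(pair & 255);   /* Lower byte holds the first register. */
}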
*/ -static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir) -{ - IRRef lref = ir->op1, rref = ir->op2; - IRIns *irm; - if (lref != rref && - ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) && - ra_noreg(irm->r)) || - (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) && - (rref = lref, pi = pir, ra_noreg(irm->r))))) { - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg add = ra_alloc1(as, rref, RSET_FPR); - Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add)); - right = (left >> 8); left &= 255; - emit_facb(as, pi, dest, left, right, add); - return 1; - } - return 0; -} - -/* -- Calls --------------------------------------------------------------- */ - -/* Generate a call to a C function. */ -static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) -{ - uint32_t n, nargs = CCI_XNARGS(ci); - int32_t ofs = 8; - Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR; - if ((void *)ci->func) - emit_call(as, (void *)ci->func); - for (n = 0; n < nargs; n++) { /* Setup args. */ - IRRef ref = args[n]; - if (ref) { - IRIns *ir = IR(ref); - if (irt_isfp(ir->t)) { - if (fpr <= REGARG_LASTFPR) { - lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ - ra_leftov(as, fpr, ref); - fpr++; - } else { - Reg r = ra_alloc1(as, ref, RSET_FPR); - if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; - emit_spstore(as, ir, r, ofs); - ofs += irt_isnum(ir->t) ? 8 : 4; - } - } else { - if (gpr <= REGARG_LASTGPR) { - lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ - ra_leftov(as, gpr, ref); - gpr++; - } else { - Reg r = ra_alloc1(as, ref, RSET_GPR); - emit_spstore(as, ir, r, ofs); - ofs += 4; - } - } - } else { - if (gpr <= REGARG_LASTGPR) - gpr++; - else - ofs += 4; - } - checkmclim(as); - } - if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */ - emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6); -} - -/* Setup result reg/sp for call. Evict scratch regs. */ -static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - RegSet drop = RSET_SCRATCH; - int hiop = ((ir+1)->o == IR_HIOP); - if ((ci->flags & CCI_NOFPRCLOBBER)) - drop &= ~RSET_FPR; - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - if (hiop && ra_hasreg((ir+1)->r)) - rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ - ra_evictset(as, drop); /* Evictions must be performed first. */ - if (ra_used(ir)) { - lua_assert(!irt_ispri(ir->t)); - if (irt_isfp(ir->t)) { - if ((ci->flags & CCI_CASTU64)) { - /* Use spill slot or temp slots. */ - int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP; - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - emit_fai(as, PPCI_LFD, dest, RID_SP, ofs); - } - emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs); - emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4); - } else { - ra_destreg(as, ir, RID_FPRET); - } -#if LJ_32 - } else if (hiop) { - ra_destpair(as, ir); -#endif - } else { - ra_destreg(as, ir, RID_RET); - } - } -} - -static void asm_callx(ASMState *as, IRIns *ir) -{ - IRRef args[CCI_NARGS_MAX*2]; - CCallInfo ci; - IRRef func; - IRIns *irf; - ci.flags = asm_callx_flags(as, ir); - asm_collectargs(as, ir, &ci, args); - asm_setupresult(as, ir, &ci); - func = ir->op2; irf = IR(func); - if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } - if (irref_isk(func)) { /* Call to constant address. */ - ci.func = (ASMFunction)(void *)(intptr_t)(irf->i); - } else { /* Need a non-argument register for indirect calls. 
*/ - RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1); - Reg freg = ra_alloc1(as, func, allow); - *--as->mcp = PPCI_BCTRL; - *--as->mcp = PPCI_MTCTR | PPCF_T(freg); - ci.func = (ASMFunction)(void *)0; - } - asm_gencall(as, &ci, args); -} - -/* -- Returns ------------------------------------------------------------- */ - -/* Return to lower frame. Guard that it goes to the right spot. */ -static void asm_retf(ASMState *as, IRIns *ir) -{ - Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); - void *pc = ir_kptr(IR(ir->op2)); - int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); - as->topslot -= (BCReg)delta; - if ((int32_t)as->topslot < 0) as->topslot = 0; - irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ - emit_setgl(as, base, jit_base); - emit_addptr(as, base, -8*delta); - asm_guardcc(as, CC_NE); - emit_ab(as, PPCI_CMPW, RID_TMP, - ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); - emit_tai(as, PPCI_LWZ, RID_TMP, base, -8); -} - -/* -- Type conversions ---------------------------------------------------- */ - -static void asm_tointg(ASMState *as, IRIns *ir, Reg left) -{ - RegSet allow = RSET_FPR; - Reg tmp = ra_scratch(as, rset_clear(allow, left)); - Reg fbias = ra_scratch(as, rset_clear(allow, tmp)); - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest)); - asm_guardcc(as, CC_NE); - emit_fab(as, PPCI_FCMPU, 0, tmp, left); - emit_fab(as, PPCI_FSUB, tmp, tmp, fbias); - emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP); - emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO); - emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); - emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000); - emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); - emit_lsptr(as, PPCI_LFS, (fbias & 31), - (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)), - RSET_GPR); - emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); - emit_fb(as, PPCI_FCTIWZ, tmp, left); -} - -static void asm_tobit(ASMState *as, IRIns *ir) -{ - RegSet allow = RSET_FPR; - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, allow); - Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); - Reg tmp = ra_scratch(as, rset_clear(allow, right)); - emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); - emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); - emit_fab(as, PPCI_FADD, tmp, left, right); -} - -static void asm_conv(ASMState *as, IRIns *ir) -{ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); - int stfp = (st == IRT_NUM || st == IRT_FLOAT); - IRRef lref = ir->op1; - lua_assert(irt_type(ir->t) != st); - lua_assert(!(irt_isint64(ir->t) || - (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ - if (irt_isfp(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_FPR); - if (stfp) { /* FP to FP conversion. */ - if (st == IRT_NUM) /* double -> float conversion. */ - emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR)); - else /* float -> double conversion is a no-op on PPC. */ - ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ - } else { /* Integer to FP conversion. */ - /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */ - /* IRT_U32: Bias with 2^52, subtract 2^52. 
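/*
** Illustrative sketch (not from the original file) of the biasing trick the
** comment above describes, assuming IEEE-754 doubles and the availability of
** <stdint.h>/<string.h>: the bit pattern 0x43300000:xxxxxxxx is the double
** 2^52 + x for an unsigned 32-bit x, so flipping the sign bit of a signed i
** yields 2^52 + 2^31 + i, and subtracting 2^52 + 2^31 recovers i.
*/
static double example_int2num(int32_t i)
{
  uint64_t bits = ((uint64_t)0x43300000u << 32) | (uint32_t)(i ^ 0x80000000u);
  double d;
  memcpy(&d, &bits, sizeof(d));                    /* Reinterpret the bits as a double. */
  return d - (4503599627370496.0 + 2147483648.0);  /* Subtract 2^52 + 2^31. */
}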
*/ - RegSet allow = RSET_GPR; - Reg left = ra_alloc1(as, lref, allow); - Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left)); - Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); - const float *kbias; - if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest); - emit_fab(as, PPCI_FSUB, dest, dest, fbias); - emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP); - kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000)); - if (st == IRT_U32) kbias++; - emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias, - rset_clear(allow, hibias)); - emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP, - RID_SP, SPOFS_TMPLO); - emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); - if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000); - } - } else if (stfp) { /* FP to integer conversion. */ - if (irt_isguard(ir->t)) { - /* Checked conversions are only supported from number to int. */ - lua_assert(irt_isint(ir->t) && st == IRT_NUM); - asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, lref, RSET_FPR); - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); - if (irt_isu32(ir->t)) { - /* Convert both x and x-2^31 to int and merge results. */ - Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest)); - emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */ - emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP); - emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP); - emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */ - emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */ - emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */ - emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); - emit_tai(as, PPCI_LWZ, dest, - RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */ - emit_fb(as, PPCI_FCTIWZ, tmp, left); - emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); - emit_fb(as, PPCI_FCTIWZ, tmp, tmp); - emit_fab(as, PPCI_FSUB, tmp, left, tmp); - emit_lsptr(as, PPCI_LFS, (tmp & 31), - (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)), - RSET_GPR); - } else { - emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); - emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); - emit_fb(as, PPCI_FCTIWZ, tmp, left); - } - } - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); - if ((ir->op2 & IRCONV_SEXT)) - emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left); - else - emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31); - } else { /* 32/64 bit integer conversions. */ - /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */ - ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ - } - } -} - -static void asm_strto(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; - IRRef args[2]; - int32_t ofs; - RegSet drop = RSET_SCRATCH; - if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ - ra_evictset(as, drop); - asm_guardcc(as, CC_EQ); - emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */ - args[0] = ir->op1; /* GCstr *str */ - args[1] = ASMREF_TMP1; /* TValue *n */ - asm_gencall(as, ci, args); - /* Store the result to the spill slot or temp slots. */ - ofs = ir->s ? 
sps_scale(ir->s) : SPOFS_TMP; - emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs); -} - -/* -- Memory references --------------------------------------------------- */ - -/* Get pointer to TValue. */ -static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) -{ - IRIns *ir = IR(ref); - if (irt_isnum(ir->t)) { - if (irref_isk(ref)) /* Use the number constant itself as a TValue. */ - ra_allockreg(as, i32ptr(ir_knum(ir)), dest); - else /* Otherwise force a spill and use the spill slot. */ - emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir)); - } else { - /* Otherwise use g->tmptv to hold the TValue. */ - RegSet allow = rset_exclude(RSET_GPR, dest); - Reg type; - emit_tai(as, PPCI_ADDI, dest, RID_JGL, (int32_t)offsetof(global_State, tmptv)-32768); - if (!irt_ispri(ir->t)) { - Reg src = ra_alloc1(as, ref, allow); - emit_setgl(as, src, tmptv.gcr); - } - type = ra_allock(as, irt_toitype(ir->t), allow); - emit_setgl(as, type, tmptv.it); - } -} - -static void asm_aref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg idx, base; - if (irref_isk(ir->op2)) { - IRRef tab = IR(ir->op1)->op1; - int32_t ofs = asm_fuseabase(as, tab); - IRRef refa = ofs ? tab : ir->op1; - ofs += 8*IR(ir->op2)->i; - if (checki16(ofs)) { - base = ra_alloc1(as, refa, RSET_GPR); - emit_tai(as, PPCI_ADDI, dest, base, ofs); - return; - } - } - base = ra_alloc1(as, ir->op1, RSET_GPR); - idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); - emit_tab(as, PPCI_ADD, dest, RID_TMP, base); - emit_slwi(as, RID_TMP, idx, 3); -} - -/* Inlined hash lookup. Specialized for key type and for const keys. -** The equivalent C code is: -** Node *n = hashkey(t, key); -** do { -** if (lj_obj_equal(&n->key, key)) return &n->val; -** } while ((n = nextnode(n))); -** return niltv(L); -*/ -static void asm_href(ASMState *as, IRIns *ir, IROp merge) -{ - RegSet allow = RSET_GPR; - int destused = ra_used(ir); - Reg dest = ra_dest(as, ir, allow); - Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); - Reg key = RID_NONE, tmp1 = RID_TMP, tmp2; - Reg tisnum = RID_NONE, tmpnum = RID_NONE; - IRRef refkey = ir->op2; - IRIns *irkey = IR(refkey); - IRType1 kt = irkey->t; - uint32_t khash; - MCLabel l_end, l_loop, l_next; - - rset_clear(allow, tab); - if (irt_isnum(kt)) { - key = ra_alloc1(as, refkey, RSET_FPR); - tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); - tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow); - rset_clear(allow, tisnum); - } else if (!irt_ispri(kt)) { - key = ra_alloc1(as, refkey, allow); - rset_clear(allow, key); - } - tmp2 = ra_scratch(as, allow); - rset_clear(allow, tmp2); - - /* Key not found in chain: jump to exit (if merged) or load niltv. */ - l_end = emit_label(as); - as->invmcp = NULL; - if (merge == IR_NE) - asm_guardcc(as, CC_EQ); - else if (destused) - emit_loada(as, dest, niltvg(J2G(as->J))); - - /* Follow hash chain until the end. */ - l_loop = --as->mcp; - emit_ai(as, PPCI_CMPWI, dest, 0); - emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next)); - l_next = emit_label(as); - - /* Type and value comparison. 
*/ - if (merge == IR_EQ) - asm_guardcc(as, CC_EQ); - else - emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); - if (irt_isnum(kt)) { - emit_fab(as, PPCI_FCMPU, 0, tmpnum, key); - emit_condbranch(as, PPCI_BC, CC_GE, l_next); - emit_ab(as, PPCI_CMPLW, tmp1, tisnum); - emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n)); - } else { - if (!irt_ispri(kt)) { - emit_ab(as, PPCI_CMPW, tmp2, key); - emit_condbranch(as, PPCI_BC, CC_NE, l_next); - } - emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t)); - if (!irt_ispri(kt)) - emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr)); - } - emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it)); - *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) | - (((char *)as->mcp-(char *)l_loop) & 0xffffu); - - /* Load main position relative to tab->node into dest. */ - khash = irref_isk(refkey) ? ir_khash(irkey) : 1; - if (khash == 0) { - emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); - } else { - Reg tmphash = tmp1; - if (irref_isk(refkey)) - tmphash = ra_allock(as, khash, allow); - emit_tab(as, PPCI_ADD, dest, dest, tmp1); - emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node)); - emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash); - emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); - emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); - if (irref_isk(refkey)) { - /* Nothing to do. */ - } else if (irt_isstr(kt)) { - emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash)); - } else { /* Must match with hash*() in lj_tab.c. */ - emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1); - emit_rotlwi(as, tmp2, tmp2, HASH_ROT3); - emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2); - emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31); - emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2); - if (irt_isnum(kt)) { - int32_t ofs = ra_spill(as, irkey); - emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1); - emit_rotlwi(as, dest, tmp1, HASH_ROT1); - emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1); - emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4); - emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs); - } else { - emit_asb(as, PPCI_XOR, tmp2, key, tmp1); - emit_rotlwi(as, dest, tmp1, HASH_ROT1); - emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS); - emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16); - } - } - } -} - -static void asm_hrefk(ASMState *as, IRIns *ir) -{ - IRIns *kslot = IR(ir->op2); - IRIns *irkey = IR(kslot->op1); - int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); - int32_t kofs = ofs + (int32_t)offsetof(Node, key); - Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; - Reg node = ra_alloc1(as, ir->op1, RSET_GPR); - Reg key = RID_NONE, type = RID_TMP, idx = node; - RegSet allow = rset_exclude(RSET_GPR, node); - lua_assert(ofs % sizeof(Node) == 0); - if (ofs > 32736) { - idx = dest; - rset_clear(allow, dest); - kofs = (int32_t)offsetof(Node, key); - } else if (ra_hasreg(dest)) { - emit_tai(as, PPCI_ADDI, dest, node, ofs); - } - asm_guardcc(as, CC_NE); - if (!irt_ispri(irkey->t)) { - key = ra_scratch(as, allow); - rset_clear(allow, key); - } - rset_clear(allow, type); - if (irt_isnum(irkey->t)) { - emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo); - asm_guardcc(as, CC_NE); - emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi); - } else { - if (ra_hasreg(key)) { - emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. 
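/*
** Illustrative sketch (not from the original file) of the main-position
** computation asm_href() above emits with AND/MULLI/ADD, assuming the usual
** hash-table layout where hmask is a power of two minus one:
*/
static Node *example_mainposition(Node *node, uint32_t hmask, uint32_t hash)
{
  return node + (hash & hmask);  /* The chain walk then follows n->next until NULL. */
}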
*/ - asm_guardcc(as, CC_NE); - } - emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t)); - } - if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4); - emit_tai(as, PPCI_LWZ, type, idx, kofs); - if (ofs > 32736) { - emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16); - emit_tai(as, PPCI_ADDI, dest, node, ofs); - } -} - -static void asm_uref(ASMState *as, IRIns *ir) -{ - /* NYI: Check that UREFO is still open and not aliasing a slot. */ - Reg dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; - emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR); - } else { - Reg uv = ra_scratch(as, RSET_GPR); - Reg func = ra_alloc1(as, ir->op1, RSET_GPR); - if (ir->o == IR_UREFC) { - asm_guardcc(as, CC_NE); - emit_ai(as, PPCI_CMPWI, RID_TMP, 1); - emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv)); - emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); - } else { - emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v)); - } - emit_tai(as, PPCI_LWZ, uv, func, - (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); - } -} - -static void asm_fref(ASMState *as, IRIns *ir) -{ - UNUSED(as); UNUSED(ir); - lua_assert(!ra_used(ir)); -} - -static void asm_strref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - IRRef ref = ir->op2, refk = ir->op1; - int32_t ofs = (int32_t)sizeof(GCstr); - Reg r; - if (irref_isk(ref)) { - IRRef tmp = refk; refk = ref; ref = tmp; - } else if (!irref_isk(refk)) { - Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); - IRIns *irr = IR(ir->op2); - if (ra_hasreg(irr->r)) { - ra_noweak(as, irr->r); - right = irr->r; - } else if (mayfuse(as, irr->op2) && - irr->o == IR_ADD && irref_isk(irr->op2) && - checki16(ofs + IR(irr->op2)->i)) { - ofs += IR(irr->op2)->i; - right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); - } else { - right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); - } - emit_tai(as, PPCI_ADDI, dest, dest, ofs); - emit_tab(as, PPCI_ADD, dest, left, right); - return; - } - r = ra_alloc1(as, ref, RSET_GPR); - ofs += IR(refk)->i; - if (checki16(ofs)) - emit_tai(as, PPCI_ADDI, dest, r, ofs); - else - emit_tab(as, PPCI_ADD, dest, r, - ra_allock(as, ofs, rset_exclude(RSET_GPR, r))); -} - -/* -- Loads and stores ---------------------------------------------------- */ - -static PPCIns asm_fxloadins(IRIns *ir) -{ - switch (irt_type(ir->t)) { - case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */ - case IRT_U8: return PPCI_LBZ; - case IRT_I16: return PPCI_LHA; - case IRT_U16: return PPCI_LHZ; - case IRT_NUM: return PPCI_LFD; - case IRT_FLOAT: return PPCI_LFS; - default: return PPCI_LWZ; - } -} - -static PPCIns asm_fxstoreins(IRIns *ir) -{ - switch (irt_type(ir->t)) { - case IRT_I8: case IRT_U8: return PPCI_STB; - case IRT_I16: case IRT_U16: return PPCI_STH; - case IRT_NUM: return PPCI_STFD; - case IRT_FLOAT: return PPCI_STFS; - default: return PPCI_STW; - } -} - -static void asm_fload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); - PPCIns pi = asm_fxloadins(ir); - int32_t ofs; - if (ir->op2 == IRFL_TAB_ARRAY) { - ofs = asm_fuseabase(as, ir->op1); - if (ofs) { /* Turn the t->array load into an add for colocated arrays. 
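/*
** Illustrative sketch (not from the original file) of the colocated-array
** layout the ADDI below relies on: for a small on-trace TNEW the array part
** is allocated directly behind the GCtab, so t->array can be computed
** instead of loaded.
*/
static TValue *example_colocated_array(GCtab *t)
{
  return (TValue *)((char *)t + sizeof(GCtab));  /* Array storage follows the table header. */
}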
*/ - emit_tai(as, PPCI_ADDI, dest, idx, ofs); - return; - } - } - ofs = field_ofs[ir->op2]; - lua_assert(!irt_isi8(ir->t)); - emit_tai(as, pi, dest, idx, ofs); -} - -static void asm_fstore(ASMState *as, IRIns *ir) -{ - if (ir->r != RID_SINK) { - Reg src = ra_alloc1(as, ir->op2, RSET_GPR); - IRIns *irf = IR(ir->op1); - Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); - int32_t ofs = field_ofs[irf->op2]; - PPCIns pi = asm_fxstoreins(ir); - emit_tai(as, pi, src, idx, ofs); - } -} - -static void asm_xload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); - lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); - if (irt_isi8(ir->t)) - emit_as(as, PPCI_EXTSB, dest, dest); - asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); -} - -static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) -{ - IRIns *irb; - if (ir->r == RID_SINK) - return; - if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP && - ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) { - /* Fuse BSWAP with XSTORE to stwbrx. */ - Reg src = ra_alloc1(as, irb->op1, RSET_GPR); - asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src)); - } else { - Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); - asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, - rset_exclude(RSET_GPR, src), ofs); - } -} - -#define asm_xstore(as, ir) asm_xstore_(as, ir, 0) - -static void asm_ahuvload(ASMState *as, IRIns *ir) -{ - IRType1 t = ir->t; - Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx; - RegSet allow = RSET_GPR; - int32_t ofs = AHUREF_LSX; - if (ra_used(ir)) { - lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); - if (!irt_isnum(t)) ofs = 0; - dest = ra_dest(as, ir, irt_isnum(t) ? 
RSET_FPR : RSET_GPR); - rset_clear(allow, dest); - } - idx = asm_fuseahuref(as, ir->op1, &ofs, allow); - if (irt_isnum(t)) { - Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx)); - asm_guardcc(as, CC_GE); - emit_ab(as, PPCI_CMPLW, type, tisnum); - if (ra_hasreg(dest)) { - if (ofs == AHUREF_LSX) { - tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, - (idx&255)), (idx>>8))); - emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp); - } else { - emit_fai(as, PPCI_LFD, dest, idx, ofs); - } - } - } else { - asm_guardcc(as, CC_NE); - emit_ai(as, PPCI_CMPWI, type, irt_toitype(t)); - if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4); - } - if (ofs == AHUREF_LSX) { - emit_tab(as, PPCI_LWZX, type, (idx&255), tmp); - emit_slwi(as, tmp, (idx>>8), 3); - } else { - emit_tai(as, PPCI_LWZ, type, idx, ofs); - } -} - -static void asm_ahustore(ASMState *as, IRIns *ir) -{ - RegSet allow = RSET_GPR; - Reg idx, src = RID_NONE, type = RID_NONE; - int32_t ofs = AHUREF_LSX; - if (ir->r == RID_SINK) - return; - if (irt_isnum(ir->t)) { - src = ra_alloc1(as, ir->op2, RSET_FPR); - } else { - if (!irt_ispri(ir->t)) { - src = ra_alloc1(as, ir->op2, allow); - rset_clear(allow, src); - ofs = 0; - } - type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); - rset_clear(allow, type); - } - idx = asm_fuseahuref(as, ir->op1, &ofs, allow); - if (irt_isnum(ir->t)) { - if (ofs == AHUREF_LSX) { - emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP); - emit_slwi(as, RID_TMP, (idx>>8), 3); - } else { - emit_fai(as, PPCI_STFD, src, idx, ofs); - } - } else { - if (ra_hasreg(src)) - emit_tai(as, PPCI_STW, src, idx, ofs+4); - if (ofs == AHUREF_LSX) { - emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP); - emit_slwi(as, RID_TMP, (idx>>8), 3); - } else { - emit_tai(as, PPCI_STW, type, idx, ofs); - } - } -} - -static void asm_sload(ASMState *as, IRIns *ir) -{ - int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4); - IRType1 t = ir->t; - Reg dest = RID_NONE, type = RID_NONE, base; - RegSet allow = RSET_GPR; - lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ - lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); - lua_assert(LJ_DUALNUM || - !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); - if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { - dest = ra_scratch(as, RSET_FPR); - asm_tointg(as, ir, dest); - t.irt = IRT_NUM; /* Continue with a regular number type check. */ - } else if (ra_used(ir)) { - lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); - dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); - rset_clear(allow, dest); - base = ra_alloc1(as, REF_BASE, allow); - rset_clear(allow, base); - if ((ir->op2 & IRSLOAD_CONVERT)) { - if (irt_isint(t)) { - emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); - dest = ra_scratch(as, RSET_FPR); - emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP); - emit_fb(as, PPCI_FCTIWZ, dest, dest); - t.irt = IRT_NUM; /* Check for original type. 
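/*
** Illustrative sketch (not from the original file) of the tagged-value check
** guarded above, assuming the 32-bit slot layout with the type word first and
** the payload word at +4: numbers keep their IEEE-754 high word in the type
** position, so an unsigned compare against LJ_TISNUM detects them.
*/
static int example_slot_holds_number(const uint32_t *slot)
{
  return slot[0] < (uint32_t)LJ_TISNUM;  /* Mirrors the emitted CMPLW plus the guard on CC_GE. */
}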
*/ - } else { - Reg tmp = ra_scratch(as, allow); - Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp)); - Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); - emit_fab(as, PPCI_FSUB, dest, dest, fbias); - emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP); - emit_lsptr(as, PPCI_LFS, (fbias & 31), - (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)), - rset_clear(allow, hibias)); - emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO); - emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); - emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000); - dest = tmp; - t.irt = IRT_INT; /* Check for original type. */ - } - } - goto dotypecheck; - } - base = ra_alloc1(as, REF_BASE, allow); - rset_clear(allow, base); -dotypecheck: - if (irt_isnum(t)) { - if ((ir->op2 & IRSLOAD_TYPECHECK)) { - Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow); - asm_guardcc(as, CC_GE); - emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum); - type = RID_TMP; - } - if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4); - } else { - if ((ir->op2 & IRSLOAD_TYPECHECK)) { - asm_guardcc(as, CC_NE); - emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t)); - type = RID_TMP; - } - if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs); - } - if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4); -} - -/* -- Allocations --------------------------------------------------------- */ - -#if LJ_HASFFI -static void asm_cnew(ASMState *as, IRIns *ir) -{ - CTState *cts = ctype_ctsG(J2G(as->J)); - CTypeID id = (CTypeID)IR(ir->op1)->i; - CTSize sz; - CTInfo info = lj_ctype_info(cts, id, &sz); - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; - IRRef args[4]; - RegSet drop = RSET_SCRATCH; - lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); - - as->gcsteps++; - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - ra_evictset(as, drop); - if (ra_used(ir)) - ra_destreg(as, ir, RID_RET); /* GCcdata * */ - - /* Initialize immutable cdata object. */ - if (ir->o == IR_CNEWI) { - RegSet allow = (RSET_GPR & ~RSET_SCRATCH); - int32_t ofs = sizeof(GCcdata); - lua_assert(sz == 4 || sz == 8); - if (sz == 8) { - ofs += 4; - lua_assert((ir+1)->o == IR_HIOP); - } - for (;;) { - Reg r = ra_alloc1(as, ir->op2, allow); - emit_tai(as, PPCI_STW, r, RID_RET, ofs); - rset_clear(allow, r); - if (ofs == sizeof(GCcdata)) break; - ofs -= 4; ir++; - } - } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ - ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* CTypeID id */ - args[2] = ir->op2; /* CTSize sz */ - args[3] = ASMREF_TMP1; /* CTSize align */ - asm_gencall(as, ci, args); - emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); - return; - } - - /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ - emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); - emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); - emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA); - emit_ti(as, PPCI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. 
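/*
** Illustrative sketch (not from the original file) of the cdata layout the
** stores above initialize: the boxed payload sits directly after the GCcdata
** header, at offset sizeof(GCcdata) (plus 4 for the second word when sz == 8).
*/
static void *example_cdata_payload(GCcdata *cd)
{
  return (void *)((char *)cd + sizeof(GCcdata));  /* First payload word of the cdata object. */
}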
*/ - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ASMREF_TMP1; /* MSize size */ - asm_gencall(as, ci, args); - ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), - ra_releasetmp(as, ASMREF_TMP1)); -} -#else -#define asm_cnew(as, ir) ((void)0) -#endif - -/* -- Write barriers ------------------------------------------------------ */ - -static void asm_tbar(ASMState *as, IRIns *ir) -{ - Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); - Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab)); - Reg link = RID_TMP; - MCLabel l_end = emit_label(as); - emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist)); - emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked)); - emit_setgl(as, tab, gc.grayagain); - lua_assert(LJ_GC_BLACK == 0x04); - emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */ - emit_getgl(as, link, gc.grayagain); - emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); - emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK); - emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked)); -} - -static void asm_obar(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; - IRRef args[2]; - MCLabel l_end; - Reg obj, val, tmp; - /* No need for other object barriers (yet). */ - lua_assert(IR(ir->op1)->o == IR_UREFC); - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ir->op1; /* TValue *tv */ - asm_gencall(as, ci, args); - emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); - obj = IR(ir->op1)->r; - tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); - emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); - emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK); - emit_condbranch(as, PPCI_BC, CC_EQ, l_end); - emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES); - val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); - emit_tai(as, PPCI_LBZ, tmp, obj, - (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); - emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked)); -} - -/* -- Arithmetic and logic operations ------------------------------------- */ - -static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi) -{ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - if (pi == PPCI_FMUL) - emit_fac(as, pi, dest, left, right); - else - emit_fab(as, pi, dest, left, right); -} - -static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi) -{ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); - emit_fb(as, pi, dest, left); -} - -static void asm_fpmath(ASMState *as, IRIns *ir) -{ - if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) - return; - if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT)) - asm_fpunary(as, ir, PPCI_FSQRT); - else - asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); -} - -static void asm_add(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD)) - asm_fparith(as, ir, PPCI_FADD); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - PPCIns pi; - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (checki16(k)) { - pi = PPCI_ADDI; - /* May fail due to spills/restores above, but simplifies the logic. 
*/ - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi = PPCI_ADDICDOT; - } - emit_tai(as, pi, dest, left, k); - return; - } else if ((k & 0xffff) == 0) { - emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16)); - return; - } else if (!as->sectref) { - emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16); - emit_tai(as, PPCI_ADDI, dest, left, k); - return; - } - } - pi = PPCI_ADD; - /* May fail due to spills/restores above, but simplifies the logic. */ - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi |= PPCF_DOT; - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_tab(as, pi, dest, left, right); - } -} - -static void asm_sub(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB)) - asm_fparith(as, ir, PPCI_FSUB); - } else { - PPCIns pi = PPCI_SUBF; - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left, right; - if (irref_isk(ir->op1)) { - int32_t k = IR(ir->op1)->i; - if (checki16(k)) { - right = ra_alloc1(as, ir->op2, RSET_GPR); - emit_tai(as, PPCI_SUBFIC, dest, right, k); - return; - } - } - /* May fail due to spills/restores above, but simplifies the logic. */ - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi |= PPCF_DOT; - } - left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */ - } -} - -static void asm_mul(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - asm_fparith(as, ir, PPCI_FMUL); - } else { - PPCIns pi = PPCI_MULLW; - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (checki16(k)) { - emit_tai(as, PPCI_MULLI, dest, left, k); - return; - } - } - /* May fail due to spills/restores above, but simplifies the logic. 
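/*
** Illustrative sketch (not from the original file) of the ADDIS/ADDI constant
** split emitted in asm_add() above: ADDI adds the sign-extended low 16 bits,
** so the high half is rounded by 0x8000 to compensate.
*/
static void example_split_imm(int32_t k, int16_t *hi, int16_t *lo)
{
  *lo = (int16_t)k;                                /* Low half, sign-extended by ADDI. */
  *hi = (int16_t)(((uint32_t)k + 0x8000u) >> 16);  /* Same rounding as (k + 32768) >> 16. */
  /* Modulo 2^32: ((int32_t)*hi << 16) + (int32_t)*lo reconstructs k. */
}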
*/ - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi |= PPCF_DOT; - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_tab(as, pi, dest, left, right); - } -} - -#define asm_div(as, ir) asm_fparith(as, ir, PPCI_FDIV) -#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi) -#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi) - -static void asm_neg(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) { - asm_fpunary(as, ir, PPCI_FNEG); - } else { - Reg dest, left; - PPCIns pi = PPCI_NEG; - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi |= PPCF_DOT; - } - dest = ra_dest(as, ir, RSET_GPR); - left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - emit_tab(as, pi, dest, left, 0); - } -} - -#define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS) -#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) -#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) - -static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi) -{ - Reg dest, left, right; - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - } - asm_guardcc(as, CC_SO); - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; } - emit_tab(as, pi|PPCF_DOT, dest, left, right); -} - -#define asm_addov(as, ir) asm_arithov(as, ir, PPCI_ADDO) -#define asm_subov(as, ir) asm_arithov(as, ir, PPCI_SUBFO) -#define asm_mulov(as, ir) asm_arithov(as, ir, PPCI_MULLWO) - -#if LJ_HASFFI -static void asm_add64(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); - PPCIns pi = PPCI_ADDE; - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (k == 0) - pi = PPCI_ADDZE; - else if (k == -1) - pi = PPCI_ADDME; - else - goto needright; - right = 0; - } else { - needright: - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - } - emit_tab(as, pi, dest, left, right); - ir--; - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc1(as, ir->op1, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (checki16(k)) { - emit_tai(as, PPCI_ADDIC, dest, left, k); - return; - } - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_tab(as, PPCI_ADDC, dest, left, right); -} - -static void asm_sub64(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR); - PPCIns pi = PPCI_SUBFE; - if (irref_isk(ir->op1)) { - int32_t k = IR(ir->op1)->i; - if (k == 0) - pi = PPCI_SUBFZE; - else if (k == -1) - pi = PPCI_SUBFME; - else - goto needleft; - left = 0; - } else { - needleft: - left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right)); - } - emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. 
*/ -ir--; - dest = ra_dest(as, ir, RSET_GPR); - right = ra_alloc1(as, ir->op2, RSET_GPR); - if (irref_isk(ir->op1)) { - int32_t k = IR(ir->op1)->i; - if (checki16(k)) { - emit_tai(as, PPCI_SUBFIC, dest, right, k); - return; - } - } - left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right)); - emit_tab(as, PPCI_SUBFC, dest, right, left); -} - -static void asm_neg64(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - emit_tab(as, PPCI_SUBFZE, dest, left, 0); - ir--; - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc1(as, ir->op1, RSET_GPR); - emit_tai(as, PPCI_SUBFIC, dest, left, 0); -} -#endif - -static void asm_bnot(ASMState *as, IRIns *ir) -{ - Reg dest, left, right; - PPCIns pi = PPCI_NOR; - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi |= PPCF_DOT; - } - dest = ra_dest(as, ir, RSET_GPR); - if (mayfuse(as, ir->op1)) { - IRIns *irl = IR(ir->op1); - if (irl->o == IR_BAND) - pi ^= (PPCI_NOR ^ PPCI_NAND); - else if (irl->o == IR_BXOR) - pi ^= (PPCI_NOR ^ PPCI_EQV); - else if (irl->o != IR_BOR) - goto nofuse; - left = ra_hintalloc(as, irl->op1, dest, RSET_GPR); - right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left)); - } else { -nofuse: - left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - } - emit_asb(as, pi, dest, left, right); -} - -static void asm_bswap(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - IRIns *irx; - if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD && - ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) { - /* Fuse BSWAP with XLOAD to lwbrx. */ - asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR); - } else { - Reg left = ra_alloc1(as, ir->op1, RSET_GPR); - Reg tmp = dest; - if (tmp == left) { - tmp = RID_TMP; - emit_mr(as, dest, RID_TMP); - } - emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23); - emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7); - emit_rotlwi(as, tmp, left, 8); - } -} - -/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */ -static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref) -{ - IRIns *ir; - Reg left; - if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) && - irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) { - int32_t sh = (IR(ir->op2)->i & 31); - switch (ir->o) { - case IR_BSHL: - if ((mask & ((1u<<sh)-1))) goto nofuse; - break; - case IR_BSHR: case IR_BSAR: - if ((mask & ~((~0u)>>sh))) goto nofuse; - sh = ((32-sh)&31); - break; - case IR_BROL: - break; - default: - goto nofuse; - } - left = ra_alloc1(as, ir->op1, RSET_GPR); - *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh); - return; - } -nofuse: - left = ra_alloc1(as, ref, RSET_GPR); - *--as->mcp = pi | PPCF_T(left); -} - -static void asm_band(ASMState *as, IRIns *ir) -{ - Reg dest, left, right; - IRRef lref = ir->op1; - PPCIns dot = 0; - IRRef op2; - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - dot = PPCF_DOT; - } - dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (k) { - /* First check for a contiguous bitmask as used by rlwinm. 
*/ - uint32_t s1 = lj_ffs((uint32_t)k); - uint32_t k1 = ((uint32_t)k >> s1); - if ((k1 & (k1+1)) == 0) { - asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) | - PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1), - k, lref); - return; - } - if (~(uint32_t)k) { - uint32_t s2 = lj_ffs(~(uint32_t)k); - uint32_t k2 = (~(uint32_t)k >> s2); - if ((k2 & (k2+1)) == 0) { - asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) | - PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)), - k, lref); - return; - } - } - } - if (checku16(k)) { - left = ra_alloc1(as, lref, RSET_GPR); - emit_asi(as, PPCI_ANDIDOT, dest, left, k); - return; - } else if ((k & 0xffff) == 0) { - left = ra_alloc1(as, lref, RSET_GPR); - emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16)); - return; - } - } - op2 = ir->op2; - if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) { - dot ^= (PPCI_AND ^ PPCI_ANDC); - op2 = IR(op2)->op1; - } - left = ra_hintalloc(as, lref, dest, RSET_GPR); - right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left)); - emit_asb(as, PPCI_AND ^ dot, dest, left, right); -} - -static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); - if (irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - Reg tmp = left; - if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) { - if (!checku16(k)) { - emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16)); - if ((k & 0xffff) == 0) return; - } - emit_asi(as, pik, dest, left, k); - return; - } - } - /* May fail due to spills/restores above, but simplifies the logic. */ - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - pi |= PPCF_DOT; - } - right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_asb(as, pi, dest, left, right); -} - -#define asm_bor(as, ir) asm_bitop(as, ir, PPCI_OR, PPCI_ORI) -#define asm_bxor(as, ir) asm_bitop(as, ir, PPCI_XOR, PPCI_XORI) - -static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik) -{ - Reg dest, left; - Reg dot = 0; - if (as->flagmcp == as->mcp) { - as->flagmcp = NULL; - as->mcp++; - dot = PPCF_DOT; - } - dest = ra_dest(as, ir, RSET_GPR); - left = ra_alloc1(as, ir->op1, RSET_GPR); - if (irref_isk(ir->op2)) { /* Constant shifts. */ - int32_t shift = (IR(ir->op2)->i & 31); - if (pik == 0) /* SLWI */ - emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift); - else if (pik == 1) /* SRWI */ - emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31); - else - emit_asb(as, pik|dot, dest, left, shift); - } else { - Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); - emit_asb(as, pi|dot, dest, left, right); - } -} - -#define asm_bshl(as, ir) asm_bitshift(as, ir, PPCI_SLW, 0) -#define asm_bshr(as, ir) asm_bitshift(as, ir, PPCI_SRW, 1) -#define asm_bsar(as, ir) asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI) -#define asm_brol(as, ir) \ - asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \ - PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)) -#define asm_bror(as, ir) lua_assert(0) - -static void asm_min_max(ASMState *as, IRIns *ir, int ismax) -{ - if (irt_isnum(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg tmp = dest; - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - if (tmp == left || tmp == right) - tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR, - dest), left), right)); - emit_facb(as, PPCI_FSEL, dest, tmp, - ismax ? left : right, ismax ? 
right : left); - emit_fab(as, PPCI_FSUB, tmp, left, right); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg tmp1 = RID_TMP, tmp2 = dest; - Reg right, left = ra_alloc2(as, ir, RSET_GPR); - right = (left >> 8); left &= 255; - if (tmp2 == left || tmp2 == right) - tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, - dest), left), right)); - emit_tab(as, PPCI_ADD, dest, tmp2, right); - emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1); - emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1); - emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1); - emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000); - emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000); - } -} - -#define asm_min(as, ir) asm_min_max(as, ir, 0) -#define asm_max(as, ir) asm_min_max(as, ir, 1) - -/* -- Comparisons --------------------------------------------------------- */ - -#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */ -#define CC_TWO 0x80 /* Check two flags for FP comparison. */ - -/* Map of comparisons to flags. ORDER IR. */ -static const uint8_t asm_compmap[IR_ABC+1] = { - /* op int cc FP cc */ - /* LT */ CC_GE + (CC_GE<<4), - /* GE */ CC_LT + (CC_LE<<4) + CC_TWO, - /* LE */ CC_GT + (CC_GE<<4) + CC_TWO, - /* GT */ CC_LE + (CC_LE<<4), - /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO, - /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4), - /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4), - /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO, - /* EQ */ CC_NE + (CC_NE<<4), - /* NE */ CC_EQ + (CC_EQ<<4), - /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */ -}; - -static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc) -{ - Reg right, left = ra_alloc1(as, lref, RSET_GPR); - if (irref_isk(rref)) { - int32_t k = IR(rref)->i; - if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */ - if (checki16(k)) { - emit_tai(as, PPCI_CMPWI, cr, left, k); - /* Signed comparison with zero and referencing previous ins? */ - if (k == 0 && lref == as->curins-1) - as->flagmcp = as->mcp; /* Allow elimination of the compare. */ - return; - } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */ - if (checku16(k)) { - emit_tai(as, PPCI_CMPLWI, cr, left, k); - return; - } else if (!as->sectref && ra_noreg(IR(rref)->r)) { - emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k); - emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16)); - return; - } - } - } else { /* Unsigned comparison with constant. */ - if (checku16(k)) { - emit_tai(as, PPCI_CMPLWI, cr, left, k); - return; - } - } - } - right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left)); - emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right); -} - -static void asm_comp(ASMState *as, IRIns *ir) -{ - PPCCC cc = asm_compmap[ir->o]; - if (irt_isnum(ir->t)) { - Reg right, left = ra_alloc2(as, ir, RSET_FPR); - right = (left >> 8); left &= 255; - asm_guardcc(as, (cc >> 4)); - if ((cc & CC_TWO)) - emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3)); - emit_fab(as, PPCI_FCMPU, 0, left, right); - } else { - IRRef lref = ir->op1, rref = ir->op2; - if (irref_isk(lref) && !irref_isk(rref)) { - /* Swap constants to the right (only for ABC). */ - IRRef tmp = lref; lref = rref; rref = tmp; - if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */ - } - asm_guardcc(as, cc); - asm_intcomp_(as, lref, rref, 0, cc); - } -} - -#define asm_equal(as, ir) asm_comp(as, ir) - -#if LJ_HASFFI -/* 64 bit integer comparisons. 
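/*
** Illustrative sketch (not from the original file) of the branchless FSEL
** min/max emitted by asm_min_max() above: fsel(c, a, b) yields a when
** c >= 0.0 and b otherwise (NaN handling ignored here; helper names are
** hypothetical).
*/
static double example_fsel(double c, double a, double b) { return c >= 0.0 ? a : b; }
static double example_max(double x, double y) { return example_fsel(x - y, x, y); }
static double example_min(double x, double y) { return example_fsel(x - y, y, x); }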
*/ -static void asm_comp64(ASMState *as, IRIns *ir) -{ - PPCCC cc = asm_compmap[(ir-1)->o]; - if ((cc&3) == (CC_EQ&3)) { - asm_guardcc(as, cc); - emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR, - (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3)); - } else { - asm_guardcc(as, CC_EQ); - emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1)); - emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC, - (CC_EQ&3), (CC_EQ&3), 4+(cc&3)); - } - /* Loword comparison sets cr1 and is unsigned, except for equality. */ - asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4, - cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED)); - /* Hiword comparison sets cr0. */ - asm_intcomp_(as, ir->op1, ir->op2, 0, cc); - as->flagmcp = NULL; /* Doesn't work here. */ -} -#endif - -/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ - -/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ -static void asm_hiop(ASMState *as, IRIns *ir) -{ -#if LJ_HASFFI - /* HIOP is marked as a store because it needs its own DCE logic. */ - int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ - if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; - if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ - as->curins--; /* Always skip the CONV. */ - if (usehi || uselo) - asm_conv64(as, ir); - return; - } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ - as->curins--; /* Always skip the loword comparison. */ - asm_comp64(as, ir); - return; - } else if ((ir-1)->o == IR_XSTORE) { - as->curins--; /* Handle both stores here. */ - if ((ir-1)->r != RID_SINK) { - asm_xstore_(as, ir, 0); - asm_xstore_(as, ir-1, 4); - } - return; - } - if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ - switch ((ir-1)->o) { - case IR_ADD: as->curins--; asm_add64(as, ir); break; - case IR_SUB: as->curins--; asm_sub64(as, ir); break; - case IR_NEG: as->curins--; asm_neg64(as, ir); break; - case IR_CALLN: - case IR_CALLXS: - if (!uselo) - ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ - break; - case IR_CNEWI: - /* Nothing to do here. Handled by lo op itself. */ - break; - default: lua_assert(0); break; - } -#else - UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */ -#endif -} - -/* -- Profiling ----------------------------------------------------------- */ - -static void asm_prof(ASMState *as, IRIns *ir) -{ - UNUSED(ir); - asm_guardcc(as, CC_NE); - emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, HOOK_PROFILE); - emit_lsglptr(as, PPCI_LBZ, RID_TMP, - (int32_t)offsetof(global_State, hookmask)); -} - -/* -- Stack handling ------------------------------------------------------ */ - -/* Check Lua stack size for overflow. Use exit handler as fallback. */ -static void asm_stack_check(ASMState *as, BCReg topslot, - IRIns *irp, RegSet allow, ExitNo exitno) -{ - /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */ - Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE; - rset_clear(allow, pbase); - tmp = allow ? rset_pickbot(allow) : - (pbase == RID_RETHI ? RID_RETLO : RID_RETHI); - emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno)); - if (allow == RSET_EMPTY) /* Restore temp. register. 
*/ - emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW); - else - ra_modified(as, tmp); - emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot)); - emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp); - emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack)); - if (pbase == RID_TMP) - emit_getgl(as, RID_TMP, jit_base); - emit_getgl(as, tmp, cur_L); - if (allow == RSET_EMPTY) /* Spill temp. register. */ - emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW); -} - -/* Restore Lua stack from on-trace state. */ -static void asm_stack_restore(ASMState *as, SnapShot *snap) -{ - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; - MSize n, nent = snap->nent; - /* Store the value of all modified slots to the Lua stack. */ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - BCReg s = snap_slot(sn); - int32_t ofs = 8*((int32_t)s-1); - IRRef ref = snap_ref(sn); - IRIns *ir = IR(ref); - if ((sn & SNAP_NORESTORE)) - continue; - if (irt_isnum(ir->t)) { - Reg src = ra_alloc1(as, ref, RSET_FPR); - emit_fai(as, PPCI_STFD, src, RID_BASE, ofs); - } else { - Reg type; - RegSet allow = rset_exclude(RSET_GPR, RID_BASE); - lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); - if (!irt_ispri(ir->t)) { - Reg src = ra_alloc1(as, ref, allow); - rset_clear(allow, src); - emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4); - } - if ((sn & (SNAP_CONT|SNAP_FRAME))) { - if (s == 0) continue; /* Do not overwrite link to previous frame. */ - type = ra_allock(as, (int32_t)(*flinks--), allow); - } else { - type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); - } - emit_tai(as, PPCI_STW, type, RID_BASE, ofs); - } - checkmclim(as); - } - lua_assert(map + nent == flinks); -} - -/* -- GC handling --------------------------------------------------------- */ - -/* Check GC threshold and do one or more GC steps. */ -static void asm_gc_check(ASMState *as) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; - IRRef args[2]; - MCLabel l_end; - Reg tmp; - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ - asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ - emit_ai(as, PPCI_CMPWI, RID_RET, 0); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ASMREF_TMP2; /* MSize steps */ - asm_gencall(as, ci, args); - emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); - tmp = ra_releasetmp(as, ASMREF_TMP2); - emit_loadi(as, tmp, as->gcsteps); - /* Jump around GC step if GC total < GC threshold. */ - emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end); - emit_ab(as, PPCI_CMPLW, RID_TMP, tmp); - emit_getgl(as, tmp, gc.threshold); - emit_getgl(as, RID_TMP, gc.total); - as->gcsteps = 0; - checkmclim(as); -} - -/* -- Loop handling ------------------------------------------------------- */ - -/* Fixup the loop branch. */ -static void asm_loop_fixup(ASMState *as) -{ - MCode *p = as->mctop; - MCode *target = as->mcp; - if (as->loopinv) { /* Inverted loop branch? */ - /* asm_guardcc already inverted the cond branch and patched the final b. */ - p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2); - } else { - p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2); - } -} - -/* -- Head of trace ------------------------------------------------------- */ - -/* Coalesce BASE register for a root trace. 
*/ -static void asm_head_root_base(ASMState *as) -{ - IRIns *ir = IR(REF_BASE); - Reg r = ir->r; - if (ra_hasreg(r)) { - ra_free(as, r); - if (rset_test(as->modset, r) || irt_ismarked(ir->t)) - ir->r = RID_INIT; /* No inheritance for modified BASE register. */ - if (r != RID_BASE) - emit_mr(as, r, RID_BASE); - } -} - -/* Coalesce BASE register for a side trace. */ -static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) -{ - IRIns *ir = IR(REF_BASE); - Reg r = ir->r; - if (ra_hasreg(r)) { - ra_free(as, r); - if (rset_test(as->modset, r) || irt_ismarked(ir->t)) - ir->r = RID_INIT; /* No inheritance for modified BASE register. */ - if (irp->r == r) { - rset_clear(allow, r); /* Mark same BASE register as coalesced. */ - } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { - rset_clear(allow, irp->r); - emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */ - } else { - emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ - } - } - return allow; -} - -/* -- Tail of trace ------------------------------------------------------- */ - -/* Fixup the tail code. */ -static void asm_tail_fixup(ASMState *as, TraceNo lnk) -{ - MCode *p = as->mctop; - MCode *target; - int32_t spadj = as->T->spadjust; - if (spadj == 0) { - *--p = PPCI_NOP; - *--p = PPCI_NOP; - as->mctop = p; - } else { - /* Patch stack adjustment. */ - lua_assert(checki16(CFRAME_SIZE+spadj)); - p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj); - p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj; - } - /* Patch exit branch. */ - target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; - p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2); -} - -/* Prepare tail of code. */ -static void asm_tail_prep(ASMState *as) -{ - MCode *p = as->mctop - 1; /* Leave room for exit branch. */ - if (as->loopref) { - as->invmcp = as->mcp = p; - } else { - as->mcp = p-2; /* Leave room for stack pointer adjustment. */ - as->invmcp = NULL; - } -} - -/* -- Trace setup --------------------------------------------------------- */ - -/* Ensure there are enough stack slots for call arguments. */ -static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - IRRef args[CCI_NARGS_MAX*2]; - uint32_t i, nargs = CCI_XNARGS(ci); - int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; - asm_collectargs(as, ir, ci, args); - for (i = 0; i < nargs; i++) - if (args[i] && irt_isfp(IR(args[i])->t)) { - if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1; - } else { - if (ngpr > 0) ngpr--; else nslots++; - } - if (nslots > as->evenspill) /* Leave room for args in stack slots. */ - as->evenspill = nslots; - return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); -} - -static void asm_setup_target(ASMState *as) -{ - asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0)); -} - -/* -- Trace patching ------------------------------------------------------ */ - -/* Patch exit jumps of existing machine code to a new target. */ -void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) -{ - MCode *p = T->mcode; - MCode *pe = (MCode *)((char *)p + T->szmcode); - MCode *px = exitstub_trace_addr(T, exitno); - MCode *cstart = NULL; - MCode *mcarea = lj_mcode_patch(J, p, 0); - int clearso = 0; - for (; p < pe; p++) { - /* Look for exitstub branch, try to replace with branch to target. 
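/*
** Illustrative sketch (not from the original file) of the displacement range
** checks used by the patching code below: a conditional bc carries a signed
** 16-bit byte offset, an unconditional b a signed 26-bit one.
*/
static int example_in_bc_range(ptrdiff_t delta)
{
  return ((delta + 0x8000) >> 16) == 0;       /* Fits the 16-bit bc displacement. */
}
static int example_in_b_range(ptrdiff_t delta)
{
  return ((delta + 0x02000000) >> 26) == 0;   /* Fits the 26-bit b displacement. */
}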
*/ - uint32_t ins = *p; - if ((ins & 0xfc000000u) == 0x40000000u && - ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) { - ptrdiff_t delta = (char *)target - (char *)p; - if (((ins >> 16) & 3) == (CC_SO&3)) { - clearso = sizeof(MCode); - delta -= sizeof(MCode); - } - /* Many, but not all short-range branches can be patched directly. */ - if (((delta + 0x8000) >> 16) == 0) { - *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) | - ((delta & 0x8000) * (PPCF_Y/0x8000)); - if (!cstart) cstart = p; - } - } else if ((ins & 0xfc000000u) == PPCI_B && - ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) { - ptrdiff_t delta = (char *)target - (char *)p; - lua_assert(((delta + 0x02000000) >> 26) == 0); - *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu); - if (!cstart) cstart = p; - } - } - { /* Always patch long-range branch in exit stub itself. */ - ptrdiff_t delta = (char *)target - (char *)px - clearso; - lua_assert(((delta + 0x02000000) >> 26) == 0); - *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu); - } - if (!cstart) cstart = px; - lj_mcode_sync(cstart, px+1); - if (clearso) { /* Extend the current trace. Ugly workaround. */ - MCode *pp = J->cur.mcode; - J->cur.szmcode += sizeof(MCode); - *--pp = PPCI_MCRXR; /* Clear SO flag. */ - J->cur.mcode = pp; - lj_mcode_sync(pp, pp+1); - } - lj_mcode_patch(J, mcarea, 1); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_x86.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_x86.h deleted file mode 100644 index 941d09192c2..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_asm_x86.h +++ /dev/null @@ -1,2634 +0,0 @@ -/* -** x86/x64 IR assembler (SSA IR -> machine code). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* -- Guard handling ------------------------------------------------------ */ - -/* Generate an exit stub group at the bottom of the reserved MCode memory. */ -static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) -{ - ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff; - MCode *mxp = as->mcbot; - MCode *mxpstart = mxp; - if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop) - asm_mclimit(as); - /* Push low byte of exitno for each exit stub. */ - *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs; - for (i = 1; i < EXITSTUBS_PER_GROUP; i++) { - *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2); - *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i); - } - /* Push the high byte of the exitno for each exit stub group. */ - *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8); - /* Store DISPATCH at original stack slot 0. Account for the two push ops. */ - *mxp++ = XI_MOVmi; - *mxp++ = MODRM(XM_OFS8, 0, RID_ESP); - *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP); - *mxp++ = 2*sizeof(void *); - *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4; - /* Jump to exit handler which fills in the ExitState. */ - *mxp++ = XI_JMP; mxp += 4; - *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler); - /* Commit the code for this group (even if assembly fails later on). */ - lj_mcode_commitbot(as->J, mxp); - as->mcbot = mxp; - as->mclim = as->mcbot + MCLIM_REDZONE; - return mxpstart; -} - -/* Setup all needed exit stubs. 
*/ -static void asm_exitstub_setup(ASMState *as, ExitNo nexits) -{ - ExitNo i; - if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) - lj_trace_err(as->J, LJ_TRERR_SNAPOV); - for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) - if (as->J->exitstubgroup[i] == NULL) - as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); -} - -/* Emit conditional branch to exit for guard. -** It's important to emit this *after* all registers have been allocated, -** because rematerializations may invalidate the flags. -*/ -static void asm_guardcc(ASMState *as, int cc) -{ - MCode *target = exitstub_addr(as->J, as->snapno); - MCode *p = as->mcp; - if (LJ_UNLIKELY(p == as->invmcp)) { - as->loopinv = 1; - *(int32_t *)(p+1) = jmprel(p+5, target); - target = p; - cc ^= 1; - if (as->realign) { - emit_sjcc(as, cc, target); - return; - } - } - emit_jcc(as, cc, target); -} - -/* -- Memory operand fusion ----------------------------------------------- */ - -/* Limit linear search to this distance. Avoids O(n^2) behavior. */ -#define CONFLICT_SEARCH_LIM 31 - -/* Check if a reference is a signed 32 bit constant. */ -static int asm_isk32(ASMState *as, IRRef ref, int32_t *k) -{ - if (irref_isk(ref)) { - IRIns *ir = IR(ref); - if (ir->o != IR_KINT64) { - *k = ir->i; - return 1; - } else if (checki32((int64_t)ir_kint64(ir)->u64)) { - *k = (int32_t)ir_kint64(ir)->u64; - return 1; - } - } - return 0; -} - -/* Check if there's no conflicting instruction between curins and ref. -** Also avoid fusing loads if there are multiple references. -*/ -static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload) -{ - IRIns *ir = as->ir; - IRRef i = as->curins; - if (i > ref + CONFLICT_SEARCH_LIM) - return 0; /* Give up, ref is too far away. */ - while (--i > ref) { - if (ir[i].o == conflict) - return 0; /* Conflict found. */ - else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref)) - return 0; - } - return 1; /* Ok, no conflict. */ -} - -/* Fuse array base into memory operand. */ -static IRRef asm_fuseabase(ASMState *as, IRRef ref) -{ - IRIns *irb = IR(ref); - as->mrm.ofs = 0; - if (irb->o == IR_FLOAD) { - IRIns *ira = IR(irb->op1); - lua_assert(irb->op2 == IRFL_TAB_ARRAY); - /* We can avoid the FLOAD of t->array for colocated arrays. */ - if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && - !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) { - as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */ - return irb->op1; /* Table obj. */ - } - } else if (irb->o == IR_ADD && irref_isk(irb->op2)) { - /* Fuse base offset (vararg load). */ - as->mrm.ofs = IR(irb->op2)->i; - return irb->op1; - } - return ref; /* Otherwise use the given array base. */ -} - -/* Fuse array reference into memory operand. */ -static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) -{ - IRIns *irx; - lua_assert(ir->o == IR_AREF); - as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); - irx = IR(ir->op2); - if (irref_isk(ir->op2)) { - as->mrm.ofs += 8*irx->i; - as->mrm.idx = RID_NONE; - } else { - rset_clear(allow, as->mrm.base); - as->mrm.scale = XM_SCALE8; - /* Fuse a constant ADD (e.g. t[i+1]) into the offset. - ** Doesn't help much without ABCelim, but reduces register pressure. - */ - if (!LJ_64 && /* Has bad effects with negative index on x64. 
*/ - mayfuse(as, ir->op2) && ra_noreg(irx->r) && - irx->o == IR_ADD && irref_isk(irx->op2)) { - as->mrm.ofs += 8*IR(irx->op2)->i; - as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow); - } else { - as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow); - } - } -} - -/* Fuse array/hash/upvalue reference into memory operand. -** Caveat: this may allocate GPRs for the base/idx registers. Be sure to -** pass the final allow mask, excluding any GPRs used for other inputs. -** In particular: 2-operand GPR instructions need to call ra_dest() first! -*/ -static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow) -{ - IRIns *ir = IR(ref); - if (ra_noreg(ir->r)) { - switch ((IROp)ir->o) { - case IR_AREF: - if (mayfuse(as, ref)) { - asm_fusearef(as, ir, allow); - return; - } - break; - case IR_HREFK: - if (mayfuse(as, ref)) { - as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); - as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); - as->mrm.idx = RID_NONE; - return; - } - break; - case IR_UREFC: - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv; - as->mrm.ofs = ptr2addr(&uv->tv); - as->mrm.base = as->mrm.idx = RID_NONE; - return; - } - break; - default: - lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO || - ir->o == IR_KKPTR); - break; - } - } - as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); - as->mrm.ofs = 0; - as->mrm.idx = RID_NONE; -} - -/* Fuse FLOAD/FREF reference into memory operand. */ -static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) -{ - lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF); - as->mrm.ofs = field_ofs[ir->op2]; - as->mrm.idx = RID_NONE; - if (irref_isk(ir->op1)) { - as->mrm.ofs += IR(ir->op1)->i; - as->mrm.base = RID_NONE; - } else { - as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); - } -} - -/* Fuse string reference into memory operand. */ -static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) -{ - IRIns *irr; - lua_assert(ir->o == IR_STRREF); - as->mrm.base = as->mrm.idx = RID_NONE; - as->mrm.scale = XM_SCALE1; - as->mrm.ofs = sizeof(GCstr); - if (irref_isk(ir->op1)) { - as->mrm.ofs += IR(ir->op1)->i; - } else { - Reg r = ra_alloc1(as, ir->op1, allow); - rset_clear(allow, r); - as->mrm.base = (uint8_t)r; - } - irr = IR(ir->op2); - if (irref_isk(ir->op2)) { - as->mrm.ofs += irr->i; - } else { - Reg r; - /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */ - if (!LJ_64 && /* Has bad effects with negative index on x64. */ - mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) { - as->mrm.ofs += IR(irr->op2)->i; - r = ra_alloc1(as, irr->op1, allow); - } else { - r = ra_alloc1(as, ir->op2, allow); - } - if (as->mrm.base == RID_NONE) - as->mrm.base = (uint8_t)r; - else - as->mrm.idx = (uint8_t)r; - } -} - -static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow) -{ - IRIns *ir = IR(ref); - as->mrm.idx = RID_NONE; - if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { - as->mrm.ofs = ir->i; - as->mrm.base = RID_NONE; - } else if (ir->o == IR_STRREF) { - asm_fusestrref(as, ir, allow); - } else { - as->mrm.ofs = 0; - if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) { - /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */ - IRIns *irx; - IRRef idx; - Reg r; - if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. 
*/ - ref = ir->op1; - ir = IR(ref); - if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r))) - goto noadd; - } - as->mrm.scale = XM_SCALE1; - idx = ir->op1; - ref = ir->op2; - irx = IR(idx); - if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */ - idx = ir->op2; - ref = ir->op1; - irx = IR(idx); - } - if (canfuse(as, irx) && ra_noreg(irx->r)) { - if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) { - /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */ - idx = irx->op1; - as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6); - } else if (irx->o == IR_ADD && irx->op1 == irx->op2) { - /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */ - idx = irx->op1; - as->mrm.scale = XM_SCALE2; - } - } - r = ra_alloc1(as, idx, allow); - rset_clear(allow, r); - as->mrm.idx = (uint8_t)r; - } - noadd: - as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); - } -} - -/* Fuse load into memory operand. */ -static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow) -{ - IRIns *ir = IR(ref); - if (ra_hasreg(ir->r)) { - if (allow != RSET_EMPTY) { /* Fast path. */ - ra_noweak(as, ir->r); - return ir->r; - } - fusespill: - /* Force a spill if only memory operands are allowed (asm_x87load). */ - as->mrm.base = RID_ESP; - as->mrm.ofs = ra_spill(as, ir); - as->mrm.idx = RID_NONE; - return RID_MRM; - } - if (ir->o == IR_KNUM) { - RegSet avail = as->freeset & ~as->modset & RSET_FPR; - lua_assert(allow != RSET_EMPTY); - if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ - as->mrm.ofs = ptr2addr(ir_knum(ir)); - as->mrm.base = as->mrm.idx = RID_NONE; - return RID_MRM; - } - } else if (ir->o == IR_KINT64) { - RegSet avail = as->freeset & ~as->modset & RSET_GPR; - lua_assert(allow != RSET_EMPTY); - if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ - as->mrm.ofs = ptr2addr(ir_kint64(ir)); - as->mrm.base = as->mrm.idx = RID_NONE; - return RID_MRM; - } - } else if (mayfuse(as, ref)) { - RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR; - if (ir->o == IR_SLOAD) { - if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) && - noconflict(as, ref, IR_RETF, 0)) { - as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow); - as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0); - as->mrm.idx = RID_NONE; - return RID_MRM; - } - } else if (ir->o == IR_FLOAD) { - /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */ - if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) && - noconflict(as, ref, IR_FSTORE, 0)) { - asm_fusefref(as, ir, xallow); - return RID_MRM; - } - } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) { - if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) { - asm_fuseahuref(as, ir->op1, xallow); - return RID_MRM; - } - } else if (ir->o == IR_XLOAD) { - /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp). - ** Fusing unaligned memory operands is ok on x86 (except for SIMD types). - */ - if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) && - noconflict(as, ref, IR_XSTORE, 0)) { - asm_fusexref(as, ir->op1, xallow); - return RID_MRM; - } - } else if (ir->o == IR_VLOAD) { - asm_fuseahuref(as, ir->op1, xallow); - return RID_MRM; - } - } - if (!(as->freeset & allow) && !irref_isk(ref) && - (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref))) - goto fusespill; - return ra_allocref(as, ref, allow); -} - -#if LJ_64 -/* Don't fuse a 32 bit load into a 64 bit operation.
*/ -static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64) -{ - if (is64 && !irt_is64(IR(ref)->t)) - return ra_alloc1(as, ref, allow); - return asm_fuseload(as, ref, allow); -} -#else -#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow)) -#endif - -/* -- Calls --------------------------------------------------------------- */ - -/* Count the required number of stack slots for a call. */ -static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args) -{ - uint32_t i, nargs = CCI_XNARGS(ci); - int nslots = 0; -#if LJ_64 - if (LJ_ABI_WIN) { - nslots = (int)(nargs*2); /* Only matters for more than four args. */ - } else { - int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; - for (i = 0; i < nargs; i++) - if (args[i] && irt_isfp(IR(args[i])->t)) { - if (nfpr > 0) nfpr--; else nslots += 2; - } else { - if (ngpr > 0) ngpr--; else nslots += 2; - } - } -#else - int ngpr = 0; - if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL) - ngpr = 2; - else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL) - ngpr = 1; - for (i = 0; i < nargs; i++) - if (args[i] && irt_isfp(IR(args[i])->t)) { - nslots += irt_isnum(IR(args[i])->t) ? 2 : 1; - } else { - if (ngpr > 0) ngpr--; else nslots++; - } -#endif - return nslots; -} - -/* Generate a call to a C function. */ -static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) -{ - uint32_t n, nargs = CCI_XNARGS(ci); - int32_t ofs = STACKARG_OFS; -#if LJ_64 - uint32_t gprs = REGARG_GPRS; - Reg fpr = REGARG_FIRSTFPR; -#if !LJ_ABI_WIN - MCode *patchnfpr = NULL; -#endif -#else - uint32_t gprs = 0; - if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) { - if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL) - gprs = (REGARG_GPRS & 31); - else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL) - gprs = REGARG_GPRS; - } -#endif - if ((void *)ci->func) - emit_call(as, ci->func); -#if LJ_64 - if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */ -#if LJ_ABI_WIN - for (n = 0; n < 4 && n < nargs; n++) { - IRIns *ir = IR(args[n]); - if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */ - emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n), - ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */ - } -#else - patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */ - *--as->mcp = XI_MOVrib | RID_EAX; -#endif - } -#endif - for (n = 0; n < nargs; n++) { /* Setup args. */ - IRRef ref = args[n]; - IRIns *ir = IR(ref); - Reg r; -#if LJ_64 && LJ_ABI_WIN - /* Windows/x64 argument registers are strictly positional. */ - r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31); - fpr++; gprs >>= 5; -#elif LJ_64 - /* POSIX/x64 argument registers are used in order of appearance. */ - if (irt_isfp(ir->t)) { - r = fpr <= REGARG_LASTFPR ? fpr++ : 0; - } else { - r = gprs & 31; gprs >>= 5; - } -#else - if (ref && irt_isfp(ir->t)) { - r = 0; - } else { - r = gprs & 31; gprs >>= 5; - if (!ref) continue; - } -#endif - if (r) { /* Argument is in a register. */ - if (r < RID_MAX_GPR && ref < ASMREF_TMP1) { -#if LJ_64 - if (ir->o == IR_KINT64) - emit_loadu64(as, r, ir_kint64(ir)->u64); - else -#endif - emit_loadi(as, r, ir->i); - } else { - lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */ - if (ra_hasreg(ir->r)) { - ra_noweak(as, ir->r); - emit_movrr(as, ir, r, ir->r); - } else { - ra_allocref(as, ref, RID2RSET(r)); - } - } - } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ - lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. 
*/ - if (LJ_32 && (ofs & 4) && irref_isk(ref)) { - /* Split stores for unaligned FP consts. */ - emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); - emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi); - } else { - r = ra_alloc1(as, ref, RSET_FPR); - emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, - r, RID_ESP, ofs); - } - ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8; - } else { /* Non-FP argument is on stack. */ - if (LJ_32 && ref < ASMREF_TMP1) { - emit_movmroi(as, RID_ESP, ofs, ir->i); - } else { - r = ra_alloc1(as, ref, RSET_GPR); - emit_movtomro(as, REX_64 + r, RID_ESP, ofs); - } - ofs += sizeof(intptr_t); - } - checkmclim(as); - } -#if LJ_64 && !LJ_ABI_WIN - if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR; -#endif -} - -/* Setup result reg/sp for call. Evict scratch regs. */ -static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - RegSet drop = RSET_SCRATCH; - int hiop = (LJ_32 && (ir+1)->o == IR_HIOP); - if ((ci->flags & CCI_NOFPRCLOBBER)) - drop &= ~RSET_FPR; - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - if (hiop && ra_hasreg((ir+1)->r)) - rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ - ra_evictset(as, drop); /* Evictions must be performed first. */ - if (ra_used(ir)) { - if (irt_isfp(ir->t)) { - int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ -#if LJ_64 - if ((ci->flags & CCI_CASTU64)) { - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */ - } - if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs); - } else { - ra_destreg(as, ir, RID_FPRET); - } -#else - /* Number result is in x87 st0 for x86 calling convention. */ - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, - dest, RID_ESP, ofs); - } - if ((ci->flags & CCI_CASTU64)) { - emit_movtomro(as, RID_RETLO, RID_ESP, ofs); - emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4); - } else { - emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, - irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); - } -#endif -#if LJ_32 - } else if (hiop) { - ra_destpair(as, ir); -#endif - } else { - lua_assert(!irt_ispri(ir->t)); - ra_destreg(as, ir, RID_RET); - } - } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) { - emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */ - } -} - -/* Return a constant function pointer or NULL for indirect calls. */ -static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func) -{ -#if LJ_32 - UNUSED(as); - if (irref_isk(func)) - return (void *)irf->i; -#else - if (irref_isk(func)) { - MCode *p; - if (irf->o == IR_KINT64) - p = (MCode *)(void *)ir_k64(irf)->u64; - else - p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i; - if (p - as->mcp == (int32_t)(p - as->mcp)) - return p; /* Call target is still in +-2GB range. */ - /* Avoid the indirect case of emit_call(). Try to hoist func addr. */ - } -#endif - return NULL; -} - -static void asm_callx(ASMState *as, IRIns *ir) -{ - IRRef args[CCI_NARGS_MAX*2]; - CCallInfo ci; - IRRef func; - IRIns *irf; - int32_t spadj = 0; - ci.flags = asm_callx_flags(as, ir); - asm_collectargs(as, ir, &ci, args); - asm_setupresult(as, ir, &ci); -#if LJ_32 - /* Have to readjust stack after non-cdecl calls due to callee cleanup. 
*/ - if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL) - spadj = 4 * asm_count_call_slots(as, &ci, args); -#endif - func = ir->op2; irf = IR(func); - if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } - ci.func = (ASMFunction)asm_callx_func(as, irf, func); - if (!(void *)ci.func) { - /* Use a (hoistable) non-scratch register for indirect calls. */ - RegSet allow = (RSET_GPR & ~RSET_SCRATCH); - Reg r = ra_alloc1(as, func, allow); - if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */ - emit_rr(as, XO_GROUP5, XOg_CALL, r); - } else if (LJ_32) { - emit_spsub(as, spadj); - } - asm_gencall(as, &ci, args); -} - -/* -- Returns ------------------------------------------------------------- */ - -/* Return to lower frame. Guard that it goes to the right spot. */ -static void asm_retf(ASMState *as, IRIns *ir) -{ - Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); - void *pc = ir_kptr(IR(ir->op2)); - int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); - as->topslot -= (BCReg)delta; - if ((int32_t)as->topslot < 0) as->topslot = 0; - irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ - emit_setgl(as, base, jit_base); - emit_addptr(as, base, -8*delta); - asm_guardcc(as, CC_NE); - emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc)); -} - -/* -- Type conversions ---------------------------------------------------- */ - -static void asm_tointg(ASMState *as, IRIns *ir, Reg left) -{ - Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); - Reg dest = ra_dest(as, ir, RSET_GPR); - asm_guardcc(as, CC_P); - asm_guardcc(as, CC_NE); - emit_rr(as, XO_UCOMISD, left, tmp); - emit_rr(as, XO_CVTSI2SD, tmp, dest); - emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */ - emit_rr(as, XO_CVTTSD2SI, dest, left); - /* Can't fuse since left is needed twice. */ -} - -static void asm_tobit(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - Reg tmp = ra_noreg(IR(ir->op1)->r) ? - ra_alloc1(as, ir->op1, RSET_FPR) : - ra_scratch(as, RSET_FPR); - Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp)); - emit_rr(as, XO_MOVDto, tmp, dest); - emit_mrm(as, XO_ADDSD, tmp, right); - ra_left(as, tmp, ir->op1); -} - -static void asm_conv(ASMState *as, IRIns *ir) -{ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); - int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); - int stfp = (st == IRT_NUM || st == IRT_FLOAT); - IRRef lref = ir->op1; - lua_assert(irt_type(ir->t) != st); - lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ - if (irt_isfp(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_FPR); - if (stfp) { /* FP to FP conversion. */ - Reg left = asm_fuseload(as, lref, RSET_FPR); - emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left); - if (left == dest) return; /* Avoid the XO_XORPS. */ - } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */ - /* number = (2^52+2^51 .. u32) - (2^52+2^51) */ - cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000)); - Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); - if (irt_isfloat(ir->t)) - emit_rr(as, XO_CVTSD2SS, dest, dest); - emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */ - emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */ - emit_loadn(as, bias, k); - emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR)); - return; - } else { /* Integer to FP conversion. */ - Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ? 
- ra_alloc1(as, lref, RSET_GPR) : - asm_fuseloadm(as, lref, RSET_GPR, st64); - if (LJ_64 && st == IRT_U64) { - MCLabel l_end = emit_label(as); - const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000)); - emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */ - emit_sjcc(as, CC_NS, l_end); - emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */ - } - emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS, - dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left); - } - emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */ - } else if (stfp) { /* FP to integer conversion. */ - if (irt_isguard(ir->t)) { - /* Checked conversions are only supported from number to int. */ - lua_assert(irt_isint(ir->t) && st == IRT_NUM); - asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI; - if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) { - /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */ - /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */ - Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : - ra_scratch(as, RSET_FPR); - MCLabel l_end = emit_label(as); - if (LJ_32) - emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000); - emit_rr(as, op, dest|REX_64, tmp); - if (st == IRT_NUM) - emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J, - LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000))); - else - emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J, - LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000))); - emit_sjcc(as, CC_NS, l_end); - emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */ - emit_rr(as, op, dest|REX_64, tmp); - ra_left(as, tmp, lref); - } else { - Reg left = asm_fuseload(as, lref, RSET_FPR); - if (LJ_64 && irt_isu32(ir->t)) - emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */ - emit_mrm(as, op, - dest|((LJ_64 && - (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0), - left); - } - } - } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ - Reg left, dest = ra_dest(as, ir, RSET_GPR); - RegSet allow = RSET_GPR; - x86Op op; - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); - if (st == IRT_I8) { - op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; - } else if (st == IRT_U8) { - op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX; - } else if (st == IRT_I16) { - op = XO_MOVSXw; - } else { - op = XO_MOVZXw; - } - left = asm_fuseload(as, lref, allow); - /* Add extra MOV if source is already in wrong register. */ - if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) { - Reg tmp = ra_scratch(as, allow); - emit_rr(as, op, dest, tmp); - emit_rr(as, XO_MOV, tmp, left); - } else { - emit_mrm(as, op, dest, left); - } - } else { /* 32/64 bit integer conversions. */ - if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */ - Reg dest = ra_dest(as, ir, RSET_GPR); - ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ - } else if (irt_is64(ir->t)) { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (st64 || !(ir->op2 & IRCONV_SEXT)) { - /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */ - ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ - } else { /* 32 to 64 bit sign extension. 
*/ - Reg left = asm_fuseload(as, lref, RSET_GPR); - emit_mrm(as, XO_MOVSXd, dest|REX_64, left); - } - } else { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (st64) { - Reg left = asm_fuseload(as, lref, RSET_GPR); - /* This is either a 32 bit reg/reg mov which zeroes the hiword - ** or a load of the loword from a 64 bit address. - */ - emit_mrm(as, XO_MOV, dest, left); - } else { /* 32/32 bit no-op (cast). */ - ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ - } - } - } -} - -#if LJ_32 && LJ_HASFFI -/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */ - -/* 64 bit integer to FP conversion in 32 bit mode. */ -static void asm_conv_fp_int64(ASMState *as, IRIns *ir) -{ - Reg hi = ra_alloc1(as, ir->op1, RSET_GPR); - Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi)); - int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs); - } - emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, - irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); - if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) { - /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */ - MCLabel l_end = emit_label(as); - emit_rma(as, XO_FADDq, XOg_FADDq, - lj_ir_k64_find(as->J, U64x(43f00000,00000000))); - emit_sjcc(as, CC_NS, l_end); - emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ - } else { - lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); - } - emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); - /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ - emit_rmro(as, XO_MOVto, hi, RID_ESP, 4); - emit_rmro(as, XO_MOVto, lo, RID_ESP, 0); -} - -/* FP to 64 bit integer conversion in 32 bit mode. */ -static void asm_conv_int64_fp(ASMState *as, IRIns *ir) -{ - IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); - IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); - Reg lo, hi; - lua_assert(st == IRT_NUM || st == IRT_FLOAT); - lua_assert(dt == IRT_I64 || dt == IRT_U64); - hi = ra_dest(as, ir, RSET_GPR); - lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); - if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); - /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */ - if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */ - emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4); - emit_rmro(as, XO_MOVto, lo, RID_ESP, 4); - emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff); - } - if (dt == IRT_U64) { - /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ - MCLabel l_pop, l_end = emit_label(as); - emit_x87op(as, XI_FPOP); - l_pop = emit_label(as); - emit_sjmp(as, l_end); - emit_rmro(as, XO_MOV, hi, RID_ESP, 4); - if ((as->flags & JIT_F_SSE3)) - emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); - else - emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); - emit_rma(as, XO_FADDq, XOg_FADDq, - lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); - emit_sjcc(as, CC_NS, l_pop); - emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */ - } - emit_rmro(as, XO_MOV, hi, RID_ESP, 4); - if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */ - emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); - } else { /* Otherwise set FPU rounding mode to truncate before the store. 
*/ - emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); - emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0); - emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0); - emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0); - emit_loadi(as, lo, 0xc00); - emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0); - } - if (dt == IRT_U64) - emit_x87op(as, XI_FDUP); - emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd, - st == IRT_NUM ? XOg_FLDq: XOg_FLDd, - asm_fuseload(as, ir->op1, RSET_EMPTY)); -} - -static void asm_conv64(ASMState *as, IRIns *ir) -{ - if (irt_isfp(ir->t)) - asm_conv_fp_int64(as, ir); - else - asm_conv_int64_fp(as, ir); -} -#endif - -static void asm_strto(ASMState *as, IRIns *ir) -{ - /* Force a spill slot for the destination register (if any). */ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; - IRRef args[2]; - RegSet drop = RSET_SCRATCH; - if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r)) - rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */ - ra_evictset(as, drop); - asm_guardcc(as, CC_E); - emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */ - args[0] = ir->op1; /* GCstr *str */ - args[1] = ASMREF_TMP1; /* TValue *n */ - asm_gencall(as, ci, args); - /* Store the result to the spill slot or temp slots. */ - emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, - RID_ESP, sps_scale(ir->s)); -} - -/* -- Memory references --------------------------------------------------- */ - -/* Get pointer to TValue. */ -static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) -{ - IRIns *ir = IR(ref); - if (irt_isnum(ir->t)) { - /* For numbers use the constant itself or a spill slot as a TValue. */ - if (irref_isk(ref)) - emit_loada(as, dest, ir_knum(ir)); - else - emit_rmro(as, XO_LEA, dest|REX_64, RID_ESP, ra_spill(as, ir)); - } else { - /* Otherwise use g->tmptv to hold the TValue. */ - if (!irref_isk(ref)) { - Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest)); - emit_movtomro(as, REX_64IR(ir, src), dest, 0); - } else if (!irt_ispri(ir->t)) { - emit_movmroi(as, dest, 0, ir->i); - } - if (!(LJ_64 && irt_islightud(ir->t))) - emit_movmroi(as, dest, 4, irt_toitype(ir->t)); - emit_loada(as, dest, &J2G(as->J)->tmptv); - } -} - -static void asm_aref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - asm_fusearef(as, ir, RSET_GPR); - if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0)) - emit_mrm(as, XO_LEA, dest, RID_MRM); - else if (as->mrm.base != dest) - emit_rr(as, XO_MOV, dest, as->mrm.base); -} - -/* Inlined hash lookup. Specialized for key type and for const keys. -** The equivalent C code is: -** Node *n = hashkey(t, key); -** do { -** if (lj_obj_equal(&n->key, key)) return &n->val; -** } while ((n = nextnode(n))); -** return niltv(L); -*/ -static void asm_href(ASMState *as, IRIns *ir, IROp merge) -{ - RegSet allow = RSET_GPR; - int destused = ra_used(ir); - Reg dest = ra_dest(as, ir, allow); - Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); - Reg key = RID_NONE, tmp = RID_NONE; - IRIns *irkey = IR(ir->op2); - int isk = irref_isk(ir->op2); - IRType1 kt = irkey->t; - uint32_t khash; - MCLabel l_end, l_loop, l_next; - - if (!isk) { - rset_clear(allow, tab); - key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow); - if (!irt_isstr(kt)) - tmp = ra_scratch(as, rset_exclude(allow, key)); - } - - /* Key not found in chain: jump to exit (if merged) or load niltv. */ - l_end = emit_label(as); - if (merge == IR_NE) - asm_guardcc(as, CC_E); /* XI_JMP is not found by lj_asm_patchexit. 
*/ - else if (destused) - emit_loada(as, dest, niltvg(J2G(as->J))); - - /* Follow hash chain until the end. */ - l_loop = emit_sjcc_label(as, CC_NZ); - emit_rr(as, XO_TEST, dest, dest); - emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next)); - l_next = emit_label(as); - - /* Type and value comparison. */ - if (merge == IR_EQ) - asm_guardcc(as, CC_E); - else - emit_sjcc(as, CC_E, l_end); - if (irt_isnum(kt)) { - if (isk) { - /* Assumes -0.0 is already canonicalized to +0.0. */ - emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo), - (int32_t)ir_knum(irkey)->u32.lo); - emit_sjcc(as, CC_NE, l_next); - emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi), - (int32_t)ir_knum(irkey)->u32.hi); - } else { - emit_sjcc(as, CC_P, l_next); - emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n)); - emit_sjcc(as, CC_AE, l_next); - /* The type check avoids NaN penalties and complaints from Valgrind. */ -#if LJ_64 - emit_u32(as, LJ_TISNUM); - emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); -#else - emit_i8(as, LJ_TISNUM); - emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); -#endif - } -#if LJ_64 - } else if (irt_islightud(kt)) { - emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64)); -#endif - } else { - if (!irt_ispri(kt)) { - lua_assert(irt_isaddr(kt)); - if (isk) - emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), - ptr2addr(ir_kgc(irkey))); - else - emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); - emit_sjcc(as, CC_NE, l_next); - } - lua_assert(!irt_isnil(kt)); - emit_i8(as, irt_toitype(kt)); - emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); - } - emit_sfixup(as, l_loop); - checkmclim(as); - - /* Load main position relative to tab->node into dest. */ - khash = isk ? ir_khash(irkey) : 1; - if (khash == 0) { - emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node)); - } else { - emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node)); - if ((as->flags & JIT_F_PREFER_IMUL)) { - emit_i8(as, sizeof(Node)); - emit_rr(as, XO_IMULi8, dest, dest); - } else { - emit_shifti(as, XOg_SHL, dest, 3); - emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0); - } - if (isk) { - emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash); - emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); - } else if (irt_isstr(kt)) { - emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash)); - emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); - } else { /* Must match with hashrot() in lj_tab.c. 
*/ - emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask)); - emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp); - emit_shifti(as, XOg_ROL, tmp, HASH_ROT3); - emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp); - emit_shifti(as, XOg_ROL, dest, HASH_ROT2); - emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest); - emit_shifti(as, XOg_ROL, dest, HASH_ROT1); - emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest); - if (irt_isnum(kt)) { - emit_rr(as, XO_ARITH(XOg_ADD), dest, dest); -#if LJ_64 - emit_shifti(as, XOg_SHR|REX_64, dest, 32); - emit_rr(as, XO_MOV, tmp, dest); - emit_rr(as, XO_MOVDto, key|REX_64, dest); -#else - emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4); - emit_rr(as, XO_MOVDto, key, tmp); -#endif - } else { - emit_rr(as, XO_MOV, tmp, key); - emit_rmro(as, XO_LEA, dest, key, HASH_BIAS); - } - } - } -} - -static void asm_hrefk(ASMState *as, IRIns *ir) -{ - IRIns *kslot = IR(ir->op2); - IRIns *irkey = IR(kslot->op1); - int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); - Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; - Reg node = ra_alloc1(as, ir->op1, RSET_GPR); -#if !LJ_64 - MCLabel l_exit; -#endif - lua_assert(ofs % sizeof(Node) == 0); - if (ra_hasreg(dest)) { - if (ofs != 0) { - if (dest == node && !(as->flags & JIT_F_LEA_AGU)) - emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs); - else - emit_rmro(as, XO_LEA, dest, node, ofs); - } else if (dest != node) { - emit_rr(as, XO_MOV, dest, node); - } - } - asm_guardcc(as, CC_NE); -#if LJ_64 - if (!irt_ispri(irkey->t)) { - Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); - emit_rmro(as, XO_CMP, key|REX_64, node, - ofs + (int32_t)offsetof(Node, key.u64)); - lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); - /* Assumes -0.0 is already canonicalized to +0.0. */ - emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : - ((uint64_t)irt_toitype(irkey->t) << 32) | - (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); - } else { - lua_assert(!irt_isnil(irkey->t)); - emit_i8(as, irt_toitype(irkey->t)); - emit_rmro(as, XO_ARITHi8, XOg_CMP, node, - ofs + (int32_t)offsetof(Node, key.it)); - } -#else - l_exit = emit_label(as); - if (irt_isnum(irkey->t)) { - /* Assumes -0.0 is already canonicalized to +0.0. */ - emit_gmroi(as, XG_ARITHi(XOg_CMP), node, - ofs + (int32_t)offsetof(Node, key.u32.lo), - (int32_t)ir_knum(irkey)->u32.lo); - emit_sjcc(as, CC_NE, l_exit); - emit_gmroi(as, XG_ARITHi(XOg_CMP), node, - ofs + (int32_t)offsetof(Node, key.u32.hi), - (int32_t)ir_knum(irkey)->u32.hi); - } else { - if (!irt_ispri(irkey->t)) { - lua_assert(irt_isgcv(irkey->t)); - emit_gmroi(as, XG_ARITHi(XOg_CMP), node, - ofs + (int32_t)offsetof(Node, key.gcr), - ptr2addr(ir_kgc(irkey))); - emit_sjcc(as, CC_NE, l_exit); - } - lua_assert(!irt_isnil(irkey->t)); - emit_i8(as, irt_toitype(irkey->t)); - emit_rmro(as, XO_ARITHi8, XOg_CMP, node, - ofs + (int32_t)offsetof(Node, key.it)); - } -#endif -} - -static void asm_uref(ASMState *as, IRIns *ir) -{ - /* NYI: Check that UREFO is still open and not aliasing a slot. 
*/ - Reg dest = ra_dest(as, ir, RSET_GPR); - if (irref_isk(ir->op1)) { - GCfunc *fn = ir_kfunc(IR(ir->op1)); - MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; - emit_rma(as, XO_MOV, dest, v); - } else { - Reg uv = ra_scratch(as, RSET_GPR); - Reg func = ra_alloc1(as, ir->op1, RSET_GPR); - if (ir->o == IR_UREFC) { - emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv)); - asm_guardcc(as, CC_NE); - emit_i8(as, 1); - emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed)); - } else { - emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v)); - } - emit_rmro(as, XO_MOV, uv, func, - (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); - } -} - -static void asm_fref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - asm_fusefref(as, ir, RSET_GPR); - emit_mrm(as, XO_LEA, dest, RID_MRM); -} - -static void asm_strref(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - asm_fusestrref(as, ir, RSET_GPR); - if (as->mrm.base == RID_NONE) - emit_loadi(as, dest, as->mrm.ofs); - else if (as->mrm.base == dest && as->mrm.idx == RID_NONE) - emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs); - else - emit_mrm(as, XO_LEA, dest, RID_MRM); -} - -/* -- Loads and stores ---------------------------------------------------- */ - -static void asm_fxload(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); - x86Op xo; - if (ir->o == IR_FLOAD) - asm_fusefref(as, ir, RSET_GPR); - else - asm_fusexref(as, ir->op1, RSET_GPR); - /* ir->op2 is ignored -- unaligned loads are ok on x86. */ - switch (irt_type(ir->t)) { - case IRT_I8: xo = XO_MOVSXb; break; - case IRT_U8: xo = XO_MOVZXb; break; - case IRT_I16: xo = XO_MOVSXw; break; - case IRT_U16: xo = XO_MOVZXw; break; - case IRT_NUM: xo = XO_MOVSD; break; - case IRT_FLOAT: xo = XO_MOVSS; break; - default: - if (LJ_64 && irt_is64(ir->t)) - dest |= REX_64; - else - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); - xo = XO_MOV; - break; - } - emit_mrm(as, xo, dest, RID_MRM); -} - -#define asm_fload(as, ir) asm_fxload(as, ir) -#define asm_xload(as, ir) asm_fxload(as, ir) - -static void asm_fxstore(ASMState *as, IRIns *ir) -{ - RegSet allow = RSET_GPR; - Reg src = RID_NONE, osrc = RID_NONE; - int32_t k = 0; - if (ir->r == RID_SINK) - return; - /* The IRT_I16/IRT_U16 stores should never be simplified for constant - ** values since mov word [mem], imm16 has a length-changing prefix. - */ - if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) || - !asm_isk32(as, ir->op2, &k)) { - RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR : - (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR; - src = osrc = ra_alloc1(as, ir->op2, allow8); - if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */ - rset_clear(allow, osrc); - src = ra_scratch(as, allow8); - } - rset_clear(allow, src); - } - if (ir->o == IR_FSTORE) { - asm_fusefref(as, IR(ir->op1), allow); - } else { - asm_fusexref(as, ir->op1, allow); - if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4; - } - if (ra_hasreg(src)) { - x86Op xo; - switch (irt_type(ir->t)) { - case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break; - case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; - case IRT_NUM: xo = XO_MOVSDto; break; - case IRT_FLOAT: xo = XO_MOVSSto; break; -#if LJ_64 - case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. 
*/ -#endif - default: - if (LJ_64 && irt_is64(ir->t)) - src |= REX_64; - else - lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); - xo = XO_MOVto; - break; - } - emit_mrm(as, xo, src, RID_MRM); - if (!LJ_64 && src != osrc) { - ra_noweak(as, osrc); - emit_rr(as, XO_MOV, src, osrc); - } - } else { - if (irt_isi8(ir->t) || irt_isu8(ir->t)) { - emit_i8(as, k); - emit_mrm(as, XO_MOVmib, 0, RID_MRM); - } else { - lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || - irt_isaddr(ir->t)); - emit_i32(as, k); - emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); - } - } -} - -#define asm_fstore(as, ir) asm_fxstore(as, ir) -#define asm_xstore(as, ir) asm_fxstore(as, ir) - -#if LJ_64 -static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) -{ - if (ra_used(ir) || typecheck) { - Reg dest = ra_dest(as, ir, RSET_GPR); - if (typecheck) { - Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); - asm_guardcc(as, CC_NE); - emit_i8(as, -2); - emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); - emit_shifti(as, XOg_SAR|REX_64, tmp, 47); - emit_rr(as, XO_MOV, tmp|REX_64, dest); - } - return dest; - } else { - return RID_NONE; - } -} -#endif - -static void asm_ahuvload(ASMState *as, IRIns *ir) -{ - lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || - (LJ_DUALNUM && irt_isint(ir->t))); -#if LJ_64 - if (irt_islightud(ir->t)) { - Reg dest = asm_load_lightud64(as, ir, 1); - if (ra_hasreg(dest)) { - asm_fuseahuref(as, ir->op1, RSET_GPR); - emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); - } - return; - } else -#endif - if (ra_used(ir)) { - RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; - Reg dest = ra_dest(as, ir, allow); - asm_fuseahuref(as, ir->op1, RSET_GPR); - emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM); - } else { - asm_fuseahuref(as, ir->op1, RSET_GPR); - } - /* Always do the type check, even if the load result is unused. */ - as->mrm.ofs += 4; - asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); - if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { - lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); - emit_u32(as, LJ_TISNUM); - emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); - } else { - emit_i8(as, irt_toitype(ir->t)); - emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); - } -} - -static void asm_ahustore(ASMState *as, IRIns *ir) -{ - if (ir->r == RID_SINK) - return; - if (irt_isnum(ir->t)) { - Reg src = ra_alloc1(as, ir->op2, RSET_FPR); - asm_fuseahuref(as, ir->op1, RSET_GPR); - emit_mrm(as, XO_MOVSDto, src, RID_MRM); -#if LJ_64 - } else if (irt_islightud(ir->t)) { - Reg src = ra_alloc1(as, ir->op2, RSET_GPR); - asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); - emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); -#endif - } else { - IRIns *irr = IR(ir->op2); - RegSet allow = RSET_GPR; - Reg src = RID_NONE; - if (!irref_isk(ir->op2)) { - src = ra_alloc1(as, ir->op2, allow); - rset_clear(allow, src); - } - asm_fuseahuref(as, ir->op1, allow); - if (ra_hasreg(src)) { - emit_mrm(as, XO_MOVto, src, RID_MRM); - } else if (!irt_ispri(irr->t)) { - lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); - emit_i32(as, irr->i); - emit_mrm(as, XO_MOVmi, 0, RID_MRM); - } - as->mrm.ofs += 4; - emit_i32(as, (int32_t)irt_toitype(ir->t)); - emit_mrm(as, XO_MOVmi, 0, RID_MRM); - } -} - -static void asm_sload(ASMState *as, IRIns *ir) -{ - int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); - IRType1 t = ir->t; - Reg base; - lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). 
*/ - lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); - lua_assert(LJ_DUALNUM || - !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); - if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { - Reg left = ra_scratch(as, RSET_FPR); - asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ - base = ra_alloc1(as, REF_BASE, RSET_GPR); - emit_rmro(as, XO_MOVSD, left, base, ofs); - t.irt = IRT_NUM; /* Continue with a regular number type check. */ -#if LJ_64 - } else if (irt_islightud(t)) { - Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); - if (ra_hasreg(dest)) { - base = ra_alloc1(as, REF_BASE, RSET_GPR); - emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); - } - return; -#endif - } else if (ra_used(ir)) { - RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; - Reg dest = ra_dest(as, ir, allow); - base = ra_alloc1(as, REF_BASE, RSET_GPR); - lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); - if ((ir->op2 & IRSLOAD_CONVERT)) { - t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ - emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs); - } else { - emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs); - } - } else { - if (!(ir->op2 & IRSLOAD_TYPECHECK)) - return; /* No type check: avoid base alloc. */ - base = ra_alloc1(as, REF_BASE, RSET_GPR); - } - if ((ir->op2 & IRSLOAD_TYPECHECK)) { - /* Need type check, even if the load result is unused. */ - asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); - if (LJ_64 && irt_type(t) >= IRT_NUM) { - lua_assert(irt_isinteger(t) || irt_isnum(t)); - emit_u32(as, LJ_TISNUM); - emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); - } else { - emit_i8(as, irt_toitype(t)); - emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); - } - } -} - -/* -- Allocations --------------------------------------------------------- */ - -#if LJ_HASFFI -static void asm_cnew(ASMState *as, IRIns *ir) -{ - CTState *cts = ctype_ctsG(J2G(as->J)); - CTypeID id = (CTypeID)IR(ir->op1)->i; - CTSize sz; - CTInfo info = lj_ctype_info(cts, id, &sz); - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; - IRRef args[4]; - lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); - - as->gcsteps++; - asm_setupresult(as, ir, ci); /* GCcdata * */ - - /* Initialize immutable cdata object. */ - if (ir->o == IR_CNEWI) { - RegSet allow = (RSET_GPR & ~RSET_SCRATCH); -#if LJ_64 - Reg r64 = sz == 8 ? REX_64 : 0; - if (irref_isk(ir->op2)) { - IRIns *irk = IR(ir->op2); - uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : - (uint64_t)(uint32_t)irk->i; - if (sz == 4 || checki32((int64_t)k)) { - emit_i32(as, (int32_t)k); - emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); - } else { - emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); - emit_loadu64(as, RID_ECX, k); - } - } else { - Reg r = ra_alloc1(as, ir->op2, allow); - emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); - } -#else - int32_t ofs = sizeof(GCcdata); - if (sz == 8) { - ofs += 4; ir++; - lua_assert(ir->o == IR_HIOP); - } - do { - if (irref_isk(ir->op2)) { - emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); - } else { - Reg r = ra_alloc1(as, ir->op2, allow); - emit_movtomro(as, r, RID_RET, ofs); - rset_clear(allow, r); - } - if (ofs == sizeof(GCcdata)) break; - ofs -= 4; ir--; - } while (1); -#endif - lua_assert(sz == 4 || sz == 8); - } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. 
*/ - ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ir->op1; /* CTypeID id */ - args[2] = ir->op2; /* CTSize sz */ - args[3] = ASMREF_TMP1; /* CTSize align */ - asm_gencall(as, ci, args); - emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); - return; - } - - /* Combine initialization of marked, gct and ctypeid. */ - emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); - emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, - (int32_t)((~LJ_TCDATA<<8)+(id<<16))); - emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); - emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); - - args[0] = ASMREF_L; /* lua_State *L */ - args[1] = ASMREF_TMP1; /* MSize size */ - asm_gencall(as, ci, args); - emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); -} -#else -#define asm_cnew(as, ir) ((void)0) -#endif - -/* -- Write barriers ------------------------------------------------------ */ - -static void asm_tbar(ASMState *as, IRIns *ir) -{ - Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); - Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); - MCLabel l_end = emit_label(as); - emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); - emit_setgl(as, tab, gc.grayagain); - emit_getgl(as, tmp, gc.grayagain); - emit_i8(as, ~LJ_GC_BLACK); - emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); - emit_sjcc(as, CC_Z, l_end); - emit_i8(as, LJ_GC_BLACK); - emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); -} - -static void asm_obar(ASMState *as, IRIns *ir) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; - IRRef args[2]; - MCLabel l_end; - Reg obj; - /* No need for other object barriers (yet). */ - lua_assert(IR(ir->op1)->o == IR_UREFC); - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ir->op1; /* TValue *tv */ - asm_gencall(as, ci, args); - emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); - obj = IR(ir->op1)->r; - emit_sjcc(as, CC_Z, l_end); - emit_i8(as, LJ_GC_WHITES); - if (irref_isk(ir->op2)) { - GCobj *vp = ir_kgc(IR(ir->op2)); - emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); - } else { - Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); - emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); - } - emit_sjcc(as, CC_Z, l_end); - emit_i8(as, LJ_GC_BLACK); - emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, - (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); -} - -/* -- FP/int arithmetic and logic operations ------------------------------ */ - -/* Load reference onto x87 stack. Force a spill to memory if needed. */ -static void asm_x87load(ASMState *as, IRRef ref) -{ - IRIns *ir = IR(ref); - if (ir->o == IR_KNUM) { - cTValue *tv = ir_knum(ir); - if (tvispzero(tv)) /* Use fldz only for +0. 
*/ - emit_x87op(as, XI_FLDZ); - else if (tvispone(tv)) - emit_x87op(as, XI_FLD1); - else - emit_rma(as, XO_FLDq, XOg_FLDq, tv); - } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && - !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { - IRIns *iri = IR(ir->op1); - emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); - } else { - emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); - } -} - -static void asm_fpmath(ASMState *as, IRIns *ir) -{ - IRFPMathOp fpm = (IRFPMathOp)ir->op2; - if (fpm == IRFPM_SQRT) { - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg left = asm_fuseload(as, ir->op1, RSET_FPR); - emit_mrm(as, XO_SQRTSD, dest, left); - } else if (fpm <= IRFPM_TRUNC) { - if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ - Reg dest = ra_dest(as, ir, RSET_FPR); - Reg left = asm_fuseload(as, ir->op1, RSET_FPR); - /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. - ** Let's pretend it's a 3-byte opcode, and compensate afterwards. - ** This is atrocious, but the alternatives are much worse. - */ - /* Round down/up/trunc == 1001/1010/1011. */ - emit_i8(as, 0x09 + fpm); - emit_mrm(as, XO_ROUNDSD, dest, left); - if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { - as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ - } - *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ - } else { /* Call helper functions for SSE2 variant. */ - /* The modified regs must match with the *.dasc implementation. */ - RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - ra_evictset(as, drop); - ra_destreg(as, ir, RID_XMM0); - emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : - fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); - ra_left(as, RID_XMM0, ir->op1); - } - } else if (fpm == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) { - /* Rejoined to pow(). */ - } else { - asm_callid(as, ir, IRCALL_lj_vm_floor + fpm); - } -} - -#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) - -static void asm_ldexp(ASMState *as, IRIns *ir) -{ - int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ - Reg dest = ir->r; - if (ra_hasreg(dest)) { - ra_free(as, dest); - ra_modified(as, dest); - emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs); - } - emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); - emit_x87op(as, XI_FPOP1); - emit_x87op(as, XI_FSCALE); - asm_x87load(as, ir->op1); - asm_x87load(as, ir->op2); -} - -static void asm_fppowi(ASMState *as, IRIns *ir) -{ - /* The modified regs must match with the *.dasc implementation. */ - RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); - if (ra_hasreg(ir->r)) - rset_clear(drop, ir->r); /* Dest reg handled below. */ - ra_evictset(as, drop); - ra_destreg(as, ir, RID_XMM0); - emit_call(as, lj_vm_powi_sse); - ra_left(as, RID_XMM0, ir->op1); - ra_left(as, RID_EAX, ir->op2); -} - -static void asm_pow(ASMState *as, IRIns *ir) -{ -#if LJ_64 && LJ_HASFFI - if (!irt_isnum(ir->t)) - asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : - IRCALL_lj_carith_powu64); - else -#endif - asm_fppowi(as, ir); -} - -static int asm_swapops(ASMState *as, IRIns *ir) -{ - IRIns *irl = IR(ir->op1); - IRIns *irr = IR(ir->op2); - lua_assert(ra_noreg(irr->r)); - if (!irm_iscomm(lj_ir_mode[ir->o])) - return 0; /* Can't swap non-commutative operations. */ - if (irref_isk(ir->op2)) - return 0; /* Don't swap constants to the left. 
*/ - if (ra_hasreg(irl->r)) - return 1; /* Swap if left already has a register. */ - if (ra_samehint(ir->r, irr->r)) - return 1; /* Swap if dest and right have matching hints. */ - if (as->curins > as->loopref) { /* In variant part? */ - if (ir->op2 < as->loopref && !irt_isphi(irr->t)) - return 0; /* Keep invariants on the right. */ - if (ir->op1 < as->loopref && !irt_isphi(irl->t)) - return 1; /* Swap invariants to the right. */ - } - if (opisfusableload(irl->o)) - return 1; /* Swap fusable loads to the right. */ - return 0; /* Otherwise don't swap. */ -} - -static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) -{ - IRRef lref = ir->op1; - IRRef rref = ir->op2; - RegSet allow = RSET_FPR; - Reg dest; - Reg right = IR(rref)->r; - if (ra_hasreg(right)) { - rset_clear(allow, right); - ra_noweak(as, right); - } - dest = ra_dest(as, ir, allow); - if (lref == rref) { - right = dest; - } else if (ra_noreg(right)) { - if (asm_swapops(as, ir)) { - IRRef tmp = lref; lref = rref; rref = tmp; - } - right = asm_fuseload(as, rref, rset_clear(allow, dest)); - } - emit_mrm(as, xo, dest, right); - ra_left(as, dest, lref); -} - -static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) -{ - IRRef lref = ir->op1; - IRRef rref = ir->op2; - RegSet allow = RSET_GPR; - Reg dest, right; - int32_t k = 0; - if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ - MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2); - if ((p[1] & 15) < 14) { - if ((p[1] & 15) >= 12) p[1] -= 4; /* L <->S, NL <-> NS */ - as->flagmcp = NULL; - as->mcp = p; - } /* else: cannot transform LE/NLE to cc without use of OF. */ - } - right = IR(rref)->r; - if (ra_hasreg(right)) { - rset_clear(allow, right); - ra_noweak(as, right); - } - dest = ra_dest(as, ir, allow); - if (lref == rref) { - right = dest; - } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { - if (asm_swapops(as, ir)) { - IRRef tmp = lref; lref = rref; rref = tmp; - } - right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t)); - } - if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ - asm_guardcc(as, CC_O); - if (xa != XOg_X_IMUL) { - if (ra_hasreg(right)) - emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); - else - emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); - } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ - emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); - } else { /* IMUL r, r, k. */ - /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ - Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t)); - x86Op xo; - if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; - } else { emit_i32(as, k); xo = XO_IMULi; } - emit_mrm(as, xo, REX_64IR(ir, dest), left); - return; - } - ra_left(as, dest, lref); -} - -/* LEA is really a 4-operand ADD with an independent destination register, -** up to two source registers and an immediate. One register can be scaled -** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several -** instructions. -** -** Currently only a few common cases are supported: -** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated -** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b -** - Right ADD fusion: y = a+(b+k) -** The ommited variants have already been reduced by FOLD. -** -** There are more fusion opportunities, like gathering shifts or joining -** common references. But these are probably not worth the trouble, since -** array indexing is not decomposed and already makes use of all fields -** of the ModRM operand. 
-*/ -static int asm_lea(ASMState *as, IRIns *ir) -{ - IRIns *irl = IR(ir->op1); - IRIns *irr = IR(ir->op2); - RegSet allow = RSET_GPR; - Reg dest; - as->mrm.base = as->mrm.idx = RID_NONE; - as->mrm.scale = XM_SCALE1; - as->mrm.ofs = 0; - if (ra_hasreg(irl->r)) { - rset_clear(allow, irl->r); - ra_noweak(as, irl->r); - as->mrm.base = irl->r; - if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { - /* The PHI renaming logic does a better job in some cases. */ - if (ra_hasreg(ir->r) && - ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || - (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) - return 0; - if (irref_isk(ir->op2)) { - as->mrm.ofs = irr->i; - } else { - rset_clear(allow, irr->r); - ra_noweak(as, irr->r); - as->mrm.idx = irr->r; - } - } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && - irref_isk(irr->op2)) { - Reg idx = ra_alloc1(as, irr->op1, allow); - rset_clear(allow, idx); - as->mrm.idx = (uint8_t)idx; - as->mrm.ofs = IR(irr->op2)->i; - } else { - return 0; - } - } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && - (irref_isk(ir->op2) || irref_isk(irl->op2))) { - Reg idx, base = ra_alloc1(as, irl->op1, allow); - rset_clear(allow, base); - as->mrm.base = (uint8_t)base; - if (irref_isk(ir->op2)) { - as->mrm.ofs = irr->i; - idx = ra_alloc1(as, irl->op2, allow); - } else { - as->mrm.ofs = IR(irl->op2)->i; - idx = ra_alloc1(as, ir->op2, allow); - } - rset_clear(allow, idx); - as->mrm.idx = (uint8_t)idx; - } else { - return 0; - } - dest = ra_dest(as, ir, allow); - emit_mrm(as, XO_LEA, dest, RID_MRM); - return 1; /* Success. */ -} - -static void asm_add(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) - asm_fparith(as, ir, XO_ADDSD); - else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || - irt_is64(ir->t) || !asm_lea(as, ir)) - asm_intarith(as, ir, XOg_ADD); -} - -static void asm_sub(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) - asm_fparith(as, ir, XO_SUBSD); - else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ - asm_intarith(as, ir, XOg_SUB); -} - -static void asm_mul(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) - asm_fparith(as, ir, XO_MULSD); - else - asm_intarith(as, ir, XOg_X_IMUL); -} - -static void asm_div(ASMState *as, IRIns *ir) -{ -#if LJ_64 && LJ_HASFFI - if (!irt_isnum(ir->t)) - asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : - IRCALL_lj_carith_divu64); - else -#endif - asm_fparith(as, ir, XO_DIVSD); -} - -static void asm_mod(ASMState *as, IRIns *ir) -{ -#if LJ_64 && LJ_HASFFI - if (!irt_isint(ir->t)) - asm_callid(as, ir, irt_isi64(ir->t) ? 
IRCALL_lj_carith_modi64 : - IRCALL_lj_carith_modu64); - else -#endif - asm_callid(as, ir, IRCALL_lj_vm_modi); -} - -static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); - ra_left(as, dest, ir->op1); -} - -static void asm_neg(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) - asm_fparith(as, ir, XO_XORPS); - else - asm_neg_not(as, ir, XOg_NEG); -} - -#define asm_abs(as, ir) asm_fparith(as, ir, XO_ANDPS) - -static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) -{ - Reg right, dest = ra_dest(as, ir, RSET_GPR); - IRRef lref = ir->op1, rref = ir->op2; - if (irref_isk(rref)) { lref = rref; rref = ir->op1; } - right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); - emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); - emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); - ra_left(as, dest, lref); -} - -static void asm_min(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) - asm_fparith(as, ir, XO_MINSD); - else - asm_intmin_max(as, ir, CC_G); -} - -static void asm_max(ASMState *as, IRIns *ir) -{ - if (irt_isnum(ir->t)) - asm_fparith(as, ir, XO_MAXSD); - else - asm_intmin_max(as, ir, CC_L); -} - -/* Note: don't use LEA for overflow-checking arithmetic! */ -#define asm_addov(as, ir) asm_intarith(as, ir, XOg_ADD) -#define asm_subov(as, ir) asm_intarith(as, ir, XOg_SUB) -#define asm_mulov(as, ir) asm_intarith(as, ir, XOg_X_IMUL) - -#define asm_bnot(as, ir) asm_neg_not(as, ir, XOg_NOT) - -static void asm_bswap(ASMState *as, IRIns *ir) -{ - Reg dest = ra_dest(as, ir, RSET_GPR); - as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), - REX_64IR(ir, 0), dest, 0, as->mcp, 1); - ra_left(as, dest, ir->op1); -} - -#define asm_band(as, ir) asm_intarith(as, ir, XOg_AND) -#define asm_bor(as, ir) asm_intarith(as, ir, XOg_OR) -#define asm_bxor(as, ir) asm_intarith(as, ir, XOg_XOR) - -static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) -{ - IRRef rref = ir->op2; - IRIns *irr = IR(rref); - Reg dest; - if (irref_isk(rref)) { /* Constant shifts. */ - int shift; - dest = ra_dest(as, ir, RSET_GPR); - shift = irr->i & (irt_is64(ir->t) ? 63 : 31); - switch (shift) { - case 0: break; - case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; - default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; - } - } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ - Reg right; - dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX)); - if (dest == RID_ECX) { - dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX)); - emit_rr(as, XO_MOV, RID_ECX, dest); - } - right = irr->r; - if (ra_noreg(right)) - right = ra_allocref(as, rref, RID2RSET(RID_ECX)); - else if (right != RID_ECX) - ra_scratch(as, RID2RSET(RID_ECX)); - emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); - ra_noweak(as, right); - if (right != RID_ECX) - emit_rr(as, XO_MOV, RID_ECX, right); - } - ra_left(as, dest, ir->op1); - /* - ** Note: avoid using the flags resulting from a shift or rotate! - ** All of them cause a partial flag stall, except for r,1 shifts - ** (but not rotates). And a shift count of 0 leaves the flags unmodified. 
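The constant-shift path of asm_bitshift() above masks the count with 31 or 63 because x86 shifts ignore the higher bits of the count register; a small sketch of the equivalent semantics (hypothetical helpers):

#include <stdint.h>

/* Mirror of the hardware behaviour that makes the masking above safe. */
static uint32_t shl32(uint32_t v, uint32_t n) { return v << (n & 31); }
static uint64_t shl64(uint64_t v, uint64_t n) { return v << (n & 63); }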
- */ -} - -#define asm_bshl(as, ir) asm_bitshift(as, ir, XOg_SHL) -#define asm_bshr(as, ir) asm_bitshift(as, ir, XOg_SHR) -#define asm_bsar(as, ir) asm_bitshift(as, ir, XOg_SAR) -#define asm_brol(as, ir) asm_bitshift(as, ir, XOg_ROL) -#define asm_bror(as, ir) asm_bitshift(as, ir, XOg_ROR) - -/* -- Comparisons --------------------------------------------------------- */ - -/* Virtual flags for unordered FP comparisons. */ -#define VCC_U 0x1000 /* Unordered. */ -#define VCC_P 0x2000 /* Needs extra CC_P branch. */ -#define VCC_S 0x4000 /* Swap avoids CC_P branch. */ -#define VCC_PS (VCC_P|VCC_S) - -/* Map of comparisons to flags. ORDER IR. */ -#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) -static const uint16_t asm_compmap[IR_ABC+1] = { - /* signed non-eq unsigned flags */ - /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), - /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), - /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), - /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), - /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), - /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), - /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), - /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), - /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), - /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), - /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ -}; - -/* FP and integer comparisons. */ -static void asm_comp(ASMState *as, IRIns *ir) -{ - uint32_t cc = asm_compmap[ir->o]; - if (irt_isnum(ir->t)) { - IRRef lref = ir->op1; - IRRef rref = ir->op2; - Reg left, right; - MCLabel l_around; - /* - ** An extra CC_P branch is required to preserve ordered/unordered - ** semantics for FP comparisons. This can be avoided by swapping - ** the operands and inverting the condition (except for EQ and UNE). - ** So always try to swap if possible. - ** - ** Another option would be to swap operands to achieve better memory - ** operand fusion. But it's unlikely that this outweighs the cost - ** of the extra branches. - */ - if (cc & VCC_S) { /* Swap? */ - IRRef tmp = lref; lref = rref; rref = tmp; - cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ - } - left = ra_alloc1(as, lref, RSET_FPR); - right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); - l_around = emit_label(as); - asm_guardcc(as, cc >> 4); - if (cc & VCC_P) { /* Extra CC_P branch required? */ - if (!(cc & VCC_U)) { - asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ - } else if (l_around != as->invmcp) { - emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ - } else { - /* Patched to mcloop by asm_loop_fixup. */ - as->loopinv = 2; - if (as->realign) - emit_sjcc(as, CC_P, as->mcp); - else - emit_jcc(as, CC_P, as->mcp); - } - } - emit_mrm(as, XO_UCOMISD, left, right); - } else { - IRRef lref = ir->op1, rref = ir->op2; - IROp leftop = (IROp)(IR(lref)->o); - Reg r64 = REX_64IR(ir, 0); - int32_t imm = 0; - lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || - irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t)); - /* Swap constants (only for ABC) and fusable loads to the right. */ - if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { - if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */ - else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */ - lref = ir->op2; rref = ir->op1; - } - if (asm_isk32(as, rref, &imm)) { - IRIns *irl = IR(lref); - /* Check wether we can use test ins. Not for unsigned, since CF=0. 
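Each asm_compmap entry above packs four pieces of information per IR comparison, following the COMPFLAGS() layout. A sketch of how the fields come back out (accessor names are illustrative; the real code just shifts inline):

#include <stdint.h>

/* COMPFLAGS(ci, cin, cu, cf) == ci + (cu<<4) + (cin<<8) + cf */
static int cc_signed(uint32_t cc)   { return cc & 0x0f; }        /* FP/signed int cc */
static int cc_unsigned(uint32_t cc) { return (cc >> 4) & 0x0f; } /* unsigned cc */
static int cc_noneq(uint32_t cc)    { return (cc >> 8) & 0x0f; } /* hiword cc, no equality */
static int cc_vflags(uint32_t cc)   { return cc & 0x7000; }      /* VCC_U|VCC_P|VCC_S */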
*/ - int usetest = (imm == 0 && (cc & 0xa) != 0x2); - if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { - /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */ - Reg right, left = RID_NONE; - RegSet allow = RSET_GPR; - if (!asm_isk32(as, irl->op2, &imm)) { - left = ra_alloc1(as, irl->op2, allow); - rset_clear(allow, left); - } else { /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */ - IRIns *irll = IR(irl->op1); - if (opisfusableload((IROp)irll->o) && - (irt_isi8(irll->t) || irt_isu8(irll->t))) { - IRType1 origt = irll->t; /* Temporarily flip types. */ - irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; - as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ - right = asm_fuseload(as, irl->op1, RSET_GPR); - as->curins++; - irll->t = origt; - if (right != RID_MRM) goto test_nofuse; - /* Fusion succeeded, emit test byte mrm, imm8. */ - asm_guardcc(as, cc); - emit_i8(as, (imm & 0xff)); - emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); - return; - } - } - as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ - right = asm_fuseloadm(as, irl->op1, allow, r64); - as->curins++; /* Undo the above. */ - test_nofuse: - asm_guardcc(as, cc); - if (ra_noreg(left)) { - emit_i32(as, imm); - emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); - } else { - emit_mrm(as, XO_TEST, r64 + left, right); - } - } else { - Reg left; - if (opisfusableload((IROp)irl->o) && - ((irt_isu8(irl->t) && checku8(imm)) || - ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || - (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { - /* Only the IRT_INT case is fused by asm_fuseload. - ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads - ** are handled here. - ** Note that cmp word [mem], imm16 should not be generated, - ** since it has a length-changing prefix. Compares of a word - ** against a sign-extended imm8 are ok, however. - */ - IRType1 origt = irl->t; /* Temporarily flip types. */ - irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; - left = asm_fuseload(as, lref, RSET_GPR); - irl->t = origt; - if (left == RID_MRM) { /* Fusion succeeded? */ - if (irt_isu8(irl->t) || irt_isu16(irl->t)) - cc >>= 4; /* Need unsigned compare. */ - asm_guardcc(as, cc); - emit_i8(as, imm); - emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? - XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); - return; - } /* Otherwise handle register case as usual. */ - } else { - left = asm_fuseloadm(as, lref, - irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64); - } - asm_guardcc(as, cc); - if (usetest && left != RID_MRM) { - /* Use test r,r instead of cmp r,0. */ - x86Op xo = XO_TEST; - if (irt_isu8(ir->t)) { - lua_assert(ir->o == IR_EQ || ir->o == IR_NE); - xo = XO_TESTb; - if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) { - if (LJ_64) { - left |= FORCE_REX; - } else { - emit_i32(as, 0xff); - emit_mrm(as, XO_GROUP3, XOg_TEST, left); - return; - } - } - } - emit_rr(as, xo, r64 + left, left); - if (irl+1 == ir) /* Referencing previous ins? */ - as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ - } else { - emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); - } - } - } else { - Reg left = ra_alloc1(as, lref, RSET_GPR); - Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64); - asm_guardcc(as, cc); - emit_mrm(as, XO_CMP, r64 + left, right); - } - } -} - -#define asm_equal(as, ir) asm_comp(as, ir) - -#if LJ_32 && LJ_HASFFI -/* 64 bit integer comparisons in 32 bit mode. 
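The fusion above collapses comp(BAND(x, m), 0) into a single test instruction, because the flags of x & m are all that is needed; in C terms the recognized pattern is simply:

#include <stdint.h>

/* Emitted as: test x, m ; je/jne -- no separate AND result is materialized. */
static int band_is_zero(uint32_t x, uint32_t m)
{
  return (x & m) == 0;
}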
*/ -static void asm_comp_int64(ASMState *as, IRIns *ir) -{ - uint32_t cc = asm_compmap[(ir-1)->o]; - RegSet allow = RSET_GPR; - Reg lefthi = RID_NONE, leftlo = RID_NONE; - Reg righthi = RID_NONE, rightlo = RID_NONE; - MCLabel l_around; - x86ModRM mrm; - - as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ - - /* Allocate/fuse hiword operands. */ - if (irref_isk(ir->op2)) { - lefthi = asm_fuseload(as, ir->op1, allow); - } else { - lefthi = ra_alloc1(as, ir->op1, allow); - rset_clear(allow, lefthi); - righthi = asm_fuseload(as, ir->op2, allow); - if (righthi == RID_MRM) { - if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); - if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); - } else { - rset_clear(allow, righthi); - } - } - mrm = as->mrm; /* Save state for hiword instruction. */ - - /* Allocate/fuse loword operands. */ - if (irref_isk((ir-1)->op2)) { - leftlo = asm_fuseload(as, (ir-1)->op1, allow); - } else { - leftlo = ra_alloc1(as, (ir-1)->op1, allow); - rset_clear(allow, leftlo); - rightlo = asm_fuseload(as, (ir-1)->op2, allow); - } - - /* All register allocations must be performed _before_ this point. */ - l_around = emit_label(as); - as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ - - /* Loword comparison and branch. */ - asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ - if (ra_noreg(rightlo)) { - int32_t imm = IR((ir-1)->op2)->i; - if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) - emit_rr(as, XO_TEST, leftlo, leftlo); - else - emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); - } else { - emit_mrm(as, XO_CMP, leftlo, rightlo); - } - - /* Hiword comparison and branches. */ - if ((cc & 15) != CC_NE) - emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ - if ((cc & 15) != CC_E) - asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ - as->mrm = mrm; /* Restore state. */ - if (ra_noreg(righthi)) { - int32_t imm = IR(ir->op2)->i; - if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) - emit_rr(as, XO_TEST, lefthi, lefthi); - else - emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); - } else { - emit_mrm(as, XO_CMP, lefthi, righthi); - } -} -#endif - -/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ - -/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ -static void asm_hiop(ASMState *as, IRIns *ir) -{ -#if LJ_32 && LJ_HASFFI - /* HIOP is marked as a store because it needs its own DCE logic. */ - int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ - if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; - if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ - as->curins--; /* Always skip the CONV. */ - if (usehi || uselo) - asm_conv64(as, ir); - return; - } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ - asm_comp_int64(as, ir); - return; - } else if ((ir-1)->o == IR_XSTORE) { - if ((ir-1)->r != RID_SINK) - asm_fxstore(as, ir); - return; - } - if (!usehi) return; /* Skip unused hiword op for all remaining ops. 
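asm_comp_int64() above splits a 64 bit comparison into a hiword compare, which decides the result unless the hiwords are equal, and an unsigned loword compare. A portable sketch of a signed less-than under that scheme (illustrative helper, not from the sources):

#include <stdint.h>

static int lt64_split(int32_t ahi, uint32_t alo, int32_t bhi, uint32_t blo)
{
  if (ahi != bhi)
    return ahi < bhi;  /* Hiword decides; signedness follows the operator. */
  return alo < blo;    /* Loword compare is always unsigned. */
}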
*/ - switch ((ir-1)->o) { - case IR_ADD: - as->flagmcp = NULL; - as->curins--; - asm_intarith(as, ir, XOg_ADC); - asm_intarith(as, ir-1, XOg_ADD); - break; - case IR_SUB: - as->flagmcp = NULL; - as->curins--; - asm_intarith(as, ir, XOg_SBB); - asm_intarith(as, ir-1, XOg_SUB); - break; - case IR_NEG: { - Reg dest = ra_dest(as, ir, RSET_GPR); - emit_rr(as, XO_GROUP3, XOg_NEG, dest); - emit_i8(as, 0); - emit_rr(as, XO_ARITHi8, XOg_ADC, dest); - ra_left(as, dest, ir->op1); - as->curins--; - asm_neg_not(as, ir-1, XOg_NEG); - break; - } - case IR_CALLN: - case IR_CALLXS: - if (!uselo) - ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ - break; - case IR_CNEWI: - /* Nothing to do here. Handled by CNEWI itself. */ - break; - default: lua_assert(0); break; - } -#else - UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ -#endif -} - -/* -- Profiling ----------------------------------------------------------- */ - -static void asm_prof(ASMState *as, IRIns *ir) -{ - UNUSED(ir); - asm_guardcc(as, CC_NE); - emit_i8(as, HOOK_PROFILE); - emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask); -} - -/* -- Stack handling ------------------------------------------------------ */ - -/* Check Lua stack size for overflow. Use exit handler as fallback. */ -static void asm_stack_check(ASMState *as, BCReg topslot, - IRIns *irp, RegSet allow, ExitNo exitno) -{ - /* Try to get an unused temp. register, otherwise spill/restore eax. */ - Reg pbase = irp ? irp->r : RID_BASE; - Reg r = allow ? rset_pickbot(allow) : RID_EAX; - emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); - if (allow == RSET_EMPTY) /* Restore temp. register. */ - emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); - else - ra_modified(as, r); - emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); - if (ra_hasreg(pbase) && pbase != r) - emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); - else - emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, - ptr2addr(&J2G(as->J)->jit_base)); - emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); - emit_getgl(as, r, cur_L); - if (allow == RSET_EMPTY) /* Spill temp. register. */ - emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); -} - -/* Restore Lua stack from on-trace state. */ -static void asm_stack_restore(ASMState *as, SnapShot *snap) -{ - SnapEntry *map = &as->T->snapmap[snap->mapofs]; - SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; - MSize n, nent = snap->nent; - /* Store the value of all modified slots to the Lua stack. */ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - BCReg s = snap_slot(sn); - int32_t ofs = 8*((int32_t)s-1); - IRRef ref = snap_ref(sn); - IRIns *ir = IR(ref); - if ((sn & SNAP_NORESTORE)) - continue; - if (irt_isnum(ir->t)) { - Reg src = ra_alloc1(as, ref, RSET_FPR); - emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); - } else { - lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || - (LJ_DUALNUM && irt_isinteger(ir->t))); - if (!irref_isk(ref)) { - Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); - emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); - } else if (!irt_ispri(ir->t)) { - emit_movmroi(as, RID_BASE, ofs, ir->i); - } - if ((sn & (SNAP_CONT|SNAP_FRAME))) { - if (s != 0) /* Do not overwrite link to previous frame. 
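The IR_ADD case above pairs ADD on the loword with ADC on the hiword (and IR_SUB pairs SUB with SBB); the carry propagation it relies on looks like this in plain C (sketch only):

#include <stdint.h>

static void add64_split(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi,
                        uint32_t *rlo, uint32_t *rhi)
{
  uint32_t lo = alo + blo;
  *rlo = lo;
  *rhi = ahi + bhi + (lo < alo);  /* (lo < alo) is the carry consumed by ADC. */
}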
*/ - emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); - } else { - if (!(LJ_64 && irt_islightud(ir->t))) - emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); - } - } - checkmclim(as); - } - lua_assert(map + nent == flinks); -} - -/* -- GC handling --------------------------------------------------------- */ - -/* Check GC threshold and do one or more GC steps. */ -static void asm_gc_check(ASMState *as) -{ - const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; - IRRef args[2]; - MCLabel l_end; - Reg tmp; - ra_evictset(as, RSET_SCRATCH); - l_end = emit_label(as); - /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ - asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ - emit_rr(as, XO_TEST, RID_RET, RID_RET); - args[0] = ASMREF_TMP1; /* global_State *g */ - args[1] = ASMREF_TMP2; /* MSize steps */ - asm_gencall(as, ci, args); - tmp = ra_releasetmp(as, ASMREF_TMP1); - emit_loada(as, tmp, J2G(as->J)); - emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps); - /* Jump around GC step if GC total < GC threshold. */ - emit_sjcc(as, CC_B, l_end); - emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); - emit_getgl(as, tmp, gc.total); - as->gcsteps = 0; - checkmclim(as); -} - -/* -- Loop handling ------------------------------------------------------- */ - -/* Fixup the loop branch. */ -static void asm_loop_fixup(ASMState *as) -{ - MCode *p = as->mctop; - MCode *target = as->mcp; - if (as->realign) { /* Realigned loops use short jumps. */ - as->realign = NULL; /* Stop another retry. */ - lua_assert(((intptr_t)target & 15) == 0); - if (as->loopinv) { /* Inverted loop branch? */ - p -= 5; - p[0] = XI_JMP; - lua_assert(target - p >= -128); - p[-1] = (MCode)(target - p); /* Patch sjcc. */ - if (as->loopinv == 2) - p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ - } else { - lua_assert(target - p >= -128); - p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ - p[-2] = XI_JMPs; - } - } else { - MCode *newloop; - p[-5] = XI_JMP; - if (as->loopinv) { /* Inverted loop branch? */ - /* asm_guardcc already inverted the jcc and patched the jmp. */ - p -= 5; - newloop = target+4; - *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */ - if (as->loopinv == 2) { - *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */ - newloop = target+8; - } - } else { /* Otherwise just patch jmp. */ - *(int32_t *)(p-4) = (int32_t)(target - p); - newloop = target+3; - } - /* Realign small loops and shorten the loop branch. */ - if (newloop >= p - 128) { - as->realign = newloop; /* Force a retry and remember alignment. */ - as->curins = as->stopins; /* Abort asm_trace now. */ - as->T->nins = as->orignins; /* Remove any added renames. */ - } - } -} - -/* -- Head of trace ------------------------------------------------------- */ - -/* Coalesce BASE register for a root trace. */ -static void asm_head_root_base(ASMState *as) -{ - IRIns *ir = IR(REF_BASE); - Reg r = ir->r; - if (ra_hasreg(r)) { - ra_free(as, r); - if (rset_test(as->modset, r) || irt_ismarked(ir->t)) - ir->r = RID_INIT; /* No inheritance for modified BASE register. */ - if (r != RID_BASE) - emit_rr(as, XO_MOV, r, RID_BASE); - } -} - -/* Coalesce or reload BASE register for a side trace. 
*/ -static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) -{ - IRIns *ir = IR(REF_BASE); - Reg r = ir->r; - if (ra_hasreg(r)) { - ra_free(as, r); - if (rset_test(as->modset, r) || irt_ismarked(ir->t)) - ir->r = RID_INIT; /* No inheritance for modified BASE register. */ - if (irp->r == r) { - rset_clear(allow, r); /* Mark same BASE register as coalesced. */ - } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { - rset_clear(allow, irp->r); - emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */ - } else { - emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ - } - } - return allow; -} - -/* -- Tail of trace ------------------------------------------------------- */ - -/* Fixup the tail code. */ -static void asm_tail_fixup(ASMState *as, TraceNo lnk) -{ - /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */ - MCode *p = as->mctop; - MCode *target, *q; - int32_t spadj = as->T->spadjust; - if (spadj == 0) { - p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); - } else { - MCode *p1; - /* Patch stack adjustment. */ - if (checki8(spadj)) { - p -= 3; - p1 = p-6; - *p1 = (MCode)spadj; - } else { - p1 = p-9; - *(int32_t *)p1 = spadj; - } - if ((as->flags & JIT_F_LEA_AGU)) { -#if LJ_64 - p1[-4] = 0x48; -#endif - p1[-3] = (MCode)XI_LEA; - p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); - p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP); - } else { -#if LJ_64 - p1[-3] = 0x48; -#endif - p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi); - p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP); - } - } - /* Patch exit branch. */ - target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; - *(int32_t *)(p-4) = jmprel(p, target); - p[-5] = XI_JMP; - /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ - for (q = as->mctop-1; q >= p; q--) - *q = XI_NOP; - as->mctop = p; -} - -/* Prepare tail of code. */ -static void asm_tail_prep(ASMState *as) -{ - MCode *p = as->mctop; - /* Realign and leave room for backwards loop branch or exit branch. */ - if (as->realign) { - int i = ((int)(intptr_t)as->realign) & 15; - /* Fill unused mcode tail with NOPs to make the prefetcher happy. */ - while (i-- > 0) - *--p = XI_NOP; - as->mctop = p; - p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */ - } else { - p -= 5; /* Space for exit branch (near jmp). */ - } - if (as->loopref) { - as->invmcp = as->mcp = p; - } else { - /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ - as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); - as->invmcp = NULL; - } -} - -/* -- Trace setup --------------------------------------------------------- */ - -/* Ensure there are enough stack slots for call arguments. */ -static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) -{ - IRRef args[CCI_NARGS_MAX*2]; - int nslots; - asm_collectargs(as, ir, ci, args); - nslots = asm_count_call_slots(as, ci, args); - if (nslots > as->evenspill) /* Leave room for args in stack slots. */ - as->evenspill = nslots; -#if LJ_64 - return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); -#else - return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET); -#endif -} - -/* Target-specific setup. */ -static void asm_setup_target(ASMState *as) -{ - asm_exitstub_setup(as, as->T->nsnap); -} - -/* -- Trace patching ------------------------------------------------------ */ - -/* Patch exit jumps of existing machine code to a new target. 
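The exit-patching loop in lj_asm_patchexit() just below recognizes 32 bit conditional jumps by their 0F 8x opcode prefix; the (*(uint16_t *)p & 0xf0ff) == 0x800f test assumes a little-endian load. A byte-wise sketch of the same check:

/* A near Jcc rel32 is encoded as 0F 80..0F 8F followed by the displacement. */
static int is_jcc_rel32(const unsigned char *p)
{
  return p[0] == 0x0f && (p[1] & 0xf0) == 0x80;
}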
*/ -void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) -{ - MCode *p = T->mcode; - MCode *mcarea = lj_mcode_patch(J, p, 0); - MSize len = T->szmcode; - MCode *px = exitstub_addr(J, exitno) - 6; - MCode *pe = p+len-6; - uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); - if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) - *(int32_t *)(p+len-4) = jmprel(p+len, target); - /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ - for (; p < pe; p++) - if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) { - p += LJ_64 ? 11 : 10; - break; - } - lua_assert(p < pe); - for (; p < pe; p++) { - if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) { - *(int32_t *)(p+2) = jmprel(p+6, target); - p += 5; - } - } - lj_mcode_sync(T->mcode, T->mcode + T->szmcode); - lj_mcode_patch(J, mcarea, 1); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.c deleted file mode 100644 index a8f444c26e1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.c +++ /dev/null @@ -1,14 +0,0 @@ -/* -** Bytecode instruction modes. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_bc_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_bc.h" - -/* Bytecode offsets and bytecode instruction modes. */ -#include "lj_bcdef.h" - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.h deleted file mode 100644 index 64c1bcda35a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bc.h +++ /dev/null @@ -1,265 +0,0 @@ -/* -** Bytecode instruction format. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_BC_H -#define _LJ_BC_H - -#include "lj_def.h" -#include "lj_arch.h" - -/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit: -** -** +----+----+----+----+ -** | B | C | A | OP | Format ABC -** +----+----+----+----+ -** | D | A | OP | Format AD -** +-------------------- -** MSB LSB -** -** In-memory instructions are always stored in host byte order. -*/ - -/* Operand ranges and related constants. */ -#define BCMAX_A 0xff -#define BCMAX_B 0xff -#define BCMAX_C 0xff -#define BCMAX_D 0xffff -#define BCBIAS_J 0x8000 -#define NO_REG BCMAX_A -#define NO_JMP (~(BCPos)0) - -/* Macros to get instruction fields. */ -#define bc_op(i) ((BCOp)((i)&0xff)) -#define bc_a(i) ((BCReg)(((i)>>8)&0xff)) -#define bc_b(i) ((BCReg)((i)>>24)) -#define bc_c(i) ((BCReg)(((i)>>16)&0xff)) -#define bc_d(i) ((BCReg)((i)>>16)) -#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J) - -/* Macros to set instruction fields. */ -#define setbc_byte(p, x, ofs) \ - ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x) -#define setbc_op(p, x) setbc_byte(p, (x), 0) -#define setbc_a(p, x) setbc_byte(p, (x), 1) -#define setbc_b(p, x) setbc_byte(p, (x), 3) -#define setbc_c(p, x) setbc_byte(p, (x), 2) -#define setbc_d(p, x) \ - ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x) -#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J)) - -/* Macros to compose instructions. */ -#define BCINS_ABC(o, a, b, c) \ - (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16)) -#define BCINS_AD(o, a, d) \ - (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16)) -#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J)) - -/* Bytecode instruction definition. 
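Given the instruction layout documented above, composing and decoding bytecode is plain shifting and masking. A tiny standalone round trip mirroring BCINS_ABC() and the bc_*() accessors (names are illustrative):

#include <stdint.h>

/* OP in bits 0-7, A in 8-15, C in 16-23, B in 24-31; D overlays B and C. */
static uint32_t ins_abc(uint32_t op, uint32_t a, uint32_t b, uint32_t c)
{
  return op | (a << 8) | (b << 24) | (c << 16);
}
static uint32_t ins_op(uint32_t i) { return i & 0xff; }
static uint32_t ins_a(uint32_t i)  { return (i >> 8) & 0xff; }
static uint32_t ins_c(uint32_t i)  { return (i >> 16) & 0xff; }
static uint32_t ins_b(uint32_t i)  { return i >> 24; }
static uint32_t ins_d(uint32_t i)  { return i >> 16; }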
Order matters, see below. -** -** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod) -** -** The opcode name suffixes specify the type for RB/RC or RD: -** V = variable slot -** S = string const -** N = number const -** P = primitive type (~itype) -** B = unsigned byte literal -** M = multiple args/results -*/ -#define BCDEF(_) \ - /* Comparison ops. ORDER OPR. */ \ - _(ISLT, var, ___, var, lt) \ - _(ISGE, var, ___, var, lt) \ - _(ISLE, var, ___, var, le) \ - _(ISGT, var, ___, var, le) \ - \ - _(ISEQV, var, ___, var, eq) \ - _(ISNEV, var, ___, var, eq) \ - _(ISEQS, var, ___, str, eq) \ - _(ISNES, var, ___, str, eq) \ - _(ISEQN, var, ___, num, eq) \ - _(ISNEN, var, ___, num, eq) \ - _(ISEQP, var, ___, pri, eq) \ - _(ISNEP, var, ___, pri, eq) \ - \ - /* Unary test and copy ops. */ \ - _(ISTC, dst, ___, var, ___) \ - _(ISFC, dst, ___, var, ___) \ - _(IST, ___, ___, var, ___) \ - _(ISF, ___, ___, var, ___) \ - _(ISTYPE, var, ___, lit, ___) \ - _(ISNUM, var, ___, lit, ___) \ - \ - /* Unary ops. */ \ - _(MOV, dst, ___, var, ___) \ - _(NOT, dst, ___, var, ___) \ - _(UNM, dst, ___, var, unm) \ - _(LEN, dst, ___, var, len) \ - \ - /* Binary ops. ORDER OPR. VV last, POW must be next. */ \ - _(ADDVN, dst, var, num, add) \ - _(SUBVN, dst, var, num, sub) \ - _(MULVN, dst, var, num, mul) \ - _(DIVVN, dst, var, num, div) \ - _(MODVN, dst, var, num, mod) \ - \ - _(ADDNV, dst, var, num, add) \ - _(SUBNV, dst, var, num, sub) \ - _(MULNV, dst, var, num, mul) \ - _(DIVNV, dst, var, num, div) \ - _(MODNV, dst, var, num, mod) \ - \ - _(ADDVV, dst, var, var, add) \ - _(SUBVV, dst, var, var, sub) \ - _(MULVV, dst, var, var, mul) \ - _(DIVVV, dst, var, var, div) \ - _(MODVV, dst, var, var, mod) \ - \ - _(POW, dst, var, var, pow) \ - _(CAT, dst, rbase, rbase, concat) \ - \ - /* Constant ops. */ \ - _(KSTR, dst, ___, str, ___) \ - _(KCDATA, dst, ___, cdata, ___) \ - _(KSHORT, dst, ___, lits, ___) \ - _(KNUM, dst, ___, num, ___) \ - _(KPRI, dst, ___, pri, ___) \ - _(KNIL, base, ___, base, ___) \ - \ - /* Upvalue and function ops. */ \ - _(UGET, dst, ___, uv, ___) \ - _(USETV, uv, ___, var, ___) \ - _(USETS, uv, ___, str, ___) \ - _(USETN, uv, ___, num, ___) \ - _(USETP, uv, ___, pri, ___) \ - _(UCLO, rbase, ___, jump, ___) \ - _(FNEW, dst, ___, func, gc) \ - \ - /* Table ops. */ \ - _(TNEW, dst, ___, lit, gc) \ - _(TDUP, dst, ___, tab, gc) \ - _(GGET, dst, ___, str, index) \ - _(GSET, var, ___, str, newindex) \ - _(TGETV, dst, var, var, index) \ - _(TGETS, dst, var, str, index) \ - _(TGETB, dst, var, lit, index) \ - _(TGETR, dst, var, var, index) \ - _(TSETV, var, var, var, newindex) \ - _(TSETS, var, var, str, newindex) \ - _(TSETB, var, var, lit, newindex) \ - _(TSETM, base, ___, num, newindex) \ - _(TSETR, var, var, var, newindex) \ - \ - /* Calls and vararg handling. T = tail call. */ \ - _(CALLM, base, lit, lit, call) \ - _(CALL, base, lit, lit, call) \ - _(CALLMT, base, ___, lit, call) \ - _(CALLT, base, ___, lit, call) \ - _(ITERC, base, lit, lit, call) \ - _(ITERN, base, lit, lit, call) \ - _(VARG, base, lit, lit, ___) \ - _(ISNEXT, base, ___, jump, ___) \ - \ - /* Returns. */ \ - _(RETM, base, ___, lit, ___) \ - _(RET, rbase, ___, lit, ___) \ - _(RET0, rbase, ___, lit, ___) \ - _(RET1, rbase, ___, lit, ___) \ - \ - /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. 
*/ \ - _(FORI, base, ___, jump, ___) \ - _(JFORI, base, ___, jump, ___) \ - \ - _(FORL, base, ___, jump, ___) \ - _(IFORL, base, ___, jump, ___) \ - _(JFORL, base, ___, lit, ___) \ - \ - _(ITERL, base, ___, jump, ___) \ - _(IITERL, base, ___, jump, ___) \ - _(JITERL, base, ___, lit, ___) \ - \ - _(LOOP, rbase, ___, jump, ___) \ - _(ILOOP, rbase, ___, jump, ___) \ - _(JLOOP, rbase, ___, lit, ___) \ - \ - _(JMP, rbase, ___, jump, ___) \ - \ - /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \ - _(FUNCF, rbase, ___, ___, ___) \ - _(IFUNCF, rbase, ___, ___, ___) \ - _(JFUNCF, rbase, ___, lit, ___) \ - _(FUNCV, rbase, ___, ___, ___) \ - _(IFUNCV, rbase, ___, ___, ___) \ - _(JFUNCV, rbase, ___, lit, ___) \ - _(FUNCC, rbase, ___, ___, ___) \ - _(FUNCCW, rbase, ___, ___, ___) - -/* Bytecode opcode numbers. */ -typedef enum { -#define BCENUM(name, ma, mb, mc, mt) BC_##name, -BCDEF(BCENUM) -#undef BCENUM - BC__MAX -} BCOp; - -LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV); -LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV); -LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES); -LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN); -LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP); -LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE); -LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT); -LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT); -LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC); -LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM); -LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT); -LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET); -LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL); -LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL); -LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL); -LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL); -LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP); -LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP); -LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF); -LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF); -LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV); -LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV); - -/* This solves a circular dependency problem, change as needed. */ -#define FF_next_N 4 - -/* Stack slots used by FORI/FORL, relative to operand A. */ -enum { - FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT -}; - -/* Bytecode operand modes. 
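The LJ_STATIC_ASSERTs above pin the opcode numbering so that each comparison and its complementary test differ only in bit 0 (LT/GT additionally relate via ^3); flipping that bit is then enough to switch the branch sense. Sketch:

/* e.g. BC_ISLT <-> BC_ISGE, BC_ISEQV <-> BC_ISNEV */
static int bc_flip_cmp(int op)
{
  return op ^ 1;
}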
ORDER BCMode */ -typedef enum { - BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */ - BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata, - BCM_max -} BCMode; -#define BCM___ BCMnone - -#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7)) -#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15)) -#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15)) -#define bcmode_d(op) bcmode_c(op) -#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3)) -#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11)) - -#define BCMODE(name, ma, mb, mc, mm) \ - (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)), -#define BCMODE_FF 0 - -static LJ_AINLINE int bc_isret(BCOp op) -{ - return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1); -} - -LJ_DATA const uint16_t lj_bc_mode[]; -LJ_DATA const uint16_t lj_bc_ofs[]; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcdump.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcdump.h deleted file mode 100644 index c38983146bb..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcdump.h +++ /dev/null @@ -1,68 +0,0 @@ -/* -** Bytecode dump definitions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_BCDUMP_H -#define _LJ_BCDUMP_H - -#include "lj_obj.h" -#include "lj_lex.h" - -/* -- Bytecode dump format ------------------------------------------------ */ - -/* -** dump = header proto+ 0U -** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*] -** proto = lengthU pdata -** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*] -** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU -** [debuglenU [firstlineU numlineU]] -** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* } -** knum = intU0 | (loU1 hiU) -** ktab = narrayU nhashU karray* khash* -** karray = ktabk -** khash = ktabk ktabk -** ktabk = ktabtypeU { intU | (loU hiU) | strB* } -** -** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1 -*/ - -/* Bytecode dump header. */ -#define BCDUMP_HEAD1 0x1b -#define BCDUMP_HEAD2 0x4c -#define BCDUMP_HEAD3 0x4a - -/* If you perform *any* kind of private modifications to the bytecode itself -** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher. -*/ -#define BCDUMP_VERSION 2 - -/* Compatibility flags. */ -#define BCDUMP_F_BE 0x01 -#define BCDUMP_F_STRIP 0x02 -#define BCDUMP_F_FFI 0x04 -#define BCDUMP_F_FR2 0x08 - -#define BCDUMP_F_KNOWN (BCDUMP_F_FR2*2-1) - -/* Type codes for the GC constants of a prototype. Plus length for strings. */ -enum { - BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64, - BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR -}; - -/* Type codes for the keys/values of a constant table. */ -enum { - BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE, - BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR -}; - -/* -- Bytecode reader/writer ---------------------------------------------- */ - -LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, - void *data, int strip); -LJ_FUNC GCproto *lj_bcread_proto(LexState *ls); -LJ_FUNC GCproto *lj_bcread(LexState *ls); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcread.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcread.c deleted file mode 100644 index 5e5021773d7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcread.c +++ /dev/null @@ -1,457 +0,0 @@ -/* -** Bytecode reader. 
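Nearly every 'U' item in the dump grammar above is a ULEB128-encoded 32 bit value (the tree writes these with lj_strfmt_wuleb128). A minimal standalone writer for that encoding, as an illustration of the format:

#include <stdint.h>

/* 7 payload bits per byte, continuation bit 0x80 on all but the last byte. */
static char *write_uleb128(char *p, uint32_t v)
{
  for (; v >= 0x80; v >>= 7)
    *p++ = (char)((v & 0x7f) | 0x80);
  *p++ = (char)v;
  return p;
}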
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_bcread_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_bc.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_cdata.h" -#include "lualib.h" -#endif -#include "lj_lex.h" -#include "lj_bcdump.h" -#include "lj_state.h" -#include "lj_strfmt.h" - -/* Reuse some lexer fields for our own purposes. */ -#define bcread_flags(ls) ls->level -#define bcread_swap(ls) \ - ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE) -#define bcread_oldtop(L, ls) restorestack(L, ls->lastline) -#define bcread_savetop(L, ls, top) \ - ls->lastline = (BCLine)savestack(L, (top)) - -/* -- Input buffer handling ----------------------------------------------- */ - -/* Throw reader error. */ -static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em) -{ - lua_State *L = ls->L; - const char *name = ls->chunkarg; - if (*name == BCDUMP_HEAD1) name = "(binary)"; - else if (*name == '@' || *name == '=') name++; - lj_strfmt_pushf(L, "%s: %s", name, err2msg(em)); - lj_err_throw(L, LUA_ERRSYNTAX); -} - -/* Refill buffer. */ -static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) -{ - lua_assert(len != 0); - if (len > LJ_MAX_BUF || ls->c < 0) - bcread_error(ls, LJ_ERR_BCBAD); - do { - const char *buf; - size_t sz; - char *p = sbufB(&ls->sb); - MSize n = (MSize)(ls->pe - ls->p); - if (n) { /* Copy remainder to buffer. */ - if (sbuflen(&ls->sb)) { /* Move down in buffer. */ - lua_assert(ls->pe == sbufP(&ls->sb)); - if (ls->p != p) memmove(p, ls->p, n); - } else { /* Copy from buffer provided by reader. */ - p = lj_buf_need(&ls->sb, len); - memcpy(p, ls->p, n); - } - ls->p = p; - ls->pe = p + n; - } - setsbufP(&ls->sb, p + n); - buf = ls->rfunc(ls->L, ls->rdata, &sz); /* Get more data from reader. */ - if (buf == NULL || sz == 0) { /* EOF? */ - if (need) bcread_error(ls, LJ_ERR_BCBAD); - ls->c = -1; /* Only bad if we get called again. */ - break; - } - if (n) { /* Append to buffer. */ - n += (MSize)sz; - p = lj_buf_need(&ls->sb, n < len ? len : n); - memcpy(sbufP(&ls->sb), buf, sz); - setsbufP(&ls->sb, p + n); - ls->p = p; - ls->pe = p + n; - } else { /* Return buffer provided by reader. */ - ls->p = buf; - ls->pe = buf + sz; - } - } while (ls->p + len > ls->pe); -} - -/* Need a certain number of bytes. */ -static LJ_AINLINE void bcread_need(LexState *ls, MSize len) -{ - if (LJ_UNLIKELY(ls->p + len > ls->pe)) - bcread_fill(ls, len, 1); -} - -/* Want to read up to a certain number of bytes, but may need less. */ -static LJ_AINLINE void bcread_want(LexState *ls, MSize len) -{ - if (LJ_UNLIKELY(ls->p + len > ls->pe)) - bcread_fill(ls, len, 0); -} - -/* Return memory block from buffer. */ -static LJ_AINLINE uint8_t *bcread_mem(LexState *ls, MSize len) -{ - uint8_t *p = (uint8_t *)ls->p; - ls->p += len; - lua_assert(ls->p <= ls->pe); - return p; -} - -/* Copy memory block from buffer. */ -static void bcread_block(LexState *ls, void *q, MSize len) -{ - memcpy(q, bcread_mem(ls, len), len); -} - -/* Read byte from buffer. */ -static LJ_AINLINE uint32_t bcread_byte(LexState *ls) -{ - lua_assert(ls->p < ls->pe); - return (uint32_t)(uint8_t)*ls->p++; -} - -/* Read ULEB128 value from buffer. */ -static LJ_AINLINE uint32_t bcread_uleb128(LexState *ls) -{ - uint32_t v = lj_buf_ruleb128(&ls->p); - lua_assert(ls->p <= ls->pe); - return v; -} - -/* Read top 32 bits of 33 bit ULEB128 value from buffer. 
*/ -static uint32_t bcread_uleb128_33(LexState *ls) -{ - const uint8_t *p = (const uint8_t *)ls->p; - uint32_t v = (*p++ >> 1); - if (LJ_UNLIKELY(v >= 0x40)) { - int sh = -1; - v &= 0x3f; - do { - v |= ((*p & 0x7f) << (sh += 7)); - } while (*p++ >= 0x80); - } - ls->p = (char *)p; - lua_assert(ls->p <= ls->pe); - return v; -} - -/* -- Bytecode reader ----------------------------------------------------- */ - -/* Read debug info of a prototype. */ -static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg) -{ - void *lineinfo = (void *)proto_lineinfo(pt); - bcread_block(ls, lineinfo, sizedbg); - /* Swap lineinfo if the endianess differs. */ - if (bcread_swap(ls) && pt->numline >= 256) { - MSize i, n = pt->sizebc-1; - if (pt->numline < 65536) { - uint16_t *p = (uint16_t *)lineinfo; - for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8)); - } else { - uint32_t *p = (uint32_t *)lineinfo; - for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]); - } - } -} - -/* Find pointer to varinfo. */ -static const void *bcread_varinfo(GCproto *pt) -{ - const uint8_t *p = proto_uvinfo(pt); - MSize n = pt->sizeuv; - if (n) while (*p++ || --n) ; - return p; -} - -/* Read a single constant key/value of a template table. */ -static void bcread_ktabk(LexState *ls, TValue *o) -{ - MSize tp = bcread_uleb128(ls); - if (tp >= BCDUMP_KTAB_STR) { - MSize len = tp - BCDUMP_KTAB_STR; - const char *p = (const char *)bcread_mem(ls, len); - setstrV(ls->L, o, lj_str_new(ls->L, p, len)); - } else if (tp == BCDUMP_KTAB_INT) { - setintV(o, (int32_t)bcread_uleb128(ls)); - } else if (tp == BCDUMP_KTAB_NUM) { - o->u32.lo = bcread_uleb128(ls); - o->u32.hi = bcread_uleb128(ls); - } else { - lua_assert(tp <= BCDUMP_KTAB_TRUE); - setpriV(o, ~tp); - } -} - -/* Read a template table. */ -static GCtab *bcread_ktab(LexState *ls) -{ - MSize narray = bcread_uleb128(ls); - MSize nhash = bcread_uleb128(ls); - GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash)); - if (narray) { /* Read array entries. */ - MSize i; - TValue *o = tvref(t->array); - for (i = 0; i < narray; i++, o++) - bcread_ktabk(ls, o); - } - if (nhash) { /* Read hash entries. */ - MSize i; - for (i = 0; i < nhash; i++) { - TValue key; - bcread_ktabk(ls, &key); - lua_assert(!tvisnil(&key)); - bcread_ktabk(ls, lj_tab_set(ls->L, t, &key)); - } - } - return t; -} - -/* Read GC constants of a prototype. */ -static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc) -{ - MSize i; - GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc; - for (i = 0; i < sizekgc; i++, kr++) { - MSize tp = bcread_uleb128(ls); - if (tp >= BCDUMP_KGC_STR) { - MSize len = tp - BCDUMP_KGC_STR; - const char *p = (const char *)bcread_mem(ls, len); - setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len))); - } else if (tp == BCDUMP_KGC_TAB) { - setgcref(*kr, obj2gco(bcread_ktab(ls))); -#if LJ_HASFFI - } else if (tp != BCDUMP_KGC_CHILD) { - CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE : - tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64; - CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8; - GCcdata *cd = lj_cdata_new_(ls->L, id, sz); - TValue *p = (TValue *)cdataptr(cd); - setgcref(*kr, obj2gco(cd)); - p[0].u32.lo = bcread_uleb128(ls); - p[0].u32.hi = bcread_uleb128(ls); - if (tp == BCDUMP_KGC_COMPLEX) { - p[1].u32.lo = bcread_uleb128(ls); - p[1].u32.hi = bcread_uleb128(ls); - } -#endif - } else { - lua_State *L = ls->L; - lua_assert(tp == BCDUMP_KGC_CHILD); - if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? 
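Number constants use the 33 bit variant decoded by bcread_uleb128_33() above: the low bit of the encoded value tags the constant as an integer (0) or as the loword of a double (1), and bcread_knum() just below tests exactly that bit before decoding. A sketch of the tagging on the encode side (helper name is illustrative):

#include <stdint.h>

/* Build the 33 bit value whose ULEB128 encoding bcread_uleb128_33() undoes. */
static uint64_t knum_tagged(uint32_t payload, int is_num)
{
  return ((uint64_t)payload << 1) | (uint64_t)(is_num & 1);
}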
*/ - bcread_error(ls, LJ_ERR_BCBAD); - L->top--; - setgcref(*kr, obj2gco(protoV(L->top))); - } - } -} - -/* Read number constants of a prototype. */ -static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn) -{ - MSize i; - TValue *o = mref(pt->k, TValue); - for (i = 0; i < sizekn; i++, o++) { - int isnum = (ls->p[0] & 1); - uint32_t lo = bcread_uleb128_33(ls); - if (isnum) { - o->u32.lo = lo; - o->u32.hi = bcread_uleb128(ls); - } else { - setintV(o, lo); - } - } -} - -/* Read bytecode instructions. */ -static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc) -{ - BCIns *bc = proto_bc(pt); - bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF, - pt->framesize, 0); - bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns)); - /* Swap bytecode instructions if the endianess differs. */ - if (bcread_swap(ls)) { - MSize i; - for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]); - } -} - -/* Read upvalue refs. */ -static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv) -{ - if (sizeuv) { - uint16_t *uv = proto_uv(pt); - bcread_block(ls, uv, sizeuv*2); - /* Swap upvalue refs if the endianess differs. */ - if (bcread_swap(ls)) { - MSize i; - for (i = 0; i < sizeuv; i++) - uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8)); - } - } -} - -/* Read a prototype. */ -GCproto *lj_bcread_proto(LexState *ls) -{ - GCproto *pt; - MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept; - MSize ofsk, ofsuv, ofsdbg; - MSize sizedbg = 0; - BCLine firstline = 0, numline = 0; - - /* Read prototype header. */ - flags = bcread_byte(ls); - numparams = bcread_byte(ls); - framesize = bcread_byte(ls); - sizeuv = bcread_byte(ls); - sizekgc = bcread_uleb128(ls); - sizekn = bcread_uleb128(ls); - sizebc = bcread_uleb128(ls) + 1; - if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) { - sizedbg = bcread_uleb128(ls); - if (sizedbg) { - firstline = bcread_uleb128(ls); - numline = bcread_uleb128(ls); - } - } - - /* Calculate total size of prototype including all colocated arrays. */ - sizept = (MSize)sizeof(GCproto) + - sizebc*(MSize)sizeof(BCIns) + - sizekgc*(MSize)sizeof(GCRef); - sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1); - ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue); - ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2; - ofsdbg = sizept; sizept += sizedbg; - - /* Allocate prototype object and initialize its fields. */ - pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept); - pt->gct = ~LJ_TPROTO; - pt->numparams = (uint8_t)numparams; - pt->framesize = (uint8_t)framesize; - pt->sizebc = sizebc; - setmref(pt->k, (char *)pt + ofsk); - setmref(pt->uv, (char *)pt + ofsuv); - pt->sizekgc = 0; /* Set to zero until fully initialized. */ - pt->sizekn = sizekn; - pt->sizept = sizept; - pt->sizeuv = (uint8_t)sizeuv; - pt->flags = (uint8_t)flags; - pt->trace = 0; - setgcref(pt->chunkname, obj2gco(ls->chunkname)); - - /* Close potentially uninitialized gap between bc and kgc. */ - *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0; - - /* Read bytecode instructions and upvalue refs. */ - bcread_bytecode(ls, pt, sizebc); - bcread_uv(ls, pt, sizeuv); - - /* Read constants. */ - bcread_kgc(ls, pt, sizekgc); - pt->sizekgc = sizekgc; - bcread_knum(ls, pt, sizekn); - - /* Read and initialize debug info. */ - pt->firstline = firstline; - pt->numline = numline; - if (sizedbg) { - MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 
1 : 2); - setmref(pt->lineinfo, (char *)pt + ofsdbg); - setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli); - bcread_dbg(ls, pt, sizedbg); - setmref(pt->varinfo, bcread_varinfo(pt)); - } else { - setmref(pt->lineinfo, NULL); - setmref(pt->uvinfo, NULL); - setmref(pt->varinfo, NULL); - } - return pt; -} - -/* Read and check header of bytecode dump. */ -static int bcread_header(LexState *ls) -{ - uint32_t flags; - bcread_want(ls, 3+5+5); - if (bcread_byte(ls) != BCDUMP_HEAD2 || - bcread_byte(ls) != BCDUMP_HEAD3 || - bcread_byte(ls) != BCDUMP_VERSION) return 0; - bcread_flags(ls) = flags = bcread_uleb128(ls); - if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0; - if ((flags & BCDUMP_F_FR2) != LJ_FR2*BCDUMP_F_FR2) return 0; - if ((flags & BCDUMP_F_FFI)) { -#if LJ_HASFFI - lua_State *L = ls->L; - if (!ctype_ctsG(G(L))) { - ptrdiff_t oldtop = savestack(L, L->top); - luaopen_ffi(L); /* Load FFI library on-demand. */ - L->top = restorestack(L, oldtop); - } -#else - return 0; -#endif - } - if ((flags & BCDUMP_F_STRIP)) { - ls->chunkname = lj_str_newz(ls->L, ls->chunkarg); - } else { - MSize len = bcread_uleb128(ls); - bcread_need(ls, len); - ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len); - } - return 1; /* Ok. */ -} - -/* Read a bytecode dump. */ -GCproto *lj_bcread(LexState *ls) -{ - lua_State *L = ls->L; - lua_assert(ls->c == BCDUMP_HEAD1); - bcread_savetop(L, ls, L->top); - lj_buf_reset(&ls->sb); - /* Check for a valid bytecode dump header. */ - if (!bcread_header(ls)) - bcread_error(ls, LJ_ERR_BCFMT); - for (;;) { /* Process all prototypes in the bytecode dump. */ - GCproto *pt; - MSize len; - const char *startp; - /* Read length. */ - if (ls->p < ls->pe && ls->p[0] == 0) { /* Shortcut EOF. */ - ls->p++; - break; - } - bcread_want(ls, 5); - len = bcread_uleb128(ls); - if (!len) break; /* EOF */ - bcread_need(ls, len); - startp = ls->p; - pt = lj_bcread_proto(ls); - if (ls->p != startp + len) - bcread_error(ls, LJ_ERR_BCBAD); - setprotoV(L, L->top, pt); - incr_top(L); - } - if ((int32_t)(2*(uint32_t)(ls->pe - ls->p)) > 0 || - L->top-1 != bcread_oldtop(L, ls)) - bcread_error(ls, LJ_ERR_BCBAD); - /* Pop off last prototype. */ - L->top--; - return protoV(L->top); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcwrite.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcwrite.c deleted file mode 100644 index b2c09738387..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_bcwrite.c +++ /dev/null @@ -1,361 +0,0 @@ -/* -** Bytecode writer. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_bcwrite_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_buf.h" -#include "lj_bc.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#endif -#if LJ_HASJIT -#include "lj_dispatch.h" -#include "lj_jit.h" -#endif -#include "lj_strfmt.h" -#include "lj_bcdump.h" -#include "lj_vm.h" - -/* Context for bytecode writer. */ -typedef struct BCWriteCtx { - SBuf sb; /* Output buffer. */ - GCproto *pt; /* Root prototype. */ - lua_Writer wfunc; /* Writer callback. */ - void *wdata; /* Writer callback data. */ - int strip; /* Strip debug info. */ - int status; /* Status from writer callback. */ -} BCWriteCtx; - -/* -- Bytecode writer ----------------------------------------------------- */ - -/* Write a single constant key/value of a template table. 
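bcread_header() above only checks the 'L', 'J' and version bytes because BCDUMP_HEAD1 has already been consumed (see the lua_assert in lj_bcread()). A self-contained sniff test for the full signature, using the constants from lj_bcdump.h (0x1b 'L' 'J' plus a version byte):

/* Does a buffer start with a LuaJIT bytecode dump header? */
static int is_bcdump(const unsigned char *p, unsigned n)
{
  return n >= 4 && p[0] == 0x1b && p[1] == 'L' && p[2] == 'J';
}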
*/ -static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow) -{ - char *p = lj_buf_more(&ctx->sb, 1+10); - if (tvisstr(o)) { - const GCstr *str = strV(o); - MSize len = str->len; - p = lj_buf_more(&ctx->sb, 5+len); - p = lj_strfmt_wuleb128(p, BCDUMP_KTAB_STR+len); - p = lj_buf_wmem(p, strdata(str), len); - } else if (tvisint(o)) { - *p++ = BCDUMP_KTAB_INT; - p = lj_strfmt_wuleb128(p, intV(o)); - } else if (tvisnum(o)) { - if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */ - lua_Number num = numV(o); - int32_t k = lj_num2int(num); - if (num == (lua_Number)k) { /* -0 is never a constant. */ - *p++ = BCDUMP_KTAB_INT; - p = lj_strfmt_wuleb128(p, k); - setsbufP(&ctx->sb, p); - return; - } - } - *p++ = BCDUMP_KTAB_NUM; - p = lj_strfmt_wuleb128(p, o->u32.lo); - p = lj_strfmt_wuleb128(p, o->u32.hi); - } else { - lua_assert(tvispri(o)); - *p++ = BCDUMP_KTAB_NIL+~itype(o); - } - setsbufP(&ctx->sb, p); -} - -/* Write a template table. */ -static void bcwrite_ktab(BCWriteCtx *ctx, char *p, const GCtab *t) -{ - MSize narray = 0, nhash = 0; - if (t->asize > 0) { /* Determine max. length of array part. */ - ptrdiff_t i; - TValue *array = tvref(t->array); - for (i = (ptrdiff_t)t->asize-1; i >= 0; i--) - if (!tvisnil(&array[i])) - break; - narray = (MSize)(i+1); - } - if (t->hmask > 0) { /* Count number of used hash slots. */ - MSize i, hmask = t->hmask; - Node *node = noderef(t->node); - for (i = 0; i <= hmask; i++) - nhash += !tvisnil(&node[i].val); - } - /* Write number of array slots and hash slots. */ - p = lj_strfmt_wuleb128(p, narray); - p = lj_strfmt_wuleb128(p, nhash); - setsbufP(&ctx->sb, p); - if (narray) { /* Write array entries (may contain nil). */ - MSize i; - TValue *o = tvref(t->array); - for (i = 0; i < narray; i++, o++) - bcwrite_ktabk(ctx, o, 1); - } - if (nhash) { /* Write hash entries. */ - MSize i = nhash; - Node *node = noderef(t->node) + t->hmask; - for (;; node--) - if (!tvisnil(&node->val)) { - bcwrite_ktabk(ctx, &node->key, 0); - bcwrite_ktabk(ctx, &node->val, 1); - if (--i == 0) break; - } - } -} - -/* Write GC constants of a prototype. */ -static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt) -{ - MSize i, sizekgc = pt->sizekgc; - GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc; - for (i = 0; i < sizekgc; i++, kr++) { - GCobj *o = gcref(*kr); - MSize tp, need = 1; - char *p; - /* Determine constant type and needed size. */ - if (o->gch.gct == ~LJ_TSTR) { - tp = BCDUMP_KGC_STR + gco2str(o)->len; - need = 5+gco2str(o)->len; - } else if (o->gch.gct == ~LJ_TPROTO) { - lua_assert((pt->flags & PROTO_CHILD)); - tp = BCDUMP_KGC_CHILD; -#if LJ_HASFFI - } else if (o->gch.gct == ~LJ_TCDATA) { - CTypeID id = gco2cd(o)->ctypeid; - need = 1+4*5; - if (id == CTID_INT64) { - tp = BCDUMP_KGC_I64; - } else if (id == CTID_UINT64) { - tp = BCDUMP_KGC_U64; - } else { - lua_assert(id == CTID_COMPLEX_DOUBLE); - tp = BCDUMP_KGC_COMPLEX; - } -#endif - } else { - lua_assert(o->gch.gct == ~LJ_TTAB); - tp = BCDUMP_KGC_TAB; - need = 1+2*5; - } - /* Write constant type. */ - p = lj_buf_more(&ctx->sb, need); - p = lj_strfmt_wuleb128(p, tp); - /* Write constant data (if any). 
*/ - if (tp >= BCDUMP_KGC_STR) { - p = lj_buf_wmem(p, strdata(gco2str(o)), gco2str(o)->len); - } else if (tp == BCDUMP_KGC_TAB) { - bcwrite_ktab(ctx, p, gco2tab(o)); - continue; -#if LJ_HASFFI - } else if (tp != BCDUMP_KGC_CHILD) { - cTValue *q = (TValue *)cdataptr(gco2cd(o)); - p = lj_strfmt_wuleb128(p, q[0].u32.lo); - p = lj_strfmt_wuleb128(p, q[0].u32.hi); - if (tp == BCDUMP_KGC_COMPLEX) { - p = lj_strfmt_wuleb128(p, q[1].u32.lo); - p = lj_strfmt_wuleb128(p, q[1].u32.hi); - } -#endif - } - setsbufP(&ctx->sb, p); - } -} - -/* Write number constants of a prototype. */ -static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt) -{ - MSize i, sizekn = pt->sizekn; - cTValue *o = mref(pt->k, TValue); - char *p = lj_buf_more(&ctx->sb, 10*sizekn); - for (i = 0; i < sizekn; i++, o++) { - int32_t k; - if (tvisint(o)) { - k = intV(o); - goto save_int; - } else { - /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */ - if (!LJ_DUALNUM) { /* Narrow number constants to integers. */ - lua_Number num = numV(o); - k = lj_num2int(num); - if (num == (lua_Number)k) { /* -0 is never a constant. */ - save_int: - p = lj_strfmt_wuleb128(p, 2*(uint32_t)k | ((uint32_t)k&0x80000000u)); - if (k < 0) - p[-1] = (p[-1] & 7) | ((k>>27) & 0x18); - continue; - } - } - p = lj_strfmt_wuleb128(p, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u))); - if (o->u32.lo >= 0x80000000u) - p[-1] = (p[-1] & 7) | ((o->u32.lo>>27) & 0x18); - p = lj_strfmt_wuleb128(p, o->u32.hi); - } - } - setsbufP(&ctx->sb, p); -} - -/* Write bytecode instructions. */ -static char *bcwrite_bytecode(BCWriteCtx *ctx, char *p, GCproto *pt) -{ - MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */ -#if LJ_HASJIT - uint8_t *q = (uint8_t *)p; -#endif - p = lj_buf_wmem(p, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns)); - UNUSED(ctx); -#if LJ_HASJIT - /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */ - if ((pt->flags & PROTO_ILOOP) || pt->trace) { - jit_State *J = L2J(sbufL(&ctx->sb)); - MSize i; - for (i = 0; i < nbc; i++, q += sizeof(BCIns)) { - BCOp op = (BCOp)q[LJ_ENDIAN_SELECT(0, 3)]; - if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP || - op == BC_JFORI) { - q[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL); - } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { - BCReg rd = q[LJ_ENDIAN_SELECT(2, 1)] + (q[LJ_ENDIAN_SELECT(3, 0)] << 8); - BCIns ins = traceref(J, rd)->startins; - q[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_JFORL+BC_FORL); - q[LJ_ENDIAN_SELECT(2, 1)] = bc_c(ins); - q[LJ_ENDIAN_SELECT(3, 0)] = bc_b(ins); - } - } - } -#endif - return p; -} - -/* Write prototype. */ -static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt) -{ - MSize sizedbg = 0; - char *p; - - /* Recursively write children of prototype. */ - if ((pt->flags & PROTO_CHILD)) { - ptrdiff_t i, n = pt->sizekgc; - GCRef *kr = mref(pt->k, GCRef) - 1; - for (i = 0; i < n; i++, kr--) { - GCobj *o = gcref(*kr); - if (o->gch.gct == ~LJ_TPROTO) - bcwrite_proto(ctx, gco2pt(o)); - } - } - - /* Start writing the prototype info to a buffer. */ - p = lj_buf_need(&ctx->sb, - 5+4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2); - p += 5; /* Leave room for final size. */ - - /* Write prototype header. 
*/ - *p++ = (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI)); - *p++ = pt->numparams; - *p++ = pt->framesize; - *p++ = pt->sizeuv; - p = lj_strfmt_wuleb128(p, pt->sizekgc); - p = lj_strfmt_wuleb128(p, pt->sizekn); - p = lj_strfmt_wuleb128(p, pt->sizebc-1); - if (!ctx->strip) { - if (proto_lineinfo(pt)) - sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt); - p = lj_strfmt_wuleb128(p, sizedbg); - if (sizedbg) { - p = lj_strfmt_wuleb128(p, pt->firstline); - p = lj_strfmt_wuleb128(p, pt->numline); - } - } - - /* Write bytecode instructions and upvalue refs. */ - p = bcwrite_bytecode(ctx, p, pt); - p = lj_buf_wmem(p, proto_uv(pt), pt->sizeuv*2); - setsbufP(&ctx->sb, p); - - /* Write constants. */ - bcwrite_kgc(ctx, pt); - bcwrite_knum(ctx, pt); - - /* Write debug info, if not stripped. */ - if (sizedbg) { - p = lj_buf_more(&ctx->sb, sizedbg); - p = lj_buf_wmem(p, proto_lineinfo(pt), sizedbg); - setsbufP(&ctx->sb, p); - } - - /* Pass buffer to writer function. */ - if (ctx->status == 0) { - MSize n = sbuflen(&ctx->sb) - 5; - MSize nn = (lj_fls(n)+8)*9 >> 6; - char *q = sbufB(&ctx->sb) + (5 - nn); - p = lj_strfmt_wuleb128(q, n); /* Fill in final size. */ - lua_assert(p == sbufB(&ctx->sb) + 5); - ctx->status = ctx->wfunc(sbufL(&ctx->sb), q, nn+n, ctx->wdata); - } -} - -/* Write header of bytecode dump. */ -static void bcwrite_header(BCWriteCtx *ctx) -{ - GCstr *chunkname = proto_chunkname(ctx->pt); - const char *name = strdata(chunkname); - MSize len = chunkname->len; - char *p = lj_buf_need(&ctx->sb, 5+5+len); - *p++ = BCDUMP_HEAD1; - *p++ = BCDUMP_HEAD2; - *p++ = BCDUMP_HEAD3; - *p++ = BCDUMP_VERSION; - *p++ = (ctx->strip ? BCDUMP_F_STRIP : 0) + - LJ_BE*BCDUMP_F_BE + - ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0) + - LJ_FR2*BCDUMP_F_FR2; - if (!ctx->strip) { - p = lj_strfmt_wuleb128(p, len); - p = lj_buf_wmem(p, name, len); - } - ctx->status = ctx->wfunc(sbufL(&ctx->sb), sbufB(&ctx->sb), - (MSize)(p - sbufB(&ctx->sb)), ctx->wdata); -} - -/* Write footer of bytecode dump. */ -static void bcwrite_footer(BCWriteCtx *ctx) -{ - if (ctx->status == 0) { - uint8_t zero = 0; - ctx->status = ctx->wfunc(sbufL(&ctx->sb), &zero, 1, ctx->wdata); - } -} - -/* Protected callback for bytecode writer. */ -static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud) -{ - BCWriteCtx *ctx = (BCWriteCtx *)ud; - UNUSED(L); UNUSED(dummy); - lj_buf_need(&ctx->sb, 1024); /* Avoids resize for most prototypes. */ - bcwrite_header(ctx); - bcwrite_proto(ctx, ctx->pt); - bcwrite_footer(ctx); - return NULL; -} - -/* Write bytecode for a prototype. */ -int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data, - int strip) -{ - BCWriteCtx ctx; - int status; - ctx.pt = pt; - ctx.wfunc = writer; - ctx.wdata = data; - ctx.strip = strip; - ctx.status = 0; - lj_buf_init(L, &ctx.sb); - status = lj_vm_cpcall(L, NULL, &ctx, cpwriter); - if (status == 0) status = ctx.status; - lj_buf_free(G(sbufL(&ctx.sb)), &ctx.sb); - return status; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.c deleted file mode 100644 index 023bb9aac8f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.c +++ /dev/null @@ -1,234 +0,0 @@ -/* -** Buffer handling. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#define lj_buf_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_strfmt.h" - -/* -- Buffer management --------------------------------------------------- */ - -static void buf_grow(SBuf *sb, MSize sz) -{ - MSize osz = sbufsz(sb), len = sbuflen(sb), nsz = osz; - char *b; - if (nsz < LJ_MIN_SBUF) nsz = LJ_MIN_SBUF; - while (nsz < sz) nsz += nsz; - b = (char *)lj_mem_realloc(sbufL(sb), sbufB(sb), osz, nsz); - setmref(sb->b, b); - setmref(sb->p, b + len); - setmref(sb->e, b + nsz); -} - -LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz) -{ - lua_assert(sz > sbufsz(sb)); - if (LJ_UNLIKELY(sz > LJ_MAX_BUF)) - lj_err_mem(sbufL(sb)); - buf_grow(sb, sz); - return sbufB(sb); -} - -LJ_NOINLINE char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz) -{ - MSize len = sbuflen(sb); - lua_assert(sz > sbufleft(sb)); - if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF)) - lj_err_mem(sbufL(sb)); - buf_grow(sb, len + sz); - return sbufP(sb); -} - -void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb) -{ - char *b = sbufB(sb); - MSize osz = (MSize)(sbufE(sb) - b); - if (osz > 2*LJ_MIN_SBUF) { - MSize n = (MSize)(sbufP(sb) - b); - b = lj_mem_realloc(L, b, osz, (osz >> 1)); - setmref(sb->b, b); - setmref(sb->p, b + n); - setmref(sb->e, b + (osz >> 1)); - } -} - -char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz) -{ - SBuf *sb = &G(L)->tmpbuf; - setsbufL(sb, L); - return lj_buf_need(sb, sz); -} - -/* -- Low-level buffer put operations ------------------------------------- */ - -SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len) -{ - char *p = lj_buf_more(sb, len); - p = lj_buf_wmem(p, q, len); - setsbufP(sb, p); - return sb; -} - -#if LJ_HASJIT -SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c) -{ - char *p = lj_buf_more(sb, 1); - *p++ = (char)c; - setsbufP(sb, p); - return sb; -} -#endif - -SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s) -{ - MSize len = s->len; - char *p = lj_buf_more(sb, len); - p = lj_buf_wmem(p, strdata(s), len); - setsbufP(sb, p); - return sb; -} - -/* -- High-level buffer put operations ------------------------------------ */ - -SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s) -{ - MSize len = s->len; - char *p = lj_buf_more(sb, len), *e = p+len; - const char *q = strdata(s)+len-1; - while (p < e) - *p++ = *q--; - setsbufP(sb, p); - return sb; -} - -SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s) -{ - MSize len = s->len; - char *p = lj_buf_more(sb, len), *e = p+len; - const char *q = strdata(s); - for (; p < e; p++, q++) { - uint32_t c = *(unsigned char *)q; -#if LJ_TARGET_PPC - *p = c + ((c >= 'A' && c <= 'Z') << 5); -#else - if (c >= 'A' && c <= 'Z') c += 0x20; - *p = c; -#endif - } - setsbufP(sb, p); - return sb; -} - -SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s) -{ - MSize len = s->len; - char *p = lj_buf_more(sb, len), *e = p+len; - const char *q = strdata(s); - for (; p < e; p++, q++) { - uint32_t c = *(unsigned char *)q; -#if LJ_TARGET_PPC - *p = c - ((c >= 'a' && c <= 'z') << 5); -#else - if (c >= 'a' && c <= 'z') c -= 0x20; - *p = c; -#endif - } - setsbufP(sb, p); - return sb; -} - -SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep) -{ - MSize len = s->len; - if (rep > 0 && len) { - uint64_t tlen = (uint64_t)rep * len; - char *p; - if (LJ_UNLIKELY(tlen > LJ_MAX_STR)) - lj_err_mem(sbufL(sb)); - p = lj_buf_more(sb, (MSize)tlen); - if (len == 1) { /* Optimize a 
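buf_grow() above doubles the current allocation (starting from LJ_MIN_SBUF) until the requested size fits, which keeps repeated appends amortized O(1). A self-contained sketch of the same growth policy on top of plain realloc() (hypothetical ByteBuf type; the real SBuf goes through the Lua allocator and has the LJ_MAX_BUF overflow guard, which this sketch omits):

    #include <stdlib.h>
    #include <stddef.h>

    typedef struct ByteBuf { char *b, *p, *e; } ByteBuf;  /* base, write pos, end */

    /* Ensure at least `need` free bytes between p and e; doubles the
    ** allocation like buf_grow(). Returns 0 on success, -1 on OOM. */
    static int bytebuf_reserve(ByteBuf *sb, size_t need)
    {
      size_t len = (size_t)(sb->p - sb->b), sz = (size_t)(sb->e - sb->b);
      size_t nsz = sz ? sz : 64;            /* 64 stands in for LJ_MIN_SBUF */
      char *nb;
      if (sz - len >= need) return 0;       /* already enough room */
      while (nsz - len < need) nsz += nsz;  /* double until it fits */
      nb = (char *)realloc(sb->b, nsz);
      if (nb == NULL) return -1;
      sb->b = nb; sb->p = nb + len; sb->e = nb + nsz;
      return 0;
    }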
common case. */ - uint32_t c = strdata(s)[0]; - do { *p++ = c; } while (--rep > 0); - } else { - const char *e = strdata(s) + len; - do { - const char *q = strdata(s); - do { *p++ = *q++; } while (q < e); - } while (--rep > 0); - } - setsbufP(sb, p); - } - return sb; -} - -SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, int32_t i, int32_t e) -{ - MSize seplen = sep ? sep->len : 0; - if (i <= e) { - for (;;) { - cTValue *o = lj_tab_getint(t, i); - char *p; - if (!o) { - badtype: /* Error: bad element type. */ - setsbufP(sb, (void *)(intptr_t)i); /* Store failing index. */ - return NULL; - } else if (tvisstr(o)) { - MSize len = strV(o)->len; - p = lj_buf_wmem(lj_buf_more(sb, len + seplen), strVdata(o), len); - } else if (tvisint(o)) { - p = lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT+seplen), intV(o)); - } else if (tvisnum(o)) { - p = lj_strfmt_wnum(lj_buf_more(sb, STRFMT_MAXBUF_NUM+seplen), o); - } else { - goto badtype; - } - if (i++ == e) { - setsbufP(sb, p); - break; - } - if (seplen) p = lj_buf_wmem(p, strdata(sep), seplen); - setsbufP(sb, p); - } - } - return sb; -} - -/* -- Miscellaneous buffer operations ------------------------------------- */ - -GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb) -{ - return lj_str_new(sbufL(sb), sbufB(sb), sbuflen(sb)); -} - -/* Concatenate two strings. */ -GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2) -{ - MSize len1 = s1->len, len2 = s2->len; - char *buf = lj_buf_tmp(L, len1 + len2); - memcpy(buf, strdata(s1), len1); - memcpy(buf+len1, strdata(s2), len2); - return lj_str_new(L, buf, len1 + len2); -} - -/* Read ULEB128 from buffer. */ -uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp) -{ - const uint8_t *p = (const uint8_t *)*pp; - uint32_t v = *p++; - if (LJ_UNLIKELY(v >= 0x80)) { - int sh = 0; - v &= 0x7f; - do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80); - } - *pp = (const char *)p; - return v; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.h deleted file mode 100644 index 1cf1780b020..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_buf.h +++ /dev/null @@ -1,105 +0,0 @@ -/* -** Buffer handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_BUF_H -#define _LJ_BUF_H - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_str.h" - -/* Resizable string buffers. Struct definition in lj_obj.h. 
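lj_buf_ruleb128() above is the matching decoder for the ULEB128 fields the bytecode writer emits. The same loop as a standalone sketch over plain bytes (hypothetical helper):

    #include <stdint.h>

    /* Decode one unsigned LEB128 value and advance *pp past it. The first
    ** byte is handled outside the loop so the common single-byte case
    ** (v < 0x80) never shifts or masks. */
    static uint32_t uleb128_read(const uint8_t **pp)
    {
      const uint8_t *p = *pp;
      uint32_t v = *p++;
      if (v >= 0x80) {
        int sh = 0;
        v &= 0x7f;
        do { v |= (uint32_t)(*p & 0x7f) << (sh += 7); } while (*p++ >= 0x80);
      }
      *pp = p;
      return v;
    }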
*/ -#define sbufB(sb) (mref((sb)->b, char)) -#define sbufP(sb) (mref((sb)->p, char)) -#define sbufE(sb) (mref((sb)->e, char)) -#define sbufL(sb) (mref((sb)->L, lua_State)) -#define sbufsz(sb) ((MSize)(sbufE((sb)) - sbufB((sb)))) -#define sbuflen(sb) ((MSize)(sbufP((sb)) - sbufB((sb)))) -#define sbufleft(sb) ((MSize)(sbufE((sb)) - sbufP((sb)))) -#define setsbufP(sb, q) (setmref((sb)->p, (q))) -#define setsbufL(sb, l) (setmref((sb)->L, (l))) - -/* Buffer management */ -LJ_FUNC char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz); -LJ_FUNC char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz); -LJ_FUNC void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb); -LJ_FUNC char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz); - -static LJ_AINLINE void lj_buf_init(lua_State *L, SBuf *sb) -{ - setsbufL(sb, L); - setmref(sb->p, NULL); setmref(sb->e, NULL); setmref(sb->b, NULL); -} - -static LJ_AINLINE void lj_buf_reset(SBuf *sb) -{ - setmrefr(sb->p, sb->b); -} - -static LJ_AINLINE SBuf *lj_buf_tmp_(lua_State *L) -{ - SBuf *sb = &G(L)->tmpbuf; - setsbufL(sb, L); - lj_buf_reset(sb); - return sb; -} - -static LJ_AINLINE void lj_buf_free(global_State *g, SBuf *sb) -{ - lj_mem_free(g, sbufB(sb), sbufsz(sb)); -} - -static LJ_AINLINE char *lj_buf_need(SBuf *sb, MSize sz) -{ - if (LJ_UNLIKELY(sz > sbufsz(sb))) - return lj_buf_need2(sb, sz); - return sbufB(sb); -} - -static LJ_AINLINE char *lj_buf_more(SBuf *sb, MSize sz) -{ - if (LJ_UNLIKELY(sz > sbufleft(sb))) - return lj_buf_more2(sb, sz); - return sbufP(sb); -} - -/* Low-level buffer put operations */ -LJ_FUNC SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len); -#if LJ_HASJIT -LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c); -#endif -LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s); - -static LJ_AINLINE char *lj_buf_wmem(char *p, const void *q, MSize len) -{ - return (char *)memcpy(p, q, len) + len; -} - -static LJ_AINLINE void lj_buf_putb(SBuf *sb, int c) -{ - char *p = lj_buf_more(sb, 1); - *p++ = (char)c; - setsbufP(sb, p); -} - -/* High-level buffer put operations */ -LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s); -LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s); -LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s); -LJ_FUNC SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep); -LJ_FUNC SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, - int32_t i, int32_t e); - -/* Miscellaneous buffer operations */ -LJ_FUNCA GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb); -LJ_FUNC GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2); -LJ_FUNC uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp); - -static LJ_AINLINE GCstr *lj_buf_str(lua_State *L, SBuf *sb) -{ - return lj_str_new(L, sbufB(sb), sbuflen(sb)); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.c deleted file mode 100644 index 9032ea32410..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.c +++ /dev/null @@ -1,429 +0,0 @@ -/* -** C data arithmetic. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_meta.h" -#include "lj_ir.h" -#include "lj_ctype.h" -#include "lj_cconv.h" -#include "lj_cdata.h" -#include "lj_carith.h" -#include "lj_strscan.h" - -/* -- C data arithmetic --------------------------------------------------- */ - -/* Binary operands of an operator converted to ctypes. */ -typedef struct CDArith { - uint8_t *p[2]; - CType *ct[2]; -} CDArith; - -/* Check arguments for arithmetic metamethods. */ -static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca) -{ - TValue *o = L->base; - int ok = 1; - MSize i; - if (o+1 >= L->top) - lj_err_argt(L, 1, LUA_TCDATA); - for (i = 0; i < 2; i++, o++) { - if (tviscdata(o)) { - GCcdata *cd = cdataV(o); - CTypeID id = (CTypeID)cd->ctypeid; - CType *ct = ctype_raw(cts, id); - uint8_t *p = (uint8_t *)cdataptr(cd); - if (ctype_isptr(ct->info)) { - p = (uint8_t *)cdata_getptr(p, ct->size); - if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); - } else if (ctype_isfunc(ct->info)) { - p = (uint8_t *)*(void **)p; - ct = ctype_get(cts, - lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR)); - } - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - ca->ct[i] = ct; - ca->p[i] = p; - } else if (tvisint(o)) { - ca->ct[i] = ctype_get(cts, CTID_INT32); - ca->p[i] = (uint8_t *)&o->i; - } else if (tvisnum(o)) { - ca->ct[i] = ctype_get(cts, CTID_DOUBLE); - ca->p[i] = (uint8_t *)&o->n; - } else if (tvisnil(o)) { - ca->ct[i] = ctype_get(cts, CTID_P_VOID); - ca->p[i] = (uint8_t *)0; - } else if (tvisstr(o)) { - TValue *o2 = i == 0 ? o+1 : o-1; - CType *ct = ctype_raw(cts, cdataV(o2)->ctypeid); - ca->ct[i] = NULL; - ca->p[i] = (uint8_t *)strVdata(o); - ok = 0; - if (ctype_isenum(ct->info)) { - CTSize ofs; - CType *cct = lj_ctype_getfield(cts, ct, strV(o), &ofs); - if (cct && ctype_isconstval(cct->info)) { - ca->ct[i] = ctype_child(cts, cct); - ca->p[i] = (uint8_t *)&cct->size; /* Assumes ct does not grow. */ - ok = 1; - } else { - ca->ct[1-i] = ct; /* Use enum to improve error message. */ - ca->p[1-i] = NULL; - break; - } - } - } else { - ca->ct[i] = NULL; - ca->p[i] = (void *)(intptr_t)1; /* To make it unequal. */ - ok = 0; - } - } - return ok; -} - -/* Pointer arithmetic. */ -static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm) -{ - CType *ctp = ca->ct[0]; - uint8_t *pp = ca->p[0]; - ptrdiff_t idx; - CTSize sz; - CTypeID id; - GCcdata *cd; - if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) { - if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) && - (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) { - uint8_t *pp2 = ca->p[1]; - if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */ - setboolV(L->top-1, (pp == pp2)); - return 1; - } - if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL)) - return 0; - if (mm == MM_sub) { /* Pointer difference. */ - intptr_t diff; - sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */ - if (sz == 0 || sz == CTSIZE_INVALID) - return 0; - diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz; - /* All valid pointer differences on x64 are in (-2^47, +2^47), - ** which fits into a double without loss of precision. - */ - setintptrV(L->top-1, (int32_t)diff); - return 1; - } else if (mm == MM_lt) { /* Pointer comparison (unsigned). 
*/ - setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2)); - return 1; - } else { - lua_assert(mm == MM_le); - setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2)); - return 1; - } - } - if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info))) - return 0; - lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1], - (uint8_t *)&idx, ca->p[1], 0); - if (mm == MM_sub) idx = -idx; - } else if (mm == MM_add && ctype_isnum(ctp->info) && - (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) { - /* Swap pointer and index. */ - ctp = ca->ct[1]; pp = ca->p[1]; - lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0], - (uint8_t *)&idx, ca->p[0], 0); - } else { - return 0; - } - sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */ - if (sz == CTSIZE_INVALID) - return 0; - pp += idx*(int32_t)sz; /* Compute pointer + index. */ - id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)), - CTSIZE_PTR); - cd = lj_cdata_new(cts, id, CTSIZE_PTR); - *(uint8_t **)cdataptr(cd) = pp; - setcdataV(L, L->top-1, cd); - lj_gc_check(L); - return 1; -} - -/* 64 bit integer arithmetic. */ -static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm) -{ - if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 && - ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) { - CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) || - ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ? - CTID_UINT64 : CTID_INT64; - CType *ct = ctype_get(cts, id); - GCcdata *cd; - uint64_t u0, u1, *up; - lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0); - if (mm != MM_unm) - lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0); - switch (mm) { - case MM_eq: - setboolV(L->top-1, (u0 == u1)); - return 1; - case MM_lt: - setboolV(L->top-1, - id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1)); - return 1; - case MM_le: - setboolV(L->top-1, - id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1)); - return 1; - default: break; - } - cd = lj_cdata_new(cts, id, 8); - up = (uint64_t *)cdataptr(cd); - setcdataV(L, L->top-1, cd); - switch (mm) { - case MM_add: *up = u0 + u1; break; - case MM_sub: *up = u0 - u1; break; - case MM_mul: *up = u0 * u1; break; - case MM_div: - if (id == CTID_INT64) - *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1); - else - *up = lj_carith_divu64(u0, u1); - break; - case MM_mod: - if (id == CTID_INT64) - *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1); - else - *up = lj_carith_modu64(u0, u1); - break; - case MM_pow: - if (id == CTID_INT64) - *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1); - else - *up = lj_carith_powu64(u0, u1); - break; - case MM_unm: *up = (uint64_t)-(int64_t)u0; break; - default: lua_assert(0); break; - } - lj_gc_check(L); - return 1; - } - return 0; -} - -/* Handle ctype arithmetic metamethods. 
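carith_ptr() reimplements C pointer arithmetic for cdata: a pointer difference is divided by the element size, and ptr+index scales the index by the element size before adding. The plain-C behaviour it emulates, for reference:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      double a[8];
      double *p = &a[1], *q = &a[6];
      ptrdiff_t d = q - p;                            /* 5 elements, not raw bytes */
      uintptr_t bytes = (uintptr_t)q - (uintptr_t)p;  /* 5*sizeof(double) */
      double *r = p + 3;                              /* advances 3 elements */
      printf("%td %zu %td\n", d, (size_t)bytes, (ptrdiff_t)(r - a));
      return 0;  /* prints "5 40 4" where sizeof(double) == 8 */
    }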
*/ -static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm) -{ - cTValue *tv = NULL; - if (tviscdata(L->base)) { - CTypeID id = cdataV(L->base)->ctypeid; - CType *ct = ctype_raw(cts, id); - if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); - tv = lj_ctype_meta(cts, id, mm); - } - if (!tv && L->base+1 < L->top && tviscdata(L->base+1)) { - CTypeID id = cdataV(L->base+1)->ctypeid; - CType *ct = ctype_raw(cts, id); - if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); - tv = lj_ctype_meta(cts, id, mm); - } - if (!tv) { - const char *repr[2]; - int i, isenum = -1, isstr = -1; - if (mm == MM_eq) { /* Equality checks never raise an error. */ - int eq = ca->p[0] == ca->p[1]; - setboolV(L->top-1, eq); - setboolV(&G(L)->tmptv2, eq); /* Remember for trace recorder. */ - return 1; - } - for (i = 0; i < 2; i++) { - if (ca->ct[i] && tviscdata(L->base+i)) { - if (ctype_isenum(ca->ct[i]->info)) isenum = i; - repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL)); - } else { - if (tvisstr(&L->base[i])) isstr = i; - repr[i] = lj_typename(&L->base[i]); - } - } - if ((isenum ^ isstr) == 1) - lj_err_callerv(L, LJ_ERR_FFI_BADCONV, repr[isstr], repr[isenum]); - lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN : - mm == MM_concat ? LJ_ERR_FFI_BADCONCAT : - mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH, - repr[0], repr[1]); - } - return lj_meta_tailcall(L, tv); -} - -/* Arithmetic operators for cdata. */ -int lj_carith_op(lua_State *L, MMS mm) -{ - CTState *cts = ctype_cts(L); - CDArith ca; - if (carith_checkarg(L, cts, &ca)) { - if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) { - copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */ - return 1; - } - } - return lj_carith_meta(L, cts, &ca, mm); -} - -/* -- 64 bit bit operations helpers --------------------------------------- */ - -#if LJ_64 -#define B64DEF(name) \ - static LJ_AINLINE uint64_t lj_carith_##name(uint64_t x, int32_t sh) -#else -/* Not inlined on 32 bit archs, since some of these are quite lengthy. */ -#define B64DEF(name) \ - uint64_t LJ_NOINLINE lj_carith_##name(uint64_t x, int32_t sh) -#endif - -B64DEF(shl64) { return x << (sh&63); } -B64DEF(shr64) { return x >> (sh&63); } -B64DEF(sar64) { return (uint64_t)((int64_t)x >> (sh&63)); } -B64DEF(rol64) { return lj_rol(x, (sh&63)); } -B64DEF(ror64) { return lj_ror(x, (sh&63)); } - -#undef B64DEF - -uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op) -{ - switch (op) { - case IR_BSHL-IR_BSHL: x = lj_carith_shl64(x, sh); break; - case IR_BSHR-IR_BSHL: x = lj_carith_shr64(x, sh); break; - case IR_BSAR-IR_BSHL: x = lj_carith_sar64(x, sh); break; - case IR_BROL-IR_BSHL: x = lj_carith_rol64(x, sh); break; - case IR_BROR-IR_BSHL: x = lj_carith_ror64(x, sh); break; - default: lua_assert(0); break; - } - return x; -} - -/* Equivalent to lj_lib_checkbit(), but handles cdata. */ -uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id) -{ - TValue *o = L->base + narg-1; - if (o >= L->top) { - err: - lj_err_argt(L, narg, LUA_TNUMBER); - } else if (LJ_LIKELY(tvisnumber(o))) { - /* Handled below. 
*/ - } else if (tviscdata(o)) { - CTState *cts = ctype_cts(L); - uint8_t *sp = (uint8_t *)cdataptr(cdataV(o)); - CTypeID sid = cdataV(o)->ctypeid; - CType *s = ctype_get(cts, sid); - uint64_t x; - if (ctype_isref(s->info)) { - sp = *(void **)sp; - sid = ctype_cid(s->info); - } - s = ctype_raw(cts, sid); - if (ctype_isenum(s->info)) s = ctype_child(cts, s); - if ((s->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) == - CTINFO(CT_NUM, CTF_UNSIGNED) && s->size == 8) - *id = CTID_UINT64; /* Use uint64_t, since it has the highest rank. */ - else if (!*id) - *id = CTID_INT64; /* Use int64_t, unless already set. */ - lj_cconv_ct_ct(cts, ctype_get(cts, *id), s, - (uint8_t *)&x, sp, CCF_ARG(narg)); - return x; - } else if (!(tvisstr(o) && lj_strscan_number(strV(o), o))) { - goto err; - } - if (LJ_LIKELY(tvisint(o))) { - return (uint32_t)intV(o); - } else { - int32_t i = lj_num2bit(numV(o)); - if (LJ_DUALNUM) setintV(o, i); - return (uint32_t)i; - } -} - - -/* -- 64 bit integer arithmetic helpers ----------------------------------- */ - -#if LJ_32 && LJ_HASJIT -/* Signed/unsigned 64 bit multiplication. */ -int64_t lj_carith_mul64(int64_t a, int64_t b) -{ - return a * b; -} -#endif - -/* Unsigned 64 bit division. */ -uint64_t lj_carith_divu64(uint64_t a, uint64_t b) -{ - if (b == 0) return U64x(80000000,00000000); - return a / b; -} - -/* Signed 64 bit division. */ -int64_t lj_carith_divi64(int64_t a, int64_t b) -{ - if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1)) - return U64x(80000000,00000000); - return a / b; -} - -/* Unsigned 64 bit modulo. */ -uint64_t lj_carith_modu64(uint64_t a, uint64_t b) -{ - if (b == 0) return U64x(80000000,00000000); - return a % b; -} - -/* Signed 64 bit modulo. */ -int64_t lj_carith_modi64(int64_t a, int64_t b) -{ - if (b == 0) return U64x(80000000,00000000); - if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0; - return a % b; -} - -/* Unsigned 64 bit x^k. */ -uint64_t lj_carith_powu64(uint64_t x, uint64_t k) -{ - uint64_t y; - if (k == 0) - return 1; - for (; (k & 1) == 0; k >>= 1) x *= x; - y = x; - if ((k >>= 1) != 0) { - for (;;) { - x *= x; - if (k == 1) break; - if (k & 1) y *= x; - k >>= 1; - } - y *= x; - } - return y; -} - -/* Signed 64 bit x^k. */ -int64_t lj_carith_powi64(int64_t x, int64_t k) -{ - if (k == 0) - return 1; - if (k < 0) { - if (x == 0) - return U64x(7fffffff,ffffffff); - else if (x == 1) - return 1; - else if (x == -1) - return (k & 1) ? -1 : 1; - else - return 0; - } - return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.h deleted file mode 100644 index da8320f3066..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_carith.h +++ /dev/null @@ -1,37 +0,0 @@ -/* -** C data arithmetic. -** Copyright (C) 2005-2015 Mike Pall. 
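The divide/modulo helpers above return the fixed bit pattern 0x8000000000000000 for division by zero and for INT64_MIN / -1, the two cases that are undefined in C (and trap on some CPUs). lj_carith_powu64() is ordinary binary exponentiation; the textbook form of the same loop, wrapping modulo 2^64 exactly like the original:

    #include <stdint.h>

    /* Square-and-multiply: O(log k) multiplies, result wraps mod 2^64. */
    static uint64_t upow64(uint64_t x, uint64_t k)
    {
      uint64_t y = 1;
      while (k) {
        if (k & 1) y *= x;
        x *= x;
        k >>= 1;
      }
      return y;
    }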
See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CARITH_H -#define _LJ_CARITH_H - -#include "lj_obj.h" - -#if LJ_HASFFI - -LJ_FUNC int lj_carith_op(lua_State *L, MMS mm); - -#if LJ_32 -LJ_FUNC uint64_t lj_carith_shl64(uint64_t x, int32_t sh); -LJ_FUNC uint64_t lj_carith_shr64(uint64_t x, int32_t sh); -LJ_FUNC uint64_t lj_carith_sar64(uint64_t x, int32_t sh); -LJ_FUNC uint64_t lj_carith_rol64(uint64_t x, int32_t sh); -LJ_FUNC uint64_t lj_carith_ror64(uint64_t x, int32_t sh); -#endif -LJ_FUNC uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op); -LJ_FUNC uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id); - -#if LJ_32 && LJ_HASJIT -LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k); -#endif -LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b); -LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b); -LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b); -LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b); -LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k); -LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.c deleted file mode 100644 index 5ab5b60daab..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.c +++ /dev/null @@ -1,984 +0,0 @@ -/* -** FFI C call handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_ctype.h" -#include "lj_cconv.h" -#include "lj_cdata.h" -#include "lj_ccall.h" -#include "lj_trace.h" - -/* Target-specific handling of register arguments. */ -#if LJ_TARGET_X86 -/* -- x86 calling conventions --------------------------------------------- */ - -#if LJ_ABI_WIN - -#define CCALL_HANDLE_STRUCTRET \ - /* Return structs bigger than 8 by reference (on stack only). */ \ - cc->retref = (sz > 8); \ - if (cc->retref) cc->stack[nsp++] = (GPRArg)dp; - -#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET - -#else - -#if LJ_TARGET_OSX - -#define CCALL_HANDLE_STRUCTRET \ - /* Return structs of size 1, 2, 4 or 8 in registers. */ \ - cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \ - if (cc->retref) { \ - if (ngpr < maxgpr) \ - cc->gpr[ngpr++] = (GPRArg)dp; \ - else \ - cc->stack[nsp++] = (GPRArg)dp; \ - } else { /* Struct with single FP field ends up in FPR. */ \ - cc->resx87 = ccall_classify_struct(cts, ctr); \ - } - -#define CCALL_HANDLE_STRUCTRET2 \ - if (cc->resx87) sp = (uint8_t *)&cc->fpr[0]; \ - memcpy(dp, sp, ctr->size); - -#else - -#define CCALL_HANDLE_STRUCTRET \ - cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \ - if (ngpr < maxgpr) \ - cc->gpr[ngpr++] = (GPRArg)dp; \ - else \ - cc->stack[nsp++] = (GPRArg)dp; - -#endif - -#define CCALL_HANDLE_COMPLEXRET \ - /* Return complex float in GPRs and complex double by reference. */ \ - cc->retref = (sz > 8); \ - if (cc->retref) { \ - if (ngpr < maxgpr) \ - cc->gpr[ngpr++] = (GPRArg)dp; \ - else \ - cc->stack[nsp++] = (GPRArg)dp; \ - } - -#endif - -#define CCALL_HANDLE_COMPLEXRET2 \ - if (!cc->retref) \ - *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */ - -#define CCALL_HANDLE_STRUCTARG \ - ngpr = maxgpr; /* Pass all structs by value on the stack. */ - -#define CCALL_HANDLE_COMPLEXARG \ - isfp = 1; /* Pass complex by value on stack. 
*/ - -#define CCALL_HANDLE_REGARG \ - if (!isfp) { /* Only non-FP values may be passed in registers. */ \ - if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \ - if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \ - } else if (ngpr + 1 <= maxgpr) { \ - dp = &cc->gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#elif LJ_TARGET_X64 && LJ_ABI_WIN -/* -- Windows/x64 calling conventions ------------------------------------- */ - -#define CCALL_HANDLE_STRUCTRET \ - /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \ - cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \ - if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; - -#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET - -#define CCALL_HANDLE_COMPLEXRET2 \ - if (!cc->retref) \ - *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */ - -#define CCALL_HANDLE_STRUCTARG \ - /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \ - if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \ - rp = cdataptr(lj_cdata_new(cts, did, sz)); \ - sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \ - } - -#define CCALL_HANDLE_COMPLEXARG \ - /* Pass complex float in a GPR and complex double by reference. */ \ - if (sz != 2*sizeof(float)) { \ - rp = cdataptr(lj_cdata_new(cts, did, sz)); \ - sz = CTSIZE_PTR; \ - } - -/* Windows/x64 argument registers are strictly positional (use ngpr). */ -#define CCALL_HANDLE_REGARG \ - if (isfp) { \ - if (ngpr < maxgpr) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \ - } else { \ - if (ngpr < maxgpr) { dp = &cc->gpr[ngpr++]; goto done; } \ - } - -#elif LJ_TARGET_X64 -/* -- POSIX/x64 calling conventions --------------------------------------- */ - -#define CCALL_HANDLE_STRUCTRET \ - int rcl[2]; rcl[0] = rcl[1] = 0; \ - if (ccall_classify_struct(cts, ctr, rcl, 0)) { \ - cc->retref = 1; /* Return struct by reference. */ \ - cc->gpr[ngpr++] = (GPRArg)dp; \ - } else { \ - cc->retref = 0; /* Return small structs in registers. */ \ - } - -#define CCALL_HANDLE_STRUCTRET2 \ - int rcl[2]; rcl[0] = rcl[1] = 0; \ - ccall_classify_struct(cts, ctr, rcl, 0); \ - ccall_struct_ret(cc, rcl, dp, ctr->size); - -#define CCALL_HANDLE_COMPLEXRET \ - /* Complex values are returned in one or two FPRs. */ \ - cc->retref = 0; - -#define CCALL_HANDLE_COMPLEXRET2 \ - if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \ - *(int64_t *)dp = cc->fpr[0].l[0]; \ - } else { /* Copy non-contiguous complex double from FPRs. */ \ - ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \ - ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \ - } - -#define CCALL_HANDLE_STRUCTARG \ - int rcl[2]; rcl[0] = rcl[1] = 0; \ - if (!ccall_classify_struct(cts, d, rcl, 0)) { \ - cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \ - if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \ - nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \ - continue; \ - } /* Pass all other structs by value on stack. */ - -#define CCALL_HANDLE_COMPLEXARG \ - isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */ - -#define CCALL_HANDLE_REGARG \ - if (isfp) { /* Try to pass argument in FPRs. */ \ - int n2 = ctype_isvector(d->info) ? 1 : n; \ - if (nfpr + n2 <= CCALL_NARG_FPR) { \ - dp = &cc->fpr[nfpr]; \ - nfpr += n2; \ - goto done; \ - } \ - } else { /* Try to pass argument in GPRs. */ \ - /* Note that reordering is explicitly allowed in the x64 ABI. 
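The Windows/x64 macro advances a single positional counter for both register files, while the POSIX/x64 macros keep separate GPR and FPR counters. How the same prototype lands in registers under the two conventions (illustration of standard ABI behaviour, not anything specific to this code):

    /* void f(int a, double b, int c, double d);
    **
    ** Windows/x64 (positional slots): a -> rcx, b -> xmm1, c -> r8d, d -> xmm3
    ** SysV/x64   (separate counters): a -> edi, b -> xmm0, c -> esi, d -> xmm1
    */
    void f(int a, double b, int c, double d);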
*/ \ - if (n <= 2 && ngpr + n <= maxgpr) { \ - dp = &cc->gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#elif LJ_TARGET_ARM -/* -- ARM calling conventions --------------------------------------------- */ - -#if LJ_ABI_SOFTFP - -#define CCALL_HANDLE_STRUCTRET \ - /* Return structs of size <= 4 in a GPR. */ \ - cc->retref = !(sz <= 4); \ - if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; - -#define CCALL_HANDLE_COMPLEXRET \ - cc->retref = 1; /* Return all complex values by reference. */ \ - cc->gpr[ngpr++] = (GPRArg)dp; - -#define CCALL_HANDLE_COMPLEXRET2 \ - UNUSED(dp); /* Nothing to do. */ - -#define CCALL_HANDLE_STRUCTARG \ - /* Pass all structs by value in registers and/or on the stack. */ - -#define CCALL_HANDLE_COMPLEXARG \ - /* Pass complex by value in 2 or 4 GPRs. */ - -#define CCALL_HANDLE_REGARG_FP1 -#define CCALL_HANDLE_REGARG_FP2 - -#else - -#define CCALL_HANDLE_STRUCTRET \ - cc->retref = !ccall_classify_struct(cts, ctr, ct); \ - if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; - -#define CCALL_HANDLE_STRUCTRET2 \ - if (ccall_classify_struct(cts, ctr, ct) > 1) sp = (uint8_t *)&cc->fpr[0]; \ - memcpy(dp, sp, ctr->size); - -#define CCALL_HANDLE_COMPLEXRET \ - if (!(ct->info & CTF_VARARG)) cc->retref = 0; /* Return complex in FPRs. */ - -#define CCALL_HANDLE_COMPLEXRET2 \ - if (!(ct->info & CTF_VARARG)) memcpy(dp, &cc->fpr[0], ctr->size); - -#define CCALL_HANDLE_STRUCTARG \ - isfp = (ccall_classify_struct(cts, d, ct) > 1); - /* Pass all structs by value in registers and/or on the stack. */ - -#define CCALL_HANDLE_COMPLEXARG \ - isfp = 1; /* Pass complex by value in FPRs or on stack. */ - -#define CCALL_HANDLE_REGARG_FP1 \ - if (isfp && !(ct->info & CTF_VARARG)) { \ - if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \ - if (nfpr + (n >> 1) <= CCALL_NARG_FPR) { \ - dp = &cc->fpr[nfpr]; \ - nfpr += (n >> 1); \ - goto done; \ - } \ - } else { \ - if (sz > 1 && fprodd != nfpr) fprodd = 0; \ - if (fprodd) { \ - if (2*nfpr+n <= 2*CCALL_NARG_FPR+1) { \ - dp = (void *)&cc->fpr[fprodd-1].f[1]; \ - nfpr += (n >> 1); \ - if ((n & 1)) fprodd = 0; else fprodd = nfpr-1; \ - goto done; \ - } \ - } else { \ - if (2*nfpr+n <= 2*CCALL_NARG_FPR) { \ - dp = (void *)&cc->fpr[nfpr]; \ - nfpr += (n >> 1); \ - if ((n & 1)) fprodd = ++nfpr; else fprodd = 0; \ - goto done; \ - } \ - } \ - } \ - fprodd = 0; /* No reordering after the first FP value is on stack. */ \ - } else { - -#define CCALL_HANDLE_REGARG_FP2 } - -#endif - -#define CCALL_HANDLE_REGARG \ - CCALL_HANDLE_REGARG_FP1 \ - if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \ - if (ngpr < maxgpr) \ - ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ - } \ - if (ngpr < maxgpr) { \ - dp = &cc->gpr[ngpr]; \ - if (ngpr + n > maxgpr) { \ - nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \ - if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \ - ngpr = maxgpr; \ - } else { \ - ngpr += n; \ - } \ - goto done; \ - } CCALL_HANDLE_REGARG_FP2 - -#define CCALL_HANDLE_RET \ - if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0]; - -#elif LJ_TARGET_ARM64 -/* -- ARM64 calling conventions ------------------------------------------- */ - -#define CCALL_HANDLE_STRUCTRET \ - cc->retref = !ccall_classify_struct(cts, ctr); \ - if (cc->retref) cc->retp = dp; - -#define CCALL_HANDLE_STRUCTRET2 \ - unsigned int cl = ccall_classify_struct(cts, ctr); \ - if ((cl & 4)) { /* Combine float HFA from separate registers. 
*/ \ - CTSize i = (cl >> 8) - 1; \ - do { ((uint32_t *)dp)[i] = cc->fpr[i].u32; } while (i--); \ - } else { \ - if (cl > 1) sp = (uint8_t *)&cc->fpr[0]; \ - memcpy(dp, sp, ctr->size); \ - } - -#define CCALL_HANDLE_COMPLEXRET \ - /* Complex values are returned in one or two FPRs. */ \ - cc->retref = 0; - -#define CCALL_HANDLE_COMPLEXRET2 \ - if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \ - ((float *)dp)[0] = cc->fpr[0].f; \ - ((float *)dp)[1] = cc->fpr[1].f; \ - } else { /* Copy complex double from FPRs. */ \ - ((double *)dp)[0] = cc->fpr[0].d; \ - ((double *)dp)[1] = cc->fpr[1].d; \ - } - -#define CCALL_HANDLE_STRUCTARG \ - unsigned int cl = ccall_classify_struct(cts, d); \ - if (cl == 0) { /* Pass struct by reference. */ \ - rp = cdataptr(lj_cdata_new(cts, did, sz)); \ - sz = CTSIZE_PTR; \ - } else if (cl > 1) { /* Pass struct in FPRs or on stack. */ \ - isfp = (cl & 4) ? 2 : 1; \ - } /* else: Pass struct in GPRs or on stack. */ - -#define CCALL_HANDLE_COMPLEXARG \ - /* Pass complex by value in separate (!) FPRs or on stack. */ \ - isfp = ctr->size == 2*sizeof(float) ? 2 : 1; - -#define CCALL_HANDLE_REGARG \ - if (LJ_TARGET_IOS && isva) { \ - /* IOS: All variadic arguments are on the stack. */ \ - } else if (isfp) { /* Try to pass argument in FPRs. */ \ - int n2 = ctype_isvector(d->info) ? 1 : n*isfp; \ - if (nfpr + n2 <= CCALL_NARG_FPR) { \ - dp = &cc->fpr[nfpr]; \ - nfpr += n2; \ - goto done; \ - } else { \ - nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \ - if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \ - } \ - } else { /* Try to pass argument in GPRs. */ \ - if (!LJ_TARGET_IOS && (d->info & CTF_ALIGN) > CTALIGN_PTR) \ - ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ - if (ngpr + n <= maxgpr) { \ - dp = &cc->gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } else { \ - ngpr = maxgpr; /* Prevent reordering. */ \ - if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \ - } \ - } - -#elif LJ_TARGET_PPC -/* -- PPC calling conventions --------------------------------------------- */ - -#define CCALL_HANDLE_STRUCTRET \ - cc->retref = 1; /* Return all structs by reference. */ \ - cc->gpr[ngpr++] = (GPRArg)dp; - -#define CCALL_HANDLE_COMPLEXRET \ - /* Complex values are returned in 2 or 4 GPRs. */ \ - cc->retref = 0; - -#define CCALL_HANDLE_COMPLEXRET2 \ - memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */ - -#define CCALL_HANDLE_STRUCTARG \ - rp = cdataptr(lj_cdata_new(cts, did, sz)); \ - sz = CTSIZE_PTR; /* Pass all structs by reference. */ - -#define CCALL_HANDLE_COMPLEXARG \ - /* Pass complex by value in 2 or 4 GPRs. */ - -#define CCALL_HANDLE_REGARG \ - if (isfp) { /* Try to pass argument in FPRs. */ \ - if (nfpr + 1 <= CCALL_NARG_FPR) { \ - dp = &cc->fpr[nfpr]; \ - nfpr += 1; \ - d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \ - goto done; \ - } \ - } else { /* Try to pass argument in GPRs. */ \ - if (n > 1) { \ - lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \ - if (ctype_isinteger(d->info)) \ - ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ - else if (ngpr + n > maxgpr) \ - ngpr = maxgpr; /* Prevent reordering. */ \ - } \ - if (ngpr + n <= maxgpr) { \ - dp = &cc->gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#define CCALL_HANDLE_RET \ - if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ - ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. 
*/ - -#elif LJ_TARGET_MIPS -/* -- MIPS calling conventions -------------------------------------------- */ - -#define CCALL_HANDLE_STRUCTRET \ - cc->retref = 1; /* Return all structs by reference. */ \ - cc->gpr[ngpr++] = (GPRArg)dp; - -#define CCALL_HANDLE_COMPLEXRET \ - /* Complex values are returned in 1 or 2 FPRs. */ \ - cc->retref = 0; - -#define CCALL_HANDLE_COMPLEXRET2 \ - if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \ - ((float *)dp)[0] = cc->fpr[0].f; \ - ((float *)dp)[1] = cc->fpr[1].f; \ - } else { /* Copy complex double from FPRs. */ \ - ((double *)dp)[0] = cc->fpr[0].d; \ - ((double *)dp)[1] = cc->fpr[1].d; \ - } - -#define CCALL_HANDLE_STRUCTARG \ - /* Pass all structs by value in registers and/or on the stack. */ - -#define CCALL_HANDLE_COMPLEXARG \ - /* Pass complex by value in 2 or 4 GPRs. */ - -#define CCALL_HANDLE_REGARG \ - if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \ - /* Try to pass argument in FPRs. */ \ - dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \ - nfpr++; ngpr += n; \ - goto done; \ - } else { /* Try to pass argument in GPRs. */ \ - nfpr = CCALL_NARG_FPR; \ - if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \ - ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ - if (ngpr < maxgpr) { \ - dp = &cc->gpr[ngpr]; \ - if (ngpr + n > maxgpr) { \ - nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \ - if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \ - ngpr = maxgpr; \ - } else { \ - ngpr += n; \ - } \ - goto done; \ - } \ - } - -#define CCALL_HANDLE_RET \ - if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ - sp = (uint8_t *)&cc->fpr[0].f; - -#else -#error "Missing calling convention definitions for this architecture" -#endif - -#ifndef CCALL_HANDLE_STRUCTRET2 -#define CCALL_HANDLE_STRUCTRET2 \ - memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */ -#endif - -/* -- x86 OSX ABI struct classification ----------------------------------- */ - -#if LJ_TARGET_X86 && LJ_TARGET_OSX - -/* Check for struct with single FP field. */ -static int ccall_classify_struct(CTState *cts, CType *ct) -{ - CTSize sz = ct->size; - if (!(sz == sizeof(float) || sz == sizeof(double))) return 0; - if ((ct->info & CTF_UNION)) return 0; - while (ct->sib) { - ct = ctype_get(cts, ct->sib); - if (ctype_isfield(ct->info)) { - CType *sct = ctype_rawchild(cts, ct); - if (ctype_isfp(sct->info)) { - if (sct->size == sz) - return (sz >> 2); /* Return 1 for float or 2 for double. */ - } else if (ctype_isstruct(sct->info)) { - if (sct->size) - return ccall_classify_struct(cts, sct); - } else { - break; - } - } else if (ctype_isbitfield(ct->info)) { - break; - } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { - CType *sct = ctype_rawchild(cts, ct); - if (sct->size) - return ccall_classify_struct(cts, sct); - } - } - return 0; -} - -#endif - -/* -- x64 struct classification ------------------------------------------- */ - -#if LJ_TARGET_X64 && !LJ_ABI_WIN - -/* Register classes for x64 struct classification. */ -#define CCALL_RCL_INT 1 -#define CCALL_RCL_SSE 2 -#define CCALL_RCL_MEM 4 -/* NYI: classify vectors. */ - -static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs); - -/* Classify a C type. 
*/ -static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs) -{ - if (ctype_isarray(ct->info)) { - CType *cct = ctype_rawchild(cts, ct); - CTSize eofs, esz = cct->size, asz = ct->size; - for (eofs = 0; eofs < asz; eofs += esz) - ccall_classify_ct(cts, cct, rcl, ofs+eofs); - } else if (ctype_isstruct(ct->info)) { - ccall_classify_struct(cts, ct, rcl, ofs); - } else { - int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT; - lua_assert(ctype_hassize(ct->info)); - if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */ - rcl[(ofs >= 8)] |= cl; - } -} - -/* Recursively classify a struct based on its fields. */ -static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs) -{ - if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */ - while (ct->sib) { - CTSize fofs; - ct = ctype_get(cts, ct->sib); - fofs = ofs+ct->size; - if (ctype_isfield(ct->info)) - ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs); - else if (ctype_isbitfield(ct->info)) - rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */ - else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) - ccall_classify_struct(cts, ctype_rawchild(cts, ct), rcl, fofs); - } - return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */ -} - -/* Try to split up a small struct into registers. */ -static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl) -{ - MSize ngpr = cc->ngpr, nfpr = cc->nfpr; - uint32_t i; - for (i = 0; i < 2; i++) { - lua_assert(!(rcl[i] & CCALL_RCL_MEM)); - if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ - if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */ - cc->gpr[ngpr++] = dp[i]; - } else if ((rcl[i] & CCALL_RCL_SSE)) { - if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */ - cc->fpr[nfpr++].l[0] = dp[i]; - } - } - cc->ngpr = ngpr; cc->nfpr = nfpr; - return 0; /* Ok. */ -} - -/* Pass a small struct argument. */ -static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl, - TValue *o, int narg) -{ - GPRArg dp[2]; - dp[0] = dp[1] = 0; - /* Convert to temp. struct. */ - lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); - if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */ - MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1; - if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */ - cc->nsp = nsp + n; - memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR); - } - return 0; /* Ok. */ -} - -/* Combine returned small struct. */ -static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz) -{ - GPRArg sp[2]; - MSize ngpr = 0, nfpr = 0; - uint32_t i; - for (i = 0; i < 2; i++) { - if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ - sp[i] = cc->gpr[ngpr++]; - } else if ((rcl[i] & CCALL_RCL_SSE)) { - sp[i] = cc->fpr[nfpr++].l[0]; - } - } - memcpy(dp, sp, sz); -} -#endif - -/* -- ARM hard-float ABI struct classification ---------------------------- */ - -#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP - -/* Classify a struct based on its fields. 
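ccall_classify_struct() above implements the SysV x64 rule: a struct of at most 16 bytes is split into two "eightbytes", each classified INTEGER or SSE from the fields that land in it, while anything bigger or unaligned degrades to memory class. Worked examples of what that classification means in practice (standard ABI behaviour; sizes assume LP64):

    #include <stdio.h>

    struct dd  { double x, y; };          /* 16B: SSE+SSE     -> xmm pair         */
    struct iid { int a, b; double z; };   /* 16B: INTEGER+SSE -> one GPR, one XMM */
    struct big { double v[3]; };          /* 24B: > 16 bytes  -> memory (stack)   */

    int main(void)
    {
      printf("%zu %zu %zu\n", sizeof(struct dd), sizeof(struct iid),
             sizeof(struct big));   /* expected: 16 16 24 */
      return 0;
    }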
*/ -static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf) -{ - CTSize sz = ct->size; - unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION); - if ((ctf->info & CTF_VARARG)) goto noth; - while (ct->sib) { - CType *sct; - ct = ctype_get(cts, ct->sib); - if (ctype_isfield(ct->info)) { - sct = ctype_rawchild(cts, ct); - if (ctype_isfp(sct->info)) { - r |= sct->size; - if (!isu) n++; else if (n == 0) n = 1; - } else if (ctype_iscomplex(sct->info)) { - r |= (sct->size >> 1); - if (!isu) n += 2; else if (n < 2) n = 2; - } else if (ctype_isstruct(sct->info)) { - goto substruct; - } else { - goto noth; - } - } else if (ctype_isbitfield(ct->info)) { - goto noth; - } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { - sct = ctype_rawchild(cts, ct); - substruct: - if (sct->size > 0) { - unsigned int s = ccall_classify_struct(cts, sct, ctf); - if (s <= 1) goto noth; - r |= (s & 255); - if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8); - } - } - } - if ((r == 4 || r == 8) && n <= 4) - return r + (n << 8); -noth: /* Not a homogeneous float/double aggregate. */ - return (sz <= 4); /* Return structs of size <= 4 in a GPR. */ -} - -#endif - -/* -- ARM64 ABI struct classification ------------------------------------- */ - -#if LJ_TARGET_ARM64 - -/* Classify a struct based on its fields. */ -static unsigned int ccall_classify_struct(CTState *cts, CType *ct) -{ - CTSize sz = ct->size; - unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION); - while (ct->sib) { - CType *sct; - ct = ctype_get(cts, ct->sib); - if (ctype_isfield(ct->info)) { - sct = ctype_rawchild(cts, ct); - if (ctype_isfp(sct->info)) { - r |= sct->size; - if (!isu) n++; else if (n == 0) n = 1; - } else if (ctype_iscomplex(sct->info)) { - r |= (sct->size >> 1); - if (!isu) n += 2; else if (n < 2) n = 2; - } else if (ctype_isstruct(sct->info)) { - goto substruct; - } else { - goto noth; - } - } else if (ctype_isbitfield(ct->info)) { - goto noth; - } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { - sct = ctype_rawchild(cts, ct); - substruct: - if (sct->size > 0) { - unsigned int s = ccall_classify_struct(cts, sct); - if (s <= 1) goto noth; - r |= (s & 255); - if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8); - } - } - } - if ((r == 4 || r == 8) && n <= 4) - return r + (n << 8); -noth: /* Not a homogeneous float/double aggregate. */ - return (sz <= 16); /* Return structs of size <= 16 in GPRs. */ -} - -#endif - -/* -- Common C call handling ---------------------------------------------- */ - -/* Infer the destination CTypeID for a vararg argument. */ -CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o) -{ - if (tvisnumber(o)) { - return CTID_DOUBLE; - } else if (tviscdata(o)) { - CTypeID id = cdataV(o)->ctypeid; - CType *s = ctype_get(cts, id); - if (ctype_isrefarray(s->info)) { - return lj_ctype_intern(cts, - CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR); - } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) { - /* NYI: how to pass a struct by value in a vararg argument? */ - return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR); - } else if (ctype_isfp(s->info) && s->size == sizeof(float)) { - return CTID_DOUBLE; - } else { - return id; - } - } else if (tvisstr(o)) { - return CTID_P_CCHAR; - } else if (tvisbool(o)) { - return CTID_BOOL; - } else { - return CTID_P_VOID; - } -} - -/* Setup arguments for C call. 
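Both ARM classifiers above look for homogeneous floating-point aggregates: every leaf field the same FP size and at most four of them (r encodes the element size, n the count). Such aggregates travel in FP registers; anything mixed falls back to GPRs or the stack. Illustrative declarations (standard AAPCS behaviour, not code from this patch):

    struct hfa2  { float x, y; };         /* HFA of 2 floats  -> s0/s1            */
    struct hfa4  { double a, b, c, d; };  /* HFA of 4 doubles -> d0-d3            */
    struct mixed { float x; int i; };     /* mixed field types -> not an HFA,
                                          ** passed in GPRs or on the stack       */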
*/ -static int ccall_set_args(lua_State *L, CTState *cts, CType *ct, - CCallState *cc) -{ - int gcsteps = 0; - TValue *o, *top = L->top; - CTypeID fid; - CType *ctr; - MSize maxgpr, ngpr = 0, nsp = 0, narg; -#if CCALL_NARG_FPR - MSize nfpr = 0; -#if LJ_TARGET_ARM - MSize fprodd = 0; -#endif -#endif - - /* Clear unused regs to get some determinism in case of misdeclaration. */ - memset(cc->gpr, 0, sizeof(cc->gpr)); -#if CCALL_NUM_FPR - memset(cc->fpr, 0, sizeof(cc->fpr)); -#endif - -#if LJ_TARGET_X86 - /* x86 has several different calling conventions. */ - cc->resx87 = 0; - switch (ctype_cconv(ct->info)) { - case CTCC_FASTCALL: maxgpr = 2; break; - case CTCC_THISCALL: maxgpr = 1; break; - default: maxgpr = 0; break; - } -#else - maxgpr = CCALL_NARG_GPR; -#endif - - /* Perform required setup for some result types. */ - ctr = ctype_rawchild(cts, ct); - if (ctype_isvector(ctr->info)) { - if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16))) - goto err_nyi; - } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) { - /* Preallocate cdata object and anchor it after arguments. */ - CTSize sz = ctr->size; - GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz); - void *dp = cdataptr(cd); - setcdataV(L, L->top++, cd); - if (ctype_isstruct(ctr->info)) { - CCALL_HANDLE_STRUCTRET - } else { - CCALL_HANDLE_COMPLEXRET - } -#if LJ_TARGET_X86 - } else if (ctype_isfp(ctr->info)) { - cc->resx87 = ctr->size == sizeof(float) ? 1 : 2; -#endif - } - - /* Skip initial attributes. */ - fid = ct->sib; - while (fid) { - CType *ctf = ctype_get(cts, fid); - if (!ctype_isattrib(ctf->info)) break; - fid = ctf->sib; - } - - /* Walk through all passed arguments. */ - for (o = L->base+1, narg = 1; o < top; o++, narg++) { - CTypeID did; - CType *d; - CTSize sz; - MSize n, isfp = 0, isva = 0; - void *dp, *rp = NULL; - - if (fid) { /* Get argument type from field. */ - CType *ctf = ctype_get(cts, fid); - fid = ctf->sib; - lua_assert(ctype_isfield(ctf->info)); - did = ctype_cid(ctf->info); - } else { - if (!(ct->info & CTF_VARARG)) - lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */ - did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */ - isva = 1; - } - d = ctype_raw(cts, did); - sz = d->size; - - /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */ - if (ctype_isnum(d->info)) { - if (sz > 8) goto err_nyi; - if ((d->info & CTF_FP)) - isfp = 1; - } else if (ctype_isvector(d->info)) { - if (CCALL_VECTOR_REG && (sz == 8 || sz == 16)) - isfp = 1; - else - goto err_nyi; - } else if (ctype_isstruct(d->info)) { - CCALL_HANDLE_STRUCTARG - } else if (ctype_iscomplex(d->info)) { - CCALL_HANDLE_COMPLEXARG - } else { - sz = CTSIZE_PTR; - } - sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); - n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */ - - CCALL_HANDLE_REGARG /* Handle register arguments. */ - - /* Otherwise pass argument on stack. */ - if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) { - MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1; - nsp = (nsp + align) & ~align; /* Align argument on stack. */ - } - if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */ - err_nyi: - lj_err_caller(L, LJ_ERR_FFI_NYICALL); - } - dp = &cc->stack[nsp]; - nsp += n; - isva = 0; - - done: - if (rp) { /* Pass by reference. */ - gcsteps++; - *(void **)dp = rp; - dp = rp; - } - lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); - /* Extend passed integers to 32 bits at least. 
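When an argument ends up on the stack and its type is over-aligned, nsp is rounded up with the usual power-of-two alignment trick before slots are assigned (the code above keeps `align` as a mask, i.e. alignment minus one). The same idiom in isolation:

    #include <stddef.h>

    /* Round n up to a multiple of `align`, which must be a power of two,
    ** e.g. align_up(13, 8) == 16 and align_up(16, 8) == 16. */
    static size_t align_up(size_t n, size_t align)
    {
      return (n + align - 1) & ~(align - 1);
    }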
*/ - if (ctype_isinteger_or_bool(d->info) && d->size < 4) { - if (d->info & CTF_UNSIGNED) - *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp : - (uint32_t)*(uint16_t *)dp; - else - *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp : - (int32_t)*(int16_t *)dp; - } -#if LJ_TARGET_X64 && LJ_ABI_WIN - if (isva) { /* Windows/x64 mirrors varargs in both register sets. */ - if (nfpr == ngpr) - cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0]; - else - cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1]; - } -#else - UNUSED(isva); -#endif -#if LJ_TARGET_X64 && !LJ_ABI_WIN - if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) { - cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */ - cc->fpr[nfpr-2].d[1] = 0; - } -#elif LJ_TARGET_ARM64 - if (isfp == 2 && (uint8_t *)dp < (uint8_t *)cc->stack) { - /* Split float HFA or complex float into separate registers. */ - CTSize i = (sz >> 2) - 1; - do { ((uint64_t *)dp)[i] = ((uint32_t *)dp)[i]; } while (i--); - } -#else - UNUSED(isfp); -#endif - } - if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */ - -#if LJ_TARGET_X64 || LJ_TARGET_PPC - cc->nfpr = nfpr; /* Required for vararg functions. */ -#endif - cc->nsp = nsp; - cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR; - if (nsp > CCALL_SPS_FREE) - cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u); - return gcsteps; -} - -/* Get results from C call. */ -static int ccall_get_results(lua_State *L, CTState *cts, CType *ct, - CCallState *cc, int *ret) -{ - CType *ctr = ctype_rawchild(cts, ct); - uint8_t *sp = (uint8_t *)&cc->gpr[0]; - if (ctype_isvoid(ctr->info)) { - *ret = 0; /* Zero results. */ - return 0; /* No additional GC step. */ - } - *ret = 1; /* One result. */ - if (ctype_isstruct(ctr->info)) { - /* Return cdata object which is already on top of stack. */ - if (!cc->retref) { - void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */ - CCALL_HANDLE_STRUCTRET2 - } - return 1; /* One GC step. */ - } - if (ctype_iscomplex(ctr->info)) { - /* Return cdata object which is already on top of stack. */ - void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */ - CCALL_HANDLE_COMPLEXRET2 - return 1; /* One GC step. */ - } - if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR) - sp += (CTSIZE_PTR - ctr->size); -#if CCALL_NUM_FPR - if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info)) - sp = (uint8_t *)&cc->fpr[0]; -#endif -#ifdef CCALL_HANDLE_RET - CCALL_HANDLE_RET -#endif - /* No reference types end up here, so there's no need for the CTypeID. */ - lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info))); - return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp); -} - -/* Call C function. */ -int lj_ccall_func(lua_State *L, GCcdata *cd) -{ - CTState *cts = ctype_cts(L); - CType *ct = ctype_raw(cts, cd->ctypeid); - CTSize sz = CTSIZE_PTR; - if (ctype_isptr(ct->info)) { - sz = ct->size; - ct = ctype_rawchild(cts, ct); - } - if (ctype_isfunc(ct->info)) { - CCallState cc; - int gcsteps, ret; - cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz); - gcsteps = ccall_set_args(L, cts, ct, &cc); - ct = (CType *)((intptr_t)ct-(intptr_t)cts->tab); - cts->cb.slot = ~0u; - lj_vm_ffi_call(&cc); - if (cts->cb.slot != ~0u) { /* Blacklist function that called a callback. */ - TValue tv; - setlightudV(&tv, (void *)cc.func); - setboolV(lj_tab_set(L, cts->miscmap, &tv), 1); - } - ct = (CType *)((intptr_t)ct+(intptr_t)cts->tab); /* May be reallocated. 
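Because every argument slot is a full GPR-sized word, 8- and 16-bit integers are widened to 32 bits in place right after conversion, sign- or zero-extending according to whether the declared ctype is unsigned. A minimal sketch of that widening (hypothetical helper):

    #include <stdint.h>

    /* Widen a 1- or 2-byte integer payload to 32 bits; `is_unsigned`
    ** mirrors the CTF_UNSIGNED test on the declared C type. */
    static uint32_t widen32(const void *slot, int size, int is_unsigned)
    {
      if (size == 1)
        return is_unsigned ? (uint32_t)*(const uint8_t *)slot
                           : (uint32_t)(int32_t)*(const int8_t *)slot;
      /* size == 2 */
      return is_unsigned ? (uint32_t)*(const uint16_t *)slot
                         : (uint32_t)(int32_t)*(const int16_t *)slot;
    }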
*/ - gcsteps += ccall_get_results(L, cts, ct, &cc, &ret); -#if LJ_TARGET_X86 && LJ_ABI_WIN - /* Automatically detect __stdcall and fix up C function declaration. */ - if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) { - CTF_INSERT(ct->info, CCONV, CTCC_STDCALL); - lj_trace_abort(G(L)); - } -#endif - while (gcsteps-- > 0) - lj_gc_check(L); - return ret; - } - return -1; /* Not a function. */ -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.h deleted file mode 100644 index 91983feebd7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccall.h +++ /dev/null @@ -1,178 +0,0 @@ -/* -** FFI C call handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CCALL_H -#define _LJ_CCALL_H - -#include "lj_obj.h" -#include "lj_ctype.h" - -#if LJ_HASFFI - -/* -- C calling conventions ----------------------------------------------- */ - -#if LJ_TARGET_X86ORX64 - -#if LJ_TARGET_X86 -#define CCALL_NARG_GPR 2 /* For fastcall arguments. */ -#define CCALL_NARG_FPR 0 -#define CCALL_NRET_GPR 2 -#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */ -#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */ -#elif LJ_ABI_WIN -#define CCALL_NARG_GPR 4 -#define CCALL_NARG_FPR 4 -#define CCALL_NRET_GPR 1 -#define CCALL_NRET_FPR 1 -#define CCALL_SPS_EXTRA 4 -#else -#define CCALL_NARG_GPR 6 -#define CCALL_NARG_FPR 8 -#define CCALL_NRET_GPR 2 -#define CCALL_NRET_FPR 2 -#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */ -#endif - -#define CCALL_SPS_FREE 1 -#define CCALL_ALIGN_CALLSTATE 16 - -typedef LJ_ALIGN(16) union FPRArg { - double d[2]; - float f[4]; - uint8_t b[16]; - uint16_t s[8]; - int i[4]; - int64_t l[2]; -} FPRArg; - -typedef intptr_t GPRArg; - -#elif LJ_TARGET_ARM - -#define CCALL_NARG_GPR 4 -#define CCALL_NRET_GPR 2 /* For softfp double. */ -#if LJ_ABI_SOFTFP -#define CCALL_NARG_FPR 0 -#define CCALL_NRET_FPR 0 -#else -#define CCALL_NARG_FPR 8 -#define CCALL_NRET_FPR 4 -#endif -#define CCALL_SPS_FREE 0 - -typedef intptr_t GPRArg; -typedef union FPRArg { - double d; - float f[2]; -} FPRArg; - -#elif LJ_TARGET_ARM64 - -#define CCALL_NARG_GPR 8 -#define CCALL_NRET_GPR 2 -#define CCALL_NARG_FPR 8 -#define CCALL_NRET_FPR 4 -#define CCALL_SPS_FREE 0 - -typedef intptr_t GPRArg; -typedef union FPRArg { - double d; - float f; - uint32_t u32; -} FPRArg; - -#elif LJ_TARGET_PPC - -#define CCALL_NARG_GPR 8 -#define CCALL_NARG_FPR 8 -#define CCALL_NRET_GPR 4 /* For complex double. */ -#define CCALL_NRET_FPR 1 -#define CCALL_SPS_EXTRA 4 -#define CCALL_SPS_FREE 0 - -typedef intptr_t GPRArg; -typedef double FPRArg; - -#elif LJ_TARGET_MIPS - -#define CCALL_NARG_GPR 4 -#define CCALL_NARG_FPR 2 -#define CCALL_NRET_GPR 2 -#define CCALL_NRET_FPR 2 -#define CCALL_SPS_EXTRA 7 -#define CCALL_SPS_FREE 1 - -typedef intptr_t GPRArg; -typedef union FPRArg { - double d; - struct { LJ_ENDIAN_LOHI(float f; , float g;) }; -} FPRArg; - -#else -#error "Missing calling convention definitions for this architecture" -#endif - -#ifndef CCALL_SPS_EXTRA -#define CCALL_SPS_EXTRA 0 -#endif -#ifndef CCALL_VECTOR_REG -#define CCALL_VECTOR_REG 0 -#endif -#ifndef CCALL_ALIGN_STACKARG -#define CCALL_ALIGN_STACKARG 1 -#endif -#ifndef CCALL_ALIGN_CALLSTATE -#define CCALL_ALIGN_CALLSTATE 8 -#endif - -#define CCALL_NUM_GPR \ - (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR) -#define CCALL_NUM_FPR \ - (CCALL_NARG_FPR > CCALL_NRET_FPR ? 
CCALL_NARG_FPR : CCALL_NRET_FPR) - -/* Check against constants in lj_ctype.h. */ -LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR); -LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR); - -#define CCALL_MAXSTACK 32 - -/* -- C call state -------------------------------------------------------- */ - -typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState { - void (*func)(void); /* Pointer to called function. */ - uint32_t spadj; /* Stack pointer adjustment. */ - uint8_t nsp; /* Number of stack slots. */ - uint8_t retref; /* Return value by reference. */ -#if LJ_TARGET_X64 - uint8_t ngpr; /* Number of arguments in GPRs. */ - uint8_t nfpr; /* Number of arguments in FPRs. */ -#elif LJ_TARGET_X86 - uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */ -#elif LJ_TARGET_ARM64 - void *retp; /* Aggregate return pointer in x8. */ -#elif LJ_TARGET_PPC - uint8_t nfpr; /* Number of arguments in FPRs. */ -#endif -#if LJ_32 - int32_t align1; -#endif -#if CCALL_NUM_FPR - FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */ -#endif - GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */ - GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */ -} CCallState; - -/* -- C call handling ----------------------------------------------------- */ - -/* Really belongs to lj_vm.h. */ -LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc); - -LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o); -LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.c deleted file mode 100644 index 065c329fa7e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.c +++ /dev/null @@ -1,712 +0,0 @@ -/* -** FFI C callback handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_state.h" -#include "lj_frame.h" -#include "lj_ctype.h" -#include "lj_cconv.h" -#include "lj_ccall.h" -#include "lj_ccallback.h" -#include "lj_target.h" -#include "lj_mcode.h" -#include "lj_trace.h" -#include "lj_vm.h" - -/* -- Target-specific handling of callback slots -------------------------- */ - -#define CALLBACK_MCODE_SIZE (LJ_PAGESIZE * LJ_NUM_CBPAGE) - -#if LJ_OS_NOJIT - -/* Callbacks disabled. */ -#define CALLBACK_SLOT2OFS(slot) (0*(slot)) -#define CALLBACK_OFS2SLOT(ofs) (0*(ofs)) -#define CALLBACK_MAX_SLOT 0 - -#elif LJ_TARGET_X86ORX64 - -#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0) -#define CALLBACK_MCODE_GROUP (-2+1+2+(LJ_GC64 ? 10 : 5)+(LJ_64 ? 6 : 5)) - -#define CALLBACK_SLOT2OFS(slot) \ - (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot)) - -static MSize CALLBACK_OFS2SLOT(MSize ofs) -{ - MSize group; - ofs -= CALLBACK_MCODE_HEAD; - group = ofs / (32*4 + CALLBACK_MCODE_GROUP); - return (ofs % (32*4 + CALLBACK_MCODE_GROUP))/4 + group*32; -} - -#define CALLBACK_MAX_SLOT \ - (((CALLBACK_MCODE_SIZE-CALLBACK_MCODE_HEAD)/(CALLBACK_MCODE_GROUP+4*32))*32) - -#elif LJ_TARGET_ARM - -#define CALLBACK_MCODE_HEAD 32 - -#elif LJ_TARGET_ARM64 - -#define CALLBACK_MCODE_HEAD 32 - -#elif LJ_TARGET_PPC - -#define CALLBACK_MCODE_HEAD 24 - -#elif LJ_TARGET_MIPS - -#define CALLBACK_MCODE_HEAD 24 - -#else - -/* Missing support for this architecture. 
*/ -#define CALLBACK_SLOT2OFS(slot) (0*(slot)) -#define CALLBACK_OFS2SLOT(ofs) (0*(ofs)) -#define CALLBACK_MAX_SLOT 0 - -#endif - -#ifndef CALLBACK_SLOT2OFS -#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot)) -#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8) -#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE)) -#endif - -/* Convert callback slot number to callback function pointer. */ -static void *callback_slot2ptr(CTState *cts, MSize slot) -{ - return (uint8_t *)cts->cb.mcode + CALLBACK_SLOT2OFS(slot); -} - -/* Convert callback function pointer to slot number. */ -MSize lj_ccallback_ptr2slot(CTState *cts, void *p) -{ - uintptr_t ofs = (uintptr_t)((uint8_t *)p -(uint8_t *)cts->cb.mcode); - if (ofs < CALLBACK_MCODE_SIZE) { - MSize slot = CALLBACK_OFS2SLOT((MSize)ofs); - if (CALLBACK_SLOT2OFS(slot) == (MSize)ofs) - return slot; - } - return ~0u; /* Not a known callback function pointer. */ -} - -/* Initialize machine code for callback function pointers. */ -#if LJ_OS_NOJIT -/* Disabled callback support. */ -#define callback_mcode_init(g, p) UNUSED(p) -#elif LJ_TARGET_X86ORX64 -static void callback_mcode_init(global_State *g, uint8_t *page) -{ - uint8_t *p = page; - uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback; - MSize slot; -#if LJ_64 - *(void **)p = target; p += 8; -#endif - for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { - /* mov al, slot; jmp group */ - *p++ = XI_MOVrib | RID_EAX; *p++ = (uint8_t)slot; - if ((slot & 31) == 31 || slot == CALLBACK_MAX_SLOT-1) { - /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */ - *p++ = XI_PUSH + RID_EBP; - *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8); -#if LJ_GC64 - *p++ = 0x48; *p++ = XI_MOVri | RID_EBP; - *(uint64_t *)p = (uint64_t)(g); p += 8; -#else - *p++ = XI_MOVri | RID_EBP; - *(int32_t *)p = i32ptr(g); p += 4; -#endif -#if LJ_64 - /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */ - *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP; - *(int32_t *)p = (int32_t)(page-(p+4)); p += 4; -#else - /* jmp lj_vm_ffi_callback. */ - *p++ = XI_JMP; *(int32_t *)p = target-(p+4); p += 4; -#endif - } else { - *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2); - } - } - lua_assert(p - page <= CALLBACK_MCODE_SIZE); -} -#elif LJ_TARGET_ARM -static void callback_mcode_init(global_State *g, uint32_t *page) -{ - uint32_t *p = page; - void *target = (void *)lj_vm_ffi_callback; - MSize slot; - /* This must match with the saveregs macro in buildvm_arm.dasc. 
*/ - *p++ = ARMI_SUB|ARMF_D(RID_R12)|ARMF_N(RID_R12)|ARMF_M(RID_PC); - *p++ = ARMI_PUSH|ARMF_N(RID_SP)|RSET_RANGE(RID_R4,RID_R11+1)|RID2RSET(RID_LR); - *p++ = ARMI_SUB|ARMI_K12|ARMF_D(RID_R12)|ARMF_N(RID_R12)|CALLBACK_MCODE_HEAD; - *p++ = ARMI_STR|ARMI_LS_P|ARMI_LS_W|ARMF_D(RID_R12)|ARMF_N(RID_SP)|(CFRAME_SIZE-4*9); - *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_R12)|ARMF_N(RID_PC); - *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_PC)|ARMF_N(RID_PC); - *p++ = u32ptr(g); - *p++ = u32ptr(target); - for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { - *p++ = ARMI_MOV|ARMF_D(RID_R12)|ARMF_M(RID_PC); - *p = ARMI_B | ((page-p-2) & 0x00ffffffu); - p++; - } - lua_assert(p - page <= CALLBACK_MCODE_SIZE); -} -#elif LJ_TARGET_ARM64 -static void callback_mcode_init(global_State *g, uint32_t *page) -{ - uint32_t *p = page; - void *target = (void *)lj_vm_ffi_callback; - MSize slot; - *p++ = A64I_LDRLx | A64F_D(RID_X11) | A64F_S19(4); - *p++ = A64I_LDRLx | A64F_D(RID_X10) | A64F_S19(5); - *p++ = A64I_BR | A64F_N(RID_X11); - *p++ = A64I_NOP; - ((void **)p)[0] = target; - ((void **)p)[1] = g; - p += 4; - for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { - *p++ = A64I_MOVZw | A64F_D(RID_X9) | A64F_U16(slot); - *p = A64I_B | A64F_S26((page-p) & 0x03ffffffu); - p++; - } - lua_assert(p - page <= CALLBACK_MCODE_SIZE); -} -#elif LJ_TARGET_PPC -static void callback_mcode_init(global_State *g, uint32_t *page) -{ - uint32_t *p = page; - void *target = (void *)lj_vm_ffi_callback; - MSize slot; - *p++ = PPCI_LIS | PPCF_T(RID_TMP) | (u32ptr(target) >> 16); - *p++ = PPCI_LIS | PPCF_T(RID_R12) | (u32ptr(g) >> 16); - *p++ = PPCI_ORI | PPCF_A(RID_TMP)|PPCF_T(RID_TMP) | (u32ptr(target) & 0xffff); - *p++ = PPCI_ORI | PPCF_A(RID_R12)|PPCF_T(RID_R12) | (u32ptr(g) & 0xffff); - *p++ = PPCI_MTCTR | PPCF_T(RID_TMP); - *p++ = PPCI_BCTR; - for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { - *p++ = PPCI_LI | PPCF_T(RID_R11) | slot; - *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2); - p++; - } - lua_assert(p - page <= CALLBACK_MCODE_SIZE); -} -#elif LJ_TARGET_MIPS -static void callback_mcode_init(global_State *g, uint32_t *page) -{ - uint32_t *p = page; - void *target = (void *)lj_vm_ffi_callback; - MSize slot; - *p++ = MIPSI_SW | MIPSF_T(RID_R1)|MIPSF_S(RID_SP) | 0; - *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (u32ptr(target) >> 16); - *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (u32ptr(g) >> 16); - *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) |(u32ptr(target)&0xffff); - *p++ = MIPSI_JR | MIPSF_S(RID_R3); - *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (u32ptr(g)&0xffff); - for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { - *p = MIPSI_B | ((page-p-1) & 0x0000ffffu); - p++; - *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot; - } - lua_assert(p - page <= CALLBACK_MCODE_SIZE); -} -#else -/* Missing support for this architecture. */ -#define callback_mcode_init(g, p) UNUSED(p) -#endif - -/* -- Machine code management --------------------------------------------- */ - -#if LJ_TARGET_WINDOWS - -#define WIN32_LEAN_AND_MEAN -#include - -#elif LJ_TARGET_POSIX - -#include -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif - -#endif - -/* Allocate and initialize area for callback function pointers. 
*/ -static void callback_mcode_new(CTState *cts) -{ - size_t sz = (size_t)CALLBACK_MCODE_SIZE; - void *p; - if (CALLBACK_MAX_SLOT == 0) - lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); -#if LJ_TARGET_WINDOWS - p = VirtualAlloc(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - if (!p) - lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); -#elif LJ_TARGET_POSIX - p = mmap(NULL, sz, (PROT_READ|PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS, - -1, 0); - if (p == MAP_FAILED) - lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); -#else - /* Fallback allocator. Fails if memory is not executable by default. */ - p = lj_mem_new(cts->L, sz); -#endif - cts->cb.mcode = p; - callback_mcode_init(cts->g, p); - lj_mcode_sync(p, (char *)p + sz); -#if LJ_TARGET_WINDOWS - { - DWORD oprot; - VirtualProtect(p, sz, PAGE_EXECUTE_READ, &oprot); - } -#elif LJ_TARGET_POSIX - mprotect(p, sz, (PROT_READ|PROT_EXEC)); -#endif -} - -/* Free area for callback function pointers. */ -void lj_ccallback_mcode_free(CTState *cts) -{ - size_t sz = (size_t)CALLBACK_MCODE_SIZE; - void *p = cts->cb.mcode; - if (p == NULL) return; -#if LJ_TARGET_WINDOWS - VirtualFree(p, 0, MEM_RELEASE); - UNUSED(sz); -#elif LJ_TARGET_POSIX - munmap(p, sz); -#else - lj_mem_free(cts->g, p, sz); -#endif -} - -/* -- C callback entry ---------------------------------------------------- */ - -/* Target-specific handling of register arguments. Similar to lj_ccall.c. */ -#if LJ_TARGET_X86 - -#define CALLBACK_HANDLE_REGARG \ - if (!isfp) { /* Only non-FP values may be passed in registers. */ \ - if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \ - if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \ - } else if (ngpr + 1 <= maxgpr) { \ - sp = &cts->cb.gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#elif LJ_TARGET_X64 && LJ_ABI_WIN - -/* Windows/x64 argument registers are strictly positional (use ngpr). */ -#define CALLBACK_HANDLE_REGARG \ - if (isfp) { \ - if (ngpr < maxgpr) { sp = &cts->cb.fpr[ngpr++]; UNUSED(nfpr); goto done; } \ - } else { \ - if (ngpr < maxgpr) { sp = &cts->cb.gpr[ngpr++]; goto done; } \ - } - -#elif LJ_TARGET_X64 - -#define CALLBACK_HANDLE_REGARG \ - if (isfp) { \ - if (nfpr + n <= CCALL_NARG_FPR) { \ - sp = &cts->cb.fpr[nfpr]; \ - nfpr += n; \ - goto done; \ - } \ - } else { \ - if (ngpr + n <= maxgpr) { \ - sp = &cts->cb.gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#elif LJ_TARGET_ARM - -#if LJ_ABI_SOFTFP - -#define CALLBACK_HANDLE_REGARG_FP1 UNUSED(isfp); -#define CALLBACK_HANDLE_REGARG_FP2 - -#else - -#define CALLBACK_HANDLE_REGARG_FP1 \ - if (isfp) { \ - if (n == 1) { \ - if (fprodd) { \ - sp = &cts->cb.fpr[fprodd-1]; \ - fprodd = 0; \ - goto done; \ - } else if (nfpr + 1 <= CCALL_NARG_FPR) { \ - sp = &cts->cb.fpr[nfpr++]; \ - fprodd = nfpr; \ - goto done; \ - } \ - } else { \ - if (nfpr + 1 <= CCALL_NARG_FPR) { \ - sp = &cts->cb.fpr[nfpr++]; \ - goto done; \ - } \ - } \ - fprodd = 0; /* No reordering after the first FP value is on stack. */ \ - } else { - -#define CALLBACK_HANDLE_REGARG_FP2 } - -#endif - -#define CALLBACK_HANDLE_REGARG \ - CALLBACK_HANDLE_REGARG_FP1 \ - if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ - if (ngpr + n <= maxgpr) { \ - sp = &cts->cb.gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } CALLBACK_HANDLE_REGARG_FP2 - -#elif LJ_TARGET_ARM64 - -#define CALLBACK_HANDLE_REGARG \ - if (isfp) { \ - if (nfpr + n <= CCALL_NARG_FPR) { \ - sp = &cts->cb.fpr[nfpr]; \ - nfpr += n; \ - goto done; \ - } else { \ - nfpr = CCALL_NARG_FPR; /* Prevent reordering. 
*/ \ - } \ - } else { \ - if (!LJ_TARGET_IOS && n > 1) \ - ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ - if (ngpr + n <= maxgpr) { \ - sp = &cts->cb.gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } else { \ - ngpr = CCALL_NARG_GPR; /* Prevent reordering. */ \ - } \ - } - -#elif LJ_TARGET_PPC - -#define CALLBACK_HANDLE_REGARG \ - if (isfp) { \ - if (nfpr + 1 <= CCALL_NARG_FPR) { \ - sp = &cts->cb.fpr[nfpr++]; \ - cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \ - goto done; \ - } \ - } else { /* Try to pass argument in GPRs. */ \ - if (n > 1) { \ - lua_assert(ctype_isinteger(cta->info) && n == 2); /* int64_t. */ \ - ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ - } \ - if (ngpr + n <= maxgpr) { \ - sp = &cts->cb.gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#define CALLBACK_HANDLE_RET \ - if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ - *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */ - -#elif LJ_TARGET_MIPS - -#define CALLBACK_HANDLE_REGARG \ - if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \ - sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \ - nfpr++; ngpr += n; \ - goto done; \ - } else { /* Try to pass argument in GPRs. */ \ - nfpr = CCALL_NARG_FPR; \ - if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ - if (ngpr + n <= maxgpr) { \ - sp = &cts->cb.gpr[ngpr]; \ - ngpr += n; \ - goto done; \ - } \ - } - -#define CALLBACK_HANDLE_RET \ - if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ - ((float *)dp)[1] = *(float *)dp; - -#else -#error "Missing calling convention definitions for this architecture" -#endif - -/* Convert and push callback arguments to Lua stack. */ -static void callback_conv_args(CTState *cts, lua_State *L) -{ - TValue *o = L->top; - intptr_t *stack = cts->cb.stack; - MSize slot = cts->cb.slot; - CTypeID id = 0, rid, fid; - int gcsteps = 0; - CType *ct; - GCfunc *fn; - int fntp; - MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR; -#if CCALL_NARG_FPR - MSize nfpr = 0; -#if LJ_TARGET_ARM - MSize fprodd = 0; -#endif -#endif - - if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) { - ct = ctype_get(cts, id); - rid = ctype_cid(ct->info); /* Return type. x86: +(spadj<<16). */ - fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot)); - fntp = LJ_TFUNC; - } else { /* Must set up frame first, before throwing the error. */ - ct = NULL; - rid = 0; - fn = (GCfunc *)L; - fntp = LJ_TTHREAD; - } - /* Continuation returns from callback. */ - if (LJ_FR2) { - (o++)->u64 = LJ_CONT_FFI_CALLBACK; - (o++)->u64 = rid; - o++; - } else { - o->u32.lo = LJ_CONT_FFI_CALLBACK; - o->u32.hi = rid; - o++; - } - setframe_gc(o, obj2gco(fn), fntp); - setframe_ftsz(o, ((char *)(o+1) - (char *)L->base) + FRAME_CONT); - L->top = L->base = ++o; - if (!ct) - lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK); - if (isluafunc(fn)) - setcframe_pc(L->cframe, proto_bc(funcproto(fn))+1); - lj_state_checkstack(L, LUA_MINSTACK); /* May throw. */ - o = L->base; /* Might have been reallocated. */ - -#if LJ_TARGET_X86 - /* x86 has several different calling conventions. 
*/ - switch (ctype_cconv(ct->info)) { - case CTCC_FASTCALL: maxgpr = 2; break; - case CTCC_THISCALL: maxgpr = 1; break; - default: maxgpr = 0; break; - } -#endif - - fid = ct->sib; - while (fid) { - CType *ctf = ctype_get(cts, fid); - if (!ctype_isattrib(ctf->info)) { - CType *cta; - void *sp; - CTSize sz; - int isfp; - MSize n; - lua_assert(ctype_isfield(ctf->info)); - cta = ctype_rawchild(cts, ctf); - isfp = ctype_isfp(cta->info); - sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); - n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */ - - CALLBACK_HANDLE_REGARG /* Handle register arguments. */ - - /* Otherwise pass argument on stack. */ - if (CCALL_ALIGN_STACKARG && LJ_32 && sz == 8) - nsp = (nsp + 1) & ~1u; /* Align 64 bit argument on stack. */ - sp = &stack[nsp]; - nsp += n; - - done: - if (LJ_BE && cta->size < CTSIZE_PTR) - sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size); - gcsteps += lj_cconv_tv_ct(cts, cta, 0, o++, sp); - } - fid = ctf->sib; - } - L->top = o; -#if LJ_TARGET_X86 - /* Store stack adjustment for returns from non-cdecl callbacks. */ - if (ctype_cconv(ct->info) != CTCC_CDECL) { -#if LJ_FR2 - (L->base-3)->u64 |= (nsp << (16+2)); -#else - (L->base-2)->u32.hi |= (nsp << (16+2)); -#endif - } -#endif - while (gcsteps-- > 0) - lj_gc_check(L); -} - -/* Convert Lua object to callback result. */ -static void callback_conv_result(CTState *cts, lua_State *L, TValue *o) -{ -#if LJ_FR2 - CType *ctr = ctype_raw(cts, (uint16_t)(L->base-3)->u64); -#else - CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi); -#endif -#if LJ_TARGET_X86 - cts->cb.gpr[2] = 0; -#endif - if (!ctype_isvoid(ctr->info)) { - uint8_t *dp = (uint8_t *)&cts->cb.gpr[0]; -#if CCALL_NUM_FPR - if (ctype_isfp(ctr->info)) - dp = (uint8_t *)&cts->cb.fpr[0]; -#endif - lj_cconv_ct_tv(cts, ctr, dp, o, 0); -#ifdef CALLBACK_HANDLE_RET - CALLBACK_HANDLE_RET -#endif - /* Extend returned integers to (at least) 32 bits. */ - if (ctype_isinteger_or_bool(ctr->info) && ctr->size < 4) { - if (ctr->info & CTF_UNSIGNED) - *(uint32_t *)dp = ctr->size == 1 ? (uint32_t)*(uint8_t *)dp : - (uint32_t)*(uint16_t *)dp; - else - *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp : - (int32_t)*(int16_t *)dp; - } -#if LJ_TARGET_X86 - if (ctype_isfp(ctr->info)) - cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2; -#endif - } -} - -/* Enter callback. */ -lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf) -{ - lua_State *L = cts->L; - global_State *g = cts->g; - lua_assert(L != NULL); - if (tvref(g->jit_base)) { - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK)); - if (g->panic) g->panic(L); - exit(EXIT_FAILURE); - } - lj_trace_abort(g); /* Never record across callback. */ - /* Setup C frame. */ - cframe_prev(cf) = L->cframe; - setcframe_L(cf, L); - cframe_errfunc(cf) = -1; - cframe_nres(cf) = 0; - L->cframe = cf; - callback_conv_args(cts, L); - return L; /* Now call the function on this stack. */ -} - -/* Leave callback. */ -void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o) -{ - lua_State *L = cts->L; - GCfunc *fn; - TValue *obase = L->base; - L->base = L->top; /* Keep continuation frame for throwing errors. */ - if (o >= L->base) { - /* PC of RET* is lost. Point to last line for result conv. errors. */ - fn = curr_func(L); - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - setcframe_pc(L->cframe, proto_bc(pt)+pt->sizebc+1); - } - } - callback_conv_result(cts, L, o); - /* Finally drop C frame and continuation frame. 
*/ - L->top -= 2+2*LJ_FR2; - L->base = obase; - L->cframe = cframe_prev(L->cframe); - cts->cb.slot = 0; /* Blacklist C function that called the callback. */ -} - -/* -- C callback management ----------------------------------------------- */ - -/* Get an unused slot in the callback slot table. */ -static MSize callback_slot_new(CTState *cts, CType *ct) -{ - CTypeID id = ctype_typeid(cts, ct); - CTypeID1 *cbid = cts->cb.cbid; - MSize top; - for (top = cts->cb.topid; top < cts->cb.sizeid; top++) - if (LJ_LIKELY(cbid[top] == 0)) - goto found; -#if CALLBACK_MAX_SLOT - if (top >= CALLBACK_MAX_SLOT) -#endif - lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); - if (!cts->cb.mcode) - callback_mcode_new(cts); - lj_mem_growvec(cts->L, cbid, cts->cb.sizeid, CALLBACK_MAX_SLOT, CTypeID1); - cts->cb.cbid = cbid; - memset(cbid+top, 0, (cts->cb.sizeid-top)*sizeof(CTypeID1)); -found: - cbid[top] = id; - cts->cb.topid = top+1; - return top; -} - -/* Check for function pointer and supported argument/result types. */ -static CType *callback_checkfunc(CTState *cts, CType *ct) -{ - int narg = 0; - if (!ctype_isptr(ct->info) || (LJ_64 && ct->size != CTSIZE_PTR)) - return NULL; - ct = ctype_rawchild(cts, ct); - if (ctype_isfunc(ct->info)) { - CType *ctr = ctype_rawchild(cts, ct); - CTypeID fid = ct->sib; - if (!(ctype_isvoid(ctr->info) || ctype_isenum(ctr->info) || - ctype_isptr(ctr->info) || (ctype_isnum(ctr->info) && ctr->size <= 8))) - return NULL; - if ((ct->info & CTF_VARARG)) - return NULL; - while (fid) { - CType *ctf = ctype_get(cts, fid); - if (!ctype_isattrib(ctf->info)) { - CType *cta; - lua_assert(ctype_isfield(ctf->info)); - cta = ctype_rawchild(cts, ctf); - if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) || - (ctype_isnum(cta->info) && cta->size <= 8)) || - ++narg >= LUA_MINSTACK-3) - return NULL; - } - fid = ctf->sib; - } - return ct; - } - return NULL; -} - -/* Create a new callback and return the callback function pointer. */ -void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn) -{ - ct = callback_checkfunc(cts, ct); - if (ct) { - MSize slot = callback_slot_new(cts, ct); - GCtab *t = cts->miscmap; - setfuncV(cts->L, lj_tab_setint(cts->L, t, (int32_t)slot), fn); - lj_gc_anybarriert(cts->L, t); - return callback_slot2ptr(cts, slot); - } - return NULL; /* Bad conversion. */ -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.h deleted file mode 100644 index 83dbe048b5b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ccallback.h +++ /dev/null @@ -1,25 +0,0 @@ -/* -** FFI C callback handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CCALLBACK_H -#define _LJ_CCALLBACK_H - -#include "lj_obj.h" -#include "lj_ctype.h" - -#if LJ_HASFFI - -/* Really belongs to lj_vm.h. 
*/ -LJ_ASMF void lj_vm_ffi_callback(void); - -LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p); -LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf); -LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o); -LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn); -LJ_FUNC void lj_ccallback_mcode_free(CTState *cts); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.c deleted file mode 100644 index 8a270766fb9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.c +++ /dev/null @@ -1,752 +0,0 @@ -/* -** C type conversions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_ctype.h" -#include "lj_cdata.h" -#include "lj_cconv.h" -#include "lj_ccallback.h" - -/* -- Conversion errors --------------------------------------------------- */ - -/* Bad conversion. */ -LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s, - CTInfo flags) -{ - const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); - const char *src; - if ((flags & CCF_FROMTV)) - src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER : - ctype_isarray(s->info) ? LUA_TSTRING : LUA_TNIL)]; - else - src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL)); - if (CCF_GETARG(flags)) - lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst); - else - lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst); -} - -/* Bad conversion from TValue. */ -LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o, - CTInfo flags) -{ - const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); - const char *src = lj_typename(o); - if (CCF_GETARG(flags)) - lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst); - else - lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst); -} - -/* Initializer overflow. */ -LJ_NORET static void cconv_err_initov(CTState *cts, CType *d) -{ - const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); - lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst); -} - -/* -- C type compatibility checks ----------------------------------------- */ - -/* Get raw type and qualifiers for a child type. Resolves enums, too. */ -static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual) -{ - ct = ctype_child(cts, ct); - for (;;) { - if (ctype_isattrib(ct->info)) { - if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; - } else if (!ctype_isenum(ct->info)) { - break; - } - ct = ctype_child(cts, ct); - } - *qual |= (ct->info & CTF_QUAL); - return ct; -} - -/* Check for compatible types when converting to a pointer. -** Note: these checks are more relaxed than what C99 mandates. -*/ -int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags) -{ - if (!((flags & CCF_CAST) || d == s)) { - CTInfo dqual = 0, squal = 0; - d = cconv_childqual(cts, d, &dqual); - if (!ctype_isstruct(s->info)) - s = cconv_childqual(cts, s, &squal); - if ((flags & CCF_SAME)) { - if (dqual != squal) - return 0; /* Different qualifiers. */ - } else if (!(flags & CCF_IGNQUAL)) { - if ((dqual & squal) != squal) - return 0; /* Discarded qualifiers. */ - if (ctype_isvoid(d->info) || ctype_isvoid(s->info)) - return 1; /* Converting to/from void * is always ok. 
*/ - } - if (ctype_type(d->info) != ctype_type(s->info) || - d->size != s->size) - return 0; /* Different type or different size. */ - if (ctype_isnum(d->info)) { - if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP))) - return 0; /* Different numeric types. */ - } else if (ctype_ispointer(d->info)) { - /* Check child types for compatibility. */ - return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME); - } else if (ctype_isstruct(d->info)) { - if (d != s) - return 0; /* Must be exact same type for struct/union. */ - } else if (ctype_isfunc(d->info)) { - /* NYI: structural equality of functions. */ - } - } - return 1; /* Types are compatible. */ -} - -/* -- C type to C type conversion ----------------------------------------- */ - -/* Convert C type to C type. Caveat: expects to get the raw CType! -** -** Note: This is only used by the interpreter and not optimized at all. -** The JIT compiler will do a much better job specializing for each case. -*/ -void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, - uint8_t *dp, uint8_t *sp, CTInfo flags) -{ - CTSize dsize = d->size, ssize = s->size; - CTInfo dinfo = d->info, sinfo = s->info; - void *tmpptr; - - lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo)); - lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo)); - - if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) - goto err_conv; - - /* Some basic sanity checks. */ - lua_assert(!ctype_isnum(dinfo) || dsize > 0); - lua_assert(!ctype_isnum(sinfo) || ssize > 0); - lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4); - lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4); - lua_assert(!ctype_isinteger(dinfo) || (1u< ssize) { /* Zero-extend or sign-extend LSB. */ -#if LJ_LE - uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0; - memcpy(dp, sp, ssize); - memset(dp + ssize, fill, dsize-ssize); -#else - uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0; - memset(dp, fill, dsize-ssize); - memcpy(dp + (dsize-ssize), sp, ssize); -#endif - } else { /* Copy LSB. */ -#if LJ_LE - memcpy(dp, sp, dsize); -#else - memcpy(dp, sp + (ssize-dsize), dsize); -#endif - } - break; - case CCX(I, F): { - double n; /* Always convert via double. */ - conv_I_F: - /* Convert source to double. */ - if (ssize == sizeof(double)) n = *(double *)sp; - else if (ssize == sizeof(float)) n = (double)*(float *)sp; - else goto err_conv; /* NYI: long double. */ - /* Then convert double to integer. */ - /* The conversion must exactly match the semantics of JIT-compiled code! */ - if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) { - int32_t i = (int32_t)n; - if (dsize == 4) *(int32_t *)dp = i; - else if (dsize == 2) *(int16_t *)dp = (int16_t)i; - else *(int8_t *)dp = (int8_t)i; - } else if (dsize == 4) { - *(uint32_t *)dp = (uint32_t)n; - } else if (dsize == 8) { - if (!(dinfo & CTF_UNSIGNED)) - *(int64_t *)dp = (int64_t)n; - else - *(uint64_t *)dp = lj_num2u64(n); - } else { - goto err_conv; /* NYI: conversion to >64 bit integers. */ - } - break; - } - case CCX(I, C): - s = ctype_child(cts, s); - sinfo = s->info; - ssize = s->size; - goto conv_I_F; /* Just convert re. */ - case CCX(I, P): - if (!(flags & CCF_CAST)) goto err_conv; - sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); - goto conv_I_I; - case CCX(I, A): - if (!(flags & CCF_CAST)) goto err_conv; - sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); - ssize = CTSIZE_PTR; - tmpptr = sp; - sp = (uint8_t *)&tmpptr; - goto conv_I_I; - - /* Destination is a floating-point number. 
*/ - case CCX(F, B): - case CCX(F, I): { - double n; /* Always convert via double. */ - conv_F_I: - /* First convert source to double. */ - /* The conversion must exactly match the semantics of JIT-compiled code! */ - if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) { - int32_t i; - if (ssize == 4) { - i = *(int32_t *)sp; - } else if (!(sinfo & CTF_UNSIGNED)) { - if (ssize == 2) i = *(int16_t *)sp; - else i = *(int8_t *)sp; - } else { - if (ssize == 2) i = *(uint16_t *)sp; - else i = *(uint8_t *)sp; - } - n = (double)i; - } else if (ssize == 4) { - n = (double)*(uint32_t *)sp; - } else if (ssize == 8) { - if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp; - else n = (double)*(uint64_t *)sp; - } else { - goto err_conv; /* NYI: conversion from >64 bit integers. */ - } - /* Convert double to destination. */ - if (dsize == sizeof(double)) *(double *)dp = n; - else if (dsize == sizeof(float)) *(float *)dp = (float)n; - else goto err_conv; /* NYI: long double. */ - break; - } - case CCX(F, F): { - double n; /* Always convert via double. */ - conv_F_F: - if (ssize == dsize) goto copyval; - /* Convert source to double. */ - if (ssize == sizeof(double)) n = *(double *)sp; - else if (ssize == sizeof(float)) n = (double)*(float *)sp; - else goto err_conv; /* NYI: long double. */ - /* Convert double to destination. */ - if (dsize == sizeof(double)) *(double *)dp = n; - else if (dsize == sizeof(float)) *(float *)dp = (float)n; - else goto err_conv; /* NYI: long double. */ - break; - } - case CCX(F, C): - s = ctype_child(cts, s); - sinfo = s->info; - ssize = s->size; - goto conv_F_F; /* Ignore im, and convert from re. */ - - /* Destination is a complex number. */ - case CCX(C, I): - d = ctype_child(cts, d); - dinfo = d->info; - dsize = d->size; - memset(dp + dsize, 0, dsize); /* Clear im. */ - goto conv_F_I; /* Convert to re. */ - case CCX(C, F): - d = ctype_child(cts, d); - dinfo = d->info; - dsize = d->size; - memset(dp + dsize, 0, dsize); /* Clear im. */ - goto conv_F_F; /* Convert to re. */ - - case CCX(C, C): - if (dsize != ssize) { /* Different types: convert re/im separately. */ - CType *dc = ctype_child(cts, d); - CType *sc = ctype_child(cts, s); - lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags); - lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags); - return; - } - goto copyval; /* Otherwise this is easy. */ - - /* Destination is a vector. */ - case CCX(V, I): - case CCX(V, F): - case CCX(V, C): { - CType *dc = ctype_child(cts, d); - CTSize esize; - /* First convert the scalar to the first element. */ - lj_cconv_ct_ct(cts, dc, s, dp, sp, flags); - /* Then replicate it to the other elements (splat). */ - for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) { - dp += esize; - memcpy(dp, sp, esize); - } - break; - } - - case CCX(V, V): - /* Copy same-sized vectors, even for different lengths/element-types. */ - if (dsize != ssize) goto err_conv; - goto copyval; - - /* Destination is a pointer. */ - case CCX(P, I): - if (!(flags & CCF_CAST)) goto err_conv; - dinfo = CTINFO(CT_NUM, CTF_UNSIGNED); - goto conv_I_I; - - case CCX(P, F): - if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv; - /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ - dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 
0 : CTF_UNSIGNED); - goto conv_I_F; - - case CCX(P, P): - if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv; - cdata_setptr(dp, dsize, cdata_getptr(sp, ssize)); - break; - - case CCX(P, A): - case CCX(P, S): - if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv; - cdata_setptr(dp, dsize, sp); - break; - - /* Destination is an array. */ - case CCX(A, A): - if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize || - d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags)) - goto err_conv; - goto copyval; - - /* Destination is a struct/union. */ - case CCX(S, S): - if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s) - goto err_conv; /* Must be exact same type. */ -copyval: /* Copy value. */ - lua_assert(dsize == ssize); - memcpy(dp, sp, dsize); - break; - - default: - err_conv: - cconv_err_conv(cts, d, s, flags); - } -} - -/* -- C type to TValue conversion ----------------------------------------- */ - -/* Convert C type to TValue. Caveat: expects to get the raw CType! */ -int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, - TValue *o, uint8_t *sp) -{ - CTInfo sinfo = s->info; - if (ctype_isnum(sinfo)) { - if (!ctype_isbool(sinfo)) { - if (ctype_isinteger(sinfo) && s->size > 4) goto copyval; - if (LJ_DUALNUM && ctype_isinteger(sinfo)) { - int32_t i; - lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s, - (uint8_t *)&i, sp, 0); - if ((sinfo & CTF_UNSIGNED) && i < 0) - setnumV(o, (lua_Number)(uint32_t)i); - else - setintV(o, i); - } else { - lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s, - (uint8_t *)&o->n, sp, 0); - /* Numbers are NOT canonicalized here! Beware of uninitialized data. */ - lua_assert(tvisnum(o)); - } - } else { - uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0); - setboolV(o, b); - setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */ - } - return 0; - } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) { - /* Create reference. */ - setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid)); - return 1; /* Need GC step. */ - } else { - GCcdata *cd; - CTSize sz; - copyval: /* Copy value. */ - sz = s->size; - lua_assert(sz != CTSIZE_INVALID); - /* Attributes are stripped, qualifiers are kept (but mostly ignored). */ - cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz); - setcdataV(cts->L, o, cd); - memcpy(cdataptr(cd), sp, sz); - return 1; /* Need GC step. */ - } -} - -/* Convert bitfield to TValue. */ -int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp) -{ - CTInfo info = s->info; - CTSize pos, bsz; - uint32_t val; - lua_assert(ctype_isbitfield(info)); - /* NYI: packed bitfields may cause misaligned reads. */ - switch (ctype_bitcsz(info)) { - case 4: val = *(uint32_t *)sp; break; - case 2: val = *(uint16_t *)sp; break; - case 1: val = *(uint8_t *)sp; break; - default: lua_assert(0); val = 0; break; - } - /* Check if a packed bitfield crosses a container boundary. 
*/ - pos = ctype_bitpos(info); - bsz = ctype_bitbsz(info); - lua_assert(pos < 8*ctype_bitcsz(info)); - lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); - if (pos + bsz > 8*ctype_bitcsz(info)) - lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); - if (!(info & CTF_BOOL)) { - CTSize shift = 32 - bsz; - if (!(info & CTF_UNSIGNED)) { - setintV(o, (int32_t)(val << (shift-pos)) >> shift); - } else { - val = (val << (shift-pos)) >> shift; - if (!LJ_DUALNUM || (int32_t)val < 0) - setnumV(o, (lua_Number)(uint32_t)val); - else - setintV(o, (int32_t)val); - } - } else { - lua_assert(bsz == 1); - setboolV(o, (val >> pos) & 1); - } - return 0; /* No GC step needed. */ -} - -/* -- TValue to C type conversion ----------------------------------------- */ - -/* Convert table to array. */ -static void cconv_array_tab(CTState *cts, CType *d, - uint8_t *dp, GCtab *t, CTInfo flags) -{ - int32_t i; - CType *dc = ctype_rawchild(cts, d); /* Array element type. */ - CTSize size = d->size, esize = dc->size, ofs = 0; - for (i = 0; ; i++) { - TValue *tv = (TValue *)lj_tab_getint(t, i); - if (!tv || tvisnil(tv)) { - if (i == 0) continue; /* Try again for 1-based tables. */ - break; /* Stop at first nil. */ - } - if (ofs >= size) - cconv_err_initov(cts, d); - lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags); - ofs += esize; - } - if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */ - if (ofs == esize) { /* Replicate a single element. */ - for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize); - } else { /* Otherwise fill the remainder with zero. */ - memset(dp + ofs, 0, size - ofs); - } - } -} - -/* Convert table to sub-struct/union. */ -static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp, - GCtab *t, int32_t *ip, CTInfo flags) -{ - CTypeID id = d->sib; - while (id) { - CType *df = ctype_get(cts, id); - id = df->sib; - if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) { - TValue *tv; - int32_t i = *ip, iz = i; - if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ - if (i >= 0) { - retry: - tv = (TValue *)lj_tab_getint(t, i); - if (!tv || tvisnil(tv)) { - if (i == 0) { i = 1; goto retry; } /* 1-based tables. */ - if (iz == 0) { *ip = i = -1; goto tryname; } /* Init named fields. */ - break; /* Stop at first nil. */ - } - *ip = i + 1; - } else { - tryname: - tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name))); - if (!tv || tvisnil(tv)) continue; - } - if (ctype_isfield(df->info)) - lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags); - else - lj_cconv_bf_tv(cts, df, dp+df->size, tv); - if ((d->info & CTF_UNION)) break; - } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) { - cconv_substruct_tab(cts, ctype_rawchild(cts, df), - dp+df->size, t, ip, flags); - } /* Ignore all other entries in the chain. */ - } -} - -/* Convert table to struct/union. */ -static void cconv_struct_tab(CTState *cts, CType *d, - uint8_t *dp, GCtab *t, CTInfo flags) -{ - int32_t i = 0; - memset(dp, 0, d->size); /* Much simpler to clear the struct first. */ - cconv_substruct_tab(cts, d, dp, t, &i, flags); -} - -/* Convert TValue to C type. Caveat: expects to get the raw CType! 
*/ -void lj_cconv_ct_tv(CTState *cts, CType *d, - uint8_t *dp, TValue *o, CTInfo flags) -{ - CTypeID sid = CTID_P_VOID; - CType *s; - void *tmpptr; - uint8_t tmpbool, *sp = (uint8_t *)&tmpptr; - if (LJ_LIKELY(tvisint(o))) { - sp = (uint8_t *)&o->i; - sid = CTID_INT32; - flags |= CCF_FROMTV; - } else if (LJ_LIKELY(tvisnum(o))) { - sp = (uint8_t *)&o->n; - sid = CTID_DOUBLE; - flags |= CCF_FROMTV; - } else if (tviscdata(o)) { - sp = cdataptr(cdataV(o)); - sid = cdataV(o)->ctypeid; - s = ctype_get(cts, sid); - if (ctype_isref(s->info)) { /* Resolve reference for value. */ - lua_assert(s->size == CTSIZE_PTR); - sp = *(void **)sp; - sid = ctype_cid(s->info); - } - s = ctype_raw(cts, sid); - if (ctype_isfunc(s->info)) { - sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR); - } else { - if (ctype_isenum(s->info)) s = ctype_child(cts, s); - goto doconv; - } - } else if (tvisstr(o)) { - GCstr *str = strV(o); - if (ctype_isenum(d->info)) { /* Match string against enum constant. */ - CTSize ofs; - CType *cct = lj_ctype_getfield(cts, d, str, &ofs); - if (!cct || !ctype_isconstval(cct->info)) - goto err_conv; - lua_assert(d->size == 4); - sp = (uint8_t *)&cct->size; - sid = ctype_cid(cct->info); - } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ - CType *dc = ctype_rawchild(cts, d); - CTSize sz = str->len+1; - if (!ctype_isinteger(dc->info) || dc->size != 1) - goto err_conv; - if (d->size != 0 && d->size < sz) - sz = d->size; - memcpy(dp, strdata(str), sz); - return; - } else { /* Otherwise pass it as a const char[]. */ - sp = (uint8_t *)strdata(str); - sid = CTID_A_CCHAR; - flags |= CCF_FROMTV; - } - } else if (tvistab(o)) { - if (ctype_isarray(d->info)) { - cconv_array_tab(cts, d, dp, tabV(o), flags); - return; - } else if (ctype_isstruct(d->info)) { - cconv_struct_tab(cts, d, dp, tabV(o), flags); - return; - } else { - goto err_conv; - } - } else if (tvisbool(o)) { - tmpbool = boolV(o); - sp = &tmpbool; - sid = CTID_BOOL; - } else if (tvisnil(o)) { - tmpptr = (void *)0; - flags |= CCF_FROMTV; - } else if (tvisudata(o)) { - GCudata *ud = udataV(o); - tmpptr = uddata(ud); - if (ud->udtype == UDTYPE_IO_FILE) - tmpptr = *(void **)tmpptr; - } else if (tvislightud(o)) { - tmpptr = lightudV(o); - } else if (tvisfunc(o)) { - void *p = lj_ccallback_new(cts, d, funcV(o)); - if (p) { - *(void **)dp = p; - return; - } - goto err_conv; - } else { - err_conv: - cconv_err_convtv(cts, d, o, flags); - } - s = ctype_get(cts, sid); -doconv: - if (ctype_isenum(d->info)) d = ctype_child(cts, d); - lj_cconv_ct_ct(cts, d, s, dp, sp, flags); -} - -/* Convert TValue to bitfield. */ -void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o) -{ - CTInfo info = d->info; - CTSize pos, bsz; - uint32_t val, mask; - lua_assert(ctype_isbitfield(info)); - if ((info & CTF_BOOL)) { - uint8_t tmpbool; - lua_assert(ctype_bitbsz(info) == 1); - lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0); - val = tmpbool; - } else { - CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32; - lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0); - } - pos = ctype_bitpos(info); - bsz = ctype_bitbsz(info); - lua_assert(pos < 8*ctype_bitcsz(info)); - lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); - /* Check if a packed bitfield crosses a container boundary. 
*/ - if (pos + bsz > 8*ctype_bitcsz(info)) - lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); - mask = ((1u << bsz) - 1u) << pos; - val = (val << pos) & mask; - /* NYI: packed bitfields may cause misaligned reads/writes. */ - switch (ctype_bitcsz(info)) { - case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break; - case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break; - case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break; - default: lua_assert(0); break; - } -} - -/* -- Initialize C type with TValues -------------------------------------- */ - -/* Initialize an array with TValues. */ -static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp, - TValue *o, MSize len) -{ - CType *dc = ctype_rawchild(cts, d); /* Array element type. */ - CTSize ofs, esize = dc->size; - MSize i; - if (len*esize > sz) - cconv_err_initov(cts, d); - for (i = 0, ofs = 0; i < len; i++, ofs += esize) - lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0); - if (ofs == esize) { /* Replicate a single element. */ - for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize); - } else { /* Otherwise fill the remainder with zero. */ - memset(dp + ofs, 0, sz - ofs); - } -} - -/* Initialize a sub-struct/union with TValues. */ -static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp, - TValue *o, MSize len, MSize *ip) -{ - CTypeID id = d->sib; - while (id) { - CType *df = ctype_get(cts, id); - id = df->sib; - if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) { - MSize i = *ip; - if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ - if (i >= len) break; - *ip = i + 1; - if (ctype_isfield(df->info)) - lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0); - else - lj_cconv_bf_tv(cts, df, dp+df->size, o + i); - if ((d->info & CTF_UNION)) break; - } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) { - cconv_substruct_init(cts, ctype_rawchild(cts, df), - dp+df->size, o, len, ip); - if ((d->info & CTF_UNION)) break; - } /* Ignore all other entries in the chain. */ - } -} - -/* Initialize a struct/union with TValues. */ -static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp, - TValue *o, MSize len) -{ - MSize i = 0; - memset(dp, 0, sz); /* Much simpler to clear the struct first. */ - cconv_substruct_init(cts, d, dp, o, len, &i); - if (i < len) - cconv_err_initov(cts, d); -} - -/* Check whether to use a multi-value initializer. -** This is true if an aggregate is to be initialized with a value. -** Valarrays are treated as values here so ct_tv handles (V|C, I|F). -*/ -int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o) -{ - if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info))) - return 0; /* Destination is not an aggregate. */ - if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info))) - return 0; /* Initializer is not a value. */ - if (tviscdata(o) && lj_ctype_rawref(cts, cdataV(o)->ctypeid) == d) - return 0; /* Source and destination are identical aggregates. */ - return 1; /* Otherwise the initializer is a value. */ -} - -/* Initialize C type with TValues. Caveat: expects to get the raw CType! */ -void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz, - uint8_t *dp, TValue *o, MSize len) -{ - if (len == 0) - memset(dp, 0, sz); - else if (len == 1 && !lj_cconv_multi_init(cts, d, o)) - lj_cconv_ct_tv(cts, d, dp, o, 0); - else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. 
*/ - cconv_array_init(cts, d, sz, dp, o, len); - else if (ctype_isstruct(d->info)) - cconv_struct_init(cts, d, sz, dp, o, len); - else - cconv_err_initov(cts, d); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.h deleted file mode 100644 index 2bd50ff46e9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cconv.h +++ /dev/null @@ -1,70 +0,0 @@ -/* -** C type conversions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CCONV_H -#define _LJ_CCONV_H - -#include "lj_obj.h" -#include "lj_ctype.h" - -#if LJ_HASFFI - -/* Compressed C type index. ORDER CCX. */ -enum { - CCX_B, /* Bool. */ - CCX_I, /* Integer. */ - CCX_F, /* Floating-point number. */ - CCX_C, /* Complex. */ - CCX_V, /* Vector. */ - CCX_P, /* Pointer. */ - CCX_A, /* Refarray. */ - CCX_S /* Struct/union. */ -}; - -/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */ -static LJ_AINLINE uint32_t cconv_idx(CTInfo info) -{ - uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */ - lua_assert(ctype_type(info) <= CT_MAYCONVERT); -#if LJ_64 - idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u); -#else - idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u); -#endif - lua_assert(idx < 8); - return idx; -} - -#define cconv_idx2(dinfo, sinfo) \ - ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo))) - -#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src) - -/* Conversion flags. */ -#define CCF_CAST 0x00000001u -#define CCF_FROMTV 0x00000002u -#define CCF_SAME 0x00000004u -#define CCF_IGNQUAL 0x00000008u - -#define CCF_ARG_SHIFT 8 -#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT) -#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT) - -LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags); -LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, - uint8_t *dp, uint8_t *sp, CTInfo flags); -LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, - TValue *o, uint8_t *sp); -LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp); -LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d, - uint8_t *dp, TValue *o, CTInfo flags); -LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o); -LJ_FUNC int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o); -LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz, - uint8_t *dp, TValue *o, MSize len); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.c deleted file mode 100644 index 5cd2c1140ef..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.c +++ /dev/null @@ -1,288 +0,0 @@ -/* -** C data management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_ctype.h" -#include "lj_cconv.h" -#include "lj_cdata.h" - -/* -- C data allocation --------------------------------------------------- */ - -/* Allocate a new C data object holding a reference to another object. */ -GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id) -{ - CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR); - GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR); - *(const void **)cdataptr(cd) = p; - return cd; -} - -/* Allocate variable-sized or specially aligned C data object. 
*/ -GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, CTSize align) -{ - global_State *g; - MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) + - (align > CT_MEMALIGN ? (1u<offset = (uint16_t)((char *)cd - p); - cdatav(cd)->extra = extra; - cdatav(cd)->len = sz; - g = G(L); - setgcrefr(cd->nextgc, g->gc.root); - setgcref(g->gc.root, obj2gco(cd)); - newwhite(g, obj2gco(cd)); - cd->marked |= 0x80; - cd->gct = ~LJ_TCDATA; - cd->ctypeid = id; - return cd; -} - -/* Free a C data object. */ -void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd) -{ - if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) { - GCobj *root; - makewhite(g, obj2gco(cd)); - markfinalized(obj2gco(cd)); - if ((root = gcref(g->gc.mmudata)) != NULL) { - setgcrefr(cd->nextgc, root->gch.nextgc); - setgcref(root->gch.nextgc, obj2gco(cd)); - setgcref(g->gc.mmudata, obj2gco(cd)); - } else { - setgcref(cd->nextgc, obj2gco(cd)); - setgcref(g->gc.mmudata, obj2gco(cd)); - } - } else if (LJ_LIKELY(!cdataisv(cd))) { - CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid); - CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR; - lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) || - ctype_isextern(ct->info)); - lj_mem_free(g, cd, sizeof(GCcdata) + sz); - } else { - lj_mem_free(g, memcdatav(cd), sizecdatav(cd)); - } -} - -void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj, uint32_t it) -{ - GCtab *t = ctype_ctsG(G(L))->finalizer; - if (gcref(t->metatable)) { - /* Add cdata to finalizer table, if still enabled. */ - TValue *tv, tmp; - setcdataV(L, &tmp, cd); - lj_gc_anybarriert(L, t); - tv = lj_tab_set(L, t, &tmp); - setgcV(L, tv, obj, it); - if (!tvisnil(tv)) - cd->marked |= LJ_GC_CDATA_FIN; - else - cd->marked &= ~LJ_GC_CDATA_FIN; - } -} - -/* -- C data indexing ----------------------------------------------------- */ - -/* Index C data by a TValue. Return CType and pointer. */ -CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp, - CTInfo *qual) -{ - uint8_t *p = (uint8_t *)cdataptr(cd); - CType *ct = ctype_get(cts, cd->ctypeid); - ptrdiff_t idx; - - /* Resolve reference for cdata object. */ - if (ctype_isref(ct->info)) { - lua_assert(ct->size == CTSIZE_PTR); - p = *(uint8_t **)p; - ct = ctype_child(cts, ct); - } - -collect_attrib: - /* Skip attributes and collect qualifiers. */ - while (ctype_isattrib(ct->info)) { - if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; - ct = ctype_child(cts, ct); - } - lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */ - - if (tvisint(key)) { - idx = (ptrdiff_t)intV(key); - goto integer_key; - } else if (tvisnum(key)) { /* Numeric key. */ -#ifdef _MSC_VER - /* Workaround for MSVC bug. */ - volatile -#endif - lua_Number n = numV(key); - idx = LJ_64 ? (ptrdiff_t)n : (ptrdiff_t)lj_num2int(n); - integer_key: - if (ctype_ispointer(ct->info)) { - CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */ - if (sz == CTSIZE_INVALID) - lj_err_caller(cts->L, LJ_ERR_FFI_INVSIZE); - if (ctype_isptr(ct->info)) { - p = (uint8_t *)cdata_getptr(p, ct->size); - } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) { - if ((ct->info & CTF_COMPLEX)) idx &= 1; - *qual |= CTF_CONST; /* Valarray elements are constant. */ - } - *pp = p + idx*(int32_t)sz; - return ct; - } - } else if (tviscdata(key)) { /* Integer cdata key. 
*/ - GCcdata *cdk = cdataV(key); - CType *ctk = ctype_raw(cts, cdk->ctypeid); - if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk); - if (ctype_isinteger(ctk->info)) { - lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk, - (uint8_t *)&idx, cdataptr(cdk), 0); - goto integer_key; - } - } else if (tvisstr(key)) { /* String key. */ - GCstr *name = strV(key); - if (ctype_isstruct(ct->info)) { - CTSize ofs; - CType *fct = lj_ctype_getfieldq(cts, ct, name, &ofs, qual); - if (fct) { - *pp = p + ofs; - return fct; - } - } else if (ctype_iscomplex(ct->info)) { - if (name->len == 2) { - *qual |= CTF_CONST; /* Complex fields are constant. */ - if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') { - *pp = p; - return ct; - } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') { - *pp = p + (ct->size >> 1); - return ct; - } - } - } else if (cd->ctypeid == CTID_CTYPEID) { - /* Allow indexing a (pointer to) struct constructor to get constants. */ - CType *sct = ctype_raw(cts, *(CTypeID *)p); - if (ctype_isptr(sct->info)) - sct = ctype_rawchild(cts, sct); - if (ctype_isstruct(sct->info)) { - CTSize ofs; - CType *fct = lj_ctype_getfield(cts, sct, name, &ofs); - if (fct && ctype_isconstval(fct->info)) - return fct; - } - ct = sct; /* Allow resolving metamethods for constructors, too. */ - } - } - if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */ - if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) { - p = (uint8_t *)cdata_getptr(p, ct->size); - ct = ctype_child(cts, ct); - goto collect_attrib; - } - } - *qual |= 1; /* Lookup failed. */ - return ct; /* But return the resolved raw type. */ -} - -/* -- C data getters ------------------------------------------------------ */ - -/* Get constant value and convert to TValue. */ -static void cdata_getconst(CTState *cts, TValue *o, CType *ct) -{ - CType *ctt = ctype_child(cts, ct); - lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); - /* Constants are already zero-extended/sign-extended to 32 bits. */ - if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) - setnumV(o, (lua_Number)(uint32_t)ct->size); - else - setintV(o, (int32_t)ct->size); -} - -/* Get C data value and convert to TValue. */ -int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp) -{ - CTypeID sid; - - if (ctype_isconstval(s->info)) { - cdata_getconst(cts, o, s); - return 0; /* No GC step needed. */ - } else if (ctype_isbitfield(s->info)) { - return lj_cconv_tv_bf(cts, s, o, sp); - } - - /* Get child type of pointer/array/field. */ - lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info)); - sid = ctype_cid(s->info); - s = ctype_get(cts, sid); - - /* Resolve reference for field. */ - if (ctype_isref(s->info)) { - lua_assert(s->size == CTSIZE_PTR); - sp = *(uint8_t **)sp; - sid = ctype_cid(s->info); - s = ctype_get(cts, sid); - } - - /* Skip attributes. */ - while (ctype_isattrib(s->info)) - s = ctype_child(cts, s); - - return lj_cconv_tv_ct(cts, s, sid, o, sp); -} - -/* -- C data setters ------------------------------------------------------ */ - -/* Convert TValue and set C data value. */ -void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual) -{ - if (ctype_isconstval(d->info)) { - goto err_const; - } else if (ctype_isbitfield(d->info)) { - if (((d->info|qual) & CTF_CONST)) goto err_const; - lj_cconv_bf_tv(cts, d, dp, o); - return; - } - - /* Get child type of pointer/array/field. */ - lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info)); - d = ctype_child(cts, d); - - /* Resolve reference for field. 
*/ - if (ctype_isref(d->info)) { - lua_assert(d->size == CTSIZE_PTR); - dp = *(uint8_t **)dp; - d = ctype_child(cts, d); - } - - /* Skip attributes and collect qualifiers. */ - for (;;) { - if (ctype_isattrib(d->info)) { - if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size; - } else { - break; - } - d = ctype_child(cts, d); - } - - lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info)); - - if (((d->info|qual) & CTF_CONST)) { - err_const: - lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST); - } - - lj_cconv_ct_tv(cts, d, dp, o, 0); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.h deleted file mode 100644 index c8975be1c9f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cdata.h +++ /dev/null @@ -1,76 +0,0 @@ -/* -** C data management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CDATA_H -#define _LJ_CDATA_H - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_ctype.h" - -#if LJ_HASFFI - -/* Get C data pointer. */ -static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz) -{ - if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ - return ((void *)(uintptr_t)*(uint32_t *)p); - } else { - lua_assert(sz == CTSIZE_PTR); - return *(void **)p; - } -} - -/* Set C data pointer. */ -static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v) -{ - if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ - *(uint32_t *)p = (uint32_t)(uintptr_t)v; - } else { - lua_assert(sz == CTSIZE_PTR); - *(void **)p = (void *)v; - } -} - -/* Allocate fixed-size C data object. */ -static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz) -{ - GCcdata *cd; -#ifdef LUA_USE_ASSERT - CType *ct = ctype_raw(cts, id); - lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz); -#endif - cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz); - cd->gct = ~LJ_TCDATA; - cd->ctypeid = ctype_check(cts, id); - return cd; -} - -/* Variant which works without a valid CTState. */ -static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz) -{ - GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz); - cd->gct = ~LJ_TCDATA; - cd->ctypeid = id; - return cd; -} - -LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id); -LJ_FUNC GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, - CTSize align); - -LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd); -LJ_FUNC void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj, - uint32_t it); - -LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, - uint8_t **pp, CTInfo *qual); -LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp); -LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, - CTInfo qual); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.c deleted file mode 100644 index 11f23efe44d..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.c +++ /dev/null @@ -1,43 +0,0 @@ -/* -** Character types. -** Donated to the public domain. -** -** This is intended to replace the problematic libc single-byte NLS functions. -** These just don't make sense anymore with UTF-8 locales becoming the norm -** on POSIX systems. 
It never worked too well on Windows systems since hardly -** anyone bothered to call setlocale(). -** -** This table is hardcoded for ASCII. Identifiers include the characters -** 128-255, too. This allows for the use of all non-ASCII chars as identifiers -** in the lexer. This is a broad definition, but works well in practice -** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*). -** -** If you really need proper character types for UTF-8 strings, please use -** an add-on library such as slnunicode: http://luaforge.net/projects/sln/ -*/ - -#define lj_char_c -#define LUA_CORE - -#include "lj_char.h" - -LJ_DATADEF const uint8_t lj_char_bits[257] = { - 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4, - 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160, - 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132, - 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192, - 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128 -}; - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.h deleted file mode 100644 index c3c86d34b1d..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_char.h +++ /dev/null @@ -1,42 +0,0 @@ -/* -** Character types. -** Donated to the public domain. -*/ - -#ifndef _LJ_CHAR_H -#define _LJ_CHAR_H - -#include "lj_def.h" - -#define LJ_CHAR_CNTRL 0x01 -#define LJ_CHAR_SPACE 0x02 -#define LJ_CHAR_PUNCT 0x04 -#define LJ_CHAR_DIGIT 0x08 -#define LJ_CHAR_XDIGIT 0x10 -#define LJ_CHAR_UPPER 0x20 -#define LJ_CHAR_LOWER 0x40 -#define LJ_CHAR_IDENT 0x80 -#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER) -#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT) -#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT) - -/* Only pass -1 or 0..255 to these macros. Never pass a signed char! 
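**
** The table has 257 entries and the macros index it as (lj_char_bits+1)[c],
** so c == -1 (EOF) maps to entry 0, which has no bits set. A plain char may
** be signed, so cast characters first, e.g. (hedged usage sketch, the
** function name is invented):
**
**   int c = (unsigned char)*p;
**   if (lj_char_isident(c) && !lj_char_isdigit(c)) start_identifier();
**
** The case-conversion macros below rely on the bit values: LJ_CHAR_LOWER is
** 0x40, so (lj_char_islower(c) >> 1) is 0x20 == 'a'-'A' for lower-case
** letters and 0 otherwise, while LJ_CHAR_UPPER is 0x20 and is added directly.
**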
*/ -#define lj_char_isa(c, t) ((lj_char_bits+1)[(c)] & t) -#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL) -#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE) -#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT) -#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT) -#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT) -#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER) -#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER) -#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT) -#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA) -#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM) -#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH) - -#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1)) -#define lj_char_tolower(c) ((c) + lj_char_isupper(c)) - -LJ_DATA const uint8_t lj_char_bits[257]; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.c deleted file mode 100644 index 1e927ebe99e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.c +++ /dev/null @@ -1,418 +0,0 @@ -/* -** FFI C library loader. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_str.h" -#include "lj_udata.h" -#include "lj_ctype.h" -#include "lj_cconv.h" -#include "lj_cdata.h" -#include "lj_clib.h" -#include "lj_strfmt.h" - -/* -- OS-specific functions ----------------------------------------------- */ - -#if LJ_TARGET_DLOPEN - -#include -#include - -#if defined(RTLD_DEFAULT) -#define CLIB_DEFHANDLE RTLD_DEFAULT -#elif LJ_TARGET_OSX || LJ_TARGET_BSD -#define CLIB_DEFHANDLE ((void *)(intptr_t)-2) -#else -#define CLIB_DEFHANDLE NULL -#endif - -LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L) -{ - lj_err_callermsg(L, dlerror()); -} - -#define clib_error(L, fmt, name) clib_error_(L) - -#if defined(__CYGWIN__) -#define CLIB_SOPREFIX "cyg" -#else -#define CLIB_SOPREFIX "lib" -#endif - -#if LJ_TARGET_OSX -#define CLIB_SOEXT "%s.dylib" -#elif defined(__CYGWIN__) -#define CLIB_SOEXT "%s.dll" -#else -#define CLIB_SOEXT "%s.so" -#endif - -static const char *clib_extname(lua_State *L, const char *name) -{ - if (!strchr(name, '/') -#ifdef __CYGWIN__ - && !strchr(name, '\\') -#endif - ) { - if (!strchr(name, '.')) { - name = lj_strfmt_pushf(L, CLIB_SOEXT, name); - L->top--; -#ifdef __CYGWIN__ - } else { - return name; -#endif - } - if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] && - name[2] == CLIB_SOPREFIX[2])) { - name = lj_strfmt_pushf(L, CLIB_SOPREFIX "%s", name); - L->top--; - } - } - return name; -} - -/* Check for a recognized ld script line. */ -static const char *clib_check_lds(lua_State *L, const char *buf) -{ - char *p, *e; - if ((!strncmp(buf, "GROUP", 5) || !strncmp(buf, "INPUT", 5)) && - (p = strchr(buf, '('))) { - while (*++p == ' ') ; - for (e = p; *e && *e != ' ' && *e != ')'; e++) ; - return strdata(lj_str_new(L, p, e-p)); - } - return NULL; -} - -/* Quick and dirty solution to resolve shared library name from ld script. */ -static const char *clib_resolve_lds(lua_State *L, const char *name) -{ - FILE *fp = fopen(name, "r"); - const char *p = NULL; - if (fp) { - char buf[256]; - if (fgets(buf, sizeof(buf), fp)) { - if (!strncmp(buf, "/* GNU ld script", 16)) { /* ld script magic? */ - while (fgets(buf, sizeof(buf), fp)) { /* Check all lines. 
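**
** On glibc-based systems some .so files (e.g. libm.so or libc.so) are GNU
** ld scripts rather than ELF objects, so dlopen() fails with an error that
** contains the script path. Such a script starts with a "GNU ld script"
** marker comment and then names the real library, roughly (hypothetical
** path, contents vary by distribution):
**
**   GROUP ( /lib/x86_64-linux-gnu/libm.so.6 )
**
** clib_check_lds() above grabs the first name inside GROUP(...) or
** INPUT(...) and the loader retries dlopen() with it.
**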
*/ - p = clib_check_lds(L, buf); - if (p) break; - } - } else { /* Otherwise check only the first line. */ - p = clib_check_lds(L, buf); - } - } - fclose(fp); - } - return p; -} - -static void *clib_loadlib(lua_State *L, const char *name, int global) -{ - void *h = dlopen(clib_extname(L, name), - RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL)); - if (!h) { - const char *e, *err = dlerror(); - if (*err == '/' && (e = strchr(err, ':')) && - (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) { - h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL)); - if (h) return h; - err = dlerror(); - } - lj_err_callermsg(L, err); - } - return h; -} - -static void clib_unloadlib(CLibrary *cl) -{ - if (cl->handle && cl->handle != CLIB_DEFHANDLE) - dlclose(cl->handle); -} - -static void *clib_getsym(CLibrary *cl, const char *name) -{ - void *p = dlsym(cl->handle, name); - return p; -} - -#elif LJ_TARGET_WINDOWS - -#define WIN32_LEAN_AND_MEAN -#include - -#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS -#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4 -#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2 -BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*); -#endif - -#define CLIB_DEFHANDLE ((void *)-1) - -/* Default libraries. */ -enum { - CLIB_HANDLE_EXE, - CLIB_HANDLE_DLL, - CLIB_HANDLE_CRT, - CLIB_HANDLE_KERNEL32, - CLIB_HANDLE_USER32, - CLIB_HANDLE_GDI32, - CLIB_HANDLE_MAX -}; - -static void *clib_def_handle[CLIB_HANDLE_MAX]; - -LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, - const char *name) -{ - DWORD err = GetLastError(); -#if LJ_TARGET_XBOXONE - wchar_t wbuf[128]; - char buf[128*2]; - if (!FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM, - NULL, err, 0, wbuf, sizeof(wbuf)/sizeof(wchar_t), NULL) || - !WideCharToMultiByte(CP_ACP, 0, wbuf, 128, buf, 128*2, NULL, NULL)) -#else - char buf[128]; - if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM, - NULL, err, 0, buf, sizeof(buf), NULL)) -#endif - buf[0] = '\0'; - lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, buf)); -} - -static int clib_needext(const char *s) -{ - while (*s) { - if (*s == '/' || *s == '\\' || *s == '.') return 0; - s++; - } - return 1; -} - -static const char *clib_extname(lua_State *L, const char *name) -{ - if (clib_needext(name)) { - name = lj_strfmt_pushf(L, "%s.dll", name); - L->top--; - } - return name; -} - -static void *clib_loadlib(lua_State *L, const char *name, int global) -{ - DWORD oldwerr = GetLastError(); - void *h = (void *)LoadLibraryExA(clib_extname(L, name), NULL, 0); - if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name); - SetLastError(oldwerr); - UNUSED(global); - return h; -} - -static void clib_unloadlib(CLibrary *cl) -{ - if (cl->handle == CLIB_DEFHANDLE) { - MSize i; - for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) { - void *h = clib_def_handle[i]; - if (h) { - clib_def_handle[i] = NULL; - FreeLibrary((HINSTANCE)h); - } - } - } else if (cl->handle) { - FreeLibrary((HINSTANCE)cl->handle); - } -} - -static void *clib_getsym(CLibrary *cl, const char *name) -{ - void *p = NULL; - if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */ - MSize i; - for (i = 0; i < CLIB_HANDLE_MAX; i++) { - HINSTANCE h = (HINSTANCE)clib_def_handle[i]; - if (!(void *)h) { /* Resolve default library handles (once). 
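**
** The default namespace has no single module handle on Windows, so symbols
** are searched in a fixed list: the executable, the LuaJIT DLL, the C
** runtime, then kernel32/user32/gdi32. The first three are found via the
** address-to-module lookup, roughly (sketch; &_fmode works for the CRT
** because the MS C runtimes export that variable):
**
**   HMODULE h = NULL;
**   GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
**                      GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
**                      (LPCSTR)&_fmode, &h);
**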
*/ - switch (i) { - case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break; - case CLIB_HANDLE_DLL: - GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - (const char *)clib_def_handle, &h); - break; - case CLIB_HANDLE_CRT: - GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - (const char *)&_fmode, &h); - break; - case CLIB_HANDLE_KERNEL32: h = LoadLibraryExA("kernel32.dll", NULL, 0); break; - case CLIB_HANDLE_USER32: h = LoadLibraryExA("user32.dll", NULL, 0); break; - case CLIB_HANDLE_GDI32: h = LoadLibraryExA("gdi32.dll", NULL, 0); break; - } - if (!h) continue; - clib_def_handle[i] = (void *)h; - } - p = (void *)GetProcAddress(h, name); - if (p) break; - } - } else { - p = (void *)GetProcAddress((HINSTANCE)cl->handle, name); - } - return p; -} - -#else - -#define CLIB_DEFHANDLE NULL - -LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, - const char *name) -{ - lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, "no support for this OS")); -} - -static void *clib_loadlib(lua_State *L, const char *name, int global) -{ - lj_err_callermsg(L, "no support for loading dynamic libraries for this OS"); - UNUSED(name); UNUSED(global); - return NULL; -} - -static void clib_unloadlib(CLibrary *cl) -{ - UNUSED(cl); -} - -static void *clib_getsym(CLibrary *cl, const char *name) -{ - UNUSED(cl); UNUSED(name); - return NULL; -} - -#endif - -/* -- C library indexing -------------------------------------------------- */ - -#if LJ_TARGET_X86 && LJ_ABI_WIN -/* Compute argument size for fastcall/stdcall functions. */ -static CTSize clib_func_argsize(CTState *cts, CType *ct) -{ - CTSize n = 0; - while (ct->sib) { - CType *d; - ct = ctype_get(cts, ct->sib); - if (ctype_isfield(ct->info)) { - d = ctype_rawchild(cts, ct); - n += ((d->size + 3) & ~3); - } - } - return n; -} -#endif - -/* Get redirected or mangled external symbol. */ -static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name) -{ - if (ct->sib) { - CType *ctf = ctype_get(cts, ct->sib); - if (ctype_isxattrib(ctf->info, CTA_REDIR)) - return strdata(gco2str(gcref(ctf->name))); - } - return strdata(name); -} - -/* Index a C library by name. */ -TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name) -{ - TValue *tv = lj_tab_setstr(L, cl->cache, name); - if (LJ_UNLIKELY(tvisnil(tv))) { - CTState *cts = ctype_cts(L); - CType *ct; - CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX); - if (!id) - lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name)); - if (ctype_isconstval(ct->info)) { - CType *ctt = ctype_child(cts, ct); - lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); - if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) - setnumV(tv, (lua_Number)(uint32_t)ct->size); - else - setintV(tv, (int32_t)ct->size); - } else { - const char *sym = clib_extsym(cts, ct, name); -#if LJ_TARGET_WINDOWS - DWORD oldwerr = GetLastError(); -#endif - void *p = clib_getsym(cl, sym); - GCcdata *cd; - lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info)); -#if LJ_TARGET_X86 && LJ_ABI_WIN - /* Retry with decorated name for fastcall/stdcall functions. */ - if (!p && ctype_isfunc(ct->info)) { - CTInfo cconv = ctype_cconv(ct->info); - if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) { - CTSize sz = clib_func_argsize(cts, ct); - const char *symd = lj_strfmt_pushf(L, - cconv == CTCC_FASTCALL ? 
"@%s@%d" : "_%s@%d", - sym, sz); - L->top--; - p = clib_getsym(cl, symd); - } - } -#endif - if (!p) - clib_error(L, "cannot resolve symbol " LUA_QS ": %s", sym); -#if LJ_TARGET_WINDOWS - SetLastError(oldwerr); -#endif - cd = lj_cdata_new(cts, id, CTSIZE_PTR); - *(void **)cdataptr(cd) = p; - setcdataV(L, tv, cd); - } - } - return tv; -} - -/* -- C library management ------------------------------------------------ */ - -/* Create a new CLibrary object and push it on the stack. */ -static CLibrary *clib_new(lua_State *L, GCtab *mt) -{ - GCtab *t = lj_tab_new(L, 0, 0); - GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t); - CLibrary *cl = (CLibrary *)uddata(ud); - cl->cache = t; - ud->udtype = UDTYPE_FFI_CLIB; - /* NOBARRIER: The GCudata is new (marked white). */ - setgcref(ud->metatable, obj2gco(mt)); - setudataV(L, L->top++, ud); - return cl; -} - -/* Load a C library. */ -void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global) -{ - void *handle = clib_loadlib(L, strdata(name), global); - CLibrary *cl = clib_new(L, mt); - cl->handle = handle; -} - -/* Unload a C library. */ -void lj_clib_unload(CLibrary *cl) -{ - clib_unloadlib(cl); - cl->handle = NULL; -} - -/* Create the default C library object. */ -void lj_clib_default(lua_State *L, GCtab *mt) -{ - CLibrary *cl = clib_new(L, mt); - cl->handle = CLIB_DEFHANDLE; -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.h deleted file mode 100644 index e5dc98e1cd7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_clib.h +++ /dev/null @@ -1,29 +0,0 @@ -/* -** FFI C library loader. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CLIB_H -#define _LJ_CLIB_H - -#include "lj_obj.h" - -#if LJ_HASFFI - -/* Namespace for C library indexing. */ -#define CLNS_INDEX ((1u<env. */ -} CLibrary; - -LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name); -LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global); -LJ_FUNC void lj_clib_unload(CLibrary *cl); -LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.c deleted file mode 100644 index f212bd36820..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.c +++ /dev/null @@ -1,1862 +0,0 @@ -/* -** C declaration parser. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_ctype.h" -#include "lj_cparse.h" -#include "lj_frame.h" -#include "lj_vm.h" -#include "lj_char.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" - -/* -** Important note: this is NOT a validating C parser! This is a minimal -** C declaration parser, solely for use by the LuaJIT FFI. -** -** It ought to return correct results for properly formed C declarations, -** but it may accept some invalid declarations, too (and return nonsense). -** Also, it shows rather generic error messages to avoid unnecessary bloat. -** If in doubt, please check the input against your favorite C compiler. -*/ - -/* -- C lexer ------------------------------------------------------------- */ - -/* C lexer token names. 
*/ -static const char *const ctoknames[] = { -#define CTOKSTR(name, str) str, -CTOKDEF(CTOKSTR) -#undef CTOKSTR - NULL -}; - -/* Forward declaration. */ -LJ_NORET static void cp_err(CPState *cp, ErrMsg em); - -static const char *cp_tok2str(CPState *cp, CPToken tok) -{ - lua_assert(tok < CTOK_FIRSTDECL); - if (tok > CTOK_OFS) - return ctoknames[tok-CTOK_OFS-1]; - else if (!lj_char_iscntrl(tok)) - return lj_strfmt_pushf(cp->L, "%c", tok); - else - return lj_strfmt_pushf(cp->L, "char(%d)", tok); -} - -/* End-of-line? */ -static LJ_AINLINE int cp_iseol(CPChar c) -{ - return (c == '\n' || c == '\r'); -} - -/* Peek next raw character. */ -static LJ_AINLINE CPChar cp_rawpeek(CPState *cp) -{ - return (CPChar)(uint8_t)(*cp->p); -} - -static LJ_NOINLINE CPChar cp_get_bs(CPState *cp); - -/* Get next character. */ -static LJ_AINLINE CPChar cp_get(CPState *cp) -{ - cp->c = (CPChar)(uint8_t)(*cp->p++); - if (LJ_LIKELY(cp->c != '\\')) return cp->c; - return cp_get_bs(cp); -} - -/* Transparently skip backslash-escaped line breaks. */ -static LJ_NOINLINE CPChar cp_get_bs(CPState *cp) -{ - CPChar c2, c = cp_rawpeek(cp); - if (!cp_iseol(c)) return cp->c; - cp->p++; - c2 = cp_rawpeek(cp); - if (cp_iseol(c2) && c2 != c) cp->p++; - cp->linenumber++; - return cp_get(cp); -} - -/* Save character in buffer. */ -static LJ_AINLINE void cp_save(CPState *cp, CPChar c) -{ - lj_buf_putb(&cp->sb, c); -} - -/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */ -static void cp_newline(CPState *cp) -{ - CPChar c = cp_rawpeek(cp); - if (cp_iseol(c) && c != cp->c) cp->p++; - cp->linenumber++; -} - -LJ_NORET static void cp_errmsg(CPState *cp, CPToken tok, ErrMsg em, ...) -{ - const char *msg, *tokstr; - lua_State *L; - va_list argp; - if (tok == 0) { - tokstr = NULL; - } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING || - tok >= CTOK_FIRSTDECL) { - if (sbufP(&cp->sb) == sbufB(&cp->sb)) cp_save(cp, '$'); - cp_save(cp, '\0'); - tokstr = sbufB(&cp->sb); - } else { - tokstr = cp_tok2str(cp, tok); - } - L = cp->L; - va_start(argp, em); - msg = lj_strfmt_pushvf(L, err2msg(em), argp); - va_end(argp); - if (tokstr) - msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr); - if (cp->linenumber > 1) - msg = lj_strfmt_pushf(L, "%s at line %d", msg, cp->linenumber); - lj_err_callermsg(L, msg); -} - -LJ_NORET LJ_NOINLINE static void cp_err_token(CPState *cp, CPToken tok) -{ - cp_errmsg(cp, cp->tok, LJ_ERR_XTOKEN, cp_tok2str(cp, tok)); -} - -LJ_NORET LJ_NOINLINE static void cp_err_badidx(CPState *cp, CType *ct) -{ - GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL); - cp_errmsg(cp, 0, LJ_ERR_FFI_BADIDX, strdata(s)); -} - -LJ_NORET LJ_NOINLINE static void cp_err(CPState *cp, ErrMsg em) -{ - cp_errmsg(cp, 0, em); -} - -/* -- Main lexical scanner ------------------------------------------------ */ - -/* Parse number literal. Only handles int32_t/uint32_t right now. */ -static CPToken cp_number(CPState *cp) -{ - StrScanFmt fmt; - TValue o; - do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); - cp_save(cp, '\0'); - fmt = lj_strscan_scan((const uint8_t *)sbufB(&cp->sb), &o, STRSCAN_OPT_C); - if (fmt == STRSCAN_INT) cp->val.id = CTID_INT32; - else if (fmt == STRSCAN_U32) cp->val.id = CTID_UINT32; - else if (!(cp->mode & CPARSE_MODE_SKIP)) - cp_errmsg(cp, CTOK_INTEGER, LJ_ERR_XNUMBER); - cp->val.u32 = (uint32_t)o.i; - return CTOK_INTEGER; -} - -/* Parse identifier or keyword. 
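**
** The name is looked up in the C type name hash, restricted by the current
** namespace mask (cp->tmask): keywords are CT_KW entries whose child ID is
** returned as the token, everything else comes back as CTOK_IDENT. The
** namespace mask is what lets a struct tag and a typedef share a name,
** e.g. (hypothetical declaration):
**
**   typedef struct hyp_node hyp_node;
**   struct hyp_node { hyp_node *next; int v; };
**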
*/ -static CPToken cp_ident(CPState *cp) -{ - do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); - cp->str = lj_buf_str(cp->L, &cp->sb); - cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask); - if (ctype_type(cp->ct->info) == CT_KW) - return ctype_cid(cp->ct->info); - return CTOK_IDENT; -} - -/* Parse parameter. */ -static CPToken cp_param(CPState *cp) -{ - CPChar c = cp_get(cp); - TValue *o = cp->param; - if (lj_char_isident(c) || c == '$') /* Reserve $xyz for future extensions. */ - cp_errmsg(cp, c, LJ_ERR_XSYNTAX); - if (!o || o >= cp->L->top) - cp_err(cp, LJ_ERR_FFI_NUMPARAM); - cp->param = o+1; - if (tvisstr(o)) { - cp->str = strV(o); - cp->val.id = 0; - cp->ct = &cp->cts->tab[0]; - return CTOK_IDENT; - } else if (tvisnumber(o)) { - cp->val.i32 = numberVint(o); - cp->val.id = CTID_INT32; - return CTOK_INTEGER; - } else { - GCcdata *cd; - if (!tviscdata(o)) - lj_err_argtype(cp->L, (int)(o-cp->L->base)+1, "type parameter"); - cd = cdataV(o); - if (cd->ctypeid == CTID_CTYPEID) - cp->val.id = *(CTypeID *)cdataptr(cd); - else - cp->val.id = cd->ctypeid; - return '$'; - } -} - -/* Parse string or character constant. */ -static CPToken cp_string(CPState *cp) -{ - CPChar delim = cp->c; - cp_get(cp); - while (cp->c != delim) { - CPChar c = cp->c; - if (c == '\0') cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); - if (c == '\\') { - c = cp_get(cp); - switch (c) { - case '\0': cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); break; - case 'a': c = '\a'; break; - case 'b': c = '\b'; break; - case 'f': c = '\f'; break; - case 'n': c = '\n'; break; - case 'r': c = '\r'; break; - case 't': c = '\t'; break; - case 'v': c = '\v'; break; - case 'e': c = 27; break; - case 'x': - c = 0; - while (lj_char_isxdigit(cp_get(cp))) - c = (c<<4) + (lj_char_isdigit(cp->c) ? cp->c-'0' : (cp->c&15)+9); - cp_save(cp, (c & 0xff)); - continue; - default: - if (lj_char_isdigit(c)) { - c -= '0'; - if (lj_char_isdigit(cp_get(cp))) { - c = c*8 + (cp->c - '0'); - if (lj_char_isdigit(cp_get(cp))) { - c = c*8 + (cp->c - '0'); - cp_get(cp); - } - } - cp_save(cp, (c & 0xff)); - continue; - } - break; - } - } - cp_save(cp, c); - cp_get(cp); - } - cp_get(cp); - if (delim == '"') { - cp->str = lj_buf_str(cp->L, &cp->sb); - return CTOK_STRING; - } else { - if (sbuflen(&cp->sb) != 1) cp_err_token(cp, '\''); - cp->val.i32 = (int32_t)(char)*sbufB(&cp->sb); - cp->val.id = CTID_INT32; - return CTOK_INTEGER; - } -} - -/* Skip C comment. */ -static void cp_comment_c(CPState *cp) -{ - do { - if (cp_get(cp) == '*') { - do { - if (cp_get(cp) == '/') { cp_get(cp); return; } - } while (cp->c == '*'); - } - if (cp_iseol(cp->c)) cp_newline(cp); - } while (cp->c != '\0'); -} - -/* Skip C++ comment. */ -static void cp_comment_cpp(CPState *cp) -{ - while (!cp_iseol(cp_get(cp)) && cp->c != '\0') - ; -} - -/* Lexical scanner for C. Only a minimal subset is implemented. */ -static CPToken cp_next_(CPState *cp) -{ - lj_buf_reset(&cp->sb); - for (;;) { - if (lj_char_isident(cp->c)) - return lj_char_isdigit(cp->c) ? cp_number(cp) : cp_ident(cp); - switch (cp->c) { - case '\n': case '\r': cp_newline(cp); /* fallthrough. 
*/ - case ' ': case '\t': case '\v': case '\f': cp_get(cp); break; - case '"': case '\'': return cp_string(cp); - case '/': - if (cp_get(cp) == '*') cp_comment_c(cp); - else if (cp->c == '/') cp_comment_cpp(cp); - else return '/'; - break; - case '|': - if (cp_get(cp) != '|') return '|'; cp_get(cp); return CTOK_OROR; - case '&': - if (cp_get(cp) != '&') return '&'; cp_get(cp); return CTOK_ANDAND; - case '=': - if (cp_get(cp) != '=') return '='; cp_get(cp); return CTOK_EQ; - case '!': - if (cp_get(cp) != '=') return '!'; cp_get(cp); return CTOK_NE; - case '<': - if (cp_get(cp) == '=') { cp_get(cp); return CTOK_LE; } - else if (cp->c == '<') { cp_get(cp); return CTOK_SHL; } - return '<'; - case '>': - if (cp_get(cp) == '=') { cp_get(cp); return CTOK_GE; } - else if (cp->c == '>') { cp_get(cp); return CTOK_SHR; } - return '>'; - case '-': - if (cp_get(cp) != '>') return '-'; cp_get(cp); return CTOK_DEREF; - case '$': - return cp_param(cp); - case '\0': return CTOK_EOF; - default: { CPToken c = cp->c; cp_get(cp); return c; } - } - } -} - -static LJ_NOINLINE CPToken cp_next(CPState *cp) -{ - return (cp->tok = cp_next_(cp)); -} - -/* -- C parser ------------------------------------------------------------ */ - -/* Namespaces for resolving identifiers. */ -#define CPNS_DEFAULT \ - ((1u<linenumber = 1; - cp->depth = 0; - cp->curpack = 0; - cp->packstack[0] = 255; - lj_buf_init(cp->L, &cp->sb); - lua_assert(cp->p != NULL); - cp_get(cp); /* Read-ahead first char. */ - cp->tok = 0; - cp->tmask = CPNS_DEFAULT; - cp_next(cp); /* Read-ahead first token. */ -} - -/* Cleanup C parser state. */ -static void cp_cleanup(CPState *cp) -{ - global_State *g = G(cp->L); - lj_buf_free(g, &cp->sb); -} - -/* Check and consume optional token. */ -static int cp_opt(CPState *cp, CPToken tok) -{ - if (cp->tok == tok) { cp_next(cp); return 1; } - return 0; -} - -/* Check and consume token. */ -static void cp_check(CPState *cp, CPToken tok) -{ - if (cp->tok != tok) cp_err_token(cp, tok); - cp_next(cp); -} - -/* Check if the next token may start a type declaration. */ -static int cp_istypedecl(CPState *cp) -{ - if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECL) return 1; - if (cp->tok == CTOK_IDENT && ctype_istypedef(cp->ct->info)) return 1; - if (cp->tok == '$') return 1; - return 0; -} - -/* -- Constant expression evaluator --------------------------------------- */ - -/* Forward declarations. */ -static void cp_expr_unary(CPState *cp, CPValue *k); -static void cp_expr_sub(CPState *cp, CPValue *k, int pri); - -/* Please note that type handling is very weak here. Most ops simply -** assume integer operands. Accessors are only needed to compute types and -** return synthetic values. The only purpose of the expression evaluator -** is to compute the values of constant expressions one would typically -** find in C header files. And again: this is NOT a validating C parser! -*/ - -/* Parse comma separated expression and return last result. */ -static void cp_expr_comma(CPState *cp, CPValue *k) -{ - do { cp_expr_sub(cp, k, 0); } while (cp_opt(cp, ',')); -} - -/* Parse sizeof/alignof operator. 
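**
** Together with the expression evaluator above, this is what makes constant
** array sizes and similar expressions work inside declarations, e.g.
** (hypothetical declaration):
**
**   struct hyp_buf { char data[(1 << 10) + 2*sizeof(int)]; };
**
** sizeof/alignof accept either a parenthesized abstract type or a unary
** expression; the result is always reported as uint32_t (really size_t).
**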
*/ -static void cp_expr_sizeof(CPState *cp, CPValue *k, int wantsz) -{ - CTSize sz; - CTInfo info; - if (cp_opt(cp, '(')) { - if (cp_istypedecl(cp)) - k->id = cp_decl_abstract(cp); - else - cp_expr_comma(cp, k); - cp_check(cp, ')'); - } else { - cp_expr_unary(cp, k); - } - info = lj_ctype_info(cp->cts, k->id, &sz); - if (wantsz) { - if (sz != CTSIZE_INVALID) - k->u32 = sz; - else if (k->id != CTID_A_CCHAR) /* Special case for sizeof("string"). */ - cp_err(cp, LJ_ERR_FFI_INVSIZE); - } else { - k->u32 = 1u << ctype_align(info); - } - k->id = CTID_UINT32; /* Really size_t. */ -} - -/* Parse prefix operators. */ -static void cp_expr_prefix(CPState *cp, CPValue *k) -{ - if (cp->tok == CTOK_INTEGER) { - *k = cp->val; cp_next(cp); - } else if (cp_opt(cp, '+')) { - cp_expr_unary(cp, k); /* Nothing to do (well, integer promotion). */ - } else if (cp_opt(cp, '-')) { - cp_expr_unary(cp, k); k->i32 = -k->i32; - } else if (cp_opt(cp, '~')) { - cp_expr_unary(cp, k); k->i32 = ~k->i32; - } else if (cp_opt(cp, '!')) { - cp_expr_unary(cp, k); k->i32 = !k->i32; k->id = CTID_INT32; - } else if (cp_opt(cp, '(')) { - if (cp_istypedecl(cp)) { /* Cast operator. */ - CTypeID id = cp_decl_abstract(cp); - cp_check(cp, ')'); - cp_expr_unary(cp, k); - k->id = id; /* No conversion performed. */ - } else { /* Sub-expression. */ - cp_expr_comma(cp, k); - cp_check(cp, ')'); - } - } else if (cp_opt(cp, '*')) { /* Indirection. */ - CType *ct; - cp_expr_unary(cp, k); - ct = lj_ctype_rawref(cp->cts, k->id); - if (!ctype_ispointer(ct->info)) - cp_err_badidx(cp, ct); - k->u32 = 0; k->id = ctype_cid(ct->info); - } else if (cp_opt(cp, '&')) { /* Address operator. */ - cp_expr_unary(cp, k); - k->id = lj_ctype_intern(cp->cts, CTINFO(CT_PTR, CTALIGN_PTR+k->id), - CTSIZE_PTR); - } else if (cp_opt(cp, CTOK_SIZEOF)) { - cp_expr_sizeof(cp, k, 1); - } else if (cp_opt(cp, CTOK_ALIGNOF)) { - cp_expr_sizeof(cp, k, 0); - } else if (cp->tok == CTOK_IDENT) { - if (ctype_type(cp->ct->info) == CT_CONSTVAL) { - k->u32 = cp->ct->size; k->id = ctype_cid(cp->ct->info); - } else if (ctype_type(cp->ct->info) == CT_EXTERN) { - k->u32 = cp->val.id; k->id = ctype_cid(cp->ct->info); - } else if (ctype_type(cp->ct->info) == CT_FUNC) { - k->u32 = cp->val.id; k->id = cp->val.id; - } else { - goto err_expr; - } - cp_next(cp); - } else if (cp->tok == CTOK_STRING) { - CTSize sz = cp->str->len; - while (cp_next(cp) == CTOK_STRING) - sz += cp->str->len; - k->u32 = sz + 1; - k->id = CTID_A_CCHAR; - } else { - err_expr: - cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); - } -} - -/* Parse postfix operators. */ -static void cp_expr_postfix(CPState *cp, CPValue *k) -{ - for (;;) { - CType *ct; - if (cp_opt(cp, '[')) { /* Array/pointer index. */ - CPValue k2; - cp_expr_comma(cp, &k2); - ct = lj_ctype_rawref(cp->cts, k->id); - if (!ctype_ispointer(ct->info)) { - ct = lj_ctype_rawref(cp->cts, k2.id); - if (!ctype_ispointer(ct->info)) - cp_err_badidx(cp, ct); - } - cp_check(cp, ']'); - k->u32 = 0; - } else if (cp->tok == '.' || cp->tok == CTOK_DEREF) { /* Struct deref. 
*/ - CTSize ofs; - CType *fct; - ct = lj_ctype_rawref(cp->cts, k->id); - if (cp->tok == CTOK_DEREF) { - if (!ctype_ispointer(ct->info)) - cp_err_badidx(cp, ct); - ct = lj_ctype_rawref(cp->cts, ctype_cid(ct->info)); - } - cp_next(cp); - if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); - if (!ctype_isstruct(ct->info) || ct->size == CTSIZE_INVALID || - !(fct = lj_ctype_getfield(cp->cts, ct, cp->str, &ofs)) || - ctype_isbitfield(fct->info)) { - GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL); - cp_errmsg(cp, 0, LJ_ERR_FFI_BADMEMBER, strdata(s), strdata(cp->str)); - } - ct = fct; - k->u32 = ctype_isconstval(ct->info) ? ct->size : 0; - cp_next(cp); - } else { - return; - } - k->id = ctype_cid(ct->info); - } -} - -/* Parse infix operators. */ -static void cp_expr_infix(CPState *cp, CPValue *k, int pri) -{ - CPValue k2; - k2.u32 = 0; k2.id = 0; /* Silence the compiler. */ - for (;;) { - switch (pri) { - case 0: - if (cp_opt(cp, '?')) { - CPValue k3; - cp_expr_comma(cp, &k2); /* Right-associative. */ - cp_check(cp, ':'); - cp_expr_sub(cp, &k3, 0); - k->u32 = k->u32 ? k2.u32 : k3.u32; - k->id = k2.id > k3.id ? k2.id : k3.id; - continue; - } - case 1: - if (cp_opt(cp, CTOK_OROR)) { - cp_expr_sub(cp, &k2, 2); k->i32 = k->u32 || k2.u32; k->id = CTID_INT32; - continue; - } - case 2: - if (cp_opt(cp, CTOK_ANDAND)) { - cp_expr_sub(cp, &k2, 3); k->i32 = k->u32 && k2.u32; k->id = CTID_INT32; - continue; - } - case 3: - if (cp_opt(cp, '|')) { - cp_expr_sub(cp, &k2, 4); k->u32 = k->u32 | k2.u32; goto arith_result; - } - case 4: - if (cp_opt(cp, '^')) { - cp_expr_sub(cp, &k2, 5); k->u32 = k->u32 ^ k2.u32; goto arith_result; - } - case 5: - if (cp_opt(cp, '&')) { - cp_expr_sub(cp, &k2, 6); k->u32 = k->u32 & k2.u32; goto arith_result; - } - case 6: - if (cp_opt(cp, CTOK_EQ)) { - cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 == k2.u32; k->id = CTID_INT32; - continue; - } else if (cp_opt(cp, CTOK_NE)) { - cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 != k2.u32; k->id = CTID_INT32; - continue; - } - case 7: - if (cp_opt(cp, '<')) { - cp_expr_sub(cp, &k2, 8); - if (k->id == CTID_INT32 && k2.id == CTID_INT32) - k->i32 = k->i32 < k2.i32; - else - k->i32 = k->u32 < k2.u32; - k->id = CTID_INT32; - continue; - } else if (cp_opt(cp, '>')) { - cp_expr_sub(cp, &k2, 8); - if (k->id == CTID_INT32 && k2.id == CTID_INT32) - k->i32 = k->i32 > k2.i32; - else - k->i32 = k->u32 > k2.u32; - k->id = CTID_INT32; - continue; - } else if (cp_opt(cp, CTOK_LE)) { - cp_expr_sub(cp, &k2, 8); - if (k->id == CTID_INT32 && k2.id == CTID_INT32) - k->i32 = k->i32 <= k2.i32; - else - k->i32 = k->u32 <= k2.u32; - k->id = CTID_INT32; - continue; - } else if (cp_opt(cp, CTOK_GE)) { - cp_expr_sub(cp, &k2, 8); - if (k->id == CTID_INT32 && k2.id == CTID_INT32) - k->i32 = k->i32 >= k2.i32; - else - k->i32 = k->u32 >= k2.u32; - k->id = CTID_INT32; - continue; - } - case 8: - if (cp_opt(cp, CTOK_SHL)) { - cp_expr_sub(cp, &k2, 9); k->u32 = k->u32 << k2.u32; - continue; - } else if (cp_opt(cp, CTOK_SHR)) { - cp_expr_sub(cp, &k2, 9); - if (k->id == CTID_INT32) - k->i32 = k->i32 >> k2.i32; - else - k->u32 = k->u32 >> k2.u32; - continue; - } - case 9: - if (cp_opt(cp, '+')) { - cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 + k2.u32; - arith_result: - if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. 
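**
** For plain integer operands only CTID_INT32 and CTID_UINT32 occur, and the
** unsigned ID is the numerically larger one, so taking the maximum of the
** two type IDs is all the "usual arithmetic conversion" this evaluator
** does. As in C, a mixed signed/unsigned operation is therefore carried out
** unsigned; e.g. the (hypothetical) constant  (-1 < 1u)  evaluates to 0
** here as well.
**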
*/ - continue; - } else if (cp_opt(cp, '-')) { - cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 - k2.u32; goto arith_result; - } - case 10: - if (cp_opt(cp, '*')) { - cp_expr_unary(cp, &k2); k->u32 = k->u32 * k2.u32; goto arith_result; - } else if (cp_opt(cp, '/')) { - cp_expr_unary(cp, &k2); - if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ - if (k2.u32 == 0 || - (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1)) - cp_err(cp, LJ_ERR_BADVAL); - if (k->id == CTID_INT32) - k->i32 = k->i32 / k2.i32; - else - k->u32 = k->u32 / k2.u32; - continue; - } else if (cp_opt(cp, '%')) { - cp_expr_unary(cp, &k2); - if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ - if (k2.u32 == 0 || - (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1)) - cp_err(cp, LJ_ERR_BADVAL); - if (k->id == CTID_INT32) - k->i32 = k->i32 % k2.i32; - else - k->u32 = k->u32 % k2.u32; - continue; - } - default: - return; - } - } -} - -/* Parse and evaluate unary expression. */ -static void cp_expr_unary(CPState *cp, CPValue *k) -{ - if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS); - cp_expr_prefix(cp, k); - cp_expr_postfix(cp, k); - cp->depth--; -} - -/* Parse and evaluate sub-expression. */ -static void cp_expr_sub(CPState *cp, CPValue *k, int pri) -{ - cp_expr_unary(cp, k); - cp_expr_infix(cp, k, pri); -} - -/* Parse constant integer expression. */ -static void cp_expr_kint(CPState *cp, CPValue *k) -{ - CType *ct; - cp_expr_sub(cp, k, 0); - ct = ctype_raw(cp->cts, k->id); - if (!ctype_isinteger(ct->info)) cp_err(cp, LJ_ERR_BADVAL); -} - -/* Parse (non-negative) size expression. */ -static CTSize cp_expr_ksize(CPState *cp) -{ - CPValue k; - cp_expr_kint(cp, &k); - if (k.u32 >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE); - return k.u32; -} - -/* -- Type declaration stack management ----------------------------------- */ - -/* Add declaration element behind the insertion position. */ -static CPDeclIdx cp_add(CPDecl *decl, CTInfo info, CTSize size) -{ - CPDeclIdx top = decl->top; - if (top >= CPARSE_MAX_DECLSTACK) cp_err(decl->cp, LJ_ERR_XLEVELS); - decl->stack[top].info = info; - decl->stack[top].size = size; - decl->stack[top].sib = 0; - setgcrefnull(decl->stack[top].name); - decl->stack[top].next = decl->stack[decl->pos].next; - decl->stack[decl->pos].next = (CTypeID1)top; - decl->top = top+1; - return top; -} - -/* Push declaration element before the insertion position. */ -static CPDeclIdx cp_push(CPDecl *decl, CTInfo info, CTSize size) -{ - return (decl->pos = cp_add(decl, info, size)); -} - -/* Push or merge attributes. */ -static void cp_push_attributes(CPDecl *decl) -{ - CType *ct = &decl->stack[decl->pos]; - if (ctype_isfunc(ct->info)) { /* Ok to modify in-place. */ -#if LJ_TARGET_X86 - if ((decl->fattr & CTFP_CCONV)) - ct->info = (ct->info & (CTMASK_NUM|CTF_VARARG|CTMASK_CID)) + - (decl->fattr & ~CTMASK_CID); -#endif - } else { - if ((decl->attr & CTFP_ALIGNED) && !(decl->mode & CPARSE_MODE_FIELD)) - cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_ALIGN)), - ctype_align(decl->attr)); - } -} - -/* Push unrolled type to declaration stack and merge qualifiers. */ -static void cp_push_type(CPDecl *decl, CTypeID id) -{ - CType *ct = ctype_get(decl->cp->cts, id); - CTInfo info = ct->info; - CTSize size = ct->size; - switch (ctype_type(info)) { - case CT_STRUCT: case CT_ENUM: - cp_push(decl, CTINFO(CT_TYPEDEF, id), 0); /* Don't copy unique types. */ - if ((decl->attr & CTF_QUAL)) { /* Push unmerged qualifiers. 
*/ - cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_QUAL)), - (decl->attr & CTF_QUAL)); - decl->attr &= ~CTF_QUAL; - } - break; - case CT_ATTRIB: - if (ctype_isxattrib(info, CTA_QUAL)) - decl->attr &= ~size; /* Remove redundant qualifiers. */ - cp_push_type(decl, ctype_cid(info)); /* Unroll. */ - cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */ - break; - case CT_ARRAY: - if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) { - info |= (decl->attr & CTF_QUAL); - decl->attr &= ~CTF_QUAL; - } - cp_push_type(decl, ctype_cid(info)); /* Unroll. */ - cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */ - decl->stack[decl->pos].sib = 1; /* Mark as already checked and sized. */ - /* Note: this is not copied to the ct->sib in the C type table. */ - break; - case CT_FUNC: - /* Copy type, link parameters (shared). */ - decl->stack[cp_push(decl, info, size)].sib = ct->sib; - break; - default: - /* Copy type, merge common qualifiers. */ - cp_push(decl, info|(decl->attr & CTF_QUAL), size); - decl->attr &= ~CTF_QUAL; - break; - } -} - -/* Consume the declaration element chain and intern the C type. */ -static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl) -{ - CTypeID id = 0; - CPDeclIdx idx = 0; - CTSize csize = CTSIZE_INVALID; - CTSize cinfo = 0; - do { - CType *ct = &decl->stack[idx]; - CTInfo info = ct->info; - CTInfo size = ct->size; - /* The cid is already part of info for copies of pointers/functions. */ - idx = ct->next; - if (ctype_istypedef(info)) { - lua_assert(id == 0); - id = ctype_cid(info); - /* Always refetch info/size, since struct/enum may have been completed. */ - cinfo = ctype_get(cp->cts, id)->info; - csize = ctype_get(cp->cts, id)->size; - lua_assert(ctype_isstruct(cinfo) || ctype_isenum(cinfo)); - } else if (ctype_isfunc(info)) { /* Intern function. */ - CType *fct; - CTypeID fid; - CTypeID sib; - if (id) { - CType *refct = ctype_raw(cp->cts, id); - /* Reject function or refarray return types. */ - if (ctype_isfunc(refct->info) || ctype_isrefarray(refct->info)) - cp_err(cp, LJ_ERR_FFI_INVTYPE); - } - /* No intervening attributes allowed, skip forward. */ - while (idx) { - CType *ctn = &decl->stack[idx]; - if (!ctype_isattrib(ctn->info)) break; - idx = ctn->next; /* Skip attribute. */ - } - sib = ct->sib; /* Next line may reallocate the C type table. */ - fid = lj_ctype_new(cp->cts, &fct); - csize = CTSIZE_INVALID; - fct->info = cinfo = info + id; - fct->size = size; - fct->sib = sib; - id = fid; - } else if (ctype_isattrib(info)) { - if (ctype_isxattrib(info, CTA_QUAL)) - cinfo |= size; - else if (ctype_isxattrib(info, CTA_ALIGN)) - CTF_INSERT(cinfo, ALIGN, size); - id = lj_ctype_intern(cp->cts, info+id, size); - /* Inherit csize/cinfo from original type. */ - } else { - if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */ - lua_assert(id == 0); - if (!(info & CTF_BOOL)) { - CTSize msize = ctype_msizeP(decl->attr); - CTSize vsize = ctype_vsizeP(decl->attr); - if (msize && (!(info & CTF_FP) || (msize == 4 || msize == 8))) { - CTSize malign = lj_fls(msize); - if (malign > 4) malign = 4; /* Limit alignment. */ - CTF_INSERT(info, ALIGN, malign); - size = msize; /* Override size via mode. */ - } - if (vsize) { /* Vector size set? */ - CTSize esize = lj_fls(size); - if (vsize >= esize) { - /* Intern the element type first. */ - id = lj_ctype_intern(cp->cts, info, size); - /* Then create a vector (array) with vsize alignment. */ - size = (1u << vsize); - if (vsize > 4) vsize = 4; /* Limit alignment. 
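**
** Worked example for the vector path (GCC-style attribute, hypothetical
** typedef): with
**
**   typedef int hyp_v4si __attribute__((vector_size(16)));
**
** the element type int (size 4) is interned first, then a CT_ARRAY with
** CTF_VECTOR is created with size 16; its alignment is 2^vsize, capped at
** 16 bytes here and never below the element alignment.
**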
*/ - if (ctype_align(info) > vsize) vsize = ctype_align(info); - info = CTINFO(CT_ARRAY, (info & CTF_QUAL) + CTF_VECTOR + - CTALIGN(vsize)); - } - } - } - } else if (ctype_isptr(info)) { - /* Reject pointer/ref to ref. */ - if (id && ctype_isref(ctype_raw(cp->cts, id)->info)) - cp_err(cp, LJ_ERR_FFI_INVTYPE); - if (ctype_isref(info)) { - info &= ~CTF_VOLATILE; /* Refs are always const, never volatile. */ - /* No intervening attributes allowed, skip forward. */ - while (idx) { - CType *ctn = &decl->stack[idx]; - if (!ctype_isattrib(ctn->info)) break; - idx = ctn->next; /* Skip attribute. */ - } - } - } else if (ctype_isarray(info)) { /* Check for valid array size etc. */ - if (ct->sib == 0) { /* Only check/size arrays not copied by unroll. */ - if (ctype_isref(cinfo)) /* Reject arrays of refs. */ - cp_err(cp, LJ_ERR_FFI_INVTYPE); - /* Reject VLS or unknown-sized types. */ - if (ctype_isvltype(cinfo) || csize == CTSIZE_INVALID) - cp_err(cp, LJ_ERR_FFI_INVSIZE); - /* a[] and a[?] keep their invalid size. */ - if (size != CTSIZE_INVALID) { - uint64_t xsz = (uint64_t)size * csize; - if (xsz >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE); - size = (CTSize)xsz; - } - } - if ((cinfo & CTF_ALIGN) > (info & CTF_ALIGN)) /* Find max. align. */ - info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN); - info |= (cinfo & CTF_QUAL); /* Inherit qual. */ - } else { - lua_assert(ctype_isvoid(info)); - } - csize = size; - cinfo = info+id; - id = lj_ctype_intern(cp->cts, info+id, size); - } - } while (idx); - return id; -} - -/* -- C declaration parser ------------------------------------------------ */ - -#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be) - -/* Reset declaration state to declaration specifier. */ -static void cp_decl_reset(CPDecl *decl) -{ - decl->pos = decl->specpos; - decl->top = decl->specpos+1; - decl->stack[decl->specpos].next = 0; - decl->attr = decl->specattr; - decl->fattr = decl->specfattr; - decl->name = NULL; - decl->redir = NULL; -} - -/* Parse constant initializer. */ -/* NYI: FP constants and strings as initializers. */ -static CTypeID cp_decl_constinit(CPState *cp, CType **ctp, CTypeID ctypeid) -{ - CType *ctt = ctype_get(cp->cts, ctypeid); - CTInfo info; - CTSize size; - CPValue k; - CTypeID constid; - while (ctype_isattrib(ctt->info)) { /* Skip attributes. */ - ctypeid = ctype_cid(ctt->info); /* Update ID, too. */ - ctt = ctype_get(cp->cts, ctypeid); - } - info = ctt->info; - size = ctt->size; - if (!ctype_isinteger(info) || !(info & CTF_CONST) || size > 4) - cp_err(cp, LJ_ERR_FFI_INVTYPE); - cp_check(cp, '='); - cp_expr_sub(cp, &k, 0); - constid = lj_ctype_new(cp->cts, ctp); - (*ctp)->info = CTINFO(CT_CONSTVAL, CTF_CONST|ctypeid); - k.u32 <<= 8*(4-size); - if ((info & CTF_UNSIGNED)) - k.u32 >>= 8*(4-size); - else - k.u32 = (uint32_t)((int32_t)k.u32 >> 8*(4-size)); - (*ctp)->size = k.u32; - return constid; -} - -/* Parse size in parentheses as part of attribute. */ -static CTSize cp_decl_sizeattr(CPState *cp) -{ - CTSize sz; - uint32_t oldtmask = cp->tmask; - cp->tmask = CPNS_DEFAULT; /* Required for expression evaluator. */ - cp_check(cp, '('); - sz = cp_expr_ksize(cp); - cp->tmask = oldtmask; - cp_check(cp, ')'); - return sz; -} - -/* Parse alignment attribute. */ -static void cp_decl_align(CPState *cp, CPDecl *decl) -{ - CTSize al = 4; /* Unspecified alignment is 16 bytes. */ - if (cp->tok == '(') { - al = cp_decl_sizeattr(cp); - al = al ? lj_fls(al) : 0; - } - CTF_INSERT(decl->attr, ALIGN, al); - decl->attr |= CTFP_ALIGNED; -} - -/* Parse GCC asm("name") redirect. 
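**
** This accepts the GCC symbol-redirection syntax so that a declaration can
** bind to a differently named export, e.g. (hypothetical names):
**
**   int hyp_stat(const char *path, void *buf) asm("hyp_stat64");
**
** The string becomes a CTA_REDIR attribute and clib_extsym() in lj_clib.c
** then resolves "hyp_stat64" instead of the declared name.
**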
*/ -static void cp_decl_asm(CPState *cp, CPDecl *decl) -{ - UNUSED(decl); - cp_next(cp); - cp_check(cp, '('); - if (cp->tok == CTOK_STRING) { - GCstr *str = cp->str; - while (cp_next(cp) == CTOK_STRING) { - lj_strfmt_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str)); - cp->L->top--; - str = strV(cp->L->top); - } - decl->redir = str; - } - cp_check(cp, ')'); -} - -/* Parse GCC __attribute__((mode(...))). */ -static void cp_decl_mode(CPState *cp, CPDecl *decl) -{ - cp_check(cp, '('); - if (cp->tok == CTOK_IDENT) { - const char *s = strdata(cp->str); - CTSize sz = 0, vlen = 0; - if (s[0] == '_' && s[1] == '_') s += 2; - if (*s == 'V') { - s++; - vlen = *s++ - '0'; - if (*s >= '0' && *s <= '9') - vlen = vlen*10 + (*s++ - '0'); - } - switch (*s++) { - case 'Q': sz = 1; break; - case 'H': sz = 2; break; - case 'S': sz = 4; break; - case 'D': sz = 8; break; - case 'T': sz = 16; break; - case 'O': sz = 32; break; - default: goto bad_size; - } - if (*s == 'I' || *s == 'F') { - CTF_INSERT(decl->attr, MSIZEP, sz); - if (vlen) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vlen*sz)); - } - bad_size: - cp_next(cp); - } - cp_check(cp, ')'); -} - -/* Parse GCC __attribute__((...)). */ -static void cp_decl_gccattribute(CPState *cp, CPDecl *decl) -{ - cp_next(cp); - cp_check(cp, '('); - cp_check(cp, '('); - while (cp->tok != ')') { - if (cp->tok == CTOK_IDENT) { - GCstr *attrstr = cp->str; - cp_next(cp); - switch (attrstr->hash) { - case H_(64a9208e,8ce14319): case H_(8e6331b2,95a282af): /* aligned */ - cp_decl_align(cp, decl); - break; - case H_(42eb47de,f0ede26c): case H_(29f48a09,cf383e0c): /* packed */ - decl->attr |= CTFP_PACKED; - break; - case H_(0a84eef6,8dfab04c): case H_(995cf92c,d5696591): /* mode */ - cp_decl_mode(cp, decl); - break; - case H_(0ab31997,2d5213fa): case H_(bf875611,200e9990): /* vector_size */ - { - CTSize vsize = cp_decl_sizeattr(cp); - if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize)); - } - break; -#if LJ_TARGET_X86 - case H_(5ad22db8,c689b848): case H_(439150fa,65ea78cb): /* regparm */ - CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp)); - decl->fattr |= CTFP_CCONV; - break; - case H_(18fc0b98,7ff4c074): case H_(4e62abed,0a747424): /* cdecl */ - CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL); - decl->fattr |= CTFP_CCONV; - break; - case H_(72b2e41b,494c5a44): case H_(f2356d59,f25fc9bd): /* thiscall */ - CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL); - decl->fattr |= CTFP_CCONV; - break; - case H_(0d0ffc42,ab746f88): case H_(21c54ba1,7f0ca7e3): /* fastcall */ - CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL); - decl->fattr |= CTFP_CCONV; - break; - case H_(ef76b040,9412e06a): case H_(de56697b,c750e6e1): /* stdcall */ - CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL); - decl->fattr |= CTFP_CCONV; - break; - case H_(ea78b622,f234bd8e): case H_(252ffb06,8d50f34b): /* sseregparm */ - decl->fattr |= CTF_SSEREGPARM; - decl->fattr |= CTFP_CCONV; - break; -#endif - default: /* Skip all other attributes. */ - goto skip_attr; - } - } else if (cp->tok >= CTOK_FIRSTDECL) { /* For __attribute((const)) etc. */ - cp_next(cp); - skip_attr: - if (cp_opt(cp, '(')) { - while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); - cp_check(cp, ')'); - } - } else { - break; - } - if (!cp_opt(cp, ',')) break; - } - cp_check(cp, ')'); - cp_check(cp, ')'); -} - -/* Parse MSVC __declspec(...). 
*/ -static void cp_decl_msvcattribute(CPState *cp, CPDecl *decl) -{ - cp_next(cp); - cp_check(cp, '('); - while (cp->tok == CTOK_IDENT) { - GCstr *attrstr = cp->str; - cp_next(cp); - switch (attrstr->hash) { - case H_(bc2395fa,98f267f8): /* align */ - cp_decl_align(cp, decl); - break; - default: /* Ignore all other attributes. */ - if (cp_opt(cp, '(')) { - while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); - cp_check(cp, ')'); - } - break; - } - } - cp_check(cp, ')'); -} - -/* Parse declaration attributes (and common qualifiers). */ -static void cp_decl_attributes(CPState *cp, CPDecl *decl) -{ - for (;;) { - switch (cp->tok) { - case CTOK_CONST: decl->attr |= CTF_CONST; break; - case CTOK_VOLATILE: decl->attr |= CTF_VOLATILE; break; - case CTOK_RESTRICT: break; /* Ignore. */ - case CTOK_EXTENSION: break; /* Ignore. */ - case CTOK_ATTRIBUTE: cp_decl_gccattribute(cp, decl); continue; - case CTOK_ASM: cp_decl_asm(cp, decl); continue; - case CTOK_DECLSPEC: cp_decl_msvcattribute(cp, decl); continue; - case CTOK_CCDECL: -#if LJ_TARGET_X86 - CTF_INSERT(decl->fattr, CCONV, cp->ct->size); - decl->fattr |= CTFP_CCONV; -#endif - break; - case CTOK_PTRSZ: -#if LJ_64 - CTF_INSERT(decl->attr, MSIZEP, cp->ct->size); -#endif - break; - default: return; - } - cp_next(cp); - } -} - -/* Parse struct/union/enum name. */ -static CTypeID cp_struct_name(CPState *cp, CPDecl *sdecl, CTInfo info) -{ - CTypeID sid; - CType *ct; - cp->tmask = CPNS_STRUCT; - cp_next(cp); - cp_decl_attributes(cp, sdecl); - cp->tmask = CPNS_DEFAULT; - if (cp->tok != '{') { - if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); - if (cp->val.id) { /* Name of existing struct/union/enum. */ - sid = cp->val.id; - ct = cp->ct; - if ((ct->info ^ info) & (CTMASK_NUM|CTF_UNION)) /* Wrong type. */ - cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name)))); - } else { /* Create named, incomplete struct/union/enum. */ - if ((cp->mode & CPARSE_MODE_NOIMPLICIT)) - cp_errmsg(cp, 0, LJ_ERR_FFI_BADTAG, strdata(cp->str)); - sid = lj_ctype_new(cp->cts, &ct); - ct->info = info; - ct->size = CTSIZE_INVALID; - ctype_setname(ct, cp->str); - lj_ctype_addname(cp->cts, ct, sid); - } - cp_next(cp); - } else { /* Create anonymous, incomplete struct/union/enum. */ - sid = lj_ctype_new(cp->cts, &ct); - ct->info = info; - ct->size = CTSIZE_INVALID; - } - if (cp->tok == '{') { - if (ct->size != CTSIZE_INVALID || ct->sib) - cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name)))); - ct->sib = 1; /* Indicate the type is currently being defined. */ - } - return sid; -} - -/* Determine field alignment. */ -static CTSize cp_field_align(CPState *cp, CType *ct, CTInfo info) -{ - CTSize align = ctype_align(info); - UNUSED(cp); UNUSED(ct); -#if (LJ_TARGET_X86 && !LJ_ABI_WIN) || (LJ_TARGET_ARM && __APPLE__) - /* The SYSV i386 and iOS ABIs limit alignment of non-vector fields to 2^2. */ - if (align > 2 && !(info & CTFP_ALIGNED)) { - if (ctype_isarray(info) && !(info & CTF_VECTOR)) { - do { - ct = ctype_rawchild(cp->cts, ct); - info = ct->info; - } while (ctype_isarray(info) && !(info & CTF_VECTOR)); - } - if (ctype_isnum(info) || ctype_isenum(info)) - align = 2; - } -#endif - return align; -} - -/* Layout struct/union fields. */ -static void cp_struct_layout(CPState *cp, CTypeID sid, CTInfo sattr) -{ - CTSize bofs = 0, bmaxofs = 0; /* Bit offset and max. bit offset. 
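**
** All layout below is done in bits; align is log2(bytes), so the alignment
** mask in bits is (8u << align) - 1. A small worked example (hypothetical
** struct, default x86/x64 rules, no packing):
**
**   struct hyp_s { char c; int i; short s; };
**
** c lands at bit offset 0, i is aligned to 32 bits (byte offset 4), s
** follows at byte offset 8; maxalign is 4 bytes, so the total size is
** rounded up to 12 bytes.
**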
*/ - CTSize maxalign = ctype_align(sattr); - CType *sct = ctype_get(cp->cts, sid); - CTInfo sinfo = sct->info; - CTypeID fieldid = sct->sib; - while (fieldid) { - CType *ct = ctype_get(cp->cts, fieldid); - CTInfo attr = ct->size; /* Field declaration attributes (temp.). */ - - if (ctype_isfield(ct->info) || - (ctype_isxattrib(ct->info, CTA_SUBTYPE) && attr)) { - CTSize align, amask; /* Alignment (pow2) and alignment mask (bits). */ - CTSize sz; - CTInfo info = lj_ctype_info(cp->cts, ctype_cid(ct->info), &sz); - CTSize bsz, csz = 8*sz; /* Field size and container size (in bits). */ - sinfo |= (info & (CTF_QUAL|CTF_VLA)); /* Merge pseudo-qualifiers. */ - - /* Check for size overflow and determine alignment. */ - if (sz >= 0x20000000u || bofs + csz < bofs || (info & CTF_VLA)) { - if (!(sz == CTSIZE_INVALID && ctype_isarray(info) && - !(sinfo & CTF_UNION))) - cp_err(cp, LJ_ERR_FFI_INVSIZE); - csz = sz = 0; /* Treat a[] and a[?] as zero-sized. */ - } - align = cp_field_align(cp, ct, info); - if (((attr|sattr) & CTFP_PACKED) || - ((attr & CTFP_ALIGNED) && ctype_align(attr) > align)) - align = ctype_align(attr); - if (cp->packstack[cp->curpack] < align) - align = cp->packstack[cp->curpack]; - if (align > maxalign) maxalign = align; - amask = (8u << align) - 1; - - bsz = ctype_bitcsz(ct->info); /* Bitfield size (temp.). */ - if (bsz == CTBSZ_FIELD || !ctype_isfield(ct->info)) { - bsz = csz; /* Regular fields or subtypes always fill the container. */ - bofs = (bofs + amask) & ~amask; /* Start new aligned field. */ - ct->size = (bofs >> 3); /* Store field offset. */ - } else { /* Bitfield. */ - if (bsz == 0 || (attr & CTFP_ALIGNED) || - (!((attr|sattr) & CTFP_PACKED) && (bofs & amask) + bsz > csz)) - bofs = (bofs + amask) & ~amask; /* Start new aligned field. */ - - /* Prefer regular field over bitfield. */ - if (bsz == csz && (bofs & amask) == 0) { - ct->info = CTINFO(CT_FIELD, ctype_cid(ct->info)); - ct->size = (bofs >> 3); /* Store field offset. */ - } else { - ct->info = CTINFO(CT_BITFIELD, - (info & (CTF_QUAL|CTF_UNSIGNED|CTF_BOOL)) + - (csz << (CTSHIFT_BITCSZ-3)) + (bsz << CTSHIFT_BITBSZ)); -#if LJ_BE - ct->info += ((csz - (bofs & (csz-1)) - bsz) << CTSHIFT_BITPOS); -#else - ct->info += ((bofs & (csz-1)) << CTSHIFT_BITPOS); -#endif - ct->size = ((bofs & ~(csz-1)) >> 3); /* Store container offset. */ - } - } - - /* Determine next offset or max. offset. */ - if ((sinfo & CTF_UNION)) { - if (bsz > bmaxofs) bmaxofs = bsz; - } else { - bofs += bsz; - } - } /* All other fields in the chain are already set up. */ - - fieldid = ct->sib; - } - - /* Complete struct/union. */ - sct->info = sinfo + CTALIGN(maxalign); - bofs = (sinfo & CTF_UNION) ? bmaxofs : bofs; - maxalign = (8u << maxalign) - 1; - sct->size = (((bofs + maxalign) & ~maxalign) >> 3); -} - -/* Parse struct/union declaration. */ -static CTypeID cp_decl_struct(CPState *cp, CPDecl *sdecl, CTInfo sinfo) -{ - CTypeID sid = cp_struct_name(cp, sdecl, sinfo); - if (cp_opt(cp, '{')) { /* Struct/union definition. */ - CTypeID lastid = sid; - int lastdecl = 0; - while (cp->tok != '}') { - CPDecl decl; - CPscl scl = cp_decl_spec(cp, &decl, CDF_STATIC); - decl.mode = scl ? CPARSE_MODE_DIRECT : - CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT|CPARSE_MODE_FIELD; - - for (;;) { - CTypeID ctypeid; - - if (lastdecl) cp_err_token(cp, '}'); - - /* Parse field declarator. */ - decl.bits = CTSIZE_INVALID; - cp_declarator(cp, &decl); - ctypeid = cp_decl_intern(cp, &decl); - - if ((scl & CDF_STATIC)) { /* Static constant in struct namespace. 
*/ - CType *ct; - CTypeID fieldid = cp_decl_constinit(cp, &ct, ctypeid); - ctype_get(cp->cts, lastid)->sib = fieldid; - lastid = fieldid; - ctype_setname(ct, decl.name); - } else { - CTSize bsz = CTBSZ_FIELD; /* Temp. for layout phase. */ - CType *ct; - CTypeID fieldid = lj_ctype_new(cp->cts, &ct); /* Do this first. */ - CType *tct = ctype_raw(cp->cts, ctypeid); - - if (decl.bits == CTSIZE_INVALID) { /* Regular field. */ - if (ctype_isarray(tct->info) && tct->size == CTSIZE_INVALID) - lastdecl = 1; /* a[] or a[?] must be the last declared field. */ - - /* Accept transparent struct/union/enum. */ - if (!decl.name) { - if (!((ctype_isstruct(tct->info) && !(tct->info & CTF_VLA)) || - ctype_isenum(tct->info))) - cp_err_token(cp, CTOK_IDENT); - ct->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_SUBTYPE) + ctypeid); - ct->size = ctype_isstruct(tct->info) ? - (decl.attr|0x80000000u) : 0; /* For layout phase. */ - goto add_field; - } - } else { /* Bitfield. */ - bsz = decl.bits; - if (!ctype_isinteger_or_bool(tct->info) || - (bsz == 0 && decl.name) || 8*tct->size > CTBSZ_MAX || - bsz > ((tct->info & CTF_BOOL) ? 1 : 8*tct->size)) - cp_errmsg(cp, ':', LJ_ERR_BADVAL); - } - - /* Create temporary field for layout phase. */ - ct->info = CTINFO(CT_FIELD, ctypeid + (bsz << CTSHIFT_BITCSZ)); - ct->size = decl.attr; - if (decl.name) ctype_setname(ct, decl.name); - - add_field: - ctype_get(cp->cts, lastid)->sib = fieldid; - lastid = fieldid; - } - if (!cp_opt(cp, ',')) break; - cp_decl_reset(&decl); - } - cp_check(cp, ';'); - } - cp_check(cp, '}'); - ctype_get(cp->cts, lastid)->sib = 0; /* Drop sib = 1 for empty structs. */ - cp_decl_attributes(cp, sdecl); /* Layout phase needs postfix attributes. */ - cp_struct_layout(cp, sid, sdecl->attr); - } - return sid; -} - -/* Parse enum declaration. */ -static CTypeID cp_decl_enum(CPState *cp, CPDecl *sdecl) -{ - CTypeID eid = cp_struct_name(cp, sdecl, CTINFO(CT_ENUM, CTID_VOID)); - CTInfo einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_UINT32); - CTSize esize = 4; /* Only 32 bit enums are supported. */ - if (cp_opt(cp, '{')) { /* Enum definition. */ - CPValue k; - CTypeID lastid = eid; - k.u32 = 0; - k.id = CTID_INT32; - do { - GCstr *name = cp->str; - if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); - if (cp->val.id) cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(name)); - cp_next(cp); - if (cp_opt(cp, '=')) { - cp_expr_kint(cp, &k); - if (k.id == CTID_UINT32) { - /* C99 says that enum constants are always (signed) integers. - ** But since unsigned constants like 0x80000000 are quite common, - ** those are left as uint32_t. - */ - if (k.i32 >= 0) k.id = CTID_INT32; - } else { - /* OTOH it's common practice and even mandated by some ABIs - ** that the enum type itself is unsigned, unless there are any - ** negative constants. - */ - k.id = CTID_INT32; - if (k.i32 < 0) einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_INT32); - } - } - /* Add named enum constant. */ - { - CType *ct; - CTypeID constid = lj_ctype_new(cp->cts, &ct); - ctype_get(cp->cts, lastid)->sib = constid; - lastid = constid; - ctype_setname(ct, name); - ct->info = CTINFO(CT_CONSTVAL, CTF_CONST|k.id); - ct->size = k.u32++; - if (k.u32 == 0x80000000u) k.id = CTID_UINT32; - lj_ctype_addname(cp->cts, ct, constid); - } - if (!cp_opt(cp, ',')) break; - } while (cp->tok != '}'); /* Trailing ',' is ok. */ - cp_check(cp, '}'); - /* Complete enum. */ - ctype_get(cp->cts, eid)->info = einfo; - ctype_get(cp->cts, eid)->size = esize; - } - return eid; -} - -/* Parse declaration specifiers. 
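**
** The specifier bits (CDF_*) accumulate until a declarator starts; a rough
** mapping of common combinations (sizes are the host C sizes):
**
**   _Bool            ->  CT_NUM, CTF_BOOL|CTF_UNSIGNED, size 1
**   unsigned short   ->  CT_NUM, CTF_UNSIGNED, sizeof(short)
**   long long        ->  CT_NUM, size 8
**   long double      ->  CT_NUM, CTF_FP, sizeof(long double)
**   float _Complex   ->  predefined CTID_COMPLEX_FLOAT
**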
*/ -static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl) -{ - uint32_t cds = 0, sz = 0; - CTypeID tdef = 0; - - decl->cp = cp; - decl->mode = cp->mode; - decl->name = NULL; - decl->redir = NULL; - decl->attr = 0; - decl->fattr = 0; - decl->pos = decl->top = 0; - decl->stack[0].next = 0; - - for (;;) { /* Parse basic types. */ - cp_decl_attributes(cp, decl); - if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECLFLAG) { - uint32_t cbit; - if (cp->ct->size) { - if (sz) goto end_decl; - sz = cp->ct->size; - } - cbit = (1u << (cp->tok - CTOK_FIRSTDECL)); - cds = cds | cbit | ((cbit & cds & CDF_LONG) << 1); - if (cp->tok >= CTOK_FIRSTSCL) { - if (!(scl & cbit)) cp_errmsg(cp, cp->tok, LJ_ERR_FFI_BADSCL); - } else if (tdef) { - goto end_decl; - } - cp_next(cp); - continue; - } - if (sz || tdef || - (cds & (CDF_SHORT|CDF_LONG|CDF_SIGNED|CDF_UNSIGNED|CDF_COMPLEX))) - break; - switch (cp->tok) { - case CTOK_STRUCT: - tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, 0)); - continue; - case CTOK_UNION: - tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, CTF_UNION)); - continue; - case CTOK_ENUM: - tdef = cp_decl_enum(cp, decl); - continue; - case CTOK_IDENT: - if (ctype_istypedef(cp->ct->info)) { - tdef = ctype_cid(cp->ct->info); /* Get typedef. */ - cp_next(cp); - continue; - } - break; - case '$': - tdef = cp->val.id; - cp_next(cp); - continue; - default: - break; - } - break; - } -end_decl: - - if ((cds & CDF_COMPLEX)) /* Use predefined complex types. */ - tdef = sz == 4 ? CTID_COMPLEX_FLOAT : CTID_COMPLEX_DOUBLE; - - if (tdef) { - cp_push_type(decl, tdef); - } else if ((cds & CDF_VOID)) { - cp_push(decl, CTINFO(CT_VOID, (decl->attr & CTF_QUAL)), CTSIZE_INVALID); - decl->attr &= ~CTF_QUAL; - } else { - /* Determine type info and size. */ - CTInfo info = CTINFO(CT_NUM, (cds & CDF_UNSIGNED) ? CTF_UNSIGNED : 0); - if ((cds & CDF_BOOL)) { - if ((cds & ~(CDF_SCL|CDF_BOOL|CDF_INT|CDF_SIGNED|CDF_UNSIGNED))) - cp_errmsg(cp, 0, LJ_ERR_FFI_INVTYPE); - info |= CTF_BOOL; - if (!(cds & CDF_SIGNED)) info |= CTF_UNSIGNED; - if (!sz) { - sz = 1; - } - } else if ((cds & CDF_FP)) { - info = CTINFO(CT_NUM, CTF_FP); - if ((cds & CDF_LONG)) sz = sizeof(long double); - } else if ((cds & CDF_CHAR)) { - if ((cds & (CDF_CHAR|CDF_SIGNED|CDF_UNSIGNED)) == CDF_CHAR) - info |= CTF_UCHAR; /* Handle platforms where char is unsigned. */ - } else if ((cds & CDF_SHORT)) { - sz = sizeof(short); - } else if ((cds & CDF_LONGLONG)) { - sz = 8; - } else if ((cds & CDF_LONG)) { - info |= CTF_LONG; - sz = sizeof(long); - } else if (!sz) { - if (!(cds & (CDF_SIGNED|CDF_UNSIGNED))) - cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC); - sz = sizeof(int); - } - lua_assert(sz != 0); - info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */ - info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */ - cp_push(decl, info, sz); - decl->attr &= ~CTF_QUAL; - } - decl->specpos = decl->pos; - decl->specattr = decl->attr; - decl->specfattr = decl->fattr; - return (cds & CDF_SCL); /* Return storage class. */ -} - -/* Parse array declaration. */ -static void cp_decl_array(CPState *cp, CPDecl *decl) -{ - CTInfo info = CTINFO(CT_ARRAY, 0); - CTSize nelem = CTSIZE_INVALID; /* Default size for a[] or a[?]. */ - cp_decl_attributes(cp, decl); - if (cp_opt(cp, '?')) - info |= CTF_VLA; /* Create variable-length array a[?]. */ - else if (cp->tok != ']') - nelem = cp_expr_ksize(cp); - cp_check(cp, ']'); - cp_add(decl, info, nelem); -} - -/* Parse function declaration. 
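**
** Parameters are adjusted much like in C: array and function parameter
** types decay to pointers, a single void means no arguments, and "..."
** marks a vararg function. E.g. (hypothetical declaration):
**
**   int hyp_printf_to(char buf[64], int writer(void), ...);
**
** is recorded with the parameter types char * and int (*)(void).
**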
*/ -static void cp_decl_func(CPState *cp, CPDecl *fdecl) -{ - CTSize nargs = 0; - CTInfo info = CTINFO(CT_FUNC, 0); - CTypeID lastid = 0, anchor = 0; - if (cp->tok != ')') { - do { - CPDecl decl; - CTypeID ctypeid, fieldid; - CType *ct; - if (cp_opt(cp, '.')) { /* Vararg function. */ - cp_check(cp, '.'); /* Workaround for the minimalistic lexer. */ - cp_check(cp, '.'); - info |= CTF_VARARG; - break; - } - cp_decl_spec(cp, &decl, CDF_REGISTER); - decl.mode = CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT; - cp_declarator(cp, &decl); - ctypeid = cp_decl_intern(cp, &decl); - ct = ctype_raw(cp->cts, ctypeid); - if (ctype_isvoid(ct->info)) - break; - else if (ctype_isrefarray(ct->info)) - ctypeid = lj_ctype_intern(cp->cts, - CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ct->info)), CTSIZE_PTR); - else if (ctype_isfunc(ct->info)) - ctypeid = lj_ctype_intern(cp->cts, - CTINFO(CT_PTR, CTALIGN_PTR|ctypeid), CTSIZE_PTR); - /* Add new parameter. */ - fieldid = lj_ctype_new(cp->cts, &ct); - if (anchor) - ctype_get(cp->cts, lastid)->sib = fieldid; - else - anchor = fieldid; - lastid = fieldid; - if (decl.name) ctype_setname(ct, decl.name); - ct->info = CTINFO(CT_FIELD, ctypeid); - ct->size = nargs++; - } while (cp_opt(cp, ',')); - } - cp_check(cp, ')'); - if (cp_opt(cp, '{')) { /* Skip function definition. */ - int level = 1; - cp->mode |= CPARSE_MODE_SKIP; - for (;;) { - if (cp->tok == '{') level++; - else if (cp->tok == '}' && --level == 0) break; - else if (cp->tok == CTOK_EOF) cp_err_token(cp, '}'); - cp_next(cp); - } - cp->mode &= ~CPARSE_MODE_SKIP; - cp->tok = ';'; /* Ok for cp_decl_multi(), error in cp_decl_single(). */ - } - info |= (fdecl->fattr & ~CTMASK_CID); - fdecl->fattr = 0; - fdecl->stack[cp_add(fdecl, info, nargs)].sib = anchor; -} - -/* Parse declarator. */ -static void cp_declarator(CPState *cp, CPDecl *decl) -{ - if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS); - - for (;;) { /* Head of declarator. */ - if (cp_opt(cp, '*')) { /* Pointer. */ - CTSize sz; - CTInfo info; - cp_decl_attributes(cp, decl); - sz = CTSIZE_PTR; - info = CTINFO(CT_PTR, CTALIGN_PTR); -#if LJ_64 - if (ctype_msizeP(decl->attr) == 4) { - sz = 4; - info = CTINFO(CT_PTR, CTALIGN(2)); - } -#endif - info += (decl->attr & (CTF_QUAL|CTF_REF)); - decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP)); - cp_push(decl, info, sz); - } else if (cp_opt(cp, '&') || cp_opt(cp, CTOK_ANDAND)) { /* Reference. */ - decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP)); - cp_push(decl, CTINFO_REF(0), CTSIZE_PTR); - } else { - break; - } - } - - if (cp_opt(cp, '(')) { /* Inner declarator. */ - CPDeclIdx pos; - cp_decl_attributes(cp, decl); - /* Resolve ambiguity between inner declarator and 1st function parameter. */ - if ((decl->mode & CPARSE_MODE_ABSTRACT) && - (cp->tok == ')' || cp_istypedecl(cp))) goto func_decl; - pos = decl->pos; - cp_declarator(cp, decl); - cp_check(cp, ')'); - decl->pos = pos; - } else if (cp->tok == CTOK_IDENT) { /* Direct declarator. */ - if (!(decl->mode & CPARSE_MODE_DIRECT)) cp_err_token(cp, CTOK_EOF); - decl->name = cp->str; - decl->nameid = cp->val.id; - cp_next(cp); - } else { /* Abstract declarator. */ - if (!(decl->mode & CPARSE_MODE_ABSTRACT)) cp_err_token(cp, CTOK_IDENT); - } - - for (;;) { /* Tail of declarator. */ - if (cp_opt(cp, '[')) { /* Array. */ - cp_decl_array(cp, decl); - } else if (cp_opt(cp, '(')) { /* Function. */ - func_decl: - cp_decl_func(cp, decl); - } else { - break; - } - } - - if ((decl->mode & CPARSE_MODE_FIELD) && cp_opt(cp, ':')) /* Field width. */ - decl->bits = cp_expr_ksize(cp); - - /* Process postfix attributes. */ - cp_decl_attributes(cp, decl); - cp_push_attributes(decl); - - cp->depth--; -} - -/* Parse an abstract type declaration and return it's C type ID. 
*/ -static CTypeID cp_decl_abstract(CPState *cp) -{ - CPDecl decl; - cp_decl_spec(cp, &decl, 0); - decl.mode = CPARSE_MODE_ABSTRACT; - cp_declarator(cp, &decl); - return cp_decl_intern(cp, &decl); -} - -/* Handle pragmas. */ -static void cp_pragma(CPState *cp, BCLine pragmaline) -{ - cp_next(cp); - if (cp->tok == CTOK_IDENT && - cp->str->hash == H_(e79b999f,42ca3e85)) { /* pack */ - cp_next(cp); - cp_check(cp, '('); - if (cp->tok == CTOK_IDENT) { - if (cp->str->hash == H_(738e923c,a1b65954)) { /* push */ - if (cp->curpack < CPARSE_MAX_PACKSTACK) { - cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack]; - cp->curpack++; - } - } else if (cp->str->hash == H_(6c71cf27,6c71cf27)) { /* pop */ - if (cp->curpack > 0) cp->curpack--; - } else { - cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); - } - cp_next(cp); - if (!cp_opt(cp, ',')) goto end_pack; - } - if (cp->tok == CTOK_INTEGER) { - cp->packstack[cp->curpack] = cp->val.u32 ? lj_fls(cp->val.u32) : 0; - cp_next(cp); - } else { - cp->packstack[cp->curpack] = 255; - } - end_pack: - cp_check(cp, ')'); - } else { /* Ignore all other pragmas. */ - while (cp->tok != CTOK_EOF && cp->linenumber == pragmaline) - cp_next(cp); - } -} - -/* Parse multiple C declarations of types or extern identifiers. */ -static void cp_decl_multi(CPState *cp) -{ - int first = 1; - while (cp->tok != CTOK_EOF) { - CPDecl decl; - CPscl scl; - if (cp_opt(cp, ';')) { /* Skip empty statements. */ - first = 0; - continue; - } - if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */ - BCLine pragmaline = cp->linenumber; - if (!(cp_next(cp) == CTOK_IDENT && - cp->str->hash == H_(f5e6b4f8,1d509107))) /* pragma */ - cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); - cp_pragma(cp, pragmaline); - continue; - } - scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC); - if ((cp->tok == ';' || cp->tok == CTOK_EOF) && - ctype_istypedef(decl.stack[0].info)) { - CTInfo info = ctype_rawchild(cp->cts, &decl.stack[0])->info; - if (ctype_isstruct(info) || ctype_isenum(info)) - goto decl_end; /* Accept empty declaration of struct/union/enum. */ - } - for (;;) { - CTypeID ctypeid; - cp_declarator(cp, &decl); - ctypeid = cp_decl_intern(cp, &decl); - if (decl.name && !decl.nameid) { /* NYI: redeclarations are ignored. */ - CType *ct; - CTypeID id; - if ((scl & CDF_TYPEDEF)) { /* Create new typedef. */ - id = lj_ctype_new(cp->cts, &ct); - ct->info = CTINFO(CT_TYPEDEF, ctypeid); - goto noredir; - } else if (ctype_isfunc(ctype_get(cp->cts, ctypeid)->info)) { - /* Treat both static and extern function declarations as extern. */ - ct = ctype_get(cp->cts, ctypeid); - /* We always get new anonymous functions (typedefs are copied). */ - lua_assert(gcref(ct->name) == NULL); - id = ctypeid; /* Just name it. */ - } else if ((scl & CDF_STATIC)) { /* Accept static constants. */ - id = cp_decl_constinit(cp, &ct, ctypeid); - goto noredir; - } else { /* External references have extern or no storage class. */ - id = lj_ctype_new(cp->cts, &ct); - ct->info = CTINFO(CT_EXTERN, ctypeid); - } - if (decl.redir) { /* Add attribute for redirected symbol name. */ - CType *cta; - CTypeID aid = lj_ctype_new(cp->cts, &cta); - ct = ctype_get(cp->cts, id); /* Table may have been reallocated. 
*/ - cta->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_REDIR)); - cta->sib = ct->sib; - ct->sib = aid; - ctype_setname(cta, decl.redir); - } - noredir: - ctype_setname(ct, decl.name); - lj_ctype_addname(cp->cts, ct, id); - } - if (!cp_opt(cp, ',')) break; - cp_decl_reset(&decl); - } - decl_end: - if (cp->tok == CTOK_EOF && first) break; /* May omit ';' for 1 decl. */ - first = 0; - cp_check(cp, ';'); - } -} - -/* Parse a single C type declaration. */ -static void cp_decl_single(CPState *cp) -{ - CPDecl decl; - cp_decl_spec(cp, &decl, 0); - cp_declarator(cp, &decl); - cp->val.id = cp_decl_intern(cp, &decl); - if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF); -} - -#undef H_ - -/* ------------------------------------------------------------------------ */ - -/* Protected callback for C parser. */ -static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud) -{ - CPState *cp = (CPState *)ud; - UNUSED(dummy); - cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ - cp_init(cp); - if ((cp->mode & CPARSE_MODE_MULTI)) - cp_decl_multi(cp); - else - cp_decl_single(cp); - if (cp->param && cp->param != cp->L->top) - cp_err(cp, LJ_ERR_FFI_NUMPARAM); - lua_assert(cp->depth == 0); - return NULL; -} - -/* C parser. */ -int lj_cparse(CPState *cp) -{ - LJ_CTYPE_SAVE(cp->cts); - int errcode = lj_vm_cpcall(cp->L, NULL, cp, cpcparser); - if (errcode) - LJ_CTYPE_RESTORE(cp->cts); - cp_cleanup(cp); - return errcode; -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.h deleted file mode 100644 index 441580de6fe..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_cparse.h +++ /dev/null @@ -1,65 +0,0 @@ -/* -** C declaration parser. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CPARSE_H -#define _LJ_CPARSE_H - -#include "lj_obj.h" -#include "lj_ctype.h" - -#if LJ_HASFFI - -/* C parser limits. */ -#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */ -#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */ -#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */ -#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */ - -/* Flags for C parser mode. */ -#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */ -#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */ -#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */ -#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */ -#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */ -#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */ - -typedef int CPChar; /* C parser character. Unsigned ext. from char. */ -typedef int CPToken; /* C parser token. */ - -/* C parser internal value representation. */ -typedef struct CPValue { - union { - int32_t i32; /* Value for CTID_INT32. */ - uint32_t u32; /* Value for CTID_UINT32. */ - }; - CTypeID id; /* C Type ID of the value. */ -} CPValue; - -/* C parser state. */ -typedef struct CPState { - CPChar c; /* Current character. */ - CPToken tok; /* Current token. */ - CPValue val; /* Token value. */ - GCstr *str; /* Interned string of identifier/keyword. */ - CType *ct; /* C type table entry. */ - const char *p; /* Current position in input buffer. */ - SBuf sb; /* String buffer for tokens. */ - lua_State *L; /* Lua state. */ - CTState *cts; /* C type state. */ - TValue *param; /* C type parameters. */ - const char *srcname; /* Current source name. 
*/ - BCLine linenumber; /* Input line counter. */ - int depth; /* Recursive declaration depth. */ - uint32_t tmask; /* Type mask for next identifier. */ - uint32_t mode; /* C parser mode. */ - uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */ - uint8_t curpack; /* Current position in pack pragma stack. */ -} CPState; - -LJ_FUNC int lj_cparse(CPState *cp); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.c deleted file mode 100644 index e200cc99423..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.c +++ /dev/null @@ -1,1834 +0,0 @@ -/* -** Trace recorder for C data operations. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_ffrecord_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT && LJ_HASFFI - -#include "lj_err.h" -#include "lj_tab.h" -#include "lj_frame.h" -#include "lj_ctype.h" -#include "lj_cdata.h" -#include "lj_cparse.h" -#include "lj_cconv.h" -#include "lj_carith.h" -#include "lj_clib.h" -#include "lj_ccall.h" -#include "lj_ff.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_record.h" -#include "lj_ffrecord.h" -#include "lj_snap.h" -#include "lj_crecord.h" -#include "lj_dispatch.h" -#include "lj_strfmt.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -#define emitconv(a, dt, st, flags) \ - emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags)) - -/* -- C type checks ------------------------------------------------------- */ - -static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o) -{ - GCcdata *cd; - TRef trtypeid; - if (!tref_iscdata(tr)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - cd = cdataV(o); - /* Specialize to the CTypeID. */ - trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_CTYPEID); - emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->ctypeid)); - return cd; -} - -/* Specialize to the CTypeID held by a cdata constructor. */ -static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr) -{ - CTypeID id; - lua_assert(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID); - id = *(CTypeID *)cdataptr(cd); - tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT); - emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id)); - return id; -} - -static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o) -{ - if (tref_isstr(tr)) { - GCstr *s = strV(o); - CPState cp; - CTypeID oldtop; - /* Specialize to the string containing the C type declaration. */ - emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s)); - cp.L = J->L; - cp.cts = ctype_ctsG(J2G(J)); - oldtop = cp.cts->top; - cp.srcname = strdata(s); - cp.p = strdata(s); - cp.param = NULL; - cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT; - if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */ - lj_trace_err(J, LJ_TRERR_BADTYPE); - return cp.val.id; - } else { - GCcdata *cd = argv2cdata(J, tr, o); - return cd->ctypeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) : - cd->ctypeid; - } -} - -/* Convert CType to IRType (if possible). 
*/ -static IRType crec_ct2irt(CTState *cts, CType *ct) -{ - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - if (LJ_LIKELY(ctype_isnum(ct->info))) { - if ((ct->info & CTF_FP)) { - if (ct->size == sizeof(double)) - return IRT_NUM; - else if (ct->size == sizeof(float)) - return IRT_FLOAT; - } else { - uint32_t b = lj_fls(ct->size); - if (b <= 3) - return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0); - } - } else if (ctype_isptr(ct->info)) { - return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; - } else if (ctype_iscomplex(ct->info)) { - if (ct->size == 2*sizeof(double)) - return IRT_NUM; - else if (ct->size == 2*sizeof(float)) - return IRT_FLOAT; - } - return IRT_CDATA; -} - -/* -- Optimized memory fill and copy -------------------------------------- */ - -/* Maximum length and unroll of inlined copy/fill. */ -#define CREC_COPY_MAXUNROLL 16 -#define CREC_COPY_MAXLEN 128 - -#define CREC_FILL_MAXUNROLL 16 - -/* Number of windowed registers used for optimized memory copy. */ -#if LJ_TARGET_X86 -#define CREC_COPY_REGWIN 2 -#elif LJ_TARGET_PPC || LJ_TARGET_MIPS -#define CREC_COPY_REGWIN 8 -#else -#define CREC_COPY_REGWIN 4 -#endif - -/* List of memory offsets for copy/fill. */ -typedef struct CRecMemList { - CTSize ofs; /* Offset in bytes. */ - IRType tp; /* Type of load/store. */ - TRef trofs; /* TRef of interned offset. */ - TRef trval; /* TRef of load value. */ -} CRecMemList; - -/* Generate copy list for element-wise struct copy. */ -static MSize crec_copy_struct(CRecMemList *ml, CTState *cts, CType *ct) -{ - CTypeID fid = ct->sib; - MSize mlp = 0; - while (fid) { - CType *df = ctype_get(cts, fid); - fid = df->sib; - if (ctype_isfield(df->info)) { - CType *cct; - IRType tp; - if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ - cct = ctype_rawchild(cts, df); /* Field type. */ - tp = crec_ct2irt(cts, cct); - if (tp == IRT_CDATA) return 0; /* NYI: aggregates. */ - if (mlp >= CREC_COPY_MAXUNROLL) return 0; - ml[mlp].ofs = df->size; - ml[mlp].tp = tp; - mlp++; - if (ctype_iscomplex(cct->info)) { - if (mlp >= CREC_COPY_MAXUNROLL) return 0; - ml[mlp].ofs = df->size + (cct->size >> 1); - ml[mlp].tp = tp; - mlp++; - } - } else if (!ctype_isconstval(df->info)) { - /* NYI: bitfields and sub-structures. */ - return 0; - } - } - return mlp; -} - -/* Generate unrolled copy list, from highest to lowest step size/alignment. */ -static MSize crec_copy_unroll(CRecMemList *ml, CTSize len, CTSize step, - IRType tp) -{ - CTSize ofs = 0; - MSize mlp = 0; - if (tp == IRT_CDATA) tp = IRT_U8 + 2*lj_fls(step); - do { - while (ofs + step <= len) { - if (mlp >= CREC_COPY_MAXUNROLL) return 0; - ml[mlp].ofs = ofs; - ml[mlp].tp = tp; - mlp++; - ofs += step; - } - step >>= 1; - tp -= 2; - } while (ofs < len); - return mlp; -} - -/* -** Emit copy list with windowed loads/stores. -** LJ_TARGET_UNALIGNED: may emit unaligned loads/stores (not marked as such). -*/ -static void crec_copy_emit(jit_State *J, CRecMemList *ml, MSize mlp, - TRef trdst, TRef trsrc) -{ - MSize i, j, rwin = 0; - for (i = 0, j = 0; i < mlp; ) { - TRef trofs = lj_ir_kintp(J, ml[i].ofs); - TRef trsptr = emitir(IRT(IR_ADD, IRT_PTR), trsrc, trofs); - ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0); - ml[i].trofs = trofs; - i++; - rwin += (LJ_SOFTFP && ml[i].tp == IRT_NUM) ? 2 : 1; - if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. 
*/ - rwin = 0; - for ( ; j < i; j++) { - TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, ml[j].trofs); - emitir(IRT(IR_XSTORE, ml[j].tp), trdptr, ml[j].trval); - } - } - } -} - -/* Optimized memory copy. */ -static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen, - CType *ct) -{ - if (tref_isk(trlen)) { /* Length must be constant. */ - CRecMemList ml[CREC_COPY_MAXUNROLL]; - MSize mlp = 0; - CTSize step = 1, len = (CTSize)IR(tref_ref(trlen))->i; - IRType tp = IRT_CDATA; - int needxbar = 0; - if (len == 0) return; /* Shortcut. */ - if (len > CREC_COPY_MAXLEN) goto fallback; - if (ct) { - CTState *cts = ctype_ctsG(J2G(J)); - lua_assert(ctype_isarray(ct->info) || ctype_isstruct(ct->info)); - if (ctype_isarray(ct->info)) { - CType *cct = ctype_rawchild(cts, ct); - tp = crec_ct2irt(cts, cct); - if (tp == IRT_CDATA) goto rawcopy; - step = lj_ir_type_size[tp]; - lua_assert((len & (step-1)) == 0); - } else if ((ct->info & CTF_UNION)) { - step = (1u << ctype_align(ct->info)); - goto rawcopy; - } else { - mlp = crec_copy_struct(ml, cts, ct); - goto emitcopy; - } - } else { - rawcopy: - needxbar = 1; - if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR) - step = CTSIZE_PTR; - } - mlp = crec_copy_unroll(ml, len, step, tp); - emitcopy: - if (mlp) { - crec_copy_emit(J, ml, mlp, trdst, trsrc); - if (needxbar) - emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); - return; - } - } -fallback: - /* Call memcpy. Always needs a barrier to disable alias analysis. */ - lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen); - emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); -} - -/* Generate unrolled fill list, from highest to lowest step size/alignment. */ -static MSize crec_fill_unroll(CRecMemList *ml, CTSize len, CTSize step) -{ - CTSize ofs = 0; - MSize mlp = 0; - IRType tp = IRT_U8 + 2*lj_fls(step); - do { - while (ofs + step <= len) { - if (mlp >= CREC_COPY_MAXUNROLL) return 0; - ml[mlp].ofs = ofs; - ml[mlp].tp = tp; - mlp++; - ofs += step; - } - step >>= 1; - tp -= 2; - } while (ofs < len); - return mlp; -} - -/* -** Emit stores for fill list. -** LJ_TARGET_UNALIGNED: may emit unaligned stores (not marked as such). -*/ -static void crec_fill_emit(jit_State *J, CRecMemList *ml, MSize mlp, - TRef trdst, TRef trfill) -{ - MSize i; - for (i = 0; i < mlp; i++) { - TRef trofs = lj_ir_kintp(J, ml[i].ofs); - TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, trofs); - emitir(IRT(IR_XSTORE, ml[i].tp), trdptr, trfill); - } -} - -/* Optimized memory fill. */ -static void crec_fill(jit_State *J, TRef trdst, TRef trlen, TRef trfill, - CTSize step) -{ - if (tref_isk(trlen)) { /* Length must be constant. */ - CRecMemList ml[CREC_FILL_MAXUNROLL]; - MSize mlp; - CTSize len = (CTSize)IR(tref_ref(trlen))->i; - if (len == 0) return; /* Shortcut. */ - if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR) - step = CTSIZE_PTR; - if (step * CREC_FILL_MAXUNROLL < len) goto fallback; - mlp = crec_fill_unroll(ml, len, step); - if (!mlp) goto fallback; - if (tref_isk(trfill) || ml[0].tp != IRT_U8) - trfill = emitconv(trfill, IRT_INT, IRT_U8, 0); - if (ml[0].tp != IRT_U8) { /* Scatter U8 to U16/U32/U64. */ - if (CTSIZE_PTR == 8 && ml[0].tp == IRT_U64) { - if (tref_isk(trfill)) /* Pointless on x64 with zero-extended regs. */ - trfill = emitconv(trfill, IRT_U64, IRT_U32, 0); - trfill = emitir(IRT(IR_MUL, IRT_U64), trfill, - lj_ir_kint64(J, U64x(01010101,01010101))); - } else { - trfill = emitir(IRTI(IR_MUL), trfill, - lj_ir_kint(J, ml[0].tp == IRT_U16 ? 
0x0101 : 0x01010101)); - } - } - crec_fill_emit(J, ml, mlp, trdst, trfill); - } else { -fallback: - /* Call memset. Always needs a barrier to disable alias analysis. */ - lj_ir_call(J, IRCALL_memset, trdst, trfill, trlen); /* Note: arg order! */ - } - emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); -} - -/* -- Convert C type to C type -------------------------------------------- */ - -/* -** This code mirrors the code in lj_cconv.c. It performs the same steps -** for the trace recorder that lj_cconv.c does for the interpreter. -** -** One major difference is that we can get away with much fewer checks -** here. E.g. checks for casts, constness or correct types can often be -** omitted, even if they might fail. The interpreter subsequently throws -** an error, which aborts the trace. -** -** All operations are specialized to their C types, so the on-trace -** outcome must be the same as the outcome in the interpreter. If the -** interpreter doesn't throw an error, then the trace is correct, too. -** Care must be taken not to generate invalid (temporary) IR or to -** trigger asserts. -*/ - -/* Determine whether a passed number or cdata number is non-zero. */ -static int crec_isnonzero(CType *s, void *p) -{ - if (p == (void *)0) - return 0; - if (p == (void *)1) - return 1; - if ((s->info & CTF_FP)) { - if (s->size == sizeof(float)) - return (*(float *)p != 0); - else - return (*(double *)p != 0); - } else { - if (s->size == 1) - return (*(uint8_t *)p != 0); - else if (s->size == 2) - return (*(uint16_t *)p != 0); - else if (s->size == 4) - return (*(uint32_t *)p != 0); - else - return (*(uint64_t *)p != 0); - } -} - -static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp, - void *svisnz) -{ - IRType dt = crec_ct2irt(ctype_ctsG(J2G(J)), d); - IRType st = crec_ct2irt(ctype_ctsG(J2G(J)), s); - CTSize dsize = d->size, ssize = s->size; - CTInfo dinfo = d->info, sinfo = s->info; - - if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) - goto err_conv; - - /* - ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and - ** numbers up to 8 bytes. Otherwise sp holds a pointer. - */ - - switch (cconv_idx2(dinfo, sinfo)) { - /* Destination is a bool. */ - case CCX(B, B): - goto xstore; /* Source operand is already normalized. */ - case CCX(B, I): - case CCX(B, F): - if (st != IRT_CDATA) { - /* Specialize to the result of a comparison against 0. */ - TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) : - (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) : - lj_ir_kint(J, 0); - int isnz = crec_isnonzero(s, svisnz); - emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero); - sp = lj_ir_kint(J, isnz); - goto xstore; - } - goto err_nyi; - - /* Destination is an integer. */ - case CCX(I, B): - case CCX(I, I): - conv_I_I: - if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; - /* Extend 32 to 64 bit integer. */ - if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED))) - sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, - (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT); - else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */ - sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0); - else if (st == IRT_INT) - sp = lj_opt_narrow_toint(J, sp); - xstore: - if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J); - if (dp == 0) return sp; - emitir(IRT(IR_XSTORE, dt), dp, sp); - break; - case CCX(I, C): - sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. 
*/ - /* fallthrough */ - case CCX(I, F): - if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; - sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_ANY); - goto xstore; - case CCX(I, P): - case CCX(I, A): - sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); - ssize = CTSIZE_PTR; - st = IRT_UINTP; - if (((dsize ^ ssize) & 8) == 0) { /* Must insert no-op type conversion. */ - sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, IRT_PTR, 0); - goto xstore; - } - goto conv_I_I; - - /* Destination is a floating-point number. */ - case CCX(F, B): - case CCX(F, I): - conv_F_I: - if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; - sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, 0); - goto xstore; - case CCX(F, C): - sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */ - /* fallthrough */ - case CCX(F, F): - conv_F_F: - if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; - if (dt != st) sp = emitconv(sp, dt, st, 0); - goto xstore; - - /* Destination is a complex number. */ - case CCX(C, I): - case CCX(C, F): - { /* Clear im. */ - TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1))); - emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0)); - } - /* Convert to re. */ - if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I; - - case CCX(C, C): - if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; - { - TRef re, im, ptr; - re = emitir(IRT(IR_XLOAD, st), sp, 0); - ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1))); - im = emitir(IRT(IR_XLOAD, st), ptr, 0); - if (dt != st) { - re = emitconv(re, dt, st, 0); - im = emitconv(im, dt, st, 0); - } - emitir(IRT(IR_XSTORE, dt), dp, re); - ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1))); - emitir(IRT(IR_XSTORE, dt), ptr, im); - } - break; - - /* Destination is a vector. */ - case CCX(V, I): - case CCX(V, F): - case CCX(V, C): - case CCX(V, V): - goto err_nyi; - - /* Destination is a pointer. */ - case CCX(P, P): - case CCX(P, A): - case CCX(P, S): - /* There are only 32 bit pointers/addresses on 32 bit machines. - ** Also ok on x64, since all 32 bit ops clear the upper part of the reg. - */ - goto xstore; - case CCX(P, I): - if (st == IRT_CDATA) goto err_nyi; - if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */ - sp = emitconv(sp, IRT_U32, st, 0); - goto xstore; - case CCX(P, F): - if (st == IRT_CDATA) goto err_nyi; - /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ - sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32, - st, IRCONV_ANY); - goto xstore; - - /* Destination is an array. */ - case CCX(A, A): - /* Destination is a struct/union. */ - case CCX(S, S): - if (dp == 0) goto err_conv; - crec_copy(J, dp, sp, lj_ir_kint(J, dsize), d); - break; - - default: - err_conv: - err_nyi: - lj_trace_err(J, LJ_TRERR_NYICONV); - break; - } - return 0; -} - -/* -- Convert C type to TValue (load) ------------------------------------- */ - -static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp) -{ - CTState *cts = ctype_ctsG(J2G(J)); - IRType t = crec_ct2irt(cts, s); - CTInfo sinfo = s->info; - if (ctype_isnum(sinfo)) { - TRef tr; - if (t == IRT_CDATA) - goto err_nyi; /* NYI: copyval of >64 bit integers. */ - tr = emitir(IRT(IR_XLOAD, t), sp, 0); - if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */ - return emitconv(tr, IRT_NUM, t, 0); - } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */ - sp = tr; - lj_needsplit(J); - } else if ((sinfo & CTF_BOOL)) { - /* Assume not equal to zero. Fixup and emit pending guard later. 
*/ - lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0)); - J->postproc = LJ_POST_FIXGUARD; - return TREF_TRUE; - } else { - return tr; - } - } else if (ctype_isptr(sinfo) || ctype_isenum(sinfo)) { - sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Box pointers and enums. */ - } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) { - cts->L = J->L; - sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */ - } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */ - ptrdiff_t esz = (ptrdiff_t)(s->size >> 1); - TRef ptr, tr1, tr2, dp; - dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL); - tr1 = emitir(IRT(IR_XLOAD, t), sp, 0); - ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz)); - tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0); - ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata))); - emitir(IRT(IR_XSTORE, t), ptr, tr1); - ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz)); - emitir(IRT(IR_XSTORE, t), ptr, tr2); - return dp; - } else { - /* NYI: copyval of vectors. */ - err_nyi: - lj_trace_err(J, LJ_TRERR_NYICONV); - } - /* Box pointer, ref, enum or 64 bit integer. */ - return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp); -} - -/* -- Convert TValue to C type (store) ------------------------------------ */ - -static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CTypeID sid = CTID_P_VOID; - void *svisnz = 0; - CType *s; - if (LJ_LIKELY(tref_isinteger(sp))) { - sid = CTID_INT32; - svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval)); - } else if (tref_isnum(sp)) { - sid = CTID_DOUBLE; - svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval)); - } else if (tref_isbool(sp)) { - sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0); - sid = CTID_BOOL; - } else if (tref_isnil(sp)) { - sp = lj_ir_kptr(J, NULL); - } else if (tref_isudata(sp)) { - GCudata *ud = udataV(sval); - if (ud->udtype == UDTYPE_IO_FILE) { - TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), sp, IRFL_UDATA_UDTYPE); - emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE)); - sp = emitir(IRT(IR_FLOAD, IRT_PTR), sp, IRFL_UDATA_FILE); - } else { - sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCudata))); - } - } else if (tref_isstr(sp)) { - if (ctype_isenum(d->info)) { /* Match string against enum constant. */ - GCstr *str = strV(sval); - CTSize ofs; - CType *cct = lj_ctype_getfield(cts, d, str, &ofs); - /* Specialize to the name of the enum constant. */ - emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str)); - if (cct && ctype_isconstval(cct->info)) { - lua_assert(ctype_child(cts, cct)->size == 4); - svisnz = (void *)(intptr_t)(ofs != 0); - sp = lj_ir_kint(J, (int32_t)ofs); - sid = ctype_cid(cct->info); - } /* else: interpreter will throw. */ - } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ - lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */ - } else { /* Otherwise pass the string data as a const char[]. */ - /* Don't use STRREF. It folds with SNEW, which loses the trailing NUL. */ - sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr))); - sid = CTID_A_CCHAR; - } - } else if (tref_islightud(sp)) { -#if LJ_64 - sp = emitir(IRT(IR_BAND, IRT_P64), sp, - lj_ir_kint64(J, U64x(00007fff,ffffffff))); -#endif - } else { /* NYI: tref_istab(sp). 
*/ - IRType t; - sid = argv2cdata(J, sp, sval)->ctypeid; - s = ctype_raw(cts, sid); - svisnz = cdataptr(cdataV(sval)); - if (ctype_isfunc(s->info)) { - sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR); - s = ctype_get(cts, sid); - t = IRT_PTR; - } else { - t = crec_ct2irt(cts, s); - } - if (ctype_isptr(s->info)) { - sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR); - if (ctype_isref(s->info)) { - svisnz = *(void **)svisnz; - s = ctype_rawchild(cts, s); - if (ctype_isenum(s->info)) s = ctype_child(cts, s); - t = crec_ct2irt(cts, s); - } else { - goto doconv; - } - } else if (t == IRT_I64 || t == IRT_U64) { - sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64); - lj_needsplit(J); - goto doconv; - } else if (t == IRT_INT || t == IRT_U32) { - if (ctype_isenum(s->info)) s = ctype_child(cts, s); - sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT); - goto doconv; - } else { - sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata))); - } - if (ctype_isnum(s->info) && t != IRT_CDATA) - sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Load number value. */ - goto doconv; - } - s = ctype_get(cts, sid); -doconv: - if (ctype_isenum(d->info)) d = ctype_child(cts, d); - return crec_ct_ct(J, d, s, dp, sp, svisnz); -} - -/* -- C data metamethods -------------------------------------------------- */ - -/* This would be rather difficult in FOLD, so do it here: -** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k) -** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz) -*/ -static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz) -{ - IRIns *ir = IR(tref_ref(tr)); - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) && - (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) { - IRIns *irk = IR(ir->op2); - ptrdiff_t k; - if (LJ_64 && irk->o == IR_KINT64) - k = (ptrdiff_t)ir_kint64(irk)->u64 * sz; - else - k = (ptrdiff_t)irk->i * sz; - if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k; - tr = ir->op1; /* Not a TRef, but the caller doesn't care. */ - } - return tr; -} - -/* Record ctype __index/__newindex metamethods. */ -static void crec_index_meta(jit_State *J, CTState *cts, CType *ct, - RecordFFData *rd) -{ - CTypeID id = ctype_typeid(cts, ct); - cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index); - if (!tv) - lj_trace_err(J, LJ_TRERR_BADTYPE); - if (tvisfunc(tv)) { - J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; - rd->nres = -1; /* Pending tailcall. */ - } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) { - /* Specialize to result of __index lookup. */ - cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]); - J->base[0] = lj_record_constify(J, o); - if (!J->base[0]) - lj_trace_err(J, LJ_TRERR_BADTYPE); - /* Always specialize to the key. */ - emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1]))); - } else { - /* NYI: resolving of non-function metamethods. */ - /* NYI: non-string keys for __index table. */ - /* NYI: stores to __newindex table. */ - lj_trace_err(J, LJ_TRERR_BADTYPE); - } -} - -void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd) -{ - TRef idx, ptr = J->base[0]; - ptrdiff_t ofs = sizeof(GCcdata); - GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]); - CTState *cts = ctype_ctsG(J2G(J)); - CType *ct = ctype_raw(cts, cd->ctypeid); - CTypeID sid = 0; - - /* Resolve pointer or reference for cdata object. */ - if (ctype_isptr(ct->info)) { - IRType t = (LJ_64 && ct->size == 8) ? 
IRT_P64 : IRT_P32; - if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); - ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR); - ofs = 0; - ptr = crec_reassoc_ofs(J, ptr, &ofs, 1); - } - -again: - idx = J->base[1]; - if (tref_isnumber(idx)) { - idx = lj_opt_narrow_cindex(J, idx); - if (ctype_ispointer(ct->info)) { - CTSize sz; - integer_key: - if ((ct->info & CTF_COMPLEX)) - idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1)); - sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info))); - idx = crec_reassoc_ofs(J, idx, &ofs, sz); -#if LJ_TARGET_ARM || LJ_TARGET_PPC - /* Hoist base add to allow fusion of index/shift into operands. */ - if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs -#if LJ_TARGET_ARM - && (sz == 1 || sz == 4) -#endif - ) { - ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs)); - ofs = 0; - } -#endif - idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz)); - ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr); - } - } else if (tref_iscdata(idx)) { - GCcdata *cdk = cdataV(&rd->argv[1]); - CType *ctk = ctype_raw(cts, cdk->ctypeid); - IRType t = crec_ct2irt(cts, ctk); - if (ctype_ispointer(ct->info) && t >= IRT_I8 && t <= IRT_U64) { - if (ctk->size == 8) { - idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64); - } else if (ctk->size == 4) { - idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT); - } else { - idx = emitir(IRT(IR_ADD, IRT_PTR), idx, - lj_ir_kintp(J, sizeof(GCcdata))); - idx = emitir(IRT(IR_XLOAD, t), idx, 0); - } - if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED)) - idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT); - if (!LJ_64 && ctk->size > sizeof(intptr_t)) { - idx = emitconv(idx, IRT_INTP, t, 0); - lj_needsplit(J); - } - goto integer_key; - } - } else if (tref_isstr(idx)) { - GCstr *name = strV(&rd->argv[1]); - if (cd && cd->ctypeid == CTID_CTYPEID) - ct = ctype_raw(cts, crec_constructor(J, cd, ptr)); - if (ctype_isstruct(ct->info)) { - CTSize fofs; - CType *fct; - fct = lj_ctype_getfield(cts, ct, name, &fofs); - if (fct) { - /* Always specialize to the field name. */ - emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); - if (ctype_isconstval(fct->info)) { - if (fct->size >= 0x80000000u && - (ctype_child(cts, fct)->info & CTF_UNSIGNED)) { - J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size); - return; - } - J->base[0] = lj_ir_kint(J, (int32_t)fct->size); - return; /* Interpreter will throw for newindex. */ - } else if (ctype_isbitfield(fct->info)) { - lj_trace_err(J, LJ_TRERR_NYICONV); - } else { - lua_assert(ctype_isfield(fct->info)); - sid = ctype_cid(fct->info); - } - ofs += (ptrdiff_t)fofs; - } - } else if (ctype_iscomplex(ct->info)) { - if (name->len == 2 && - ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') || - (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) { - /* Always specialize to the field name. */ - emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); - if (strdata(name)[0] == 'i') ofs += (ct->size >> 1); - sid = ctype_cid(ct->info); - } - } - } - if (!sid) { - if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */ - CType *cct = ctype_rawchild(cts, ct); - if (ctype_isstruct(cct->info)) { - ct = cct; - cd = NULL; - if (tref_isstr(idx)) goto again; - } - } - crec_index_meta(J, cts, ct, rd); - return; - } - - if (ofs) - ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs)); - - /* Resolve reference for field. 
*/ - ct = ctype_get(cts, sid); - if (ctype_isref(ct->info)) { - ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0); - sid = ctype_cid(ct->info); - ct = ctype_get(cts, sid); - } - - while (ctype_isattrib(ct->info)) - ct = ctype_child(cts, ct); /* Skip attributes. */ - - if (rd->data == 0) { /* __index metamethod. */ - J->base[0] = crec_tv_ct(J, ct, sid, ptr); - } else { /* __newindex metamethod. */ - rd->nres = 0; - J->needsnap = 1; - crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]); - } -} - -/* Record setting a finalizer. */ -static void crec_finalizer(jit_State *J, TRef trcd, TRef trfin, cTValue *fin) -{ - if (tvisgcv(fin)) { - if (!trfin) trfin = lj_ir_kptr(J, gcval(fin)); - } else if (tvisnil(fin)) { - trfin = lj_ir_kptr(J, NULL); - } else { - lj_trace_err(J, LJ_TRERR_BADTYPE); - } - lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd, - trfin, lj_ir_kint(J, (int32_t)itype(fin))); - J->needsnap = 1; -} - -/* Record cdata allocation. */ -static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CTSize sz; - CTInfo info = lj_ctype_info(cts, id, &sz); - CType *d = ctype_raw(cts, id); - TRef trcd, trid = lj_ir_kint(J, id); - cTValue *fin; - /* Use special instruction to box pointer or 32/64 bit integer. */ - if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) { - TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) : - ctype_isptr(info) ? lj_ir_kptr(J, NULL) : - sz == 4 ? lj_ir_kint(J, 0) : - (lj_needsplit(J), lj_ir_kint64(J, 0)); - J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp); - return; - } else { - TRef trsz = TREF_NIL; - if ((info & CTF_VLA)) { /* Calculate VLA/VLS size at runtime. */ - CTSize sz0, sz1; - if (!J->base[1] || J->base[2]) - lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init VLA/VLS. */ - trsz = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, - J->base[1], &rd->argv[1]); - sz0 = lj_ctype_vlsize(cts, d, 0); - sz1 = lj_ctype_vlsize(cts, d, 1); - trsz = emitir(IRTGI(IR_MULOV), trsz, lj_ir_kint(J, (int32_t)(sz1-sz0))); - trsz = emitir(IRTGI(IR_ADDOV), trsz, lj_ir_kint(J, (int32_t)sz0)); - J->base[1] = 0; /* Simplify logic below. */ - } else if (ctype_align(info) > CT_MEMALIGN) { - trsz = lj_ir_kint(J, sz); - } - trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, trsz); - if (sz > 128 || (info & CTF_VLA)) { - TRef dp; - CTSize align; - special: /* Only handle bulk zero-fill for large/VLA/VLS types. */ - if (J->base[1]) - lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init large/VLA/VLS types. */ - dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata))); - if (trsz == TREF_NIL) trsz = lj_ir_kint(J, sz); - align = ctype_align(info); - if (align < CT_MEMALIGN) align = CT_MEMALIGN; - crec_fill(J, dp, trsz, lj_ir_kint(J, 0), (1u << align)); - } else if (J->base[1] && !J->base[2] && - !lj_cconv_multi_init(cts, d, &rd->argv[1])) { - goto single_init; - } else if (ctype_isarray(d->info)) { - CType *dc = ctype_rawchild(cts, d); /* Array element type. */ - CTSize ofs, esize = dc->size; - TRef sp = 0; - TValue tv; - TValue *sval = &tv; - MSize i; - tv.u64 = 0; - if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)) || - esize * CREC_FILL_MAXUNROLL < sz) - goto special; - for (i = 1, ofs = 0; ofs < sz; ofs += esize) { - TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, - lj_ir_kintp(J, ofs + sizeof(GCcdata))); - if (J->base[i]) { - sp = J->base[i]; - sval = &rd->argv[i]; - i++; - } else if (i != 2) { - sp = ctype_isnum(dc->info) ? 
lj_ir_kint(J, 0) : TREF_NIL; - } - crec_ct_tv(J, dc, dp, sp, sval); - } - } else if (ctype_isstruct(d->info)) { - CTypeID fid = d->sib; - MSize i = 1; - while (fid) { - CType *df = ctype_get(cts, fid); - fid = df->sib; - if (ctype_isfield(df->info)) { - CType *dc; - TRef sp, dp; - TValue tv; - TValue *sval = &tv; - setintV(&tv, 0); - if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ - dc = ctype_rawchild(cts, df); /* Field type. */ - if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info) || - ctype_isenum(dc->info))) - lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */ - if (J->base[i]) { - sp = J->base[i]; - sval = &rd->argv[i]; - i++; - } else { - sp = ctype_isptr(dc->info) ? TREF_NIL : lj_ir_kint(J, 0); - } - dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, - lj_ir_kintp(J, df->size + sizeof(GCcdata))); - crec_ct_tv(J, dc, dp, sp, sval); - } else if (!ctype_isconstval(df->info)) { - /* NYI: init bitfields and sub-structures. */ - lj_trace_err(J, LJ_TRERR_NYICONV); - } - } - } else { - TRef dp; - single_init: - dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata))); - if (J->base[1]) { - crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]); - } else { - TValue tv; - tv.u64 = 0; - crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv); - } - } - } - J->base[0] = trcd; - /* Handle __gc metamethod. */ - fin = lj_ctype_meta(cts, id, MM_gc); - if (fin) - crec_finalizer(J, trcd, 0, fin); -} - -/* Record argument conversions. */ -static TRef crec_call_args(jit_State *J, RecordFFData *rd, - CTState *cts, CType *ct) -{ - TRef args[CCI_NARGS_MAX]; - CTypeID fid; - MSize i, n; - TRef tr, *base; - cTValue *o; -#if LJ_TARGET_X86 -#if LJ_ABI_WIN - TRef *arg0 = NULL, *arg1 = NULL; -#endif - int ngpr = 0; - if (ctype_cconv(ct->info) == CTCC_THISCALL) - ngpr = 1; - else if (ctype_cconv(ct->info) == CTCC_FASTCALL) - ngpr = 2; -#endif - - /* Skip initial attributes. */ - fid = ct->sib; - while (fid) { - CType *ctf = ctype_get(cts, fid); - if (!ctype_isattrib(ctf->info)) break; - fid = ctf->sib; - } - args[0] = TREF_NIL; - for (n = 0, base = J->base+1, o = rd->argv+1; *base; n++, base++, o++) { - CTypeID did; - CType *d; - - if (n >= CCI_NARGS_MAX) - lj_trace_err(J, LJ_TRERR_NYICALL); - - if (fid) { /* Get argument type from field. */ - CType *ctf = ctype_get(cts, fid); - fid = ctf->sib; - lua_assert(ctype_isfield(ctf->info)); - did = ctype_cid(ctf->info); - } else { - if (!(ct->info & CTF_VARARG)) - lj_trace_err(J, LJ_TRERR_NYICALL); /* Too many arguments. */ - did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */ - } - d = ctype_raw(cts, did); - if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || - ctype_isenum(d->info))) - lj_trace_err(J, LJ_TRERR_NYICALL); - tr = crec_ct_tv(J, d, 0, *base, o); - if (ctype_isinteger_or_bool(d->info)) { - if (d->size < 4) { - if ((d->info & CTF_UNSIGNED)) - tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_U8 : IRT_U16, 0); - else - tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT); - } - } else if (LJ_SOFTFP && ctype_isfp(d->info) && d->size > 4) { - lj_needsplit(J); - } -#if LJ_TARGET_X86 - /* 64 bit args must not end up in registers for fastcall/thiscall. */ -#if LJ_ABI_WIN - if (!ctype_isfp(d->info)) { - /* Sigh, the Windows/x86 ABI allows reordering across 64 bit args. 
*/ - if (tref_typerange(tr, IRT_I64, IRT_U64)) { - if (ngpr) { - arg0 = &args[n]; args[n++] = TREF_NIL; ngpr--; - if (ngpr) { - arg1 = &args[n]; args[n++] = TREF_NIL; ngpr--; - } - } - } else { - if (arg0) { *arg0 = tr; arg0 = NULL; n--; continue; } - if (arg1) { *arg1 = tr; arg1 = NULL; n--; continue; } - if (ngpr) ngpr--; - } - } -#else - if (!ctype_isfp(d->info) && ngpr) { - if (tref_typerange(tr, IRT_I64, IRT_U64)) { - /* No reordering for other x86 ABIs. Simply add alignment args. */ - do { args[n++] = TREF_NIL; } while (--ngpr); - } else { - ngpr--; - } - } -#endif -#endif - args[n] = tr; - } - tr = args[0]; - for (i = 1; i < n; i++) - tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]); - return tr; -} - -/* Create a snapshot for the caller, simulating a 'false' return value. */ -static void crec_snap_caller(jit_State *J) -{ - lua_State *L = J->L; - TValue *base = L->base, *top = L->top; - const BCIns *pc = J->pc; - TRef ftr = J->base[-1]; - ptrdiff_t delta; - if (!frame_islua(base-1) || J->framedepth <= 0) - lj_trace_err(J, LJ_TRERR_NYICALL); - J->pc = frame_pc(base-1); delta = 1+LJ_FR2+bc_a(J->pc[-1]); - L->top = base; L->base = base - delta; - J->base[-1] = TREF_FALSE; - J->base -= delta; J->baseslot -= (BCReg)delta; - J->maxslot = (BCReg)delta; J->framedepth--; - lj_snap_add(J); - L->base = base; L->top = top; - J->framedepth++; J->maxslot = 1; - J->base += delta; J->baseslot += (BCReg)delta; - J->base[-1] = ftr; J->pc = pc; -} - -/* Record function call. */ -static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CType *ct = ctype_raw(cts, cd->ctypeid); - IRType tp = IRT_PTR; - if (ctype_isptr(ct->info)) { - tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; - ct = ctype_rawchild(cts, ct); - } - if (ctype_isfunc(ct->info)) { - TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR); - CType *ctr = ctype_rawchild(cts, ct); - IRType t = crec_ct2irt(cts, ctr); - TRef tr; - TValue tv; - /* Check for blacklisted C functions that might call a callback. */ - setlightudV(&tv, - cdata_getptr(cdataptr(cd), (LJ_64 && tp == IRT_P64) ? 8 : 4)); - if (tvistrue(lj_tab_get(J->L, cts->miscmap, &tv))) - lj_trace_err(J, LJ_TRERR_BLACKL); - if (ctype_isvoid(ctr->info)) { - t = IRT_NIL; - rd->nres = 0; - } else if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) || - ctype_isenum(ctr->info)) || t == IRT_CDATA) { - lj_trace_err(J, LJ_TRERR_NYICALL); - } - if ((ct->info & CTF_VARARG) -#if LJ_TARGET_X86 - || ctype_cconv(ct->info) != CTCC_CDECL -#endif - ) - func = emitir(IRT(IR_CARG, IRT_NIL), func, - lj_ir_kint(J, ctype_typeid(cts, ct))); - tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func); - if (ctype_isbool(ctr->info)) { - if (frame_islua(J->L->base-1) && bc_b(frame_pc(J->L->base-1)[-1]) == 1) { - /* Don't check result if ignored. */ - tr = TREF_NIL; - } else { - crec_snap_caller(J); -#if LJ_TARGET_X86ORX64 - /* Note: only the x86/x64 backend supports U8 and only for EQ(tr, 0). 
*/ - lj_ir_set(J, IRTG(IR_NE, IRT_U8), tr, lj_ir_kint(J, 0)); -#else - lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0)); -#endif - J->postproc = LJ_POST_FIXGUARDSNAP; - tr = TREF_TRUE; - } - } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) || - t == IRT_I64 || t == IRT_U64 || ctype_isenum(ctr->info)) { - TRef trid = lj_ir_kint(J, ctype_cid(ct->info)); - tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr); - if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); - } else if (t == IRT_FLOAT || t == IRT_U32) { - tr = emitconv(tr, IRT_NUM, t, 0); - } else if (t == IRT_I8 || t == IRT_I16) { - tr = emitconv(tr, IRT_INT, t, IRCONV_SEXT); - } else if (t == IRT_U8 || t == IRT_U16) { - tr = emitconv(tr, IRT_INT, t, 0); - } - J->base[0] = tr; - J->needsnap = 1; - return 1; - } - return 0; -} - -void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]); - CTypeID id = cd->ctypeid; - CType *ct; - cTValue *tv; - MMS mm = MM_call; - if (id == CTID_CTYPEID) { - id = crec_constructor(J, cd, J->base[0]); - mm = MM_new; - } else if (crec_call(J, rd, cd)) { - return; - } - /* Record ctype __call/__new metamethod. */ - ct = ctype_raw(cts, id); - tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm); - if (tv) { - if (tvisfunc(tv)) { - J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; - rd->nres = -1; /* Pending tailcall. */ - return; - } - } else if (mm == MM_new) { - crec_alloc(J, rd, id); - return; - } - /* No metamethod or NYI: non-function metamethods. */ - lj_trace_err(J, LJ_TRERR_BADTYPE); -} - -static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm) -{ - if (ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) { - IRType dt; - CTypeID id; - TRef tr; - MSize i; - IROp op; - lj_needsplit(J); - if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) || - ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) { - dt = IRT_U64; id = CTID_UINT64; - } else { - dt = IRT_I64; id = CTID_INT64; - if (mm < MM_add && - !((s[0]->info | s[1]->info) & CTF_FP) && - s[0]->size == 4 && s[1]->size == 4) { /* Try to narrow comparison. */ - if (!((s[0]->info ^ s[1]->info) & CTF_UNSIGNED) || - (tref_isk(sp[1]) && IR(tref_ref(sp[1]))->i >= 0)) { - dt = (s[0]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT; - goto comp; - } else if (tref_isk(sp[0]) && IR(tref_ref(sp[0]))->i >= 0) { - dt = (s[1]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT; - goto comp; - } - } - } - for (i = 0; i < 2; i++) { - IRType st = tref_type(sp[i]); - if (st == IRT_NUM || st == IRT_FLOAT) - sp[i] = emitconv(sp[i], dt, st, IRCONV_ANY); - else if (!(st == IRT_I64 || st == IRT_U64)) - sp[i] = emitconv(sp[i], dt, IRT_INT, - (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT); - } - if (mm < MM_add) { - comp: - /* Assume true comparison. Fixup and emit pending guard later. */ - if (mm == MM_eq) { - op = IR_EQ; - } else { - op = mm == MM_lt ? 
IR_LT : IR_LE; - if (dt == IRT_U32 || dt == IRT_U64) - op += (IR_ULT-IR_LT); - } - lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]); - J->postproc = LJ_POST_FIXGUARD; - return TREF_TRUE; - } else { - tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]); - } - return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); - } - return 0; -} - -static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CType *ctp = s[0]; - if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) { - if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) && - (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) { - if (mm == MM_sub) { /* Pointer difference. */ - TRef tr; - CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info)); - if (sz == 0 || (sz & (sz-1)) != 0) - return 0; /* NYI: integer division. */ - tr = emitir(IRT(IR_SUB, IRT_INTP), sp[0], sp[1]); - tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz))); -#if LJ_64 - tr = emitconv(tr, IRT_NUM, IRT_INTP, 0); -#endif - return tr; - } else { /* Pointer comparison (unsigned). */ - /* Assume true comparison. Fixup and emit pending guard later. */ - IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE; - lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]); - J->postproc = LJ_POST_FIXGUARD; - return TREF_TRUE; - } - } - if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info))) - return 0; - } else if (mm == MM_add && ctype_isnum(ctp->info) && - (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) { - TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */ - ctp = s[1]; - } else { - return 0; - } - { - TRef tr = sp[1]; - IRType t = tref_type(tr); - CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info)); - CTypeID id; -#if LJ_64 - if (t == IRT_NUM || t == IRT_FLOAT) - tr = emitconv(tr, IRT_INTP, t, IRCONV_ANY); - else if (!(t == IRT_I64 || t == IRT_U64)) - tr = emitconv(tr, IRT_INTP, IRT_INT, - ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT); -#else - if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) { - tr = emitconv(tr, IRT_INTP, t, - (t == IRT_NUM || t == IRT_FLOAT) ? IRCONV_ANY : 0); - } -#endif - tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz)); - tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr); - id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)), - CTSIZE_PTR); - return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); - } -} - -/* Record ctype arithmetic metamethods. */ -static TRef crec_arith_meta(jit_State *J, TRef *sp, CType **s, CTState *cts, - RecordFFData *rd) -{ - cTValue *tv = NULL; - if (J->base[0]) { - if (tviscdata(&rd->argv[0])) { - CTypeID id = argv2cdata(J, J->base[0], &rd->argv[0])->ctypeid; - CType *ct = ctype_raw(cts, id); - if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); - tv = lj_ctype_meta(cts, id, (MMS)rd->data); - } - if (!tv && J->base[1] && tviscdata(&rd->argv[1])) { - CTypeID id = argv2cdata(J, J->base[1], &rd->argv[1])->ctypeid; - CType *ct = ctype_raw(cts, id); - if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); - tv = lj_ctype_meta(cts, id, (MMS)rd->data); - } - } - if (tv) { - if (tvisfunc(tv)) { - J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; - rd->nres = -1; /* Pending tailcall. */ - return 0; - } /* NYI: non-function metamethods. */ - } else if ((MMS)rd->data == MM_eq) { /* Fallback cdata pointer comparison. */ - if (sp[0] && sp[1] && ctype_isnum(s[0]->info) == ctype_isnum(s[1]->info)) { - /* Assume true comparison. 
Fixup and emit pending guard later. */ - lj_ir_set(J, IRTG(IR_EQ, IRT_PTR), sp[0], sp[1]); - J->postproc = LJ_POST_FIXGUARD; - return TREF_TRUE; - } else { - return TREF_FALSE; - } - } - lj_trace_err(J, LJ_TRERR_BADTYPE); - return 0; -} - -void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - TRef sp[2]; - CType *s[2]; - MSize i; - for (i = 0; i < 2; i++) { - TRef tr = J->base[i]; - CType *ct = ctype_get(cts, CTID_DOUBLE); - if (!tr) { - lj_trace_err(J, LJ_TRERR_BADTYPE); - } else if (tref_iscdata(tr)) { - CTypeID id = argv2cdata(J, tr, &rd->argv[i])->ctypeid; - IRType t; - ct = ctype_raw(cts, id); - t = crec_ct2irt(cts, ct); - if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */ - tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR); - if (ctype_isref(ct->info)) { - ct = ctype_rawchild(cts, ct); - t = crec_ct2irt(cts, ct); - } - } else if (t == IRT_I64 || t == IRT_U64) { - tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64); - lj_needsplit(J); - goto ok; - } else if (t == IRT_INT || t == IRT_U32) { - tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT); - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - goto ok; - } else if (ctype_isfunc(ct->info)) { - tr = emitir(IRT(IR_FLOAD, IRT_PTR), tr, IRFL_CDATA_PTR); - ct = ctype_get(cts, - lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR)); - goto ok; - } else { - tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata))); - } - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - if (ctype_isnum(ct->info)) { - if (t == IRT_CDATA) { - tr = 0; - } else { - if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); - tr = emitir(IRT(IR_XLOAD, t), tr, 0); - } - } - } else if (tref_isnil(tr)) { - tr = lj_ir_kptr(J, NULL); - ct = ctype_get(cts, CTID_P_VOID); - } else if (tref_isinteger(tr)) { - ct = ctype_get(cts, CTID_INT32); - } else if (tref_isstr(tr)) { - TRef tr2 = J->base[1-i]; - CTypeID id = argv2cdata(J, tr2, &rd->argv[1-i])->ctypeid; - ct = ctype_raw(cts, id); - if (ctype_isenum(ct->info)) { /* Match string against enum constant. */ - GCstr *str = strV(&rd->argv[i]); - CTSize ofs; - CType *cct = lj_ctype_getfield(cts, ct, str, &ofs); - if (cct && ctype_isconstval(cct->info)) { - /* Specialize to the name of the enum constant. */ - emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, str)); - ct = ctype_child(cts, cct); - tr = lj_ir_kint(J, (int32_t)ofs); - } else { /* Interpreter will throw or return false. */ - ct = ctype_get(cts, CTID_P_VOID); - } - } else if (ctype_isptr(ct->info)) { - tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCstr))); - } else { - ct = ctype_get(cts, CTID_P_VOID); - } - } else if (!tref_isnum(tr)) { - tr = 0; - ct = ctype_get(cts, CTID_P_VOID); - } - ok: - s[i] = ct; - sp[i] = tr; - } - { - TRef tr; - if (!(tr = crec_arith_int64(J, sp, s, (MMS)rd->data)) && - !(tr = crec_arith_ptr(J, sp, s, (MMS)rd->data)) && - !(tr = crec_arith_meta(J, sp, s, cts, rd))) - return; - J->base[0] = tr; - /* Fixup cdata comparisons, too. Avoids some cdata escapes. 
*/ - if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1) && - !irt_isguard(J->guardemit)) { - const BCIns *pc = frame_contpc(J->L->base-1) - 1; - if (bc_op(*pc) <= BC_ISNEP) { - J2G(J)->tmptv.u64 = (uint64_t)(uintptr_t)pc; - J->postproc = LJ_POST_FIXCOMP; - } - } - } -} - -/* -- C library namespace metamethods ------------------------------------- */ - -void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) && - udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) { - CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0])); - GCstr *name = strV(&rd->argv[1]); - CType *ct; - CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX); - cTValue *tv = lj_tab_getstr(cl->cache, name); - rd->nres = rd->data; - if (id && tv && !tvisnil(tv)) { - /* Specialize to the symbol name and make the result a constant. */ - emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name)); - if (ctype_isconstval(ct->info)) { - if (ct->size >= 0x80000000u && - (ctype_child(cts, ct)->info & CTF_UNSIGNED)) - J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size); - else - J->base[0] = lj_ir_kint(J, (int32_t)ct->size); - } else if (ctype_isextern(ct->info)) { - CTypeID sid = ctype_cid(ct->info); - void *sp = *(void **)cdataptr(cdataV(tv)); - TRef ptr; - ct = ctype_raw(cts, sid); - if (LJ_64 && !checkptr32(sp)) - ptr = lj_ir_kintp(J, (uintptr_t)sp); - else - ptr = lj_ir_kptr(J, sp); - if (rd->data) { - J->base[0] = crec_tv_ct(J, ct, sid, ptr); - } else { - J->needsnap = 1; - crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]); - } - } else { - J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA); - } - } else { - lj_trace_err(J, LJ_TRERR_NOCACHE); - } - } /* else: interpreter will throw. */ -} - -/* -- FFI library functions ----------------------------------------------- */ - -static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval) -{ - return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval); -} - -void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd) -{ - crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0])); -} - -void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd) -{ - UNUSED(rd); - if (J->base[0]) - lj_trace_err(J, LJ_TRERR_NYICALL); - J->base[0] = lj_ir_call(J, IRCALL_lj_vm_errno); -} - -void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - TRef tr = J->base[0]; - if (tr) { - TRef trlen = J->base[1]; - if (!tref_isnil(trlen)) { - trlen = crec_toint(J, cts, trlen, &rd->argv[1]); - tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]); - } else { - tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]); - trlen = lj_ir_call(J, IRCALL_strlen, tr); - } - J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen); - } /* else: interpreter will throw. 
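[Editorial aside: the constant branch of recff_clib_index above folds a 32-bit constant stored in ct->size into the trace, taking the floating-point number path only when the value has an unsigned type and does not fit a signed 32-bit integer. The same decision in plain C, with invented names; this is a sketch of the rule, not the recorder itself.]

#include <stdint.h>

static double const_as_lua_number(uint32_t v, int child_is_unsigned)
{
  if (v >= 0x80000000u && child_is_unsigned)
    return (double)v;            /* Too big for int32_t: use the number path. */
  return (double)(int32_t)v;     /* Otherwise the int32 constant path is exact. */
}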
*/ -} - -void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2]; - if (trdst && trsrc && (trlen || tref_isstr(trsrc))) { - trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]); - trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]); - if (trlen) { - trlen = crec_toint(J, cts, trlen, &rd->argv[2]); - } else { - trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN); - trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); - } - rd->nres = 0; - crec_copy(J, trdst, trsrc, trlen, NULL); - } /* else: interpreter will throw. */ -} - -void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - TRef trdst = J->base[0], trlen = J->base[1], trfill = J->base[2]; - if (trdst && trlen) { - CTSize step = 1; - if (tviscdata(&rd->argv[0])) { /* Get alignment of original destination. */ - CTSize sz; - CType *ct = ctype_raw(cts, cdataV(&rd->argv[0])->ctypeid); - if (ctype_isptr(ct->info)) - ct = ctype_rawchild(cts, ct); - step = (1u<argv[0]); - trlen = crec_toint(J, cts, trlen, &rd->argv[1]); - if (trfill) - trfill = crec_toint(J, cts, trfill, &rd->argv[2]); - else - trfill = lj_ir_kint(J, 0); - rd->nres = 0; - crec_fill(J, trdst, trlen, trfill, step); - } /* else: interpreter will throw. */ -} - -void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd) -{ - if (tref_iscdata(J->base[0])) { - TRef trid = lj_ir_kint(J, argv2ctype(J, J->base[0], &rd->argv[0])); - J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), - lj_ir_kint(J, CTID_CTYPEID), trid); - } else { - setfuncV(J->L, &J->errinfo, J->fn); - lj_trace_err_info(J, LJ_TRERR_NYIFFU); - } -} - -void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd) -{ - argv2ctype(J, J->base[0], &rd->argv[0]); - if (tref_iscdata(J->base[1])) { - argv2ctype(J, J->base[1], &rd->argv[1]); - J->postproc = LJ_POST_FIXBOOL; - J->base[0] = TREF_TRUE; - } else { - J->base[0] = TREF_FALSE; - } -} - -void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd) -{ - if (tref_isstr(J->base[0])) { - /* Specialize to the ABI string to make the boolean result a constant. */ - emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0]))); - J->postproc = LJ_POST_FIXBOOL; - J->base[0] = TREF_TRUE; - } else { - lj_trace_err(J, LJ_TRERR_BADTYPE); - } -} - -/* Record ffi.sizeof(), ffi.alignof(), ffi.offsetof(). */ -void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd) -{ - CTypeID id = argv2ctype(J, J->base[0], &rd->argv[0]); - if (rd->data == FF_ffi_sizeof) { - CType *ct = lj_ctype_rawref(ctype_ctsG(J2G(J)), id); - if (ctype_isvltype(ct->info)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - } else if (rd->data == FF_ffi_offsetof) { /* Specialize to the field name. */ - if (!tref_isstr(J->base[1])) - lj_trace_err(J, LJ_TRERR_BADTYPE); - emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1]))); - rd->nres = 3; /* Just in case. */ - } - J->postproc = LJ_POST_FIXCONST; - J->base[0] = J->base[1] = J->base[2] = TREF_NIL; -} - -void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd) -{ - argv2cdata(J, J->base[0], &rd->argv[0]); - if (!J->base[1]) - lj_trace_err(J, LJ_TRERR_BADTYPE); - crec_finalizer(J, J->base[0], J->base[1], &rd->argv[1]); -} - -/* -- 64 bit bit.* library functions -------------------------------------- */ - -/* Determine bit operation type from argument type. 
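[Editorial aside before the implementation that follows: the 64-bit bit.* recorders pick an operation type by rank, uint64_t outranking int64_t and anything else falling back to the 32-bit path, and recff_bit64_nary folds the ranks of all arguments with a maximum. A tiny sketch of that rule with invented names; it is not the recorder code.]

enum Bit64Rank { RANK_32BIT = 0, RANK_INT64 = 1, RANK_UINT64 = 2 };

/* Classify one argument, mirroring the outcome of crec_bit64_type. */
static enum Bit64Rank bit64_classify(int is_int64_cdata, int is_unsigned)
{
  if (!is_int64_cdata) return RANK_32BIT;       /* Regular 32 bit ops. */
  return is_unsigned ? RANK_UINT64 : RANK_INT64;
}

/* Fold ranks over all arguments, as the n-ary recorder does. */
static enum Bit64Rank bit64_merge(enum Bit64Rank a, enum Bit64Rank b)
{
  return a > b ? a : b;
}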
*/ -static CTypeID crec_bit64_type(CTState *cts, cTValue *tv) -{ - if (tviscdata(tv)) { - CType *ct = lj_ctype_rawref(cts, cdataV(tv)->ctypeid); - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - if ((ct->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) == - CTINFO(CT_NUM, CTF_UNSIGNED) && ct->size == 8) - return CTID_UINT64; /* Use uint64_t, since it has the highest rank. */ - return CTID_INT64; /* Otherwise use int64_t. */ - } - return 0; /* Use regular 32 bit ops. */ -} - -void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - TRef tr = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0, - J->base[0], &rd->argv[0]); - if (!tref_isinteger(tr)) - tr = emitconv(tr, IRT_INT, tref_type(tr), 0); - J->base[0] = tr; -} - -int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CTypeID id = crec_bit64_type(cts, &rd->argv[0]); - if (id) { - TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]); - tr = emitir(IRT(rd->data, id-CTID_INT64+IRT_I64), tr, 0); - J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); - return 1; - } - return 0; -} - -int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CTypeID id = 0; - MSize i; - for (i = 0; J->base[i] != 0; i++) { - CTypeID aid = crec_bit64_type(cts, &rd->argv[i]); - if (id < aid) id = aid; /* Determine highest type rank of all arguments. */ - } - if (id) { - CType *ct = ctype_get(cts, id); - uint32_t ot = IRT(rd->data, id-CTID_INT64+IRT_I64); - TRef tr = crec_ct_tv(J, ct, 0, J->base[0], &rd->argv[0]); - for (i = 1; J->base[i] != 0; i++) { - TRef tr2 = crec_ct_tv(J, ct, 0, J->base[i], &rd->argv[i]); - tr = emitir(ot, tr, tr2); - } - J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); - return 1; - } - return 0; -} - -int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CTypeID id; - TRef tsh = 0; - if (J->base[0] && tref_iscdata(J->base[1])) { - tsh = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0, - J->base[1], &rd->argv[1]); - if (!tref_isinteger(tsh)) - tsh = emitconv(tsh, IRT_INT, tref_type(tsh), 0); - J->base[1] = tsh; - } - id = crec_bit64_type(cts, &rd->argv[0]); - if (id) { - TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]); - uint32_t op = rd->data; - if (!tsh) tsh = lj_opt_narrow_tobit(J, J->base[1]); - if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && - !tref_isk(tsh)) - tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 63)); -#ifdef LJ_TARGET_UNIFYROT - if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) { - op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR; - tsh = emitir(IRTI(IR_NEG), tsh, tsh); - } -#endif - tr = emitir(IRT(op, id-CTID_INT64+IRT_I64), tr, tsh); - J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); - return 1; - } - return 0; -} - -TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CTypeID id = crec_bit64_type(cts, &rd->argv[0]); - TRef tr, trsf = J->base[1]; - SFormat sf = (STRFMT_UINT|STRFMT_T_HEX); - int32_t n; - if (trsf) { - CTypeID id2 = 0; - n = (int32_t)lj_carith_check64(J->L, 2, &id2); - if (id2) - trsf = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, trsf, &rd->argv[1]); - else - trsf = lj_opt_narrow_tobit(J, trsf); - emitir(IRTGI(IR_EQ), trsf, lj_ir_kint(J, n)); /* Specialize to n. */ - } else { - n = id ? 
16 : 8; - } - if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; } - sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC); - if (id) { - tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]); - if (n < 16) - tr = emitir(IRT(IR_BAND, IRT_U64), tr, - lj_ir_kint64(J, ((uint64_t)1 << 4*n)-1)); - } else { - tr = lj_opt_narrow_tobit(J, J->base[0]); - if (n < 8) - tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << 4*n)-1))); - tr = emitconv(tr, IRT_U64, IRT_INT, 0); /* No sign-extension. */ - lj_needsplit(J); - } - return lj_ir_call(J, IRCALL_lj_strfmt_putfxint, hdr, lj_ir_kint(J, sf), tr); -} - -/* -- Miscellaneous library functions ------------------------------------- */ - -void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd) -{ - CTState *cts = ctype_ctsG(J2G(J)); - CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->ctypeid); - if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); - if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { - if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 && - !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) - d = ctype_get(cts, CTID_INT32); - else - d = ctype_get(cts, CTID_DOUBLE); - J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]); - } else { - J->base[0] = TREF_NIL; - } -} - -#undef IR -#undef emitir -#undef emitconv - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.h deleted file mode 100644 index 59f342a1d9a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_crecord.h +++ /dev/null @@ -1,38 +0,0 @@ -/* -** Trace recorder for C data operations. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CRECORD_H -#define _LJ_CRECORD_H - -#include "lj_obj.h" -#include "lj_jit.h" -#include "lj_ffrecord.h" - -#if LJ_HASJIT && LJ_HASFFI -LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd); -LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd); - -LJ_FUNC void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd); -LJ_FUNC int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd); -LJ_FUNC int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd); -LJ_FUNC int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd); -LJ_FUNC TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr); - -LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.c 
deleted file mode 100644 index 2e23c994bb0..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.c +++ /dev/null @@ -1,635 +0,0 @@ -/* -** C type management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include "lj_obj.h" - -#if LJ_HASFFI - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_strfmt.h" -#include "lj_ctype.h" -#include "lj_ccallback.h" - -/* -- C type definitions -------------------------------------------------- */ - -/* Predefined typedefs. */ -#define CTTDDEF(_) \ - /* Vararg handling. */ \ - _("va_list", P_VOID) \ - _("__builtin_va_list", P_VOID) \ - _("__gnuc_va_list", P_VOID) \ - /* From stddef.h. */ \ - _("ptrdiff_t", INT_PSZ) \ - _("size_t", UINT_PSZ) \ - _("wchar_t", WCHAR) \ - /* Subset of stdint.h. */ \ - _("int8_t", INT8) \ - _("int16_t", INT16) \ - _("int32_t", INT32) \ - _("int64_t", INT64) \ - _("uint8_t", UINT8) \ - _("uint16_t", UINT16) \ - _("uint32_t", UINT32) \ - _("uint64_t", UINT64) \ - _("intptr_t", INT_PSZ) \ - _("uintptr_t", UINT_PSZ) \ - /* End of typedef list. */ - -/* Keywords (only the ones we actually care for). */ -#define CTKWDEF(_) \ - /* Type specifiers. */ \ - _("void", -1, CTOK_VOID) \ - _("_Bool", 0, CTOK_BOOL) \ - _("bool", 1, CTOK_BOOL) \ - _("char", 1, CTOK_CHAR) \ - _("int", 4, CTOK_INT) \ - _("__int8", 1, CTOK_INT) \ - _("__int16", 2, CTOK_INT) \ - _("__int32", 4, CTOK_INT) \ - _("__int64", 8, CTOK_INT) \ - _("float", 4, CTOK_FP) \ - _("double", 8, CTOK_FP) \ - _("long", 0, CTOK_LONG) \ - _("short", 0, CTOK_SHORT) \ - _("_Complex", 0, CTOK_COMPLEX) \ - _("complex", 0, CTOK_COMPLEX) \ - _("__complex", 0, CTOK_COMPLEX) \ - _("__complex__", 0, CTOK_COMPLEX) \ - _("signed", 0, CTOK_SIGNED) \ - _("__signed", 0, CTOK_SIGNED) \ - _("__signed__", 0, CTOK_SIGNED) \ - _("unsigned", 0, CTOK_UNSIGNED) \ - /* Type qualifiers. */ \ - _("const", 0, CTOK_CONST) \ - _("__const", 0, CTOK_CONST) \ - _("__const__", 0, CTOK_CONST) \ - _("volatile", 0, CTOK_VOLATILE) \ - _("__volatile", 0, CTOK_VOLATILE) \ - _("__volatile__", 0, CTOK_VOLATILE) \ - _("restrict", 0, CTOK_RESTRICT) \ - _("__restrict", 0, CTOK_RESTRICT) \ - _("__restrict__", 0, CTOK_RESTRICT) \ - _("inline", 0, CTOK_INLINE) \ - _("__inline", 0, CTOK_INLINE) \ - _("__inline__", 0, CTOK_INLINE) \ - /* Storage class specifiers. */ \ - _("typedef", 0, CTOK_TYPEDEF) \ - _("extern", 0, CTOK_EXTERN) \ - _("static", 0, CTOK_STATIC) \ - _("auto", 0, CTOK_AUTO) \ - _("register", 0, CTOK_REGISTER) \ - /* GCC Attributes. */ \ - _("__extension__", 0, CTOK_EXTENSION) \ - _("__attribute", 0, CTOK_ATTRIBUTE) \ - _("__attribute__", 0, CTOK_ATTRIBUTE) \ - _("asm", 0, CTOK_ASM) \ - _("__asm", 0, CTOK_ASM) \ - _("__asm__", 0, CTOK_ASM) \ - /* MSVC Attributes. */ \ - _("__declspec", 0, CTOK_DECLSPEC) \ - _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \ - _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \ - _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \ - _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \ - _("__ptr32", 4, CTOK_PTRSZ) \ - _("__ptr64", 8, CTOK_PTRSZ) \ - /* Other type specifiers. */ \ - _("struct", 0, CTOK_STRUCT) \ - _("union", 0, CTOK_UNION) \ - _("enum", 0, CTOK_ENUM) \ - /* Operators. */ \ - _("sizeof", 0, CTOK_SIZEOF) \ - _("__alignof", 0, CTOK_ALIGNOF) \ - _("__alignof__", 0, CTOK_ALIGNOF) \ - /* End of keyword list. */ - -/* Type info for predefined types. Size merged in. 
*/ -static CTInfo lj_ctype_typeinfo[] = { -#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)), -#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id), -#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)), -CTTYDEF(CTTYINFODEF) -CTTDDEF(CTTDINFODEF) -CTKWDEF(CTKWINFODEF) -#undef CTTYINFODEF -#undef CTTDINFODEF -#undef CTKWINFODEF - 0 -}; - -/* Predefined type names collected in a single string. */ -static const char * const lj_ctype_typenames = -#define CTTDNAMEDEF(name, id) name "\0" -#define CTKWNAMEDEF(name, sz, cds) name "\0" -CTTDDEF(CTTDNAMEDEF) -CTKWDEF(CTKWNAMEDEF) -#undef CTTDNAMEDEF -#undef CTKWNAMEDEF -; - -#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1) -#ifdef LUAJIT_CTYPE_CHECK_ANCHOR -#define CTTYPETAB_MIN CTTYPEINFO_NUM -#else -#define CTTYPETAB_MIN 128 -#endif - -/* -- C type interning ---------------------------------------------------- */ - -#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK) -#define ct_hashname(name) \ - (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK) - -/* Create new type element. */ -CTypeID lj_ctype_new(CTState *cts, CType **ctp) -{ - CTypeID id = cts->top; - CType *ct; - lua_assert(cts->L); - if (LJ_UNLIKELY(id >= cts->sizetab)) { - if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); -#ifdef LUAJIT_CTYPE_CHECK_ANCHOR - ct = lj_mem_newvec(cts->L, id+1, CType); - memcpy(ct, cts->tab, id*sizeof(CType)); - memset(cts->tab, 0, id*sizeof(CType)); - lj_mem_freevec(cts->g, cts->tab, cts->sizetab, CType); - cts->tab = ct; - cts->sizetab = id+1; -#else - lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType); -#endif - } - cts->top = id+1; - *ctp = ct = &cts->tab[id]; - ct->info = 0; - ct->size = 0; - ct->sib = 0; - ct->next = 0; - setgcrefnull(ct->name); - return id; -} - -/* Intern a type element. */ -CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size) -{ - uint32_t h = ct_hashtype(info, size); - CTypeID id = cts->hash[h]; - lua_assert(cts->L); - while (id) { - CType *ct = ctype_get(cts, id); - if (ct->info == info && ct->size == size) - return id; - id = ct->next; - } - id = cts->top; - if (LJ_UNLIKELY(id >= cts->sizetab)) { - if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); - lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType); - } - cts->top = id+1; - cts->tab[id].info = info; - cts->tab[id].size = size; - cts->tab[id].sib = 0; - cts->tab[id].next = cts->hash[h]; - setgcrefnull(cts->tab[id].name); - cts->hash[h] = (CTypeID1)id; - return id; -} - -/* Add type element to hash table. */ -static void ctype_addtype(CTState *cts, CType *ct, CTypeID id) -{ - uint32_t h = ct_hashtype(ct->info, ct->size); - ct->next = cts->hash[h]; - cts->hash[h] = (CTypeID1)id; -} - -/* Add named element to hash table. */ -void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id) -{ - uint32_t h = ct_hashname(gcref(ct->name)); - ct->next = cts->hash[h]; - cts->hash[h] = (CTypeID1)id; -} - -/* Get a C type by name, matching the type mask. */ -CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask) -{ - CTypeID id = cts->hash[ct_hashname(name)]; - while (id) { - CType *ct = ctype_get(cts, id); - if (gcref(ct->name) == obj2gco(name) && - ((tmask >> ctype_type(ct->info)) & 1)) { - *ctp = ct; - return id; - } - id = ct->next; - } - *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */ - return 0; -} - -/* Get a struct/union/enum/function field by name. 
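[Editorial aside on the interning scheme in lj_ctype_intern above, as a simplified standalone sketch: entries are keyed on (info, size), each hash bucket holds a chain of entry indices, and in this sketch index 0 serves as the end-of-chain marker, so *top is assumed to start at 1 and the table to be large enough. Names and the stripped-down entry layout are illustrative only.]

#include <stdint.h>

typedef struct Ent { uint32_t info, size, next; } Ent;

static uint32_t intern(Ent *tab, uint32_t *top, uint32_t *bucket,
                       uint32_t info, uint32_t size)
{
  uint32_t id;
  for (id = *bucket; id; id = tab[id].next)
    if (tab[id].info == info && tab[id].size == size)
      return id;                        /* Already interned: reuse it. */
  id = (*top)++;                        /* Append a fresh entry. */
  tab[id].info = info;
  tab[id].size = size;
  tab[id].next = *bucket;               /* Link it into the bucket chain. */
  *bucket = id;
  return id;
}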
*/ -CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, CTSize *ofs, - CTInfo *qual) -{ - while (ct->sib) { - ct = ctype_get(cts, ct->sib); - if (gcref(ct->name) == obj2gco(name)) { - *ofs = ct->size; - return ct; - } - if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { - CType *fct, *cct = ctype_child(cts, ct); - CTInfo q = 0; - while (ctype_isattrib(cct->info)) { - if (ctype_attrib(cct->info) == CTA_QUAL) q |= cct->size; - cct = ctype_child(cts, cct); - } - fct = lj_ctype_getfieldq(cts, cct, name, ofs, qual); - if (fct) { - if (qual) *qual |= q; - *ofs += ct->size; - return fct; - } - } - } - return NULL; /* Not found. */ -} - -/* -- C type information -------------------------------------------------- */ - -/* Follow references and get raw type for a C type ID. */ -CType *lj_ctype_rawref(CTState *cts, CTypeID id) -{ - CType *ct = ctype_get(cts, id); - while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) - ct = ctype_child(cts, ct); - return ct; -} - -/* Get size for a C type ID. Does NOT support VLA/VLS. */ -CTSize lj_ctype_size(CTState *cts, CTypeID id) -{ - CType *ct = ctype_raw(cts, id); - return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID; -} - -/* Get size for a variable-length C type. Does NOT support other C types. */ -CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem) -{ - uint64_t xsz = 0; - if (ctype_isstruct(ct->info)) { - CTypeID arrid = 0, fid = ct->sib; - xsz = ct->size; /* Add the struct size. */ - while (fid) { - CType *ctf = ctype_get(cts, fid); - if (ctype_type(ctf->info) == CT_FIELD) - arrid = ctype_cid(ctf->info); /* Remember last field of VLS. */ - fid = ctf->sib; - } - ct = ctype_raw(cts, arrid); - } - lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */ - ct = ctype_rawchild(cts, ct); /* Get array element. */ - lua_assert(ctype_hassize(ct->info)); - /* Calculate actual size of VLA and check for overflow. */ - xsz += (uint64_t)ct->size * nelem; - return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID; -} - -/* Get type, qualifiers, size and alignment for a C type ID. */ -CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp) -{ - CTInfo qual = 0; - CType *ct = ctype_get(cts, id); - for (;;) { - CTInfo info = ct->info; - if (ctype_isenum(info)) { - /* Follow child. Need to look at its attributes, too. */ - } else if (ctype_isattrib(info)) { - if (ctype_isxattrib(info, CTA_QUAL)) - qual |= ct->size; - else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED)) - qual |= CTFP_ALIGNED + CTALIGN(ct->size); - } else { - if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN); - qual |= (info & ~(CTF_ALIGN|CTMASK_CID)); - lua_assert(ctype_hassize(info) || ctype_isfunc(info)); - *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size; - break; - } - ct = ctype_get(cts, ctype_cid(info)); - } - return qual; -} - -/* Get ctype metamethod. */ -cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm) -{ - CType *ct = ctype_get(cts, id); - cTValue *tv; - while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) { - id = ctype_cid(ct->info); - ct = ctype_get(cts, id); - } - if (ctype_isptr(ct->info) && - ctype_isfunc(ctype_get(cts, ctype_cid(ct->info))->info)) - tv = lj_tab_getstr(cts->miscmap, &cts->g->strempty); - else - tv = lj_tab_getinth(cts->miscmap, -(int32_t)id); - if (tv && tvistab(tv) && - (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv)) - return tv; - return NULL; -} - -/* -- C type representation ----------------------------------------------- */ - -/* Fixed max. 
length of a C type representation. */ -#define CTREPR_MAX 512 - -typedef struct CTRepr { - char *pb, *pe; - CTState *cts; - lua_State *L; - int needsp; - int ok; - char buf[CTREPR_MAX]; -} CTRepr; - -/* Prepend string. */ -static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len) -{ - char *p = ctr->pb; - if (ctr->buf + len+1 > p) { ctr->ok = 0; return; } - if (ctr->needsp) *--p = ' '; - ctr->needsp = 1; - p -= len; - while (len-- > 0) p[len] = str[len]; - ctr->pb = p; -} - -#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1) - -/* Prepend char. */ -static void ctype_prepc(CTRepr *ctr, int c) -{ - if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; } - *--ctr->pb = c; -} - -/* Prepend number. */ -static void ctype_prepnum(CTRepr *ctr, uint32_t n) -{ - char *p = ctr->pb; - if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; } - do { *--p = (char)('0' + n % 10); } while (n /= 10); - ctr->pb = p; - ctr->needsp = 0; -} - -/* Append char. */ -static void ctype_appc(CTRepr *ctr, int c) -{ - if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; } - *ctr->pe++ = c; -} - -/* Append number. */ -static void ctype_appnum(CTRepr *ctr, uint32_t n) -{ - char buf[10]; - char *p = buf+sizeof(buf); - char *q = ctr->pe; - if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; } - do { *--p = (char)('0' + n % 10); } while (n /= 10); - do { *q++ = *p++; } while (p < buf+sizeof(buf)); - ctr->pe = q; -} - -/* Prepend qualifiers. */ -static void ctype_prepqual(CTRepr *ctr, CTInfo info) -{ - if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile"); - if ((info & CTF_CONST)) ctype_preplit(ctr, "const"); -} - -/* Prepend named type. */ -static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t) -{ - if (gcref(ct->name)) { - GCstr *str = gco2str(gcref(ct->name)); - ctype_prepstr(ctr, strdata(str), str->len); - } else { - if (ctr->needsp) ctype_prepc(ctr, ' '); - ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct)); - ctr->needsp = 1; - } - ctype_prepstr(ctr, t, (MSize)strlen(t)); - ctype_prepqual(ctr, qual); -} - -static void ctype_repr(CTRepr *ctr, CTypeID id) -{ - CType *ct = ctype_get(ctr->cts, id); - CTInfo qual = 0; - int ptrto = 0; - for (;;) { - CTInfo info = ct->info; - CTSize size = ct->size; - switch (ctype_type(info)) { - case CT_NUM: - if ((info & CTF_BOOL)) { - ctype_preplit(ctr, "bool"); - } else if ((info & CTF_FP)) { - if (size == sizeof(double)) ctype_preplit(ctr, "double"); - else if (size == sizeof(float)) ctype_preplit(ctr, "float"); - else ctype_preplit(ctr, "long double"); - } else if (size == 1) { - if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char"); - else if (CTF_UCHAR) ctype_preplit(ctr, "signed char"); - else ctype_preplit(ctr, "unsigned char"); - } else if (size < 8) { - if (size == 4) ctype_preplit(ctr, "int"); - else ctype_preplit(ctr, "short"); - if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned"); - } else { - ctype_preplit(ctr, "_t"); - ctype_prepnum(ctr, size*8); - ctype_preplit(ctr, "int"); - if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u'); - } - ctype_prepqual(ctr, (qual|info)); - return; - case CT_VOID: - ctype_preplit(ctr, "void"); - ctype_prepqual(ctr, (qual|info)); - return; - case CT_STRUCT: - ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? 
"union" : "struct"); - return; - case CT_ENUM: - if (id == CTID_CTYPEID) { - ctype_preplit(ctr, "ctype"); - return; - } - ctype_preptype(ctr, ct, qual, "enum"); - return; - case CT_ATTRIB: - if (ctype_attrib(info) == CTA_QUAL) qual |= size; - break; - case CT_PTR: - if ((info & CTF_REF)) { - ctype_prepc(ctr, '&'); - } else { - ctype_prepqual(ctr, (qual|info)); - if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32"); - ctype_prepc(ctr, '*'); - } - qual = 0; - ptrto = 1; - ctr->needsp = 1; - break; - case CT_ARRAY: - if (ctype_isrefarray(info)) { - ctr->needsp = 1; - if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); } - ctype_appc(ctr, '['); - if (size != CTSIZE_INVALID) { - CTSize csize = ctype_child(ctr->cts, ct)->size; - ctype_appnum(ctr, csize ? size/csize : 0); - } else if ((info & CTF_VLA)) { - ctype_appc(ctr, '?'); - } - ctype_appc(ctr, ']'); - } else if ((info & CTF_COMPLEX)) { - if (size == 2*sizeof(float)) ctype_preplit(ctr, "float"); - ctype_preplit(ctr, "complex"); - return; - } else { - ctype_preplit(ctr, ")))"); - ctype_prepnum(ctr, size); - ctype_preplit(ctr, "__attribute__((vector_size("); - } - break; - case CT_FUNC: - ctr->needsp = 1; - if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); } - ctype_appc(ctr, '('); - ctype_appc(ctr, ')'); - break; - default: - lua_assert(0); - break; - } - ct = ctype_get(ctr->cts, ctype_cid(info)); - } -} - -/* Return a printable representation of a C type. */ -GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name) -{ - global_State *g = G(L); - CTRepr ctr; - ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2]; - ctr.cts = ctype_ctsG(g); - ctr.L = L; - ctr.ok = 1; - ctr.needsp = 0; - if (name) ctype_prepstr(&ctr, strdata(name), name->len); - ctype_repr(&ctr, id); - if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?"); - return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb); -} - -/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. */ -GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned) -{ - char buf[1+20+3]; - char *p = buf+sizeof(buf); - int sign = 0; - *--p = 'L'; *--p = 'L'; - if (isunsigned) { - *--p = 'U'; - } else if ((int64_t)n < 0) { - n = (uint64_t)-(int64_t)n; - sign = 1; - } - do { *--p = (char)('0' + n % 10); } while (n /= 10); - if (sign) *--p = '-'; - return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p)); -} - -/* Convert complex to string with 'i' or 'I' suffix. */ -GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size) -{ - char buf[2*STRFMT_MAXBUF_NUM+2+1], *p = buf; - TValue re, im; - if (size == 2*sizeof(double)) { - re.n = *(double *)sp; im.n = ((double *)sp)[1]; - } else { - re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1]; - } - p = lj_strfmt_wnum(p, &re); - if (!(im.u32.hi & 0x80000000u) || im.n != im.n) *p++ = '+'; - p = lj_strfmt_wnum(p, &im); - *p = *(p-1) >= 'a' ? 'I' : 'i'; - p++; - return lj_str_new(L, buf, p-buf); -} - -/* -- C type state -------------------------------------------------------- */ - -/* Initialize C type table and state. 
*/ -CTState *lj_ctype_init(lua_State *L) -{ - CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState); - CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType); - const char *name = lj_ctype_typenames; - CTypeID id; - memset(cts, 0, sizeof(CTState)); - cts->tab = ct; - cts->sizetab = CTTYPETAB_MIN; - cts->top = CTTYPEINFO_NUM; - cts->L = NULL; - cts->g = G(L); - for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) { - CTInfo info = lj_ctype_typeinfo[id]; - ct->size = (CTSize)((int32_t)(info << 16) >> 26); - ct->info = info & 0xffff03ffu; - ct->sib = 0; - if (ctype_type(info) == CT_KW || ctype_istypedef(info)) { - size_t len = strlen(name); - GCstr *str = lj_str_new(L, name, len); - ctype_setname(ct, str); - name += len+1; - lj_ctype_addname(cts, ct, id); - } else { - setgcrefnull(ct->name); - ct->next = 0; - if (!ctype_isenum(info)) ctype_addtype(cts, ct, id); - } - } - setmref(G(L)->ctype_state, cts); - return cts; -} - -/* Free C type table and state. */ -void lj_ctype_freestate(global_State *g) -{ - CTState *cts = ctype_ctsG(g); - if (cts) { - lj_ccallback_mcode_free(cts); - lj_mem_freevec(g, cts->tab, cts->sizetab, CType); - lj_mem_freevec(g, cts->cb.cbid, cts->cb.sizeid, CTypeID1); - lj_mem_freet(g, cts); - } -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.h deleted file mode 100644 index 6639547a431..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ctype.h +++ /dev/null @@ -1,461 +0,0 @@ -/* -** C type management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_CTYPE_H -#define _LJ_CTYPE_H - -#include "lj_obj.h" -#include "lj_gc.h" - -#if LJ_HASFFI - -/* -- C type definitions -------------------------------------------------- */ - -/* C type numbers. Highest 4 bits of C type info. ORDER CT. */ -enum { - /* Externally visible types. */ - CT_NUM, /* Integer or floating-point numbers. */ - CT_STRUCT, /* Struct or union. */ - CT_PTR, /* Pointer or reference. */ - CT_ARRAY, /* Array or complex type. */ - CT_MAYCONVERT = CT_ARRAY, - CT_VOID, /* Void type. */ - CT_ENUM, /* Enumeration. */ - CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */ - CT_FUNC, /* Function. */ - CT_TYPEDEF, /* Typedef. */ - CT_ATTRIB, /* Miscellaneous attributes. */ - /* Internal element types. */ - CT_FIELD, /* Struct/union field or function parameter. */ - CT_BITFIELD, /* Struct/union bitfield. */ - CT_CONSTVAL, /* Constant value. */ - CT_EXTERN, /* External reference. */ - CT_KW /* Keyword. */ -}; - -LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR); -LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT); - -/* -** ---------- info ------------ -** |type flags... A cid | size | sib | next | name | -** +----------------------------+--------+-------+-------+-------+-- -** |NUM BFvcUL.. A | size | | type | | -** |STRUCT ..vcU..V A | size | field | name? | name? | -** |PTR ..vcR... A cid | size | | type | | -** |ARRAY VCvc...V A cid | size | | type | | -** |VOID ..vc.... A | size | | type | | -** |ENUM A cid | size | const | name? | name? | -** |FUNC ....VS.. cc cid | nargs | field | name? | name? | -** |TYPEDEF cid | | | name | name | -** |ATTRIB attrnum cid | attr | sib? | type? | | -** |FIELD cid | offset | field | | name? | -** |BITFIELD B.vcU csz bsz pos | offset | field | | name? | -** |CONSTVAL c cid | value | const | name | name | -** |EXTERN cid | | sib? 
| name | name | -** |KW tok | size | | name | name | -** +----------------------------+--------+-------+-------+-------+-- -** ^^ ^^--- bits used for C type conversion dispatch -*/ - -/* C type info flags. TFFArrrr */ -#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */ -#define CTF_FP 0x04000000u /* Floating-point: NUM. */ -#define CTF_CONST 0x02000000u /* Const qualifier. */ -#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */ -#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */ -#define CTF_LONG 0x00400000u /* Long: NUM. */ -#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */ -#define CTF_REF 0x00800000u /* Reference: PTR. */ -#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */ -#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */ -#define CTF_UNION 0x00800000u /* Union: STRUCT. */ -#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */ -#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */ - -#define CTF_QUAL (CTF_CONST|CTF_VOLATILE) -#define CTF_ALIGN (CTMASK_ALIGN< 0 ? CTF_UNSIGNED : 0) - -/* Flags used in parser. .F.Ammvf cp->attr */ -#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */ -#define CTFP_PACKED 0x00000002u /* cp->attr */ -/* ...C...f cp->fattr */ -#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */ - -/* C type info bitfields. */ -#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */ -#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */ -#define CTSHIFT_NUM 28 -#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */ -#define CTSHIFT_ALIGN 16 -#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */ -#define CTSHIFT_ATTRIB 16 -#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */ -#define CTSHIFT_CCONV 16 -#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */ -#define CTSHIFT_REGPARM 18 -/* Bitfields only used in parser. */ -#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */ -#define CTSHIFT_VSIZEP 4 -#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */ -#define CTSHIFT_MSIZEP 8 - -/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */ -#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */ -#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */ -#define CTMASK_BITPOS 127 -#define CTMASK_BITBSZ 127 -#define CTMASK_BITCSZ 127 -#define CTSHIFT_BITPOS 0 -#define CTSHIFT_BITBSZ 8 -#define CTSHIFT_BITCSZ 16 - -#define CTF_INSERT(info, field, val) \ - info = (info & ~(CTMASK_##field<> CTSHIFT_NUM) -#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID)) -#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN) -#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB) -#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS) -#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ) -#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ) -#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP) -#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP) -#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV) - -/* Simple type checks. 
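[Editorial aside on the CTMASK_xxx/CTSHIFT_xxx pairs above: every sub-field of the 32-bit info word is read with a shift-and-mask and written with a clear-then-or, which is what CTF_INSERT does. A generic sketch with invented helper names.]

#include <stdint.h>

static uint32_t fld_get(uint32_t info, uint32_t mask, unsigned shift)
{
  return (info >> shift) & mask;
}

static uint32_t fld_put(uint32_t info, uint32_t mask, unsigned shift, uint32_t v)
{
  return (info & ~(mask << shift)) | ((v & mask) << shift);
}

/* E.g. the alignment field is fld_get(info, 15, 16), matching
** CTMASK_ALIGN == 15 and CTSHIFT_ALIGN == 16 above.
*/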
*/ -#define ctype_isnum(info) (ctype_type((info)) == CT_NUM) -#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID) -#define ctype_isptr(info) (ctype_type((info)) == CT_PTR) -#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY) -#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT) -#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC) -#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM) -#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF) -#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB) -#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD) -#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD) -#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL) -#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN) -#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE) - -/* Combined type and flag checks. */ -#define ctype_isinteger(info) \ - (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0)) -#define ctype_isinteger_or_bool(info) \ - (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0)) -#define ctype_isbool(info) \ - (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL)) -#define ctype_isfp(info) \ - (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP)) - -#define ctype_ispointer(info) \ - ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */ -#define ctype_isref(info) \ - (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF)) - -#define ctype_isrefarray(info) \ - (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0)) -#define ctype_isvector(info) \ - (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR)) -#define ctype_iscomplex(info) \ - (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX)) - -#define ctype_isvltype(info) \ - (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<") _(STRING, "") \ - _(INTEGER, "") _(EOF, "") \ - _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \ - _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->") - -/* Simple declaration specifiers. */ -#define CDSDEF(_) \ - _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \ - _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \ - _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \ - _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER) - -/* C keywords. */ -#define CKWDEF(_) \ - CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \ - _(DECLSPEC) _(CCDECL) _(PTRSZ) \ - _(STRUCT) _(UNION) _(ENUM) \ - _(SIZEOF) _(ALIGNOF) - -/* C token numbers. */ -enum { - CTOK_OFS = 255, -#define CTOKNUM(name, sym) CTOK_##name, -#define CKWNUM(name) CTOK_##name, -CTOKDEF(CTOKNUM) -CKWDEF(CKWNUM) -#undef CTOKNUM -#undef CKWNUM - CTOK_FIRSTDECL = CTOK_VOID, - CTOK_FIRSTSCL = CTOK_TYPEDEF, - CTOK_LASTDECLFLAG = CTOK_REGISTER, - CTOK_LASTDECL = CTOK_ENUM -}; - -/* Declaration specifier flags. */ -enum { -#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)), -CDSDEF(CDSFLAG) -#undef CDSFLAG - CDF__END -}; - -#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER) - -/* -- C type management --------------------------------------------------- */ - -#define ctype_ctsG(g) (mref((g)->ctype_state, CTState)) - -/* Get C type state. */ -static LJ_AINLINE CTState *ctype_cts(lua_State *L) -{ - CTState *cts = ctype_ctsG(G(L)); - cts->L = L; /* Save L for errors and allocations. */ - return cts; -} - -/* Save and restore state of C type table. 
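[Editorial aside on ctype_ispointer above: it folds the "pointer or array" test into one shift-and-compare, which only works because CT_PTR and CT_ARRAY are adjacent type numbers, as the LJ_STATIC_ASSERTs near the top of this header pin down. A self-contained sketch with a mirrored, invented enum.]

enum { XCT_NUM, XCT_STRUCT, XCT_PTR, XCT_ARRAY };  /* Illustrative mirror. */

static int is_ptr_or_array(unsigned type)
{
  return (type >> 1) == (XCT_PTR >> 1);  /* True only for XCT_PTR and XCT_ARRAY. */
}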
*/ -#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts) -#define LJ_CTYPE_RESTORE(cts) \ - ((cts)->top = savects_.top, \ - memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash))) - -/* Check C type ID for validity when assertions are enabled. */ -static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id) -{ - lua_assert(id > 0 && id < cts->top); UNUSED(cts); - return id; -} - -/* Get C type for C type ID. */ -static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id) -{ - return &cts->tab[ctype_check(cts, id)]; -} - -/* Get C type ID for a C type. */ -#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab)) - -/* Get child C type. */ -static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct) -{ - lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) || - ctype_isbitfield(ct->info))); /* These don't have children. */ - return ctype_get(cts, ctype_cid(ct->info)); -} - -/* Get raw type for a C type ID. */ -static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id) -{ - CType *ct = ctype_get(cts, id); - while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct); - return ct; -} - -/* Get raw type of the child of a C type. */ -static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct) -{ - do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info)); - return ct; -} - -/* Set the name of a C type table element. */ -static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s) -{ - /* NOBARRIER: mark string as fixed -- the C type table is never collected. */ - fixstring(s); - setgcref(ct->name, obj2gco(s)); -} - -LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp); -LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size); -LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id); -LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, - uint32_t tmask); -LJ_FUNC CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, - CTSize *ofs, CTInfo *qual); -#define lj_ctype_getfield(cts, ct, name, ofs) \ - lj_ctype_getfieldq((cts), (ct), (name), (ofs), NULL) -LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id); -LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id); -LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem); -LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp); -LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm); -LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name); -LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned); -LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size); -LJ_FUNC CTState *lj_ctype_init(lua_State *L); -LJ_FUNC void lj_ctype_freestate(global_State *g); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.c deleted file mode 100644 index 3226d03b9e1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.c +++ /dev/null @@ -1,699 +0,0 @@ -/* -** Debugging and introspection. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_debug_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_buf.h" -#include "lj_tab.h" -#include "lj_state.h" -#include "lj_frame.h" -#include "lj_bc.h" -#include "lj_strfmt.h" -#if LJ_HASJIT -#include "lj_jit.h" -#endif - -/* -- Frames -------------------------------------------------------------- */ - -/* Get frame corresponding to a level. 
*/ -cTValue *lj_debug_frame(lua_State *L, int level, int *size) -{ - cTValue *frame, *nextframe, *bot = tvref(L->stack)+LJ_FR2; - /* Traverse frames backwards. */ - for (nextframe = frame = L->base-1; frame > bot; ) { - if (frame_gc(frame) == obj2gco(L)) - level++; /* Skip dummy frames. See lj_err_optype_call(). */ - if (level-- == 0) { - *size = (int)(nextframe - frame); - return frame; /* Level found. */ - } - nextframe = frame; - if (frame_islua(frame)) { - frame = frame_prevl(frame); - } else { - if (frame_isvarg(frame)) - level++; /* Skip vararg pseudo-frame. */ - frame = frame_prevd(frame); - } - } - *size = level; - return NULL; /* Level not found. */ -} - -/* Invalid bytecode position. */ -#define NO_BCPOS (~(BCPos)0) - -/* Return bytecode position for function/frame or NO_BCPOS. */ -static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe) -{ - const BCIns *ins; - GCproto *pt; - BCPos pos; - lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD); - if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */ - return NO_BCPOS; - } else if (nextframe == NULL) { /* Lua function on top. */ - void *cf = cframe_raw(L->cframe); - if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf)) - return NO_BCPOS; - ins = cframe_pc(cf); /* Only happens during error/hook handling. */ - } else { - if (frame_islua(nextframe)) { - ins = frame_pc(nextframe); - } else if (frame_iscont(nextframe)) { - ins = frame_contpc(nextframe); - } else { - /* Lua function below errfunc/gc/hook: find cframe to get the PC. */ - void *cf = cframe_raw(L->cframe); - TValue *f = L->base-1; - for (;;) { - if (cf == NULL) - return NO_BCPOS; - while (cframe_nres(cf) < 0) { - if (f >= restorestack(L, -cframe_nres(cf))) - break; - cf = cframe_raw(cframe_prev(cf)); - if (cf == NULL) - return NO_BCPOS; - } - if (f < nextframe) - break; - if (frame_islua(f)) { - f = frame_prevl(f); - } else { - if (frame_isc(f) || (frame_iscont(f) && frame_iscont_fficb(f))) - cf = cframe_raw(cframe_prev(cf)); - f = frame_prevd(f); - } - } - ins = cframe_pc(cf); - } - } - pt = funcproto(fn); - pos = proto_bcpos(pt, ins) - 1; -#if LJ_HASJIT - if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */ - GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins)); - lua_assert(bc_isret(bc_op(ins[-1]))); - pos = proto_bcpos(pt, mref(T->startpc, const BCIns)); - } -#endif - return pos; -} - -/* -- Line numbers -------------------------------------------------------- */ - -/* Get line number for a bytecode position. */ -BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc) -{ - const void *lineinfo = proto_lineinfo(pt); - if (pc <= pt->sizebc && lineinfo) { - BCLine first = pt->firstline; - if (pc == pt->sizebc) return first + pt->numline; - if (pc-- == 0) return first; - if (pt->numline < 256) - return first + (BCLine)((const uint8_t *)lineinfo)[pc]; - else if (pt->numline < 65536) - return first + (BCLine)((const uint16_t *)lineinfo)[pc]; - else - return first + (BCLine)((const uint32_t *)lineinfo)[pc]; - } - return 0; -} - -/* Get line number for function/frame. */ -static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe) -{ - BCPos pc = debug_framepc(L, fn, nextframe); - if (pc != NO_BCPOS) { - GCproto *pt = funcproto(fn); - lua_assert(pc <= pt->sizebc); - return lj_debug_line(pt, pc); - } - return -1; -} - -/* -- Variable names ------------------------------------------------------ */ - -/* Get name of a local variable from slot number and PC. 
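[Editorial aside on lj_debug_line above: the per-bytecode line table stores entries as 8-, 16- or 32-bit values depending on how many lines the prototype spans, and the reader simply picks the matching element width; lj_debug_line then adds the prototype's first line to the stored offset. A standalone sketch of the width selection, with invented names.]

#include <stdint.h>

static uint32_t lineinfo_get(const void *li, uint32_t numline, uint32_t pc)
{
  if (numline < 256)
    return ((const uint8_t *)li)[pc];    /* 1 byte per bytecode. */
  else if (numline < 65536)
    return ((const uint16_t *)li)[pc];   /* 2 bytes per bytecode. */
  else
    return ((const uint32_t *)li)[pc];   /* 4 bytes per bytecode. */
}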
*/ -static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot) -{ - const char *p = (const char *)proto_varinfo(pt); - if (p) { - BCPos lastpc = 0; - for (;;) { - const char *name = p; - uint32_t vn = *(const uint8_t *)p; - BCPos startpc, endpc; - if (vn < VARNAME__MAX) { - if (vn == VARNAME_END) break; /* End of varinfo. */ - } else { - do { p++; } while (*(const uint8_t *)p); /* Skip over variable name. */ - } - p++; - lastpc = startpc = lastpc + lj_buf_ruleb128(&p); - if (startpc > pc) break; - endpc = startpc + lj_buf_ruleb128(&p); - if (pc < endpc && slot-- == 0) { - if (vn < VARNAME__MAX) { -#define VARNAMESTR(name, str) str "\0" - name = VARNAMEDEF(VARNAMESTR); -#undef VARNAMESTR - if (--vn) while (*name++ || --vn) ; - } - return name; - } - } - } - return NULL; -} - -/* Get name of local variable from 1-based slot number and function/frame. */ -static TValue *debug_localname(lua_State *L, const lua_Debug *ar, - const char **name, BCReg slot1) -{ - uint32_t offset = (uint32_t)ar->i_ci & 0xffff; - uint32_t size = (uint32_t)ar->i_ci >> 16; - TValue *frame = tvref(L->stack) + offset; - TValue *nextframe = size ? frame + size : NULL; - GCfunc *fn = frame_func(frame); - BCPos pc = debug_framepc(L, fn, nextframe); - if (!nextframe) nextframe = L->top+LJ_FR2; - if ((int)slot1 < 0) { /* Negative slot number is for varargs. */ - if (pc != NO_BCPOS) { - GCproto *pt = funcproto(fn); - if ((pt->flags & PROTO_VARARG)) { - slot1 = pt->numparams + (BCReg)(-(int)slot1); - if (frame_isvarg(frame)) { /* Vararg frame has been set up? (pc!=0) */ - nextframe = frame; - frame = frame_prevd(frame); - } - if (frame + slot1+LJ_FR2 < nextframe) { - *name = "(*vararg)"; - return frame+slot1; - } - } - } - return NULL; - } - if (pc != NO_BCPOS && - (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL) - ; - else if (slot1 > 0 && frame + slot1+LJ_FR2 < nextframe) - *name = "(*temporary)"; - return frame+slot1; -} - -/* Get name of upvalue. */ -const char *lj_debug_uvname(GCproto *pt, uint32_t idx) -{ - const uint8_t *p = proto_uvinfo(pt); - lua_assert(idx < pt->sizeuv); - if (!p) return ""; - if (idx) while (*p++ || --idx) ; - return (const char *)p; -} - -/* Get name and value of upvalue. */ -const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp) -{ - if (tvisfunc(o)) { - GCfunc *fn = funcV(o); - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - if (idx < pt->sizeuv) { - *tvp = uvval(&gcref(fn->l.uvptr[idx])->uv); - return lj_debug_uvname(pt, idx); - } - } else { - if (idx < fn->c.nupvalues) { - *tvp = &fn->c.upvalue[idx]; - return ""; - } - } - } - return NULL; -} - -/* Deduce name of an object from slot number and PC. 
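[Editorial aside on debug_varname above: the variable-info stream delta-encodes bytecode ranges via lj_buf_ruleb128. Below is a minimal decoder sketched under the assumption of standard ULEB128 (7 value bits per byte, high bit set on continuation bytes); the helper name is invented and this is not the library routine itself.]

#include <stdint.h>

static uint32_t uleb128_read(const uint8_t **pp)
{
  const uint8_t *p = *pp;
  uint32_t v = *p++;
  if (v >= 0x80) {               /* Multi-byte value: keep shifting in 7 bits. */
    int shift = 0;
    v &= 0x7f;
    do {
      shift += 7;
      v |= (uint32_t)(*p & 0x7f) << shift;
    } while (*p++ >= 0x80);
  }
  *pp = p;
  return v;
}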
*/ -const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot, - const char **name) -{ - const char *lname; -restart: - lname = debug_varname(pt, proto_bcpos(pt, ip), slot); - if (lname != NULL) { *name = lname; return "local"; } - while (--ip > proto_bc(pt)) { - BCIns ins = *ip; - BCOp op = bc_op(ins); - BCReg ra = bc_a(ins); - if (bcmode_a(op) == BCMbase) { - if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins))) - return NULL; - } else if (bcmode_a(op) == BCMdst && ra == slot) { - switch (bc_op(ins)) { - case BC_MOV: - if (ra == slot) { slot = bc_d(ins); goto restart; } - break; - case BC_GGET: - *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins)))); - return "global"; - case BC_TGETS: - *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins)))); - if (ip > proto_bc(pt)) { - BCIns insp = ip[-1]; - if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1+LJ_FR2 && - bc_d(insp) == bc_b(ins)) - return "method"; - } - return "field"; - case BC_UGET: - *name = lj_debug_uvname(pt, bc_d(ins)); - return "upvalue"; - default: - return NULL; - } - } - } - return NULL; -} - -/* Deduce function name from caller of a frame. */ -const char *lj_debug_funcname(lua_State *L, cTValue *frame, const char **name) -{ - cTValue *pframe; - GCfunc *fn; - BCPos pc; - if (frame <= tvref(L->stack)+LJ_FR2) - return NULL; - if (frame_isvarg(frame)) - frame = frame_prevd(frame); - pframe = frame_prev(frame); - fn = frame_func(pframe); - pc = debug_framepc(L, fn, frame); - if (pc != NO_BCPOS) { - GCproto *pt = funcproto(fn); - const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)]; - MMS mm = bcmode_mm(bc_op(*ip)); - if (mm == MM_call) { - BCReg slot = bc_a(*ip); - if (bc_op(*ip) == BC_ITERC) slot -= 3; - return lj_debug_slotname(pt, ip, slot, name); - } else if (mm != MM__MAX) { - *name = strdata(mmname_str(G(L), mm)); - return "metamethod"; - } - } - return NULL; -} - -/* -- Source code locations ----------------------------------------------- */ - -/* Generate shortened source name. */ -void lj_debug_shortname(char *out, GCstr *str, BCLine line) -{ - const char *src = strdata(str); - if (*src == '=') { - strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */ - out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */ - } else if (*src == '@') { /* Output "source", or "...source". */ - size_t len = str->len-1; - src++; /* Skip the `@' */ - if (len >= LUA_IDSIZE) { - src += len-(LUA_IDSIZE-4); /* Get last part of file name. */ - *out++ = '.'; *out++ = '.'; *out++ = '.'; - } - strcpy(out, src); - } else { /* Output [string "string"] or [builtin:name]. */ - size_t len; /* Length, up to first control char. */ - for (len = 0; len < LUA_IDSIZE-12; len++) - if (((const unsigned char *)src)[len] < ' ') break; - strcpy(out, line == ~(BCLine)0 ? "[builtin:" : "[string \""); out += 9; - if (src[len] != '\0') { /* Must truncate? */ - if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15; - strncpy(out, src, len); out += len; - strcpy(out, "..."); out += 3; - } else { - strcpy(out, src); out += len; - } - strcpy(out, line == ~(BCLine)0 ? "]" : "\"]"); - } -} - -/* Add current location of a frame to error message. 
*/ -void lj_debug_addloc(lua_State *L, const char *msg, - cTValue *frame, cTValue *nextframe) -{ - if (frame) { - GCfunc *fn = frame_func(frame); - if (isluafunc(fn)) { - BCLine line = debug_frameline(L, fn, nextframe); - if (line >= 0) { - GCproto *pt = funcproto(fn); - char buf[LUA_IDSIZE]; - lj_debug_shortname(buf, proto_chunkname(pt), pt->firstline); - lj_strfmt_pushf(L, "%s:%d: %s", buf, line, msg); - return; - } - } - } - lj_strfmt_pushf(L, "%s", msg); -} - -/* Push location string for a bytecode position to Lua stack. */ -void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc) -{ - GCstr *name = proto_chunkname(pt); - const char *s = strdata(name); - MSize i, len = name->len; - BCLine line = lj_debug_line(pt, pc); - if (pt->firstline == ~(BCLine)0) { - lj_strfmt_pushf(L, "builtin:%s", s); - } else if (*s == '@') { - s++; len--; - for (i = len; i > 0; i--) - if (s[i] == '/' || s[i] == '\\') { - s += i+1; - break; - } - lj_strfmt_pushf(L, "%s:%d", s, line); - } else if (len > 40) { - lj_strfmt_pushf(L, "%p:%d", pt, line); - } else if (*s == '=') { - lj_strfmt_pushf(L, "%s:%d", s+1, line); - } else { - lj_strfmt_pushf(L, "\"%s\":%d", s, line); - } -} - -/* -- Public debug API ---------------------------------------------------- */ - -/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */ - -LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n) -{ - const char *name = NULL; - if (ar) { - TValue *o = debug_localname(L, ar, &name, (BCReg)n); - if (name) { - copyTV(L, L->top, o); - incr_top(L); - } - } else if (tvisfunc(L->top-1) && isluafunc(funcV(L->top-1))) { - name = debug_varname(funcproto(funcV(L->top-1)), 0, (BCReg)n-1); - } - return name; -} - -LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n) -{ - const char *name = NULL; - TValue *o = debug_localname(L, ar, &name, (BCReg)n); - if (name) - copyTV(L, o, L->top-1); - L->top--; - return name; -} - -int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext) -{ - int opt_f = 0, opt_L = 0; - TValue *frame = NULL; - TValue *nextframe = NULL; - GCfunc *fn; - if (*what == '>') { - TValue *func = L->top - 1; - api_check(L, tvisfunc(func)); - fn = funcV(func); - L->top--; - what++; - } else { - uint32_t offset = (uint32_t)ar->i_ci & 0xffff; - uint32_t size = (uint32_t)ar->i_ci >> 16; - lua_assert(offset != 0); - frame = tvref(L->stack) + offset; - if (size) nextframe = frame + size; - lua_assert(frame <= tvref(L->maxstack) && - (!nextframe || nextframe <= tvref(L->maxstack))); - fn = frame_func(frame); - lua_assert(fn->c.gct == ~LJ_TFUNC); - } - for (; *what; what++) { - if (*what == 'S') { - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - BCLine firstline = pt->firstline; - GCstr *name = proto_chunkname(pt); - ar->source = strdata(name); - lj_debug_shortname(ar->short_src, name, pt->firstline); - ar->linedefined = (int)firstline; - ar->lastlinedefined = (int)(firstline + pt->numline); - ar->what = (firstline || !pt->numline) ? "Lua" : "main"; - } else { - ar->source = "=[C]"; - ar->short_src[0] = '['; - ar->short_src[1] = 'C'; - ar->short_src[2] = ']'; - ar->short_src[3] = '\0'; - ar->linedefined = -1; - ar->lastlinedefined = -1; - ar->what = "C"; - } - } else if (*what == 'l') { - ar->currentline = frame ? 
debug_frameline(L, fn, nextframe) : -1; - } else if (*what == 'u') { - ar->nups = fn->c.nupvalues; - if (ext) { - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - ar->nparams = pt->numparams; - ar->isvararg = !!(pt->flags & PROTO_VARARG); - } else { - ar->nparams = 0; - ar->isvararg = 1; - } - } - } else if (*what == 'n') { - ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL; - if (ar->namewhat == NULL) { - ar->namewhat = ""; - ar->name = NULL; - } - } else if (*what == 'f') { - opt_f = 1; - } else if (*what == 'L') { - opt_L = 1; - } else { - return 0; /* Bad option. */ - } - } - if (opt_f) { - setfuncV(L, L->top, fn); - incr_top(L); - } - if (opt_L) { - if (isluafunc(fn)) { - GCtab *t = lj_tab_new(L, 0, 0); - GCproto *pt = funcproto(fn); - const void *lineinfo = proto_lineinfo(pt); - if (lineinfo) { - BCLine first = pt->firstline; - int sz = pt->numline < 256 ? 1 : pt->numline < 65536 ? 2 : 4; - MSize i, szl = pt->sizebc-1; - for (i = 0; i < szl; i++) { - BCLine line = first + - (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] : - sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] : - (BCLine)((const uint32_t *)lineinfo)[i]); - setboolV(lj_tab_setint(L, t, line), 1); - } - } - settabV(L, L->top, t); - } else { - setnilV(L->top); - } - incr_top(L); - } - return 1; /* Ok. */ -} - -LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar) -{ - return lj_debug_getinfo(L, what, (lj_Debug *)ar, 0); -} - -LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar) -{ - int size; - cTValue *frame = lj_debug_frame(L, level, &size); - if (frame) { - ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack)); - return 1; - } else { - ar->i_ci = level - size; - return 0; - } -} - -#if LJ_HASPROFILE -/* Put the chunkname into a buffer. */ -static int debug_putchunkname(SBuf *sb, GCproto *pt, int pathstrip) -{ - GCstr *name = proto_chunkname(pt); - const char *p = strdata(name); - if (pt->firstline == ~(BCLine)0) { - lj_buf_putmem(sb, "[builtin:", 9); - lj_buf_putstr(sb, name); - lj_buf_putb(sb, ']'); - return 0; - } - if (*p == '=' || *p == '@') { - MSize len = name->len-1; - p++; - if (pathstrip) { - int i; - for (i = len-1; i >= 0; i--) - if (p[i] == '/' || p[i] == '\\') { - len -= i+1; - p = p+i+1; - break; - } - } - lj_buf_putmem(sb, p, len); - } else { - lj_buf_putmem(sb, "[string]", 8); - } - return 1; -} - -/* Put a compact stack dump into a buffer. */ -void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt, int depth) -{ - int level = 0, dir = 1, pathstrip = 1; - MSize lastlen = 0; - if (depth < 0) { level = ~depth; depth = dir = -1; } /* Reverse frames. */ - while (level != depth) { /* Loop through all frame. */ - int size; - cTValue *frame = lj_debug_frame(L, level, &size); - if (frame) { - cTValue *nextframe = size ? frame+size : NULL; - GCfunc *fn = frame_func(frame); - const uint8_t *p = (const uint8_t *)fmt; - int c; - while ((c = *p++)) { - switch (c) { - case 'p': /* Preserve full path. */ - pathstrip = 0; - break; - case 'F': case 'f': { /* Dump function name. */ - const char *name; - const char *what = lj_debug_funcname(L, frame, &name); - if (what) { - if (c == 'F' && isluafunc(fn)) { /* Dump module:name for 'F'. */ - GCproto *pt = funcproto(fn); - if (pt->firstline != ~(BCLine)0) { /* Not a bytecode builtin. */ - debug_putchunkname(sb, pt, pathstrip); - lj_buf_putb(sb, ':'); - } - } - lj_buf_putmem(sb, name, (MSize)strlen(name)); - break; - } /* else: can't derive a name, dump module:line. 
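[Editorial aside on the activation-record handle used above: lua_getstack stores the frame's stack offset in the low 16 bits of ar->i_ci and the frame size in the upper bits, and lj_debug_getinfo / debug_localname unpack it the same way. A tiny sketch with invented helper names.]

#include <stdint.h>

static int ci_pack(uint32_t offset, uint32_t size)
{
  return (int)((size << 16) + offset);   /* As in lua_getstack above. */
}

static uint32_t ci_offset(int i_ci) { return (uint32_t)i_ci & 0xffff; }
static uint32_t ci_size(int i_ci)   { return (uint32_t)i_ci >> 16; }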
*/ - } - /* fallthrough */ - case 'l': /* Dump module:line. */ - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - if (debug_putchunkname(sb, pt, pathstrip)) { - /* Regular Lua function. */ - BCLine line = c == 'l' ? debug_frameline(L, fn, nextframe) : - pt->firstline; - lj_buf_putb(sb, ':'); - lj_strfmt_putint(sb, line >= 0 ? line : pt->firstline); - } - } else if (isffunc(fn)) { /* Dump numbered builtins. */ - lj_buf_putmem(sb, "[builtin#", 9); - lj_strfmt_putint(sb, fn->c.ffid); - lj_buf_putb(sb, ']'); - } else { /* Dump C function address. */ - lj_buf_putb(sb, '@'); - lj_strfmt_putptr(sb, fn->c.f); - } - break; - case 'Z': /* Zap trailing separator. */ - lastlen = sbuflen(sb); - break; - default: - lj_buf_putb(sb, c); - break; - } - } - } else if (dir == 1) { - break; - } else { - level -= size; /* Reverse frame order: quickly skip missing level. */ - } - level += dir; - } - if (lastlen) - setsbufP(sb, sbufB(sb) + lastlen); /* Zap trailing separator. */ -} -#endif - -/* Number of frames for the leading and trailing part of a traceback. */ -#define TRACEBACK_LEVELS1 12 -#define TRACEBACK_LEVELS2 10 - -LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg, - int level) -{ - int top = (int)(L->top - L->base); - int lim = TRACEBACK_LEVELS1; - lua_Debug ar; - if (msg) lua_pushfstring(L, "%s\n", msg); - lua_pushliteral(L, "stack traceback:"); - while (lua_getstack(L1, level++, &ar)) { - GCfunc *fn; - if (level > lim) { - if (!lua_getstack(L1, level + TRACEBACK_LEVELS2, &ar)) { - level--; - } else { - lua_pushliteral(L, "\n\t..."); - lua_getstack(L1, -10, &ar); - level = ar.i_ci - TRACEBACK_LEVELS2; - } - lim = 2147483647; - continue; - } - lua_getinfo(L1, "Snlf", &ar); - fn = funcV(L1->top-1); L1->top--; - if (isffunc(fn) && !*ar.namewhat) - lua_pushfstring(L, "\n\t[builtin#%d]:", fn->c.ffid); - else - lua_pushfstring(L, "\n\t%s:", ar.short_src); - if (ar.currentline > 0) - lua_pushfstring(L, "%d:", ar.currentline); - if (*ar.namewhat) { - lua_pushfstring(L, " in function " LUA_QS, ar.name); - } else { - if (*ar.what == 'm') { - lua_pushliteral(L, " in main chunk"); - } else if (*ar.what == 'C') { - lua_pushfstring(L, " at %p", fn->c.f); - } else { - lua_pushfstring(L, " in function <%s:%d>", - ar.short_src, ar.linedefined); - } - } - if ((int)(L->top - L->base) - top >= 15) - lua_concat(L, (int)(L->top - L->base) - top); - } - lua_concat(L, (int)(L->top - L->base) - top); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.h deleted file mode 100644 index 11d308a41b3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_debug.h +++ /dev/null @@ -1,65 +0,0 @@ -/* -** Debugging and introspection. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_DEBUG_H -#define _LJ_DEBUG_H - -#include "lj_obj.h" - -typedef struct lj_Debug { - /* Common fields. Must be in the same order as in lua.h. */ - int event; - const char *name; - const char *namewhat; - const char *what; - const char *source; - int currentline; - int nups; - int linedefined; - int lastlinedefined; - char short_src[LUA_IDSIZE]; - int i_ci; - /* Extended fields. 
Only valid if lj_debug_getinfo() is called with ext = 1.*/ - int nparams; - int isvararg; -} lj_Debug; - -LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size); -LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc); -LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx); -LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp); -LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc, - BCReg slot, const char **name); -LJ_FUNC const char *lj_debug_funcname(lua_State *L, cTValue *frame, - const char **name); -LJ_FUNC void lj_debug_shortname(char *out, GCstr *str, BCLine line); -LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg, - cTValue *frame, cTValue *nextframe); -LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc); -LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, - int ext); -#if LJ_HASPROFILE -LJ_FUNC void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt, - int depth); -#endif - -/* Fixed internal variable names. */ -#define VARNAMEDEF(_) \ - _(FOR_IDX, "(for index)") \ - _(FOR_STOP, "(for limit)") \ - _(FOR_STEP, "(for step)") \ - _(FOR_GEN, "(for generator)") \ - _(FOR_STATE, "(for state)") \ - _(FOR_CTL, "(for control)") - -enum { - VARNAME_END, -#define VARNAMEENUM(name, str) VARNAME_##name, - VARNAMEDEF(VARNAMEENUM) -#undef VARNAMEENUM - VARNAME__MAX -}; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_def.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_def.h deleted file mode 100644 index 04bf00c89be..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_def.h +++ /dev/null @@ -1,365 +0,0 @@ -/* -** LuaJIT common internal definitions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_DEF_H -#define _LJ_DEF_H - -#include "lua.h" - -#if defined(_MSC_VER) -/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -#ifdef _WIN64 -typedef __int64 intptr_t; -typedef unsigned __int64 uintptr_t; -#else -typedef __int32 intptr_t; -typedef unsigned __int32 uintptr_t; -#endif -#elif defined(__symbian__) -/* Cough. */ -typedef signed char int8_t; -typedef short int int16_t; -typedef int int32_t; -typedef long long int64_t; -typedef unsigned char uint8_t; -typedef unsigned short int uint16_t; -typedef unsigned int uint32_t; -typedef unsigned long long uint64_t; -typedef int intptr_t; -typedef unsigned int uintptr_t; -#else -#include -#endif - -/* Needed everywhere. */ -#include -#include - -/* Various VM limits. */ -#define LJ_MAX_MEM32 0x7fffff00 /* Max. 32 bit memory allocation. */ -#define LJ_MAX_MEM64 ((uint64_t)1<<47) /* Max. 64 bit memory allocation. */ -/* Max. total memory allocation. */ -#define LJ_MAX_MEM (LJ_GC64 ? LJ_MAX_MEM64 : LJ_MAX_MEM32) -#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */ -#define LJ_MAX_STR LJ_MAX_MEM32 /* Max. string length. */ -#define LJ_MAX_BUF LJ_MAX_MEM32 /* Max. buffer length. */ -#define LJ_MAX_UDATA LJ_MAX_MEM32 /* Max. userdata length. */ - -#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */ -#define LJ_MAX_HBITS 26 /* Max. hash bits. */ -#define LJ_MAX_ABITS 28 /* Max. bits of array key. */ -#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. 
array part size. */ -#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */ - -#define LJ_MAX_LINE LJ_MAX_MEM32 /* Max. source code line number. */ -#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */ -#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */ -#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */ -#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */ -#define LJ_MAX_UPVAL 249 /* Max. # of upvalues. */ - -#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */ -#define LJ_STACK_EXTRA (5+2*LJ_FR2) /* Extra stack space (metamethods). */ - -#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */ - -/* Minimum table/buffer sizes. */ -#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */ -#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */ -#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */ -#define LJ_MIN_SBUF 32 /* Min. string buffer length. */ -#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */ -#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */ -#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */ - -/* JIT compiler limits. */ -#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */ -#define LJ_MAX_PHI 64 /* Max. # of PHIs for a loop. */ -#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */ - -/* Various macros. */ -#ifndef UNUSED -#define UNUSED(x) ((void)(x)) /* to avoid warnings */ -#endif - -#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo) -#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p)) -#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p)) - -#define checki8(x) ((x) == (int32_t)(int8_t)(x)) -#define checku8(x) ((x) == (int32_t)(uint8_t)(x)) -#define checki16(x) ((x) == (int32_t)(int16_t)(x)) -#define checku16(x) ((x) == (int32_t)(uint16_t)(x)) -#define checki32(x) ((x) == (int32_t)(x)) -#define checku32(x) ((x) == (uint32_t)(x)) -#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x)) -#define checkptr47(x) (((uint64_t)(x) >> 47) == 0) -#if LJ_GC64 -#define checkptrGC(x) (checkptr47((x))) -#elif LJ_64 -#define checkptrGC(x) (checkptr32((x))) -#else -#define checkptrGC(x) 1 -#endif - -/* Every half-decent C compiler transforms this into a rotate instruction. */ -#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1)))) -#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n))) - -/* A really naive Bloom filter. But sufficient for our needs. */ -typedef uintptr_t BloomFilter; -#define BLOOM_MASK (8*sizeof(BloomFilter) - 1) -#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK)) -#define bloomset(b, x) ((b) |= bloombit((x))) -#define bloomtest(b, x) ((b) & bloombit((x))) - -#if defined(__GNUC__) || defined(__psp2__) - -#define LJ_NORET __attribute__((noreturn)) -#define LJ_ALIGN(n) __attribute__((aligned(n))) -#define LJ_INLINE inline -#define LJ_AINLINE inline __attribute__((always_inline)) -#define LJ_NOINLINE __attribute__((noinline)) - -#if defined(__ELF__) || defined(__MACH__) || defined(__psp2__) -#if !((defined(__sun__) && defined(__svr4__)) || defined(__CELLOS_LV2__)) -#define LJ_NOAPI extern __attribute__((visibility("hidden"))) -#endif -#endif - -/* Note: it's only beneficial to use fastcall on x86 and then only for up to -** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only -** indirect calls and related tail-called C functions are marked as fastcall. 
-*/ -#if defined(__i386__) -#define LJ_FASTCALL __attribute__((fastcall)) -#endif - -#define LJ_LIKELY(x) __builtin_expect(!!(x), 1) -#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0) - -#define lj_ffs(x) ((uint32_t)__builtin_ctz(x)) -/* Don't ask ... */ -#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__)) -static LJ_AINLINE uint32_t lj_fls(uint32_t x) -{ - uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r; -} -#else -#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31)) -#endif - -#if defined(__arm__) -static LJ_AINLINE uint32_t lj_bswap(uint32_t x) -{ -#if defined(__psp2__) - return __builtin_rev(x); -#else - uint32_t r; -#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\ - __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ - __asm__("rev %0, %1" : "=r" (r) : "r" (x)); - return r; -#else -#ifdef __thumb__ - r = x ^ lj_ror(x, 16); -#else - __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x)); -#endif - return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8); -#endif -#endif -} - -static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) -{ - return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); -} -#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) -static LJ_AINLINE uint32_t lj_bswap(uint32_t x) -{ - return (uint32_t)__builtin_bswap32((int32_t)x); -} - -static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) -{ - return (uint64_t)__builtin_bswap64((int64_t)x); -} -#elif defined(__i386__) || defined(__x86_64__) -static LJ_AINLINE uint32_t lj_bswap(uint32_t x) -{ - uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r; -} - -#if defined(__i386__) -static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) -{ - return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); -} -#else -static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) -{ - uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r; -} -#endif -#else -static LJ_AINLINE uint32_t lj_bswap(uint32_t x) -{ - return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24); -} - -static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) -{ - return (uint64_t)lj_bswap((uint32_t)(x >> 32)) | - ((uint64_t)lj_bswap((uint32_t)x) << 32); -} -#endif - -typedef union __attribute__((packed)) Unaligned16 { - uint16_t u; - uint8_t b[2]; -} Unaligned16; - -typedef union __attribute__((packed)) Unaligned32 { - uint32_t u; - uint8_t b[4]; -} Unaligned32; - -/* Unaligned load of uint16_t. */ -static LJ_AINLINE uint16_t lj_getu16(const void *p) -{ - return ((const Unaligned16 *)p)->u; -} - -/* Unaligned load of uint32_t. 
*/ -static LJ_AINLINE uint32_t lj_getu32(const void *p) -{ - return ((const Unaligned32 *)p)->u; -} - -#elif defined(_MSC_VER) - -#define LJ_NORET __declspec(noreturn) -#define LJ_ALIGN(n) __declspec(align(n)) -#define LJ_INLINE __inline -#define LJ_AINLINE __forceinline -#define LJ_NOINLINE __declspec(noinline) -#if defined(_M_IX86) -#define LJ_FASTCALL __fastcall -#endif - -#ifdef _M_PPC -unsigned int _CountLeadingZeros(long); -#pragma intrinsic(_CountLeadingZeros) -static LJ_AINLINE uint32_t lj_fls(uint32_t x) -{ - return _CountLeadingZeros(x) ^ 31; -} -#else -unsigned char _BitScanForward(uint32_t *, unsigned long); -unsigned char _BitScanReverse(uint32_t *, unsigned long); -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) - -static LJ_AINLINE uint32_t lj_ffs(uint32_t x) -{ - uint32_t r; _BitScanForward(&r, x); return r; -} - -static LJ_AINLINE uint32_t lj_fls(uint32_t x) -{ - uint32_t r; _BitScanReverse(&r, x); return r; -} -#endif - -unsigned long _byteswap_ulong(unsigned long); -uint64_t _byteswap_uint64(uint64_t); -#define lj_bswap(x) (_byteswap_ulong((x))) -#define lj_bswap64(x) (_byteswap_uint64((x))) - -#if defined(_M_PPC) && defined(LUAJIT_NO_UNALIGNED) -/* -** Replacement for unaligned loads on Xbox 360. Disabled by default since it's -** usually more costly than the occasional stall when crossing a cache-line. -*/ -static LJ_AINLINE uint16_t lj_getu16(const void *v) -{ - const uint8_t *p = (const uint8_t *)v; - return (uint16_t)((p[0]<<8) | p[1]); -} -static LJ_AINLINE uint32_t lj_getu32(const void *v) -{ - const uint8_t *p = (const uint8_t *)v; - return (uint32_t)((p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]); -} -#else -/* Unaligned loads are generally ok on x86/x64. */ -#define lj_getu16(p) (*(uint16_t *)(p)) -#define lj_getu32(p) (*(uint32_t *)(p)) -#endif - -#else -#error "missing defines for your compiler" -#endif - -/* Optional defines. */ -#ifndef LJ_FASTCALL -#define LJ_FASTCALL -#endif -#ifndef LJ_NORET -#define LJ_NORET -#endif -#ifndef LJ_NOAPI -#define LJ_NOAPI extern -#endif -#ifndef LJ_LIKELY -#define LJ_LIKELY(x) (x) -#define LJ_UNLIKELY(x) (x) -#endif - -/* Attributes for internal functions. */ -#define LJ_DATA LJ_NOAPI -#define LJ_DATADEF -#define LJ_ASMF LJ_NOAPI -#define LJ_FUNCA LJ_NOAPI -#if defined(ljamalg_c) -#define LJ_FUNC static -#else -#define LJ_FUNC LJ_NOAPI -#endif -#define LJ_FUNC_NORET LJ_FUNC LJ_NORET -#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET -#define LJ_ASMF_NORET LJ_ASMF LJ_NORET - -/* Runtime assertions. */ -#ifdef lua_assert -#define check_exp(c, e) (lua_assert(c), (e)) -#define api_check(l, e) lua_assert(e) -#else -#define lua_assert(c) ((void)0) -#define check_exp(c, e) (e) -#define api_check luai_apicheck -#endif - -/* Static assertions. */ -#define LJ_ASSERT_NAME2(name, line) name ## line -#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line) -#ifdef __COUNTER__ -#define LJ_STATIC_ASSERT(cond) \ - extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) -#else -#define LJ_STATIC_ASSERT(cond) \ - extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.c deleted file mode 100644 index 1a07371cacf..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.c +++ /dev/null @@ -1,557 +0,0 @@ -/* -** Instruction dispatch handling. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h
-*/
-
-#define lj_dispatch_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_buf.h"
-#include "lj_func.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_debug.h"
-#include "lj_state.h"
-#include "lj_frame.h"
-#include "lj_bc.h"
-#include "lj_ff.h"
-#include "lj_strfmt.h"
-#if LJ_HASJIT
-#include "lj_jit.h"
-#endif
-#if LJ_HASFFI
-#include "lj_ccallback.h"
-#endif
-#include "lj_trace.h"
-#include "lj_dispatch.h"
-#if LJ_HASPROFILE
-#include "lj_profile.h"
-#endif
-#include "lj_vm.h"
-#include "luajit.h"
-
-/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */
-LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC);
-
-/* -- Dispatch table management ------------------------------------------- */
-
-#if LJ_TARGET_MIPS
-#include <math.h>
-LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
- lua_State *co);
-#if !LJ_HASJIT
-#define lj_dispatch_stitch lj_dispatch_ins
-#endif
-#if !LJ_HASPROFILE
-#define lj_dispatch_profile lj_dispatch_ins
-#endif
-
-#define GOTFUNC(name) (ASMFunction)name,
-static const ASMFunction dispatch_got[] = {
- GOTDEF(GOTFUNC)
-};
-#undef GOTFUNC
-#endif
-
-/* Initialize instruction dispatch table and hot counters. */
-void lj_dispatch_init(GG_State *GG)
-{
- uint32_t i;
- ASMFunction *disp = GG->dispatch;
- for (i = 0; i < GG_LEN_SDISP; i++)
- disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]);
- for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
- disp[i] = makeasmfunc(lj_bc_ofs[i]);
- /* The JIT engine is off by default. luaopen_jit() turns it on. */
- disp[BC_FORL] = disp[BC_IFORL];
- disp[BC_ITERL] = disp[BC_IITERL];
- disp[BC_LOOP] = disp[BC_ILOOP];
- disp[BC_FUNCF] = disp[BC_IFUNCF];
- disp[BC_FUNCV] = disp[BC_IFUNCV];
- GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0);
- for (i = 0; i < GG_NUM_ASMFF; i++)
- GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0);
-#if LJ_TARGET_MIPS
- memcpy(GG->got, dispatch_got, LJ_GOT__MAX*4);
-#endif
-}
-
-#if LJ_HASJIT
-/* Initialize hotcount table. */
-void lj_dispatch_init_hotcount(global_State *g)
-{
- int32_t hotloop = G2J(g)->param[JIT_P_hotloop];
- HotCount start = (HotCount)(hotloop*HOTCOUNT_LOOP - 1);
- HotCount *hotcount = G2GG(g)->hotcount;
- uint32_t i;
- for (i = 0; i < HOTCOUNT_SIZE; i++)
- hotcount[i] = start;
-}
-#endif
-
-/* Internal dispatch mode bits. */
-#define DISPMODE_CALL 0x01 /* Override call dispatch. */
-#define DISPMODE_RET 0x02 /* Override return dispatch. */
-#define DISPMODE_INS 0x04 /* Override instruction dispatch. */
-#define DISPMODE_JIT 0x10 /* JIT compiler on. */
-#define DISPMODE_REC 0x20 /* Recording active. */
-#define DISPMODE_PROF 0x40 /* Profiling active. */
-
-/* Update dispatch table depending on various flags. */
-void lj_dispatch_update(global_State *g)
-{
- uint8_t oldmode = g->dispatchmode;
- uint8_t mode = 0;
-#if LJ_HASJIT
- mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0;
- mode |= G2J(g)->state != LJ_TRACE_IDLE ?
- (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0;
-#endif
-#if LJ_HASPROFILE
- mode |= (g->hookmask & HOOK_PROFILE) ? (DISPMODE_PROF|DISPMODE_INS) : 0;
-#endif
- mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0;
- mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0;
- mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0;
- if (oldmode != mode) { /* Mode changed?
*/ - ASMFunction *disp = G2GG(g)->dispatch; - ASMFunction f_forl, f_iterl, f_loop, f_funcf, f_funcv; - g->dispatchmode = mode; - - /* Hotcount if JIT is on, but not while recording. */ - if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) { - f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]); - f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]); - f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]); - f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]); - f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]); - } else { /* Otherwise use the non-hotcounting instructions. */ - f_forl = disp[GG_LEN_DDISP+BC_IFORL]; - f_iterl = disp[GG_LEN_DDISP+BC_IITERL]; - f_loop = disp[GG_LEN_DDISP+BC_ILOOP]; - f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]); - f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]); - } - /* Init static counting instruction dispatch first (may be copied below). */ - disp[GG_LEN_DDISP+BC_FORL] = f_forl; - disp[GG_LEN_DDISP+BC_ITERL] = f_iterl; - disp[GG_LEN_DDISP+BC_LOOP] = f_loop; - - /* Set dynamic instruction dispatch. */ - if ((oldmode ^ mode) & (DISPMODE_PROF|DISPMODE_REC|DISPMODE_INS)) { - /* Need to update the whole table. */ - if (!(mode & DISPMODE_INS)) { /* No ins dispatch? */ - /* Copy static dispatch table to dynamic dispatch table. */ - memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction)); - /* Overwrite with dynamic return dispatch. */ - if ((mode & DISPMODE_RET)) { - disp[BC_RETM] = lj_vm_rethook; - disp[BC_RET] = lj_vm_rethook; - disp[BC_RET0] = lj_vm_rethook; - disp[BC_RET1] = lj_vm_rethook; - } - } else { - /* The recording dispatch also checks for hooks. */ - ASMFunction f = (mode & DISPMODE_PROF) ? lj_vm_profhook : - (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook; - uint32_t i; - for (i = 0; i < GG_LEN_SDISP; i++) - disp[i] = f; - } - } else if (!(mode & DISPMODE_INS)) { - /* Otherwise set dynamic counting ins. */ - disp[BC_FORL] = f_forl; - disp[BC_ITERL] = f_iterl; - disp[BC_LOOP] = f_loop; - /* Set dynamic return dispatch. */ - if ((mode & DISPMODE_RET)) { - disp[BC_RETM] = lj_vm_rethook; - disp[BC_RET] = lj_vm_rethook; - disp[BC_RET0] = lj_vm_rethook; - disp[BC_RET1] = lj_vm_rethook; - } else { - disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM]; - disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET]; - disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0]; - disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1]; - } - } - - /* Set dynamic call dispatch. */ - if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */ - uint32_t i; - if ((mode & DISPMODE_CALL) == 0) { /* No call hooks? */ - for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) - disp[i] = makeasmfunc(lj_bc_ofs[i]); - } else { - for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) - disp[i] = lj_vm_callhook; - } - } - if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */ - disp[BC_FUNCF] = f_funcf; - disp[BC_FUNCV] = f_funcv; - } - -#if LJ_HASJIT - /* Reset hotcounts for JIT off to on transition. */ - if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT)) - lj_dispatch_init_hotcount(g); -#endif - } -} - -/* -- JIT mode setting ---------------------------------------------------- */ - -#if LJ_HASJIT -/* Set JIT mode for a single prototype. */ -static void setptmode(global_State *g, GCproto *pt, int mode) -{ - if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */ - pt->flags &= ~PROTO_NOJIT; - lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */ - } else { /* Flush and/or disable JIT compilation. 
*/ - if (!(mode & LUAJIT_MODE_FLUSH)) - pt->flags |= PROTO_NOJIT; - lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */ - } -} - -/* Recursively set the JIT mode for all children of a prototype. */ -static void setptmode_all(global_State *g, GCproto *pt, int mode) -{ - ptrdiff_t i; - if (!(pt->flags & PROTO_CHILD)) return; - for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) { - GCobj *o = proto_kgc(pt, i); - if (o->gch.gct == ~LJ_TPROTO) { - setptmode(g, gco2pt(o), mode); - setptmode_all(g, gco2pt(o), mode); - } - } -} -#endif - -/* Public API function: control the JIT engine. */ -int luaJIT_setmode(lua_State *L, int idx, int mode) -{ - global_State *g = G(L); - int mm = mode & LUAJIT_MODE_MASK; - lj_trace_abort(g); /* Abort recording on any state change. */ - /* Avoid pulling the rug from under our own feet. */ - if ((g->hookmask & HOOK_GC)) - lj_err_caller(L, LJ_ERR_NOGCMM); - switch (mm) { -#if LJ_HASJIT - case LUAJIT_MODE_ENGINE: - if ((mode & LUAJIT_MODE_FLUSH)) { - lj_trace_flushall(L); - } else { - if (!(mode & LUAJIT_MODE_ON)) - G2J(g)->flags &= ~(uint32_t)JIT_F_ON; -#if LJ_TARGET_X86ORX64 - else if ((G2J(g)->flags & JIT_F_SSE2)) - G2J(g)->flags |= (uint32_t)JIT_F_ON; - else - return 0; /* Don't turn on JIT compiler without SSE2 support. */ -#else - else - G2J(g)->flags |= (uint32_t)JIT_F_ON; -#endif - lj_dispatch_update(g); - } - break; - case LUAJIT_MODE_FUNC: - case LUAJIT_MODE_ALLFUNC: - case LUAJIT_MODE_ALLSUBFUNC: { - cTValue *tv = idx == 0 ? frame_prev(L->base-1) : - idx > 0 ? L->base + (idx-1) : L->top + idx; - GCproto *pt; - if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn)) - pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */ - else if (tvisproto(tv)) - pt = protoV(tv); - else - return 0; /* Failed. */ - if (mm != LUAJIT_MODE_ALLSUBFUNC) - setptmode(g, pt, mode); - if (mm != LUAJIT_MODE_FUNC) - setptmode_all(g, pt, mode); - break; - } - case LUAJIT_MODE_TRACE: - if (!(mode & LUAJIT_MODE_FLUSH)) - return 0; /* Failed. */ - lj_trace_flush(G2J(g), idx); - break; -#else - case LUAJIT_MODE_ENGINE: - case LUAJIT_MODE_FUNC: - case LUAJIT_MODE_ALLFUNC: - case LUAJIT_MODE_ALLSUBFUNC: - UNUSED(idx); - if ((mode & LUAJIT_MODE_ON)) - return 0; /* Failed. */ - break; -#endif - case LUAJIT_MODE_WRAPCFUNC: - if ((mode & LUAJIT_MODE_ON)) { - if (idx != 0) { - cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx; - if (tvislightud(tv)) - g->wrapf = (lua_CFunction)lightudV(tv); - else - return 0; /* Failed. */ - } else { - return 0; /* Failed. */ - } - g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0); - } else { - g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0); - } - break; - default: - return 0; /* Failed. */ - } - return 1; /* OK. */ -} - -/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */ -LUA_API void LUAJIT_VERSION_SYM(void) -{ -} - -/* -- Hooks --------------------------------------------------------------- */ - -/* This function can be called asynchronously (e.g. during a signal). */ -LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count) -{ - global_State *g = G(L); - mask &= HOOK_EVENTMASK; - if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */ - g->hookf = func; - g->hookcount = g->hookcstart = (int32_t)count; - g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask); - lj_trace_abort(g); /* Abort recording on any hook change. 
*/ - lj_dispatch_update(g); - return 1; -} - -LUA_API lua_Hook lua_gethook(lua_State *L) -{ - return G(L)->hookf; -} - -LUA_API int lua_gethookmask(lua_State *L) -{ - return G(L)->hookmask & HOOK_EVENTMASK; -} - -LUA_API int lua_gethookcount(lua_State *L) -{ - return (int)G(L)->hookcstart; -} - -/* Call a hook. */ -static void callhook(lua_State *L, int event, BCLine line) -{ - global_State *g = G(L); - lua_Hook hookf = g->hookf; - if (hookf && !hook_active(g)) { - lua_Debug ar; - lj_trace_abort(g); /* Abort recording on any hook call. */ - ar.event = event; - ar.currentline = line; - /* Top frame, nextframe = NULL. */ - ar.i_ci = (int)((L->base-1) - tvref(L->stack)); - lj_state_checkstack(L, 1+LUA_MINSTACK); -#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF - lj_profile_hook_enter(g); -#else - hook_enter(g); -#endif - hookf(L, &ar); - lua_assert(hook_active(g)); - setgcref(g->cur_L, obj2gco(L)); -#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF - lj_profile_hook_leave(g); -#else - hook_leave(g); -#endif - } -} - -/* -- Dispatch callbacks -------------------------------------------------- */ - -/* Calculate number of used stack slots in the current frame. */ -static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres) -{ - BCIns ins = pc[-1]; - if (bc_op(ins) == BC_UCLO) - ins = pc[bc_j(ins)]; - switch (bc_op(ins)) { - case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1+LJ_FR2; - case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1; - case BC_TSETM: return bc_a(ins) + nres-1; - default: return pt->framesize; - } -} - -/* Instruction dispatch. Used by instr/line/return hooks or when recording. */ -void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc) -{ - ERRNO_SAVE - GCfunc *fn = curr_func(L); - GCproto *pt = funcproto(fn); - void *cf = cframe_raw(L->cframe); - const BCIns *oldpc = cframe_pc(cf); - global_State *g = G(L); - BCReg slots; - setcframe_pc(cf, pc); - slots = cur_topslot(pt, pc, cframe_multres_n(cf)); - L->top = L->base + slots; /* Fix top. */ -#if LJ_HASJIT - { - jit_State *J = G2J(g); - if (J->state != LJ_TRACE_IDLE) { -#ifdef LUA_USE_ASSERT - ptrdiff_t delta = L->top - L->base; -#endif - J->L = L; - lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ - lua_assert(L->top - L->base == delta); - } - } -#endif - if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) { - g->hookcount = g->hookcstart; - callhook(L, LUA_HOOKCOUNT, -1); - L->top = L->base + slots; /* Fix top again. */ - } - if ((g->hookmask & LUA_MASKLINE)) { - BCPos npc = proto_bcpos(pt, pc) - 1; - BCPos opc = proto_bcpos(pt, oldpc) - 1; - BCLine line = lj_debug_line(pt, npc); - if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) { - callhook(L, LUA_HOOKLINE, line); - L->top = L->base + slots; /* Fix top again. */ - } - } - if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1]))) - callhook(L, LUA_HOOKRET, -1); - ERRNO_RESTORE -} - -/* Initialize call. Ensure stack space and return # of missing parameters. */ -static int call_init(lua_State *L, GCfunc *fn) -{ - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - int numparams = pt->numparams; - int gotparams = (int)(L->top - L->base); - int need = pt->framesize; - if ((pt->flags & PROTO_VARARG)) need += 1+gotparams; - lj_state_checkstack(L, (MSize)need); - numparams -= gotparams; - return numparams >= 0 ? numparams : 0; - } else { - lj_state_checkstack(L, LUA_MINSTACK); - return 0; - } -} - -/* Call dispatch. Used by call hooks, hot calls or when recording. 
*/ -ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc) -{ - ERRNO_SAVE - GCfunc *fn = curr_func(L); - BCOp op; - global_State *g = G(L); -#if LJ_HASJIT - jit_State *J = G2J(g); -#endif - int missing = call_init(L, fn); -#if LJ_HASJIT - J->L = L; - if ((uintptr_t)pc & 1) { /* Marker for hot call. */ -#ifdef LUA_USE_ASSERT - ptrdiff_t delta = L->top - L->base; -#endif - pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1); - lj_trace_hot(J, pc); - lua_assert(L->top - L->base == delta); - goto out; - } else if (J->state != LJ_TRACE_IDLE && - !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) { -#ifdef LUA_USE_ASSERT - ptrdiff_t delta = L->top - L->base; -#endif - /* Record the FUNC* bytecodes, too. */ - lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ - lua_assert(L->top - L->base == delta); - } -#endif - if ((g->hookmask & LUA_MASKCALL)) { - int i; - for (i = 0; i < missing; i++) /* Add missing parameters. */ - setnilV(L->top++); - callhook(L, LUA_HOOKCALL, -1); - /* Preserve modifications of missing parameters by lua_setlocal(). */ - while (missing-- > 0 && tvisnil(L->top - 1)) - L->top--; - } -#if LJ_HASJIT -out: -#endif - op = bc_op(pc[-1]); /* Get FUNC* op. */ -#if LJ_HASJIT - /* Use the non-hotcounting variants if JIT is off or while recording. */ - if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) && - (op == BC_FUNCF || op == BC_FUNCV)) - op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF); -#endif - ERRNO_RESTORE - return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */ -} - -#if LJ_HASJIT -/* Stitch a new trace. */ -void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc) -{ - ERRNO_SAVE - lua_State *L = J->L; - void *cf = cframe_raw(L->cframe); - const BCIns *oldpc = cframe_pc(cf); - setcframe_pc(cf, pc); - /* Before dispatch, have to bias PC by 1. */ - L->top = L->base + cur_topslot(curr_proto(L), pc+1, cframe_multres_n(cf)); - lj_trace_stitch(J, pc-1); /* Point to the CALL instruction. */ - setcframe_pc(cf, oldpc); - ERRNO_RESTORE -} -#endif - -#if LJ_HASPROFILE -/* Profile dispatch. */ -void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc) -{ - ERRNO_SAVE - GCfunc *fn = curr_func(L); - GCproto *pt = funcproto(fn); - void *cf = cframe_raw(L->cframe); - const BCIns *oldpc = cframe_pc(cf); - global_State *g; - setcframe_pc(cf, pc); - L->top = L->base + cur_topslot(pt, pc, cframe_multres_n(cf)); - lj_profile_interpreter(L); - setcframe_pc(cf, oldpc); - g = G(L); - setgcref(g->cur_L, obj2gco(L)); - setvmstate(g, INTERP); - ERRNO_RESTORE -} -#endif - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.h deleted file mode 100644 index 1e247e38286..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_dispatch.h +++ /dev/null @@ -1,138 +0,0 @@ -/* -** Instruction dispatch handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_DISPATCH_H -#define _LJ_DISPATCH_H - -#include "lj_obj.h" -#include "lj_bc.h" -#if LJ_HASJIT -#include "lj_jit.h" -#endif - -#if LJ_TARGET_MIPS -/* Need our own global offset table for the dreaded MIPS calling conventions. 
*/ -#if LJ_HASJIT -#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot) -#else -#define JITGOTDEF(_) -#endif -#if LJ_HASFFI -#define FFIGOTDEF(_) \ - _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave) -#else -#define FFIGOTDEF(_) -#endif -#define GOTDEF(_) \ - _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \ - _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \ - _(pow) _(fmod) _(ldexp) \ - _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_dispatch_stitch) \ - _(lj_dispatch_profile) _(lj_err_throw) \ - _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \ - _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \ - _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \ - _(lj_meta_for) _(lj_meta_istype) _(lj_meta_len) _(lj_meta_tget) \ - _(lj_meta_tset) _(lj_state_growstack) _(lj_strfmt_num) \ - _(lj_str_new) _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) \ - _(lj_tab_new) _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \ - _(lj_tab_setinth) _(lj_buf_putstr_reverse) _(lj_buf_putstr_lower) \ - _(lj_buf_putstr_upper) _(lj_buf_tostr) JITGOTDEF(_) FFIGOTDEF(_) - -enum { -#define GOTENUM(name) LJ_GOT_##name, -GOTDEF(GOTENUM) -#undef GOTENUM - LJ_GOT__MAX -}; -#endif - -/* Type of hot counter. Must match the code in the assembler VM. */ -/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */ -typedef uint16_t HotCount; - -/* Number of hot counter hash table entries (must be a power of two). */ -#define HOTCOUNT_SIZE 64 -#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount)) - -/* Hotcount decrements. */ -#define HOTCOUNT_LOOP 2 -#define HOTCOUNT_CALL 1 - -/* This solves a circular dependency problem -- bump as needed. Sigh. */ -#define GG_NUM_ASMFF 57 - -#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF) -#define GG_LEN_SDISP BC_FUNCF -#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP) - -/* Global state, main thread and extra fields are allocated together. */ -typedef struct GG_State { - lua_State L; /* Main thread. */ - global_State g; /* Global state. */ -#if LJ_TARGET_MIPS - ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */ -#endif -#if LJ_HASJIT - jit_State J; /* JIT state. */ - HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */ -#endif - ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */ - BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */ -} GG_State; - -#define GG_OFS(field) ((int)offsetof(GG_State, field)) -#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g))) -#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J))) -#define L2GG(L) (G2GG(G(L))) -#define J2G(J) (&J2GG(J)->g) -#define G2J(gl) (&G2GG(gl)->J) -#define L2J(L) (&L2GG(L)->J) -#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g)) -#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch)) -#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch)) -#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch)) -#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction)) - -#define hotcount_get(gg, pc) \ - (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)] -#define hotcount_set(gg, pc, val) \ - (hotcount_get((gg), (pc)) = (HotCount)(val)) - -/* Dispatch table management. */ -LJ_FUNC void lj_dispatch_init(GG_State *GG); -#if LJ_HASJIT -LJ_FUNC void lj_dispatch_init_hotcount(global_State *g); -#endif -LJ_FUNC void lj_dispatch_update(global_State *g); - -/* Instruction dispatch callback for hooks or when recording. 
*/
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
-LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
-#if LJ_HASJIT
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc);
-#endif
-#if LJ_HASPROFILE
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc);
-#endif
-
-#if LJ_HASFFI && !defined(_BUILDVM_H)
-/* Save/restore errno and GetLastError() around hooks, exits and recording. */
-#include <errno.h>
-#if LJ_TARGET_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError();
-#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr);
-#else
-#define ERRNO_SAVE int olderr = errno;
-#define ERRNO_RESTORE errno = olderr;
-#endif
-#else
-#define ERRNO_SAVE
-#define ERRNO_RESTORE
-#endif
-
-#endif
diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_arm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_arm.h
deleted file mode 100644
index 45ce519ef6b..00000000000
--- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_arm.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
-** ARM instruction emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Constant encoding --------------------------------------------------- */
-
-static uint8_t emit_invai[16] = {
- /* AND */ (ARMI_AND^ARMI_BIC) >> 21,
- /* EOR */ 0,
- /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21,
- /* RSB */ 0,
- /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21,
- /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21,
- /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21,
- /* RSC */ 0,
- /* TST */ 0,
- /* TEQ */ 0,
- /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21,
- /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21,
- /* ORR */ 0,
- /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21,
- /* BIC */ (ARMI_BIC^ARMI_AND) >> 21,
- /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21
-};
-
-/* Encode constant in K12 format for data processing instructions. */
-static uint32_t emit_isk12(ARMIns ai, int32_t n)
-{
- uint32_t invai, i, m = (uint32_t)n;
- /* K12: unsigned 8 bit value, rotated in steps of two bits. */
- for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
- if (m <= 255) return ARMI_K12|m|i;
- /* Otherwise try negation/complement with the inverse instruction. */
- invai = emit_invai[((ai >> 21) & 15)];
- if (!invai) return 0; /* Failed. No inverse instruction. */
- m = ~(uint32_t)n;
- if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) ||
- invai == (ARMI_CMP^ARMI_CMN) >> 21) m++;
- for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
- if (m <= 255) return ARMI_K12|(invai<<21)|m|i;
- return 0; /* Failed.
*/ -} - -/* -- Emit basic instructions --------------------------------------------- */ - -static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm) -{ - *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm); -} - -static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm) -{ - *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm); -} - -static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn) -{ - *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn); -} - -static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm) -{ - *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm); -} - -static void emit_d(ASMState *as, ARMIns ai, Reg rd) -{ - *--as->mcp = ai | ARMF_D(rd); -} - -static void emit_n(ASMState *as, ARMIns ai, Reg rn) -{ - *--as->mcp = ai | ARMF_N(rn); -} - -static void emit_m(ASMState *as, ARMIns ai, Reg rm) -{ - *--as->mcp = ai | ARMF_M(rm); -} - -static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) -{ - lua_assert(ofs >= -255 && ofs <= 255); - if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; - *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) | - ((ofs & 0xf0) << 4) | (ofs & 0x0f); -} - -static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) -{ - lua_assert(ofs >= -4095 && ofs <= 4095); - /* Combine LDR/STR pairs to LDRD/STRD. */ - if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) && - (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn && - (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) && - as->mcp != as->mcloop) { - as->mcp++; - emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4); - return; - } - if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; - *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs; -} - -#if !LJ_SOFTFP -static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) -{ - lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0); - if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; - *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2); -} -#endif - -/* -- Emit loads/stores --------------------------------------------------- */ - -/* Prefer spills of BASE/L. */ -#define emit_canremat(ref) ((ref) < ASMREF_L) - -/* Try to find a one step delta relative to another constant. */ -static int emit_kdelta1(ASMState *as, Reg d, int32_t i) -{ - RegSet work = ~as->freeset & RSET_GPR; - while (work) { - Reg r = rset_picktop(work); - IRRef ref = regcost_ref(as->cost[r]); - lua_assert(r != d); - if (emit_canremat(ref)) { - int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); - uint32_t k = emit_isk12(ARMI_ADD, delta); - if (k) { - if (k == ARMI_K12) - emit_dm(as, ARMI_MOV, d, r); - else - emit_dn(as, ARMI_ADD^k, d, r); - return 1; - } - } - rset_clear(work, r); - } - return 0; /* Failed. */ -} - -/* Try to find a two step delta relative to another constant. */ -static int emit_kdelta2(ASMState *as, Reg d, int32_t i) -{ - RegSet work = ~as->freeset & RSET_GPR; - while (work) { - Reg r = rset_picktop(work); - IRRef ref = regcost_ref(as->cost[r]); - lua_assert(r != d); - if (emit_canremat(ref)) { - int32_t other = ra_iskref(ref) ? 
ra_krefk(as, ref) : IR(ref)->i; - if (other) { - int32_t delta = i - other; - uint32_t sh, inv = 0, k2, k; - if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; } - sh = lj_ffs(delta) & ~1; - k2 = emit_isk12(0, delta & (255 << sh)); - k = emit_isk12(0, delta & ~(255 << sh)); - if (k) { - emit_dn(as, ARMI_ADD^k2^inv, d, d); - emit_dn(as, ARMI_ADD^k^inv, d, r); - return 1; - } - } - } - rset_clear(work, r); - } - return 0; /* Failed. */ -} - -/* Load a 32 bit constant into a GPR. */ -static void emit_loadi(ASMState *as, Reg r, int32_t i) -{ - uint32_t k = emit_isk12(ARMI_MOV, i); - lua_assert(rset_test(as->freeset, r) || r == RID_TMP); - if (k) { - /* Standard K12 constant. */ - emit_d(as, ARMI_MOV^k, r); - } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) { - /* 16 bit loword constant for ARMv6T2. */ - emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); - } else if (emit_kdelta1(as, r, i)) { - /* One step delta relative to another constant. */ - } else if ((as->flags & JIT_F_ARMV6T2)) { - /* 32 bit hiword/loword constant for ARMv6T2. */ - emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r); - emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); - } else if (emit_kdelta2(as, r, i)) { - /* Two step delta relative to another constant. */ - } else { - /* Otherwise construct the constant with up to 4 instructions. */ - /* NYI: use mvn+bic, use pc-relative loads. */ - for (;;) { - uint32_t sh = lj_ffs(i) & ~1; - int32_t m = i & (255 << sh); - i &= ~(255 << sh); - if (i == 0) { - emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r); - break; - } - emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r); - } - } -} - -#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) - -static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); - -/* Get/set from constant pointer. */ -static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p) -{ - int32_t i = i32ptr(p); - emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)), - (i & 4095)); -} - -#if !LJ_SOFTFP -/* Load a number constant into an FPR. */ -static void emit_loadn(ASMState *as, Reg r, cTValue *tv) -{ - int32_t i; - if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) { - uint32_t hi = tv->u32.hi; - uint32_t b = ((hi >> 22) & 0x1ff); - if (!(hi & 0xffff) && (b == 0x100 || b == 0x0ff)) { - *--as->mcp = ARMI_VMOVI_D | ARMF_D(r & 15) | - ((tv->u32.hi >> 12) & 0x00080000) | - ((tv->u32.hi >> 4) & 0x00070000) | - ((tv->u32.hi >> 16) & 0x0000000f); - return; - } - } - i = i32ptr(tv); - emit_vlso(as, ARMI_VLDR_D, r, - ra_allock(as, (i & ~1020), RSET_GPR), (i & 1020)); -} -#endif - -/* Get/set global_State fields. */ -#define emit_getgl(as, r, field) \ - emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field) -#define emit_setgl(as, r, field) \ - emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field) - -/* Trace number is determined from pc of exit instruction. */ -#define emit_setvmstate(as, i) UNUSED(i) - -/* -- Emit control-flow instructions -------------------------------------- */ - -/* Label for internal jumps. */ -typedef MCode *MCLabel; - -/* Return label pointing to current PC. 
*/ -#define emit_label(as) ((as)->mcp) - -static void emit_branch(ASMState *as, ARMIns ai, MCode *target) -{ - MCode *p = as->mcp; - ptrdiff_t delta = (target - p) - 1; - lua_assert(((delta + 0x00800000) >> 24) == 0); - *--p = ai | ((uint32_t)delta & 0x00ffffffu); - as->mcp = p; -} - -#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target)) - -static void emit_call(ASMState *as, void *target) -{ - MCode *p = --as->mcp; - ptrdiff_t delta = ((char *)target - (char *)p) - 8; - if ((((delta>>2) + 0x00800000) >> 24) == 0) { - if ((delta & 1)) - *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 27); - else - *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu); - } else { /* Target out of range: need indirect call. But don't use R0-R3. */ - Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1)); - *p = ARMI_BLXr | ARMF_M(r); - } -} - -/* -- Emit generic operations --------------------------------------------- */ - -/* Generic move between two regs. */ -static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) -{ -#if LJ_SOFTFP - lua_assert(!irt_isnum(ir->t)); UNUSED(ir); -#else - if (dst >= RID_MAX_GPR) { - emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S, - (dst & 15), (src & 15)); - return; - } -#endif - if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */ - MCode ins = *as->mcp, swp = (src^dst); - if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) { - if (!((ins ^ (dst << 16)) & 0x000f0000)) - *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */ - if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000)) - *as->mcp = ins ^ (swp << 12); /* Swap D in store. */ - } - } - emit_dm(as, ARMI_MOV, dst, src); -} - -/* Generic load of register with base and (small) offset address. */ -static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ -#if LJ_SOFTFP - lua_assert(!irt_isnum(ir->t)); UNUSED(ir); -#else - if (r >= RID_MAX_GPR) - emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, base, ofs); - else -#endif - emit_lso(as, ARMI_LDR, r, base, ofs); -} - -/* Generic store of register with base and (small) offset address. */ -static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ -#if LJ_SOFTFP - lua_assert(!irt_isnum(ir->t)); UNUSED(ir); -#else - if (r >= RID_MAX_GPR) - emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, base, ofs); - else -#endif - emit_lso(as, ARMI_STR, r, base, ofs); -} - -/* Emit an arithmetic/logic operation with a constant operand. */ -static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src, - int32_t i, RegSet allow) -{ - uint32_t k = emit_isk12(ai, i); - if (k) - emit_dn(as, ai^k, dest, src); - else - emit_dnm(as, ai, dest, src, ra_allock(as, i, allow)); -} - -/* Add offset to pointer. */ -static void emit_addptr(ASMState *as, Reg r, int32_t ofs) -{ - if (ofs) - emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r)); -} - -#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs)) - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_mips.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_mips.h deleted file mode 100644 index 8e7ee66fb1a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_mips.h +++ /dev/null @@ -1,211 +0,0 @@ -/* -** MIPS instruction emitter. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -/* -- Emit basic instructions --------------------------------------------- */ - -static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt) -{ - *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt); -} - -static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a) -{ - *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a); -} - -#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0) -#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt)) - -static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i) -{ - *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff); -} - -#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i)) -#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i)) - -static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh) -{ - *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31); -} - -#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0) - -static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift) -{ - if ((as->flags & JIT_F_MIPS32R2)) { - emit_dta(as, MIPSI_ROTR, dest, src, shift); - } else { - emit_dst(as, MIPSI_OR, dest, dest, tmp); - emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31); - emit_dta(as, MIPSI_SRL, tmp, src, shift); - } -} - -/* -- Emit loads/stores --------------------------------------------------- */ - -/* Prefer rematerialization of BASE/L from global_State over spills. */ -#define emit_canremat(ref) ((ref) <= REF_BASE) - -/* Try to find a one step delta relative to another constant. */ -static int emit_kdelta1(ASMState *as, Reg t, int32_t i) -{ - RegSet work = ~as->freeset & RSET_GPR; - while (work) { - Reg r = rset_picktop(work); - IRRef ref = regcost_ref(as->cost[r]); - lua_assert(r != t); - if (ref < ASMREF_L) { - int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); - if (checki16(delta)) { - emit_tsi(as, MIPSI_ADDIU, t, r, delta); - return 1; - } - } - rset_clear(work, r); - } - return 0; /* Failed. */ -} - -/* Load a 32 bit constant into a GPR. */ -static void emit_loadi(ASMState *as, Reg r, int32_t i) -{ - if (checki16(i)) { - emit_ti(as, MIPSI_LI, r, i); - } else { - if ((i & 0xffff)) { - int32_t jgl = i32ptr(J2G(as->J)); - if ((uint32_t)(i-jgl) < 65536) { - emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768); - return; - } else if (emit_kdelta1(as, r, i)) { - return; - } else if ((i >> 16) == 0) { - emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i); - return; - } - emit_tsi(as, MIPSI_ORI, r, r, i); - } - emit_ti(as, MIPSI_LUI, r, (i >> 16)); - } -} - -#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) - -static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); -static void ra_allockreg(ASMState *as, int32_t k, Reg r); - -/* Get/set from constant pointer. */ -static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow) -{ - int32_t jgl = i32ptr(J2G(as->J)); - int32_t i = i32ptr(p); - Reg base; - if ((uint32_t)(i-jgl) < 65536) { - i = i-jgl-32768; - base = RID_JGL; - } else { - base = ra_allock(as, i-(int16_t)i, allow); - } - emit_tsi(as, mi, r, base, i); -} - -#define emit_loadn(as, r, tv) \ - emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR) - -/* Get/set global_State fields. 
*/ -static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs) -{ - emit_tsi(as, mi, r, RID_JGL, ofs-32768); -} - -#define emit_getgl(as, r, field) \ - emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field)) -#define emit_setgl(as, r, field) \ - emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field)) - -/* Trace number is determined from per-trace exit stubs. */ -#define emit_setvmstate(as, i) UNUSED(i) - -/* -- Emit control-flow instructions -------------------------------------- */ - -/* Label for internal jumps. */ -typedef MCode *MCLabel; - -/* Return label pointing to current PC. */ -#define emit_label(as) ((as)->mcp) - -static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target) -{ - MCode *p = as->mcp; - ptrdiff_t delta = target - p; - lua_assert(((delta + 0x8000) >> 16) == 0); - *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu); - as->mcp = p; -} - -static void emit_jmp(ASMState *as, MCode *target) -{ - *--as->mcp = MIPSI_NOP; - emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target)); -} - -static void emit_call(ASMState *as, void *target) -{ - MCode *p = as->mcp; - *--p = MIPSI_NOP; - if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0) - *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu); - else /* Target out of range: need indirect call. */ - *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR); - as->mcp = p; - ra_allockreg(as, i32ptr(target), RID_CFUNCADDR); -} - -/* -- Emit generic operations --------------------------------------------- */ - -#define emit_move(as, dst, src) \ - emit_ds(as, MIPSI_MOVE, (dst), (src)) - -/* Generic move between two regs. */ -static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) -{ - if (dst < RID_MAX_GPR) - emit_move(as, dst, src); - else - emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src); -} - -/* Generic load of register with base and (small) offset address. */ -static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ - if (r < RID_MAX_GPR) - emit_tsi(as, MIPSI_LW, r, base, ofs); - else - emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1, - (r & 31), base, ofs); -} - -/* Generic store of register with base and (small) offset address. */ -static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ - if (r < RID_MAX_GPR) - emit_tsi(as, MIPSI_SW, r, base, ofs); - else - emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1, - (r&31), base, ofs); -} - -/* Add offset to pointer. */ -static void emit_addptr(ASMState *as, Reg r, int32_t ofs) -{ - if (ofs) { - lua_assert(checki16(ofs)); - emit_tsi(as, MIPSI_ADDIU, r, r, ofs); - } -} - -#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs)) - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_ppc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_ppc.h deleted file mode 100644 index 087860ed382..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_ppc.h +++ /dev/null @@ -1,238 +0,0 @@ -/* -** PPC instruction emitter. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -/* -- Emit basic instructions --------------------------------------------- */ - -static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb) -{ - *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb); -} - -#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb)) -#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0) -#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb)) - -static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i) -{ - *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff); -} - -#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i)) -#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i)) -#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i)) - -#define emit_fab(as, pi, rf, ra, rb) \ - emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31) -#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31) -#define emit_fac(as, pi, rf, ra, rc) \ - emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0) -#define emit_facb(as, pi, rf, ra, rc, rb) \ - emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31) -#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i)) - -static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs, - int32_t n, int32_t b, int32_t e) -{ - *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) | - PPCF_MB(b) | PPCF_ME(e); -} - -static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n) -{ - lua_assert(n >= 0 && n < 32); - emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n); -} - -static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n) -{ - lua_assert(n >= 0 && n < 32); - emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31); -} - -/* -- Emit loads/stores --------------------------------------------------- */ - -/* Prefer rematerialization of BASE/L from global_State over spills. */ -#define emit_canremat(ref) ((ref) <= REF_BASE) - -/* Try to find a one step delta relative to another constant. */ -static int emit_kdelta1(ASMState *as, Reg t, int32_t i) -{ - RegSet work = ~as->freeset & RSET_GPR; - while (work) { - Reg r = rset_picktop(work); - IRRef ref = regcost_ref(as->cost[r]); - lua_assert(r != t); - if (ref < ASMREF_L) { - int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); - if (checki16(delta)) { - emit_tai(as, PPCI_ADDI, t, r, delta); - return 1; - } - } - rset_clear(work, r); - } - return 0; /* Failed. */ -} - -/* Load a 32 bit constant into a GPR. */ -static void emit_loadi(ASMState *as, Reg r, int32_t i) -{ - if (checki16(i)) { - emit_ti(as, PPCI_LI, r, i); - } else { - if ((i & 0xffff)) { - int32_t jgl = i32ptr(J2G(as->J)); - if ((uint32_t)(i-jgl) < 65536) { - emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768); - return; - } else if (emit_kdelta1(as, r, i)) { - return; - } - emit_asi(as, PPCI_ORI, r, r, i); - } - emit_ti(as, PPCI_LIS, r, (i >> 16)); - } -} - -#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) - -static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); - -/* Get/set from constant pointer. 
*/ -static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow) -{ - int32_t jgl = i32ptr(J2G(as->J)); - int32_t i = i32ptr(p); - Reg base; - if ((uint32_t)(i-jgl) < 65536) { - i = i-jgl-32768; - base = RID_JGL; - } else { - base = ra_allock(as, i-(int16_t)i, allow); - } - emit_tai(as, pi, r, base, i); -} - -#define emit_loadn(as, r, tv) \ - emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR) - -/* Get/set global_State fields. */ -static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs) -{ - emit_tai(as, pi, r, RID_JGL, ofs-32768); -} - -#define emit_getgl(as, r, field) \ - emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field)) -#define emit_setgl(as, r, field) \ - emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field)) - -/* Trace number is determined from per-trace exit stubs. */ -#define emit_setvmstate(as, i) UNUSED(i) - -/* -- Emit control-flow instructions -------------------------------------- */ - -/* Label for internal jumps. */ -typedef MCode *MCLabel; - -/* Return label pointing to current PC. */ -#define emit_label(as) ((as)->mcp) - -static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target) -{ - MCode *p = --as->mcp; - ptrdiff_t delta = (char *)target - (char *)p; - lua_assert(((delta + 0x8000) >> 16) == 0); - pi ^= (delta & 0x8000) * (PPCF_Y/0x8000); - *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu); -} - -static void emit_jmp(ASMState *as, MCode *target) -{ - MCode *p = --as->mcp; - ptrdiff_t delta = (char *)target - (char *)p; - *p = PPCI_B | (delta & 0x03fffffcu); -} - -static void emit_call(ASMState *as, void *target) -{ - MCode *p = --as->mcp; - ptrdiff_t delta = (char *)target - (char *)p; - if ((((delta>>2) + 0x00800000) >> 24) == 0) { - *p = PPCI_BL | (delta & 0x03fffffcu); - } else { /* Target out of range: need indirect call. Don't use arg reg. */ - RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1); - Reg r = ra_allock(as, i32ptr(target), allow); - *p = PPCI_BCTRL; - p[-1] = PPCI_MTCTR | PPCF_T(r); - as->mcp = p-1; - } -} - -/* -- Emit generic operations --------------------------------------------- */ - -#define emit_mr(as, dst, src) \ - emit_asb(as, PPCI_MR, (dst), (src), (src)) - -/* Generic move between two regs. */ -static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) -{ - UNUSED(ir); - if (dst < RID_MAX_GPR) - emit_mr(as, dst, src); - else - emit_fb(as, PPCI_FMR, dst, src); -} - -/* Generic load of register with base and (small) offset address. */ -static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ - if (r < RID_MAX_GPR) - emit_tai(as, PPCI_LWZ, r, base, ofs); - else - emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, base, ofs); -} - -/* Generic store of register with base and (small) offset address. */ -static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ - if (r < RID_MAX_GPR) - emit_tai(as, PPCI_STW, r, base, ofs); - else - emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, base, ofs); -} - -/* Emit a compare (for equality) with a constant operand. */ -static void emit_cmpi(ASMState *as, Reg r, int32_t k) -{ - if (checki16(k)) { - emit_ai(as, PPCI_CMPWI, r, k); - } else if (checku16(k)) { - emit_ai(as, PPCI_CMPLWI, r, k); - } else { - emit_ai(as, PPCI_CMPLWI, RID_TMP, k); - emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16)); - } -} - -/* Add offset to pointer. 
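Two addressing tricks recur in the loads/stores above: emit_lsptr splits an absolute address into a base constant plus the sign-extended low 16 bits, and emit_lsglptr relies on the JGL base register apparently being biased 32 KB into global_State, so a 64 KB window of fields is reachable through one signed 16-bit displacement (hence the ofs-32768). A standalone sketch of the arithmetic, under that assumption and with example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* emit_lsptr trick: base = addr - (int16_t)addr rounds the address so the
  ** remaining displacement is exactly the sign-extended low half. */
  int32_t addr = 0x12348765;        /* arbitrary example address */
  int32_t disp = (int16_t)addr;     /* sign-extended low half: -0x789b */
  int32_t base = addr - disp;       /* 0x12350000 */
  printf("base 0x%08x + disp %d == addr: %d\n",
         (uint32_t)base, disp, base + disp == addr);

  /* emit_lsglptr trick: with the base register biased +32768 into the
  ** structure, a field offset in [0, 64K) becomes a displacement in
  ** [-32768, 32767] after subtracting 32768. */
  int32_t ofs = 0xfff0;             /* hypothetical large field offset */
  int32_t biased = ofs - 32768;     /* 32752, still a valid 16-bit disp */
  printf("biased ofs %d fits i16: %d\n", biased, biased == (int16_t)biased);
  return 0;
}
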
*/ -static void emit_addptr(ASMState *as, Reg r, int32_t ofs) -{ - if (ofs) { - emit_tai(as, PPCI_ADDI, r, r, ofs); - if (!checki16(ofs)) - emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16); - } -} - -static void emit_spsub(ASMState *as, int32_t ofs) -{ - if (ofs) { - emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs); - emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP, - CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0)); - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_x86.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_x86.h deleted file mode 100644 index ac42db3e20f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_emit_x86.h +++ /dev/null @@ -1,462 +0,0 @@ -/* -** x86/x64 instruction emitter. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* -- Emit basic instructions --------------------------------------------- */ - -#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7))) - -#if LJ_64 -#define REXRB(p, rr, rb) \ - { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \ - if (rex != 0x40) *--(p) = rex; } -#define FORCE_REX 0x200 -#define REX_64 (FORCE_REX|0x080000) -#else -#define REXRB(p, rr, rb) ((void)0) -#define FORCE_REX 0 -#define REX_64 0 -#endif - -#define emit_i8(as, i) (*--as->mcp = (MCode)(i)) -#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4) -#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4) - -#define emit_x87op(as, xo) \ - (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2) - -/* op */ -static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx, - MCode *p, int delta) -{ - int n = (int8_t)xo; -#if defined(__GNUC__) - if (__builtin_constant_p(xo) && n == -2) - p[delta-2] = (MCode)(xo >> 24); - else if (__builtin_constant_p(xo) && n == -3) - *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16); - else -#endif - *(uint32_t *)(p+delta-5) = (uint32_t)xo; - p += n + delta; -#if LJ_64 - { - uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1); - if (rex != 0x40) { - rex |= (rr >> 16); - if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); } - else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; } - *--p = (MCode)rex; - } - } -#else - UNUSED(rr); UNUSED(rb); UNUSED(rx); -#endif - return p; -} - -/* op + modrm */ -#define emit_opm(xo, mode, rr, rb, p, delta) \ - (p[(delta)-1] = MODRM((mode), (rr), (rb)), \ - emit_op((xo), (rr), (rb), 0, (p), (delta))) - -/* op + modrm + sib */ -#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \ - (p[-1] = MODRM((scale), (rx), (rb)), \ - p[-2] = MODRM((mode), (rr), RID_ESP), \ - emit_op((xo), (rr), (rb), (rx), (p), -1)) - -/* op r1, r2 */ -static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2) -{ - MCode *p = as->mcp; - as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0); -} - -#if LJ_64 && defined(LUA_USE_ASSERT) -/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). 
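The x86/x64 emitter packs a ModRM byte out of mode/reg/r/m fields and prepends a REX prefix whenever an extended register or a 64-bit operand size is involved (the MODRM, REXRB and emit_op logic above). A standalone sketch of the standard encoding, using mov rax, r15 as a worked example; the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* ModRM byte: 2-bit mode, 3-bit reg field, 3-bit r/m field
** (same layout the MODRM() macro packs). */
static uint8_t modrm(unsigned mode, unsigned reg, unsigned rm)
{
  return (uint8_t)((mode << 6) | ((reg & 7) << 3) | (rm & 7));
}

/* REX prefix: 0100WRXB.  W selects 64-bit operand size, R/X/B supply the
** 4th register-number bit for the reg, index and r/m/base fields. */
static uint8_t rex(int w, unsigned reg, unsigned idx, unsigned base)
{
  return (uint8_t)(0x40 | (w ? 8 : 0) |
                   (((reg >> 3) & 1) << 2) | (((idx >> 3) & 1) << 1) |
                   ((base >> 3) & 1));
}

int main(void)
{
  /* mov rax, r15  ->  REX.W+B, opcode 8B /r, reg=rax(0), r/m=r15(15). */
  uint8_t p = rex(1, 0, 0, 15), op = 0x8b, m = modrm(3, 0, 15);
  printf("%02x %02x %02x\n", p, op, m);   /* prints: 49 8b c7 */
  return 0;
}
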
*/ -static int32_t ptr2addr(const void *p) -{ - lua_assert((uintptr_t)p < (uintptr_t)0x80000000); - return i32ptr(p); -} -#else -#define ptr2addr(p) (i32ptr((p))) -#endif - -/* op r, [addr] */ -static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr) -{ - MCode *p = as->mcp; - *(int32_t *)(p-4) = ptr2addr(addr); -#if LJ_64 - p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); - as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5); -#else - as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4); -#endif -} - -/* op r, [base+ofs] */ -static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs) -{ - MCode *p = as->mcp; - x86Mode mode; - if (ra_hasreg(rb)) { - if (ofs == 0 && (rb&7) != RID_EBP) { - mode = XM_OFS0; - } else if (checki8(ofs)) { - *--p = (MCode)ofs; - mode = XM_OFS8; - } else { - p -= 4; - *(int32_t *)p = ofs; - mode = XM_OFS32; - } - if ((rb&7) == RID_ESP) - *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); - } else { - *(int32_t *)(p-4) = ofs; -#if LJ_64 - p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); - p -= 5; - rb = RID_ESP; -#else - p -= 4; - rb = RID_EBP; -#endif - mode = XM_OFS0; - } - as->mcp = emit_opm(xo, mode, rr, rb, p, 0); -} - -/* op r, [base+idx*scale+ofs] */ -static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx, - x86Mode scale, int32_t ofs) -{ - MCode *p = as->mcp; - x86Mode mode; - if (ofs == 0 && (rb&7) != RID_EBP) { - mode = XM_OFS0; - } else if (checki8(ofs)) { - mode = XM_OFS8; - *--p = (MCode)ofs; - } else { - mode = XM_OFS32; - p -= 4; - *(int32_t *)p = ofs; - } - as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p); -} - -/* op r, i */ -static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i) -{ - MCode *p = as->mcp; - x86Op xo; - if (checki8(i)) { - *--p = (MCode)i; - xo = XG_TOXOi8(xg); - } else { - p -= 4; - *(int32_t *)p = i; - xo = XG_TOXOi(xg); - } - as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0); -} - -/* op [base+ofs], i */ -static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs, - int32_t i) -{ - x86Op xo; - if (checki8(i)) { - emit_i8(as, i); - xo = XG_TOXOi8(xg); - } else { - emit_i32(as, i); - xo = XG_TOXOi(xg); - } - emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs); -} - -#define emit_shifti(as, xg, r, i) \ - (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r))) - -/* op r, rm/mrm */ -static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb) -{ - MCode *p = as->mcp; - x86Mode mode = XM_REG; - if (rb == RID_MRM) { - rb = as->mrm.base; - if (rb == RID_NONE) { - rb = RID_EBP; - mode = XM_OFS0; - p -= 4; - *(int32_t *)p = as->mrm.ofs; - if (as->mrm.idx != RID_NONE) - goto mrmidx; -#if LJ_64 - *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP); - rb = RID_ESP; -#endif - } else { - if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) { - mode = XM_OFS0; - } else if (checki8(as->mrm.ofs)) { - *--p = (MCode)as->mrm.ofs; - mode = XM_OFS8; - } else { - p -= 4; - *(int32_t *)p = as->mrm.ofs; - mode = XM_OFS32; - } - if (as->mrm.idx != RID_NONE) { - mrmidx: - as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p); - return; - } - if ((rb&7) == RID_ESP) - *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); - } - } - as->mcp = emit_opm(xo, mode, rr, rb, p, 0); -} - -/* op rm/mrm, i */ -static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i) -{ - x86Op xo; - if (checki8(i)) { - emit_i8(as, i); - xo = XG_TOXOi8(xg); - } else { - emit_i32(as, i); - xo = XG_TOXOi(xg); - } - emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64)); -} - -/* -- Emit loads/stores 
--------------------------------------------------- */ - -/* mov [base+ofs], i */ -static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i) -{ - emit_i32(as, i); - emit_rmro(as, XO_MOVmi, 0, base, ofs); -} - -/* mov [base+ofs], r */ -#define emit_movtomro(as, r, base, ofs) \ - emit_rmro(as, XO_MOVto, (r), (base), (ofs)) - -/* Get/set global_State fields. */ -#define emit_opgl(as, xo, r, field) \ - emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field) -#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field) -#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field) - -#define emit_setvmstate(as, i) \ - (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate)) - -/* mov r, i / xor r, r */ -static void emit_loadi(ASMState *as, Reg r, int32_t i) -{ - /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */ - if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP || - (as->curins+1 < as->T->nins && - IR(as->curins+1)->o == IR_HIOP)))) { - emit_rr(as, XO_ARITH(XOg_XOR), r, r); - } else { - MCode *p = as->mcp; - *(int32_t *)(p-4) = i; - p[-5] = (MCode)(XI_MOVri+(r&7)); - p -= 5; - REXRB(p, 0, r); - as->mcp = p; - } -} - -/* mov r, addr */ -#define emit_loada(as, r, addr) \ - emit_loadi(as, (r), ptr2addr((addr))) - -#if LJ_64 -/* mov r, imm64 or shorter 32 bit extended load. */ -static void emit_loadu64(ASMState *as, Reg r, uint64_t u64) -{ - if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */ - emit_loadi(as, r, (int32_t)u64); - } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */ - MCode *p = as->mcp; - *(int32_t *)(p-4) = (int32_t)u64; - as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4); - } else { /* Full-size 64 bit load. */ - MCode *p = as->mcp; - *(uint64_t *)(p-8) = u64; - p[-9] = (MCode)(XI_MOVri+(r&7)); - p[-10] = 0x48 + ((r>>3)&1); - p -= 10; - as->mcp = p; - } -} -#endif - -/* movsd r, [&tv->n] / xorps r, r */ -static void emit_loadn(ASMState *as, Reg r, cTValue *tv) -{ - if (tvispzero(tv)) /* Use xor only for +0. */ - emit_rr(as, XO_XORPS, r, r); - else - emit_rma(as, XO_MOVSD, r, &tv->n); -} - -/* -- Emit control-flow instructions -------------------------------------- */ - -/* Label for short jumps. */ -typedef MCode *MCLabel; - -#if LJ_32 && LJ_HASFFI -/* jmp short target */ -static void emit_sjmp(ASMState *as, MCLabel target) -{ - MCode *p = as->mcp; - ptrdiff_t delta = target - p; - lua_assert(delta == (int8_t)delta); - p[-1] = (MCode)(int8_t)delta; - p[-2] = XI_JMPs; - as->mcp = p - 2; -} -#endif - -/* jcc short target */ -static void emit_sjcc(ASMState *as, int cc, MCLabel target) -{ - MCode *p = as->mcp; - ptrdiff_t delta = target - p; - lua_assert(delta == (int8_t)delta); - p[-1] = (MCode)(int8_t)delta; - p[-2] = (MCode)(XI_JCCs+(cc&15)); - as->mcp = p - 2; -} - -/* jcc short (pending target) */ -static MCLabel emit_sjcc_label(ASMState *as, int cc) -{ - MCode *p = as->mcp; - p[-1] = 0; - p[-2] = (MCode)(XI_JCCs+(cc&15)); - as->mcp = p - 2; - return p; -} - -/* Fixup jcc short target. */ -static void emit_sfixup(ASMState *as, MCLabel source) -{ - source[-1] = (MCode)(as->mcp-source); -} - -/* Return label pointing to current PC. */ -#define emit_label(as) ((as)->mcp) - -/* Compute relative 32 bit offset for jump and call instructions. 
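Near jumps and calls on x86 use a 32-bit displacement measured from the end of the instruction, and the emitter falls back to an indirect call through a register when the target lies outside the ±2 GB range. A self-contained sketch of that displacement computation and range check (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* rel32 for jmp/call: displacement from the end of the instruction,
** only usable if it fits in a signed 32 bits. */
static int rel32(uint64_t insn_end, uint64_t target, int32_t *out)
{
  int64_t delta = (int64_t)(target - insn_end);
  if (delta != (int32_t)delta) return 0;   /* out of +-2 GB range */
  *out = (int32_t)delta;
  return 1;
}

int main(void)
{
  int32_t d;
  /* E8 <rel32>: a 5-byte call ending at 0x401005, target 0x402000. */
  if (rel32(0x401005u, 0x402000u, &d))
    printf("call rel32 = 0x%08x\n", (uint32_t)d);      /* 0x00000ffb */
  /* A far 64-bit target forces the indirect load+call fallback. */
  if (!rel32(0x401005u, 0x7f0000000000ull, &d))
    printf("out of range -> indirect call\n");
  return 0;
}
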
*/ -static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) -{ - ptrdiff_t delta = target - p; - lua_assert(delta == (int32_t)delta); - return (int32_t)delta; -} - -/* jcc target */ -static void emit_jcc(ASMState *as, int cc, MCode *target) -{ - MCode *p = as->mcp; - *(int32_t *)(p-4) = jmprel(p, target); - p[-5] = (MCode)(XI_JCCn+(cc&15)); - p[-6] = 0x0f; - as->mcp = p - 6; -} - -/* jmp target */ -static void emit_jmp(ASMState *as, MCode *target) -{ - MCode *p = as->mcp; - *(int32_t *)(p-4) = jmprel(p, target); - p[-5] = XI_JMP; - as->mcp = p - 5; -} - -/* call target */ -static void emit_call_(ASMState *as, MCode *target) -{ - MCode *p = as->mcp; -#if LJ_64 - if (target-p != (int32_t)(target-p)) { - /* Assumes RID_RET is never an argument to calls and always clobbered. */ - emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET); - emit_loadu64(as, RID_RET, (uint64_t)target); - return; - } -#endif - *(int32_t *)(p-4) = jmprel(p, target); - p[-5] = XI_CALL; - as->mcp = p - 5; -} - -#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f)) - -/* -- Emit generic operations --------------------------------------------- */ - -/* Use 64 bit operations to handle 64 bit IR types. */ -#if LJ_64 -#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0)) -#else -#define REX_64IR(ir, r) (r) -#endif - -/* Generic move between two regs. */ -static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) -{ - UNUSED(ir); - if (dst < RID_MAX_GPR) - emit_rr(as, XO_MOV, REX_64IR(ir, dst), src); - else - emit_rr(as, XO_MOVAPS, dst, src); -} - -/* Generic load of register with base and (small) offset address. */ -static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ - if (r < RID_MAX_GPR) - emit_rmro(as, XO_MOV, REX_64IR(ir, r), base, ofs); - else - emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, r, base, ofs); -} - -/* Generic store of register with base and (small) offset address. */ -static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) -{ - if (r < RID_MAX_GPR) - emit_rmro(as, XO_MOVto, REX_64IR(ir, r), base, ofs); - else - emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, base, ofs); -} - -/* Add offset to pointer. */ -static void emit_addptr(ASMState *as, Reg r, int32_t ofs) -{ - if (ofs) { - if ((as->flags & JIT_F_LEA_AGU)) - emit_rmro(as, XO_LEA, r, r, ofs); - else - emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs); - } -} - -#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs)) - -/* Prefer rematerialization of BASE/L from global_State over spills. */ -#define emit_canremat(ref) ((ref) <= REF_BASE) - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.c deleted file mode 100644 index 9ac0c988fc9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.c +++ /dev/null @@ -1,796 +0,0 @@ -/* -** Error handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_err_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_str.h" -#include "lj_func.h" -#include "lj_state.h" -#include "lj_frame.h" -#include "lj_ff.h" -#include "lj_trace.h" -#include "lj_vm.h" -#include "lj_strfmt.h" - -/* -** LuaJIT can either use internal or external frame unwinding: -** -** - Internal frame unwinding (INT) is free-standing and doesn't require -** any OS or library support. -** -** - External frame unwinding (EXT) uses the system-provided unwind handler. 
-** -** Pros and Cons: -** -** - EXT requires unwind tables for *all* functions on the C stack between -** the pcall/catch and the error/throw. This is the default on x64, -** but needs to be manually enabled on x86/PPC for non-C++ code. -** -** - INT is faster when actually throwing errors (but this happens rarely). -** Setting up error handlers is zero-cost in any case. -** -** - EXT provides full interoperability with C++ exceptions. You can throw -** Lua errors or C++ exceptions through a mix of Lua frames and C++ frames. -** C++ destructors are called as needed. C++ exceptions caught by pcall -** are converted to the string "C++ exception". Lua errors can be caught -** with catch (...) in C++. -** -** - INT has only limited support for automatically catching C++ exceptions -** on POSIX systems using DWARF2 stack unwinding. Other systems may use -** the wrapper function feature. Lua errors thrown through C++ frames -** cannot be caught by C++ code and C++ destructors are not run. -** -** EXT is the default on x64 systems, INT is the default on all other systems. -** -** EXT can be manually enabled on POSIX systems using GCC and DWARF2 stack -** unwinding with -DLUAJIT_UNWIND_EXTERNAL. *All* C code must be compiled -** with -funwind-tables (or -fexceptions). This includes LuaJIT itself (set -** TARGET_CFLAGS), all of your C/Lua binding code, all loadable C modules -** and all C libraries that have callbacks which may be used to call back -** into Lua. C++ code must *not* be compiled with -fno-exceptions. -** -** EXT cannot be enabled on WIN32 since system exceptions use code-driven SEH. -** EXT is mandatory on WIN64 since the calling convention has an abundance -** of callee-saved registers (rbx, rbp, rsi, rdi, r12-r15, xmm6-xmm15). -** The POSIX/x64 interpreter only saves r12/r13 for INT (e.g. PS4). -*/ - -#if defined(__GNUC__) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL)) && !LJ_NO_UNWIND -#define LJ_UNWIND_EXT 1 -#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS -#define LJ_UNWIND_EXT 1 -#endif - -/* -- Error messages ------------------------------------------------------ */ - -/* Error message strings. */ -LJ_DATADEF const char *lj_err_allmsg = -#define ERRDEF(name, msg) msg "\0" -#include "lj_errmsg.h" -; - -/* -- Internal frame unwinding -------------------------------------------- */ - -/* Unwind Lua stack and move error message to new top. */ -LJ_NOINLINE static void unwindstack(lua_State *L, TValue *top) -{ - lj_func_closeuv(L, top); - if (top < L->top-1) { - copyTV(L, top, L->top-1); - L->top = top+1; - } - lj_state_relimitstack(L); -} - -/* Unwind until stop frame. Optionally cleanup frames. */ -static void *err_unwind(lua_State *L, void *stopcf, int errcode) -{ - TValue *frame = L->base-1; - void *cf = L->cframe; - while (cf) { - int32_t nres = cframe_nres(cframe_raw(cf)); - if (nres < 0) { /* C frame without Lua frame? */ - TValue *top = restorestack(L, -nres); - if (frame < top) { /* Frame reached? */ - if (errcode) { - L->base = frame+1; - L->cframe = cframe_prev(cf); - unwindstack(L, top); - } - return cf; - } - } - if (frame <= tvref(L->stack)+LJ_FR2) - break; - switch (frame_typep(frame)) { - case FRAME_LUA: /* Lua frame. */ - case FRAME_LUAP: - frame = frame_prevl(frame); - break; - case FRAME_C: /* C frame. 
*/ - unwind_c: -#if LJ_UNWIND_EXT - if (errcode) { - L->base = frame_prevd(frame) + 1; - L->cframe = cframe_prev(cf); - unwindstack(L, frame - LJ_FR2); - } else if (cf != stopcf) { - cf = cframe_prev(cf); - frame = frame_prevd(frame); - break; - } - return NULL; /* Continue unwinding. */ -#else - UNUSED(stopcf); - cf = cframe_prev(cf); - frame = frame_prevd(frame); - break; -#endif - case FRAME_CP: /* Protected C frame. */ - if (cframe_canyield(cf)) { /* Resume? */ - if (errcode) { - hook_leave(G(L)); /* Assumes nobody uses coroutines inside hooks. */ - L->cframe = NULL; - L->status = (uint8_t)errcode; - } - return cf; - } - if (errcode) { - L->base = frame_prevd(frame) + 1; - L->cframe = cframe_prev(cf); - unwindstack(L, frame - LJ_FR2); - } - return cf; - case FRAME_CONT: /* Continuation frame. */ - if (frame_iscont_fficb(frame)) - goto unwind_c; - case FRAME_VARG: /* Vararg frame. */ - frame = frame_prevd(frame); - break; - case FRAME_PCALL: /* FF pcall() frame. */ - case FRAME_PCALLH: /* FF pcall() frame inside hook. */ - if (errcode) { - if (errcode == LUA_YIELD) { - frame = frame_prevd(frame); - break; - } - if (frame_typep(frame) == FRAME_PCALL) - hook_leave(G(L)); - L->base = frame_prevd(frame) + 1; - L->cframe = cf; - unwindstack(L, L->base); - } - return (void *)((intptr_t)cf | CFRAME_UNWIND_FF); - } - } - /* No C frame. */ - if (errcode) { - L->base = tvref(L->stack)+1+LJ_FR2; - L->cframe = NULL; - unwindstack(L, L->base); - if (G(L)->panic) - G(L)->panic(L); - exit(EXIT_FAILURE); - } - return L; /* Anything non-NULL will do. */ -} - -/* -- External frame unwinding -------------------------------------------- */ - -#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_ABI_WIN - -/* -** We have to use our own definitions instead of the mandatory (!) unwind.h, -** since various OS, distros and compilers mess up the header installation. -*/ - -typedef struct _Unwind_Exception -{ - uint64_t exclass; - void (*excleanup)(int, struct _Unwind_Exception *); - uintptr_t p1, p2; -} __attribute__((__aligned__)) _Unwind_Exception; - -typedef struct _Unwind_Context _Unwind_Context; - -#define _URC_OK 0 -#define _URC_FATAL_PHASE1_ERROR 3 -#define _URC_HANDLER_FOUND 6 -#define _URC_INSTALL_CONTEXT 7 -#define _URC_CONTINUE_UNWIND 8 -#define _URC_FAILURE 9 - -#if !LJ_TARGET_ARM - -extern uintptr_t _Unwind_GetCFA(_Unwind_Context *); -extern void _Unwind_SetGR(_Unwind_Context *, int, uintptr_t); -extern void _Unwind_SetIP(_Unwind_Context *, uintptr_t); -extern void _Unwind_DeleteException(_Unwind_Exception *); -extern int _Unwind_RaiseException(_Unwind_Exception *); - -#define _UA_SEARCH_PHASE 1 -#define _UA_CLEANUP_PHASE 2 -#define _UA_HANDLER_FRAME 4 -#define _UA_FORCE_UNWIND 8 - -#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */ -#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (uint64_t)(c)) -#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff) -#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff)) - -/* DWARF2 personality handler referenced from interpreter .eh_frame. 
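The LJ_UEXCLASS macros above tag the 64-bit DWARF2 exception class with an ASCII marker and carry the Lua error code in the low byte, so the personality routine can distinguish its own exceptions from foreign (e.g. C++) ones and recover the error code. A standalone round-trip of that scheme (the foreign class value and the error code 2 are only examples):

#include <stdint.h>
#include <stdio.h>

/* Exception class tagging: the ASCII tag "LUAJIT2\0" fills the upper bytes,
** the Lua error code travels in the low byte. */
#define UEXCLASS             0x4c55414a49543200ULL  /* "LUAJIT2\0" */
#define UEXCLASS_MAKE(c)     (UEXCLASS | (uint64_t)(c))
#define UEXCLASS_CHECK(cl)   (((cl) ^ UEXCLASS) <= 0xff)
#define UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff))

int main(void)
{
  uint64_t cl = UEXCLASS_MAKE(2);            /* e.g. LUA_ERRRUN == 2 */
  uint64_t foreign = 0x474e5543432b2b00ULL;  /* some non-LuaJIT (C++) class */
  printf("ours: %d  code: %d\n", (int)UEXCLASS_CHECK(cl), UEXCLASS_ERRCODE(cl));
  printf("foreign recognized as ours: %d\n", (int)UEXCLASS_CHECK(foreign));
  return 0;
}
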
*/ -LJ_FUNCA int lj_err_unwind_dwarf(int version, int actions, - uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx) -{ - void *cf; - lua_State *L; - if (version != 1) - return _URC_FATAL_PHASE1_ERROR; - UNUSED(uexclass); - cf = (void *)_Unwind_GetCFA(ctx); - L = cframe_L(cf); - if ((actions & _UA_SEARCH_PHASE)) { -#if LJ_UNWIND_EXT - if (err_unwind(L, cf, 0) == NULL) - return _URC_CONTINUE_UNWIND; -#endif - if (!LJ_UEXCLASS_CHECK(uexclass)) { - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); - } - return _URC_HANDLER_FOUND; - } - if ((actions & _UA_CLEANUP_PHASE)) { - int errcode; - if (LJ_UEXCLASS_CHECK(uexclass)) { - errcode = LJ_UEXCLASS_ERRCODE(uexclass); - } else { - if ((actions & _UA_HANDLER_FRAME)) - _Unwind_DeleteException(uex); - errcode = LUA_ERRRUN; - } -#if LJ_UNWIND_EXT - cf = err_unwind(L, cf, errcode); - if ((actions & _UA_FORCE_UNWIND)) { - return _URC_CONTINUE_UNWIND; - } else if (cf) { - _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode); - _Unwind_SetIP(ctx, (uintptr_t)(cframe_unwind_ff(cf) ? - lj_vm_unwind_ff_eh : - lj_vm_unwind_c_eh)); - return _URC_INSTALL_CONTEXT; - } -#if LJ_TARGET_X86ORX64 - else if ((actions & _UA_HANDLER_FRAME)) { - /* Workaround for ancient libgcc bug. Still present in RHEL 5.5. :-/ - ** Real fix: http://gcc.gnu.org/viewcvs/trunk/gcc/unwind-dw2.c?r1=121165&r2=124837&pathrev=153877&diff_format=h - */ - _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode); - _Unwind_SetIP(ctx, (uintptr_t)lj_vm_unwind_rethrow); - return _URC_INSTALL_CONTEXT; - } -#endif -#else - /* This is not the proper way to escape from the unwinder. We get away with - ** it on non-x64 because the interpreter restores all callee-saved regs. - */ - lj_err_throw(L, errcode); -#endif - } - return _URC_CONTINUE_UNWIND; -} - -#if LJ_UNWIND_EXT -#if LJ_TARGET_OSX || defined(__OpenBSD__) -/* Sorry, no thread safety for OSX. Complain to Apple, not me. */ -static _Unwind_Exception static_uex; -#else -static __thread _Unwind_Exception static_uex; -#endif - -/* Raise DWARF2 exception. */ -static void err_raise_ext(int errcode) -{ - static_uex.exclass = LJ_UEXCLASS_MAKE(errcode); - static_uex.excleanup = NULL; - _Unwind_RaiseException(&static_uex); -} -#endif - -#else - -extern void _Unwind_DeleteException(void *); -extern int __gnu_unwind_frame (void *, _Unwind_Context *); -extern int _Unwind_VRS_Set(_Unwind_Context *, int, uint32_t, int, void *); -extern int _Unwind_VRS_Get(_Unwind_Context *, int, uint32_t, int, void *); - -static inline uint32_t _Unwind_GetGR(_Unwind_Context *ctx, int r) -{ - uint32_t v; - _Unwind_VRS_Get(ctx, 0, r, 0, &v); - return v; -} - -static inline void _Unwind_SetGR(_Unwind_Context *ctx, int r, uint32_t v) -{ - _Unwind_VRS_Set(ctx, 0, r, 0, &v); -} - -#define _US_VIRTUAL_UNWIND_FRAME 0 -#define _US_UNWIND_FRAME_STARTING 1 -#define _US_ACTION_MASK 3 -#define _US_FORCE_UNWIND 8 - -/* ARM unwinder personality handler referenced from interpreter .ARM.extab. 
*/ -LJ_FUNCA int lj_err_unwind_arm(int state, void *ucb, _Unwind_Context *ctx) -{ - void *cf = (void *)_Unwind_GetGR(ctx, 13); - lua_State *L = cframe_L(cf); - if ((state & _US_ACTION_MASK) == _US_VIRTUAL_UNWIND_FRAME) { - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); - return _URC_HANDLER_FOUND; - } - if ((state&(_US_ACTION_MASK|_US_FORCE_UNWIND)) == _US_UNWIND_FRAME_STARTING) { - _Unwind_DeleteException(ucb); - _Unwind_SetGR(ctx, 15, (uint32_t)(void *)lj_err_throw); - _Unwind_SetGR(ctx, 0, (uint32_t)L); - _Unwind_SetGR(ctx, 1, (uint32_t)LUA_ERRRUN); - return _URC_INSTALL_CONTEXT; - } - if (__gnu_unwind_frame(ucb, ctx) != _URC_OK) - return _URC_FAILURE; - return _URC_CONTINUE_UNWIND; -} - -#endif - -#elif LJ_TARGET_X64 && LJ_ABI_WIN - -/* -** Someone in Redmond owes me several days of my life. A lot of this is -** undocumented or just plain wrong on MSDN. Some of it can be gathered -** from 3rd party docs or must be found by trial-and-error. They really -** don't want you to write your own language-specific exception handler -** or to interact gracefully with MSVC. :-( -** -** Apparently MSVC doesn't call C++ destructors for foreign exceptions -** unless you compile your C++ code with /EHa. Unfortunately this means -** catch (...) also catches things like access violations. The use of -** _set_se_translator doesn't really help, because it requires /EHa, too. -*/ - -#define WIN32_LEAN_AND_MEAN -#include - -/* Taken from: http://www.nynaeve.net/?p=99 */ -typedef struct UndocumentedDispatcherContext { - ULONG64 ControlPc; - ULONG64 ImageBase; - PRUNTIME_FUNCTION FunctionEntry; - ULONG64 EstablisherFrame; - ULONG64 TargetIp; - PCONTEXT ContextRecord; - void (*LanguageHandler)(void); - PVOID HandlerData; - PUNWIND_HISTORY_TABLE HistoryTable; - ULONG ScopeIndex; - ULONG Fill0; -} UndocumentedDispatcherContext; - -/* Another wild guess. */ -extern void __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow); - -#ifdef MINGW_SDK_INIT -/* Workaround for broken MinGW64 declaration. */ -VOID RtlUnwindEx_FIXED(PVOID,PVOID,PVOID,PVOID,PVOID,PVOID) asm("RtlUnwindEx"); -#define RtlUnwindEx RtlUnwindEx_FIXED -#endif - -#define LJ_MSVC_EXCODE ((DWORD)0xe06d7363) -#define LJ_GCC_EXCODE ((DWORD)0x20474343) - -#define LJ_EXCODE ((DWORD)0xe24c4a00) -#define LJ_EXCODE_MAKE(c) (LJ_EXCODE | (DWORD)(c)) -#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff) -#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff)) - -/* Win64 exception handler for interpreter frame. */ -LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec, - void *cf, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch) -{ - lua_State *L = cframe_L(cf); - int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ? - LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN; - if ((rec->ExceptionFlags & 6)) { /* EH_UNWINDING|EH_EXIT_UNWIND */ - /* Unwind internal frames. */ - err_unwind(L, cf, errcode); - } else { - void *cf2 = err_unwind(L, cf, 0); - if (cf2) { /* We catch it, so start unwinding the upper frames. */ - if (rec->ExceptionCode == LJ_MSVC_EXCODE || - rec->ExceptionCode == LJ_GCC_EXCODE) { -#if LJ_TARGET_WINDOWS - __DestructExceptionObject(rec, 1); -#endif - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); - } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) { - /* Don't catch access violations etc. */ - return ExceptionContinueSearch; - } - /* Unwind the stack and call all handlers for all lower C frames - ** (including ourselves) again with EH_UNWINDING set. 
Then set - ** rsp = cf, rax = errcode and jump to the specified target. - */ - RtlUnwindEx(cf, (void *)((cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ? - lj_vm_unwind_ff_eh : - lj_vm_unwind_c_eh), - rec, (void *)(uintptr_t)errcode, ctx, dispatch->HistoryTable); - /* RtlUnwindEx should never return. */ - } - } - return ExceptionContinueSearch; -} - -/* Raise Windows exception. */ -static void err_raise_ext(int errcode) -{ - RaiseException(LJ_EXCODE_MAKE(errcode), 1 /* EH_NONCONTINUABLE */, 0, NULL); -} - -#endif - -/* -- Error handling ------------------------------------------------------ */ - -/* Throw error. Find catch frame, unwind stack and continue. */ -LJ_NOINLINE void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode) -{ - global_State *g = G(L); - lj_trace_abort(g); - setmref(g->jit_base, NULL); - L->status = 0; -#if LJ_UNWIND_EXT - err_raise_ext(errcode); - /* - ** A return from this function signals a corrupt C stack that cannot be - ** unwound. We have no choice but to call the panic function and exit. - ** - ** Usually this is caused by a C function without unwind information. - ** This should never happen on x64, but may happen if you've manually - ** enabled LUAJIT_UNWIND_EXTERNAL and forgot to recompile *every* - ** non-C++ file with -funwind-tables. - */ - if (G(L)->panic) - G(L)->panic(L); -#else - { - void *cf = err_unwind(L, NULL, errcode); - if (cframe_unwind_ff(cf)) - lj_vm_unwind_ff(cframe_raw(cf)); - else - lj_vm_unwind_c(cframe_raw(cf), errcode); - } -#endif - exit(EXIT_FAILURE); -} - -/* Return string object for error message. */ -LJ_NOINLINE GCstr *lj_err_str(lua_State *L, ErrMsg em) -{ - return lj_str_newz(L, err2msg(em)); -} - -/* Out-of-memory error. */ -LJ_NOINLINE void lj_err_mem(lua_State *L) -{ - if (L->status == LUA_ERRERR+1) /* Don't touch the stack during lua_open. */ - lj_vm_unwind_c(L->cframe, LUA_ERRMEM); - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRMEM)); - lj_err_throw(L, LUA_ERRMEM); -} - -/* Find error function for runtime errors. Requires an extra stack traversal. */ -static ptrdiff_t finderrfunc(lua_State *L) -{ - cTValue *frame = L->base-1, *bot = tvref(L->stack)+LJ_FR2; - void *cf = L->cframe; - while (frame > bot && cf) { - while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */ - if (frame >= restorestack(L, -cframe_nres(cf))) - break; - if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */ - return cframe_errfunc(cf); - cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */ - if (cf == NULL) - return 0; - } - switch (frame_typep(frame)) { - case FRAME_LUA: - case FRAME_LUAP: - frame = frame_prevl(frame); - break; - case FRAME_C: - cf = cframe_prev(cf); - /* fallthrough */ - case FRAME_VARG: - frame = frame_prevd(frame); - break; - case FRAME_CONT: - if (frame_iscont_fficb(frame)) - cf = cframe_prev(cf); - frame = frame_prevd(frame); - break; - case FRAME_CP: - if (cframe_canyield(cf)) return 0; - if (cframe_errfunc(cf) >= 0) - return cframe_errfunc(cf); - frame = frame_prevd(frame); - break; - case FRAME_PCALL: - case FRAME_PCALLH: - if (frame_func(frame_prevd(frame))->c.ffid == FF_xpcall) - return savestack(L, frame_prevd(frame)+1); /* xpcall's errorfunc. */ - return 0; - default: - lua_assert(0); - return 0; - } - } - return 0; -} - -/* Runtime error. 
*/ -LJ_NOINLINE void lj_err_run(lua_State *L) -{ - ptrdiff_t ef = finderrfunc(L); - if (ef) { - TValue *errfunc = restorestack(L, ef); - TValue *top = L->top; - lj_trace_abort(G(L)); - if (!tvisfunc(errfunc) || L->status == LUA_ERRERR) { - setstrV(L, top-1, lj_err_str(L, LJ_ERR_ERRERR)); - lj_err_throw(L, LUA_ERRERR); - } - L->status = LUA_ERRERR; - copyTV(L, top+LJ_FR2, top-1); - copyTV(L, top-1, errfunc); - if (LJ_FR2) setnilV(top++); - L->top = top+1; - lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */ - } - lj_err_throw(L, LUA_ERRRUN); -} - -/* Formatted runtime error message. */ -LJ_NORET LJ_NOINLINE static void err_msgv(lua_State *L, ErrMsg em, ...) -{ - const char *msg; - va_list argp; - va_start(argp, em); - if (curr_funcisL(L)) L->top = curr_topL(L); - msg = lj_strfmt_pushvf(L, err2msg(em), argp); - va_end(argp); - lj_debug_addloc(L, msg, L->base-1, NULL); - lj_err_run(L); -} - -/* Non-vararg variant for better calling conventions. */ -LJ_NOINLINE void lj_err_msg(lua_State *L, ErrMsg em) -{ - err_msgv(L, em); -} - -/* Lexer error. */ -LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok, - BCLine line, ErrMsg em, va_list argp) -{ - char buff[LUA_IDSIZE]; - const char *msg; - lj_debug_shortname(buff, src, line); - msg = lj_strfmt_pushvf(L, err2msg(em), argp); - msg = lj_strfmt_pushf(L, "%s:%d: %s", buff, line, msg); - if (tok) - lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok); - lj_err_throw(L, LUA_ERRSYNTAX); -} - -/* Typecheck error for operands. */ -LJ_NOINLINE void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm) -{ - const char *tname = lj_typename(o); - const char *opname = err2msg(opm); - if (curr_funcisL(L)) { - GCproto *pt = curr_proto(L); - const BCIns *pc = cframe_Lpc(L) - 1; - const char *oname = NULL; - const char *kind = lj_debug_slotname(pt, pc, (BCReg)(o-L->base), &oname); - if (kind) - err_msgv(L, LJ_ERR_BADOPRT, opname, kind, oname, tname); - } - err_msgv(L, LJ_ERR_BADOPRV, opname, tname); -} - -/* Typecheck error for ordered comparisons. */ -LJ_NOINLINE void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2) -{ - const char *t1 = lj_typename(o1); - const char *t2 = lj_typename(o2); - err_msgv(L, t1 == t2 ? LJ_ERR_BADCMPV : LJ_ERR_BADCMPT, t1, t2); - /* This assumes the two "boolean" entries are commoned by the C compiler. */ -} - -/* Typecheck error for __call. */ -LJ_NOINLINE void lj_err_optype_call(lua_State *L, TValue *o) -{ - /* Gross hack if lua_[p]call or pcall/xpcall fail for a non-callable object: - ** L->base still points to the caller. So add a dummy frame with L instead - ** of a function. See lua_getstack(). - */ - const BCIns *pc = cframe_Lpc(L); - if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) { - const char *tname = lj_typename(o); - if (LJ_FR2) o++; - setframe_pc(o, pc); - setframe_gc(o, obj2gco(L), LJ_TTHREAD); - L->top = L->base = o+1; - err_msgv(L, LJ_ERR_BADCALL, tname); - } - lj_err_optype(L, o, LJ_ERR_OPCALL); -} - -/* Error in context of caller. */ -LJ_NOINLINE void lj_err_callermsg(lua_State *L, const char *msg) -{ - TValue *frame = L->base-1; - TValue *pframe = NULL; - if (frame_islua(frame)) { - pframe = frame_prevl(frame); - } else if (frame_iscont(frame)) { - if (frame_iscont_fficb(frame)) { - pframe = frame; - frame = NULL; - } else { - pframe = frame_prevd(frame); -#if LJ_HASFFI - /* Remove frame for FFI metamethods. 
*/ - if (frame_func(frame)->c.ffid >= FF_ffi_meta___index && - frame_func(frame)->c.ffid <= FF_ffi_meta___tostring) { - L->base = pframe+1; - L->top = frame; - setcframe_pc(cframe_raw(L->cframe), frame_contpc(frame)); - } -#endif - } - } - lj_debug_addloc(L, msg, pframe, frame); - lj_err_run(L); -} - -/* Formatted error in context of caller. */ -LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...) -{ - const char *msg; - va_list argp; - va_start(argp, em); - msg = lj_strfmt_pushvf(L, err2msg(em), argp); - va_end(argp); - lj_err_callermsg(L, msg); -} - -/* Error in context of caller. */ -LJ_NOINLINE void lj_err_caller(lua_State *L, ErrMsg em) -{ - lj_err_callermsg(L, err2msg(em)); -} - -/* Argument error message. */ -LJ_NORET LJ_NOINLINE static void err_argmsg(lua_State *L, int narg, - const char *msg) -{ - const char *fname = "?"; - const char *ftype = lj_debug_funcname(L, L->base - 1, &fname); - if (narg < 0 && narg > LUA_REGISTRYINDEX) - narg = (int)(L->top - L->base) + narg + 1; - if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */ - msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg); - else - msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg); - lj_err_callermsg(L, msg); -} - -/* Formatted argument error. */ -LJ_NOINLINE void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...) -{ - const char *msg; - va_list argp; - va_start(argp, em); - msg = lj_strfmt_pushvf(L, err2msg(em), argp); - va_end(argp); - err_argmsg(L, narg, msg); -} - -/* Argument error. */ -LJ_NOINLINE void lj_err_arg(lua_State *L, int narg, ErrMsg em) -{ - err_argmsg(L, narg, err2msg(em)); -} - -/* Typecheck error for arguments. */ -LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname) -{ - const char *tname, *msg; - if (narg <= LUA_REGISTRYINDEX) { - if (narg >= LUA_GLOBALSINDEX) { - tname = lj_obj_itypename[~LJ_TTAB]; - } else { - GCfunc *fn = curr_func(L); - int idx = LUA_GLOBALSINDEX - narg; - if (idx <= fn->c.nupvalues) - tname = lj_typename(&fn->c.upvalue[idx-1]); - else - tname = lj_obj_typename[0]; - } - } else { - TValue *o = narg < 0 ? L->top + narg : L->base + narg-1; - tname = o < L->top ? lj_typename(o) : lj_obj_typename[0]; - } - msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname); - err_argmsg(L, narg, msg); -} - -/* Typecheck error for arguments. */ -LJ_NOINLINE void lj_err_argt(lua_State *L, int narg, int tt) -{ - lj_err_argtype(L, narg, lj_obj_typename[tt+1]); -} - -/* -- Public error handling API ------------------------------------------- */ - -LUA_API lua_CFunction lua_atpanic(lua_State *L, lua_CFunction panicf) -{ - lua_CFunction old = G(L)->panic; - G(L)->panic = panicf; - return old; -} - -/* Forwarders for the public API (C calling convention and no LJ_NORET). */ -LUA_API int lua_error(lua_State *L) -{ - lj_err_run(L); - return 0; /* unreachable */ -} - -LUALIB_API int luaL_argerror(lua_State *L, int narg, const char *msg) -{ - err_argmsg(L, narg, msg); - return 0; /* unreachable */ -} - -LUALIB_API int luaL_typerror(lua_State *L, int narg, const char *xname) -{ - lj_err_argtype(L, narg, xname); - return 0; /* unreachable */ -} - -LUALIB_API void luaL_where(lua_State *L, int level) -{ - int size; - cTValue *frame = lj_debug_frame(L, level, &size); - lj_debug_addloc(L, "", frame, size ? frame+size : NULL); -} - -LUALIB_API int luaL_error(lua_State *L, const char *fmt, ...) 
-{ - const char *msg; - va_list argp; - va_start(argp, fmt); - msg = lj_strfmt_pushvf(L, fmt, argp); - va_end(argp); - lj_err_callermsg(L, msg); - return 0; /* unreachable */ -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.h deleted file mode 100644 index 03a56f0362c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_err.h +++ /dev/null @@ -1,41 +0,0 @@ -/* -** Error handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_ERR_H -#define _LJ_ERR_H - -#include - -#include "lj_obj.h" - -typedef enum { -#define ERRDEF(name, msg) \ - LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1, -#include "lj_errmsg.h" - LJ_ERR__MAX -} ErrMsg; - -LJ_DATA const char *lj_err_allmsg; -#define err2msg(em) (lj_err_allmsg+(int)(em)) - -LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em); -LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode); -LJ_FUNC_NORET void lj_err_mem(lua_State *L); -LJ_FUNC_NORET void lj_err_run(lua_State *L); -LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em); -LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok, - BCLine line, ErrMsg em, va_list argp); -LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm); -LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2); -LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o); -LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg); -LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...); -LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em); -LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em); -LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...); -LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname); -LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_errmsg.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_errmsg.h deleted file mode 100644 index 7717665b038..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_errmsg.h +++ /dev/null @@ -1,190 +0,0 @@ -/* -** VM error messages. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* This file may be included multiple times with different ERRDEF macros. */ - -/* Basic error handling. */ -ERRDEF(ERRMEM, "not enough memory") -ERRDEF(ERRERR, "error in error handling") -ERRDEF(ERRCPP, "C++ exception") - -/* Allocations. */ -ERRDEF(STROV, "string length overflow") -ERRDEF(UDATAOV, "userdata length overflow") -ERRDEF(STKOV, "stack overflow") -ERRDEF(STKOVM, "stack overflow (%s)") -ERRDEF(TABOV, "table overflow") - -/* Table indexing. */ -ERRDEF(NANIDX, "table index is NaN") -ERRDEF(NILIDX, "table index is nil") -ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next")) - -/* Metamethod resolving. */ -ERRDEF(BADCALL, "attempt to call a %s value") -ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)") -ERRDEF(BADOPRV, "attempt to %s a %s value") -ERRDEF(BADCMPT, "attempt to compare %s with %s") -ERRDEF(BADCMPV, "attempt to compare two %s values") -ERRDEF(GETLOOP, "loop in gettable") -ERRDEF(SETLOOP, "loop in settable") -ERRDEF(OPCALL, "call") -ERRDEF(OPINDEX, "index") -ERRDEF(OPARITH, "perform arithmetic on") -ERRDEF(OPCAT, "concatenate") -ERRDEF(OPLEN, "get length of") - -/* Type checks. 
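lj_errmsg.h is included twice with different ERRDEF definitions: once (in lj_err.c above) to paste every message into one NUL-separated string, and once (in lj_err.h) to build an enum in which each LJ_ERR_* constant is the byte offset of its message, which is what err2msg() adds to lj_err_allmsg. A self-contained sketch of the same double-expansion trick; MSGDEF stands in for re-including the header, and the names are illustrative:

#include <stdio.h>

/* List macro playing the role of lj_errmsg.h. */
#define MSGDEF(_) \
  _(ERRMEM, "not enough memory") \
  _(ERRERR, "error in error handling") \
  _(ASSERT, "assertion failed!")

/* First expansion: all messages concatenated, each followed by a NUL. */
static const char allmsg[] =
#define ERRDEF(name, msg) msg "\0"
MSGDEF(ERRDEF)
#undef ERRDEF
;

/* Second expansion: each ID is the byte offset of its message, because the
** extra ERR_name_ enumerator advances the counter by strlen(msg), and the
** next ID implicitly adds 1 for the separating NUL. */
typedef enum {
#define ERRDEF(name, msg) \
  ERR_##name, ERR_##name##_ = ERR_##name + sizeof(msg)-1,
MSGDEF(ERRDEF)
#undef ERRDEF
  ERR__MAX
} ErrMsg;

#define err2msg(em)  (allmsg + (int)(em))

int main(void)
{
  printf("%s\n", err2msg(ERR_ERRERR));  /* error in error handling */
  printf("%s\n", err2msg(ERR_ASSERT));  /* assertion failed! */
  return 0;
}
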
*/ -ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)") -ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)") -ERRDEF(BADTYPE, "%s expected, got %s") -ERRDEF(BADVAL, "invalid value") -ERRDEF(NOVAL, "value expected") -ERRDEF(NOCORO, "coroutine expected") -ERRDEF(NOTABN, "nil or table expected") -ERRDEF(NOLFUNC, "Lua function expected") -ERRDEF(NOFUNCL, "function or level expected") -ERRDEF(NOSFT, "string/function/table expected") -ERRDEF(NOPROXY, "boolean or proxy expected") -ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number") -ERRDEF(FORLIM, LUA_QL("for") " limit must be a number") -ERRDEF(FORSTEP, LUA_QL("for") " step must be a number") - -/* C API checks. */ -ERRDEF(NOENV, "no calling environment") -ERRDEF(CYIELD, "attempt to yield across C-call boundary") -ERRDEF(BADLU, "bad light userdata pointer") -ERRDEF(NOGCMM, "bad action while in __gc metamethod") -#if LJ_TARGET_WINDOWS -ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)") -#endif - -/* Standard library function errors. */ -ERRDEF(ASSERT, "assertion failed!") -ERRDEF(PROTMT, "cannot change a protected metatable") -ERRDEF(UNPACK, "too many results to unpack") -ERRDEF(RDRSTR, "reader function must return a string") -ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print")) -ERRDEF(IDXRNG, "index out of range") -ERRDEF(BASERNG, "base out of range") -ERRDEF(LVLRNG, "level out of range") -ERRDEF(INVLVL, "invalid level") -ERRDEF(INVOPT, "invalid option") -ERRDEF(INVOPTM, "invalid option " LUA_QS) -ERRDEF(INVFMT, "invalid format") -ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object") -ERRDEF(CORUN, "cannot resume running coroutine") -ERRDEF(CODEAD, "cannot resume dead coroutine") -ERRDEF(COSUSP, "cannot resume non-suspended coroutine") -ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert")) -ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat")) -ERRDEF(TABSORT, "invalid order function for sorting") -ERRDEF(IOCLFL, "attempt to use a closed file") -ERRDEF(IOSTDCL, "standard file is closed") -ERRDEF(OSUNIQF, "unable to generate a unique filename") -ERRDEF(OSDATEF, "field " LUA_QS " missing in date table") -ERRDEF(STRDUMP, "unable to dump given function") -ERRDEF(STRSLC, "string slice too long") -ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern") -ERRDEF(STRPATC, "invalid pattern capture") -ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")") -ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")") -ERRDEF(STRPATU, "unbalanced pattern") -ERRDEF(STRPATX, "pattern too complex") -ERRDEF(STRCAPI, "invalid capture index") -ERRDEF(STRCAPN, "too many captures") -ERRDEF(STRCAPU, "unfinished capture") -ERRDEF(STRFMT, "invalid option " LUA_QS " to " LUA_QL("format")) -ERRDEF(STRGSRV, "invalid replacement value (a %s)") -ERRDEF(BADMODN, "name conflict for module " LUA_QS) -#if LJ_HASJIT -ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?") -#if LJ_TARGET_X86ORX64 -ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2") -#else -ERRDEF(NOJIT, "JIT compiler disabled") -#endif -#elif defined(LJ_ARCH_NOJIT) -ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)") -#else -ERRDEF(NOJIT, "JIT compiler permanently disabled by build option") -#endif -ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS) - -/* Lexer/parser errors. 
*/ -ERRDEF(XMODE, "attempt to load chunk with wrong mode") -ERRDEF(XNEAR, "%s near " LUA_QS) -ERRDEF(XLINES, "chunk has too many lines") -ERRDEF(XLEVELS, "chunk has too many syntax levels") -ERRDEF(XNUMBER, "malformed number") -ERRDEF(XLSTR, "unfinished long string") -ERRDEF(XLCOM, "unfinished long comment") -ERRDEF(XSTR, "unfinished string") -ERRDEF(XESC, "invalid escape sequence") -ERRDEF(XLDELIM, "invalid long string delimiter") -ERRDEF(XTOKEN, LUA_QS " expected") -ERRDEF(XJUMP, "control structure too long") -ERRDEF(XSLOTS, "function or expression too complex") -ERRDEF(XLIMC, "chunk has more than %d local variables") -ERRDEF(XLIMM, "main function has more than %d %s") -ERRDEF(XLIMF, "function at line %d has more than %d %s") -ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)") -ERRDEF(XFIXUP, "function too long for return fixup") -ERRDEF(XPARAM, " or " LUA_QL("...") " expected") -#if !LJ_52 -ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)") -#endif -ERRDEF(XFUNARG, "function arguments expected") -ERRDEF(XSYMBOL, "unexpected symbol") -ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function") -ERRDEF(XSYNTAX, "syntax error") -ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected") -ERRDEF(XBREAK, "no loop to break") -ERRDEF(XLUNDEF, "undefined label " LUA_QS) -ERRDEF(XLDUP, "duplicate label " LUA_QS) -ERRDEF(XGSCOPE, " jumps into the scope of local " LUA_QS) - -/* Bytecode reader errors. */ -ERRDEF(BCFMT, "cannot load incompatible bytecode") -ERRDEF(BCBAD, "cannot load malformed bytecode") - -#if LJ_HASFFI -/* FFI errors. */ -ERRDEF(FFI_INVTYPE, "invalid C type") -ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large") -ERRDEF(FFI_BADSCL, "bad storage class") -ERRDEF(FFI_DECLSPEC, "declaration specifier expected") -ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS) -ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS) -ERRDEF(FFI_NUMPARAM, "wrong number of type parameters") -ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS) -ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS) -ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS) -ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS) -ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS) -ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS) -ERRDEF(FFI_BADCALL, LUA_QS " is not callable") -ERRDEF(FFI_NUMARG, "wrong number of arguments for function call") -ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS) -ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed") -ERRDEF(FFI_BADIDXW, LUA_QS " cannot be indexed with " LUA_QS) -ERRDEF(FFI_BADMM, LUA_QS " has no " LUA_QS " metamethod") -ERRDEF(FFI_WRCONST, "attempt to write to constant location") -ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS) -ERRDEF(FFI_BADCBACK, "bad callback") -#if LJ_OS_NOJIT -ERRDEF(FFI_CBACKOV, "no support for callbacks on this OS") -#else -ERRDEF(FFI_CBACKOV, "too many callbacks") -#endif -ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields") -ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)") -#endif - -#undef ERRDEF - -/* Detecting unused error messages: - awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh -*/ diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ff.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ff.h deleted file mode 100644 index 73dad9695a6..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ff.h +++ 
/dev/null @@ -1,18 +0,0 @@ -/* -** Fast function IDs. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_FF_H -#define _LJ_FF_H - -/* Fast function ID. */ -typedef enum { - FF_LUA_ = FF_LUA, /* Lua function (must be 0). */ - FF_C_ = FF_C, /* Regular C function (must be 1). */ -#define FFDEF(name) FF_##name, -#include "lj_ffdef.h" - FF__MAX -} FastFunc; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.c deleted file mode 100644 index a08113ca386..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.c +++ /dev/null @@ -1,1247 +0,0 @@ -/* -** Fast function call recorder. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_ffrecord_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_err.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_frame.h" -#include "lj_bc.h" -#include "lj_ff.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_record.h" -#include "lj_ffrecord.h" -#include "lj_crecord.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -/* -- Fast function recording handlers ------------------------------------ */ - -/* Conventions for fast function call handlers: -** -** The argument slots start at J->base[0]. All of them are guaranteed to be -** valid and type-specialized references. J->base[J->maxslot] is set to 0 -** as a sentinel. The runtime argument values start at rd->argv[0]. -** -** In general fast functions should check for presence of all of their -** arguments and for the correct argument types. Some simplifications -** are allowed if the interpreter throws instead. But even if recording -** is aborted, the generated IR must be consistent (no zero-refs). -** -** The number of results in rd->nres is set to 1. Handlers that return -** a different number of results need to override it. A negative value -** prevents return processing (e.g. for pending calls). -** -** Results need to be stored starting at J->base[0]. Return processing -** moves them to the right slots later. -** -** The per-ffid auxiliary data is the value of the 2nd part of the -** LJLIB_REC() annotation. This allows handling similar functionality -** in a common handler. -*/ - -/* Type of handler to record a fast function. */ -typedef void (LJ_FASTCALL *RecordFunc)(jit_State *J, RecordFFData *rd); - -/* Get runtime value of int argument. */ -static int32_t argv2int(jit_State *J, TValue *o) -{ - if (!lj_strscan_numberobj(o)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - return tvisint(o) ? intV(o) : lj_num2int(numV(o)); -} - -/* Get runtime value of string argument. */ -static GCstr *argv2str(jit_State *J, TValue *o) -{ - if (LJ_LIKELY(tvisstr(o))) { - return strV(o); - } else { - GCstr *s; - if (!tvisnumber(o)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - s = lj_strfmt_number(J->L, o); - setstrV(J->L, o, s); - return s; - } -} - -/* Return number of results wanted by caller. 
*/ -static ptrdiff_t results_wanted(jit_State *J) -{ - TValue *frame = J->L->base-1; - if (frame_islua(frame)) - return (ptrdiff_t)bc_b(frame_pc(frame)[-1]) - 1; - else - return -1; -} - -#ifdef LUAJIT_TRACE_STITCHING -/* This feature is disabled for now due to a design mistake. Sorry. -** -** It causes unpredictable behavior and crashes when a full trace flush -** happens with a stitching continuation still in the stack somewhere. -*/ - -/* Trace stitching: add continuation below frame to start a new trace. */ -static void recff_stitch(jit_State *J) -{ - ASMFunction cont = lj_cont_stitch; - TraceNo traceno = J->cur.traceno; - lua_State *L = J->L; - TValue *base = L->base; - const BCIns *pc = frame_pc(base-1); - TValue *pframe = frame_prevl(base-1); - TRef trcont; - - lua_assert(!LJ_FR2); /* TODO_FR2: handle frame shift. */ - /* Move func + args up in Lua stack and insert continuation. */ - memmove(&base[1], &base[-1], sizeof(TValue)*(J->maxslot+1)); - setframe_ftsz(base+1, ((char *)(base+1) - (char *)pframe) + FRAME_CONT); - setcont(base, cont); - setframe_pc(base, pc); - if (LJ_DUALNUM) setintV(base-1, traceno); else base[-1].u64 = traceno; - L->base += 2; - L->top += 2; - - /* Ditto for the IR. */ - memmove(&J->base[1], &J->base[-1], sizeof(TRef)*(J->maxslot+1)); -#if LJ_64 - trcont = lj_ir_kptr(J, (void *)((int64_t)cont-(int64_t)lj_vm_asm_begin)); -#else - trcont = lj_ir_kptr(J, (void *)cont); -#endif - J->base[0] = trcont | TREF_CONT; - J->base[-1] = LJ_DUALNUM ? lj_ir_kint(J,traceno) : lj_ir_knum_u64(J,traceno); - J->base += 2; - J->baseslot += 2; - J->framedepth++; - - lj_record_stop(J, LJ_TRLINK_STITCH, 0); - - /* Undo Lua stack changes. */ - memmove(&base[-1], &base[1], sizeof(TValue)*(J->maxslot+1)); - setframe_pc(base-1, pc); - L->base -= 2; - L->top -= 2; -} - -/* Fallback handler for fast functions that are not recorded (yet). */ -static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd) -{ - if (J->cur.nins < (IRRef)J->param[JIT_P_minstitch] + REF_BASE) { - lj_trace_err_info(J, LJ_TRERR_TRACEUV); - } else { - /* Can only stitch from Lua call. */ - if (J->framedepth && frame_islua(J->L->base-1)) { - BCOp op = bc_op(*frame_pc(J->L->base-1)); - /* Stitched trace cannot start with *M op with variable # of args. */ - if (!(op == BC_CALLM || op == BC_CALLMT || - op == BC_RETM || op == BC_TSETM)) { - switch (J->fn->c.ffid) { - case FF_error: - case FF_debug_sethook: - case FF_jit_flush: - break; /* Don't stitch across special builtins. */ - default: - recff_stitch(J); /* Use trace stitching. */ - rd->nres = -1; - return; - } - } - } - /* Otherwise stop trace and return to interpreter. */ - lj_record_stop(J, LJ_TRLINK_RETURN, 0); - rd->nres = -1; - } -} - -/* Fallback handler for unsupported variants of fast functions. */ -#define recff_nyiu recff_nyi - -/* Must stop the trace for classic C functions with arbitrary side-effects. */ -#define recff_c recff_nyi -#else -/* Fallback handler for fast functions that are not recorded (yet). */ -static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd) -{ - setfuncV(J->L, &J->errinfo, J->fn); - lj_trace_err_info(J, LJ_TRERR_NYIFF); - UNUSED(rd); -} - -/* Throw error for unsupported variant of fast function. */ -LJ_NORET static void recff_nyiu(jit_State *J, RecordFFData *rd) -{ - setfuncV(J->L, &J->errinfo, J->fn); - lj_trace_err_info(J, LJ_TRERR_NYIFFU); - UNUSED(rd); -} - -/* Must abort the trace for classic C functions with arbitrary side-effects. 
*/ -static void LJ_FASTCALL recff_c(jit_State *J, RecordFFData *rd) -{ - setfuncV(J->L, &J->errinfo, J->fn); - lj_trace_err_info(J, LJ_TRERR_NYICF); - UNUSED(rd); -} -#endif - -/* Emit BUFHDR for the global temporary buffer. */ -static TRef recff_bufhdr(jit_State *J) -{ - return emitir(IRT(IR_BUFHDR, IRT_P32), - lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET); -} - -/* -- Base library fast functions ----------------------------------------- */ - -static void LJ_FASTCALL recff_assert(jit_State *J, RecordFFData *rd) -{ - /* Arguments already specialized. The interpreter throws for nil/false. */ - rd->nres = J->maxslot; /* Pass through all arguments. */ -} - -static void LJ_FASTCALL recff_type(jit_State *J, RecordFFData *rd) -{ - /* Arguments already specialized. Result is a constant string. Neat, huh? */ - uint32_t t; - if (tvisnumber(&rd->argv[0])) - t = ~LJ_TNUMX; - else if (LJ_64 && !LJ_GC64 && tvislightud(&rd->argv[0])) - t = ~LJ_TLIGHTUD; - else - t = ~itype(&rd->argv[0]); - J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[t])); - UNUSED(rd); -} - -static void LJ_FASTCALL recff_getmetatable(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (tr) { - RecordIndex ix; - ix.tab = tr; - copyTV(J->L, &ix.tabv, &rd->argv[0]); - if (lj_record_mm_lookup(J, &ix, MM_metatable)) - J->base[0] = ix.mobj; - else - J->base[0] = ix.mt; - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_setmetatable(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - TRef mt = J->base[1]; - if (tref_istab(tr) && (tref_istab(mt) || (mt && tref_isnil(mt)))) { - TRef fref, mtref; - RecordIndex ix; - ix.tab = tr; - copyTV(J->L, &ix.tabv, &rd->argv[0]); - lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */ - fref = emitir(IRT(IR_FREF, IRT_P32), tr, IRFL_TAB_META); - mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt; - emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref); - if (!tref_isnil(mt)) - emitir(IRT(IR_TBAR, IRT_TAB), tr, 0); - J->base[0] = tr; - J->needsnap = 1; - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_rawget(jit_State *J, RecordFFData *rd) -{ - RecordIndex ix; - ix.tab = J->base[0]; ix.key = J->base[1]; - if (tref_istab(ix.tab) && ix.key) { - ix.val = 0; ix.idxchain = 0; - settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); - copyTV(J->L, &ix.keyv, &rd->argv[1]); - J->base[0] = lj_record_idx(J, &ix); - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_rawset(jit_State *J, RecordFFData *rd) -{ - RecordIndex ix; - ix.tab = J->base[0]; ix.key = J->base[1]; ix.val = J->base[2]; - if (tref_istab(ix.tab) && ix.key && ix.val) { - ix.idxchain = 0; - settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); - copyTV(J->L, &ix.keyv, &rd->argv[1]); - copyTV(J->L, &ix.valv, &rd->argv[2]); - lj_record_idx(J, &ix); - /* Pass through table at J->base[0] as result. */ - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_rawequal(jit_State *J, RecordFFData *rd) -{ - TRef tra = J->base[0]; - TRef trb = J->base[1]; - if (tra && trb) { - int diff = lj_record_objcmp(J, tra, trb, &rd->argv[0], &rd->argv[1]); - J->base[0] = diff ? TREF_FALSE : TREF_TRUE; - } /* else: Interpreter will throw. */ -} - -#if LJ_52 -static void LJ_FASTCALL recff_rawlen(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (tref_isstr(tr)) - J->base[0] = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN); - else if (tref_istab(tr)) - J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, tr); - /* else: Interpreter will throw. 
*/ - UNUSED(rd); -} -#endif - -/* Determine mode of select() call. */ -int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv) -{ - if (tref_isstr(tr) && *strVdata(tv) == '#') { /* select('#', ...) */ - if (strV(tv)->len == 1) { - emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv))); - } else { - TRef trptr = emitir(IRT(IR_STRREF, IRT_P32), tr, lj_ir_kint(J, 0)); - TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY); - emitir(IRTG(IR_EQ, IRT_INT), trchar, lj_ir_kint(J, '#')); - } - return 0; - } else { /* select(n, ...) */ - int32_t start = argv2int(J, tv); - if (start == 0) lj_trace_err(J, LJ_TRERR_BADTYPE); /* A bit misleading. */ - return start; - } -} - -static void LJ_FASTCALL recff_select(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (tr) { - ptrdiff_t start = lj_ffrecord_select_mode(J, tr, &rd->argv[0]); - if (start == 0) { /* select('#', ...) */ - J->base[0] = lj_ir_kint(J, J->maxslot - 1); - } else if (tref_isk(tr)) { /* select(k, ...) */ - ptrdiff_t n = (ptrdiff_t)J->maxslot; - if (start < 0) start += n; - else if (start > n) start = n; - rd->nres = n - start; - if (start >= 1) { - ptrdiff_t i; - for (i = 0; i < n - start; i++) - J->base[i] = J->base[start+i]; - } /* else: Interpreter will throw. */ - } else { - recff_nyiu(J, rd); - return; - } - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_tonumber(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - TRef base = J->base[1]; - if (tr && !tref_isnil(base)) { - base = lj_opt_narrow_toint(J, base); - if (!tref_isk(base) || IR(tref_ref(base))->i != 10) { - recff_nyiu(J, rd); - return; - } - } - if (tref_isnumber_str(tr)) { - if (tref_isstr(tr)) { - TValue tmp; - if (!lj_strscan_num(strV(&rd->argv[0]), &tmp)) { - recff_nyiu(J, rd); /* Would need an inverted STRTO for this case. */ - return; - } - tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); - } -#if LJ_HASFFI - } else if (tref_iscdata(tr)) { - lj_crecord_tonumber(J, rd); - return; -#endif - } else { - tr = TREF_NIL; - } - J->base[0] = tr; - UNUSED(rd); -} - -static TValue *recff_metacall_cp(lua_State *L, lua_CFunction dummy, void *ud) -{ - jit_State *J = (jit_State *)ud; - lj_record_tailcall(J, 0, 1); - UNUSED(L); UNUSED(dummy); - return NULL; -} - -static int recff_metacall(jit_State *J, RecordFFData *rd, MMS mm) -{ - RecordIndex ix; - ix.tab = J->base[0]; - copyTV(J->L, &ix.tabv, &rd->argv[0]); - if (lj_record_mm_lookup(J, &ix, mm)) { /* Has metamethod? */ - int errcode; - TValue argv0; - /* Temporarily insert metamethod below object. */ - J->base[1] = J->base[0]; - J->base[0] = ix.mobj; - copyTV(J->L, &argv0, &rd->argv[0]); - copyTV(J->L, &rd->argv[1], &rd->argv[0]); - copyTV(J->L, &rd->argv[0], &ix.mobjv); - /* Need to protect lj_record_tailcall because it may throw. */ - errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp); - /* Always undo Lua stack changes to avoid confusing the interpreter. */ - copyTV(J->L, &rd->argv[0], &argv0); - if (errcode) - lj_err_throw(J->L, errcode); /* Propagate errors. */ - rd->nres = -1; /* Pending call. */ - return 1; /* Tailcalled to metamethod. */ - } - return 0; -} - -static void LJ_FASTCALL recff_tostring(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (tref_isstr(tr)) { - /* Ignore __tostring in the string base metatable. */ - /* Pass on result in J->base[0]. */ - } else if (tr && !recff_metacall(J, rd, MM_tostring)) { - if (tref_isnumber(tr)) { - J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr, - tref_isnum(tr) ? 
IRTOSTR_NUM : IRTOSTR_INT); - } else if (tref_ispri(tr)) { - J->base[0] = lj_ir_kstr(J, lj_strfmt_obj(J->L, &rd->argv[0])); - } else { - recff_nyiu(J, rd); - return; - } - } -} - -static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd) -{ - RecordIndex ix; - ix.tab = J->base[0]; - if (tref_istab(ix.tab)) { - if (!tvisnumber(&rd->argv[1])) /* No support for string coercion. */ - lj_trace_err(J, LJ_TRERR_BADTYPE); - setintV(&ix.keyv, numberVint(&rd->argv[1])+1); - settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); - ix.val = 0; ix.idxchain = 0; - ix.key = lj_opt_narrow_toint(J, J->base[1]); - J->base[0] = ix.key = emitir(IRTI(IR_ADD), ix.key, lj_ir_kint(J, 1)); - J->base[1] = lj_record_idx(J, &ix); - rd->nres = tref_isnil(J->base[1]) ? 0 : 2; - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_xpairs(jit_State *J, RecordFFData *rd) -{ - if (!(LJ_52 && recff_metacall(J, rd, MM_ipairs))) { - TRef tab = J->base[0]; - if (tref_istab(tab)) { - J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0])); - J->base[1] = tab; - J->base[2] = rd->data ? lj_ir_kint(J, 0) : TREF_NIL; - rd->nres = 3; - } /* else: Interpreter will throw. */ - } -} - -static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd) -{ - if (J->maxslot >= 1) { - lj_record_call(J, 0, J->maxslot - 1); - rd->nres = -1; /* Pending call. */ - } /* else: Interpreter will throw. */ -} - -static TValue *recff_xpcall_cp(lua_State *L, lua_CFunction dummy, void *ud) -{ - jit_State *J = (jit_State *)ud; - lj_record_call(J, 1, J->maxslot - 2); - UNUSED(L); UNUSED(dummy); - return NULL; -} - -static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd) -{ - if (J->maxslot >= 2) { - TValue argv0, argv1; - TRef tmp; - int errcode; - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - /* Swap function and traceback. */ - tmp = J->base[0]; J->base[0] = J->base[1]; J->base[1] = tmp; - copyTV(J->L, &argv0, &rd->argv[0]); - copyTV(J->L, &argv1, &rd->argv[1]); - copyTV(J->L, &rd->argv[0], &argv1); - copyTV(J->L, &rd->argv[1], &argv0); - /* Need to protect lj_record_call because it may throw. */ - errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp); - /* Always undo Lua stack swap to avoid confusing the interpreter. */ - copyTV(J->L, &rd->argv[0], &argv0); - copyTV(J->L, &rd->argv[1], &argv1); - if (errcode) - lj_err_throw(J->L, errcode); /* Propagate errors. */ - rd->nres = -1; /* Pending call. */ - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_getfenv(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - /* Only support getfenv(0) for now. */ - if (tref_isint(tr) && tref_isk(tr) && IR(tref_ref(tr))->i == 0) { - TRef trl = emitir(IRT(IR_LREF, IRT_THREAD), 0, 0); - J->base[0] = emitir(IRT(IR_FLOAD, IRT_TAB), trl, IRFL_THREAD_ENV); - return; - } - recff_nyiu(J, rd); -} - -/* -- Math library fast functions ----------------------------------------- */ - -static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonum(J, J->base[0]); - J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_knum_abs(J)); - UNUSED(rd); -} - -/* Record rounding functions math.floor and math.ceil. */ -static void LJ_FASTCALL recff_math_round(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (!tref_isinteger(tr)) { /* Pass through integers unmodified. */ - tr = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, tr), rd->data); - /* Result is integral (or NaN/Inf), but may not fit an int32_t. 
*/ - if (LJ_DUALNUM) { /* Try to narrow using a guarded conversion to int. */ - lua_Number n = lj_vm_foldfpm(numberVnum(&rd->argv[0]), rd->data); - if (n == (lua_Number)lj_num2int(n)) - tr = emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_CHECK); - } - J->base[0] = tr; - } -} - -/* Record unary math.* functions, mapped to IR_FPMATH opcode. */ -static void LJ_FASTCALL recff_math_unary(jit_State *J, RecordFFData *rd) -{ - J->base[0] = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, J->base[0]), rd->data); -} - -/* Record math.log. */ -static void LJ_FASTCALL recff_math_log(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonum(J, J->base[0]); - if (J->base[1]) { -#ifdef LUAJIT_NO_LOG2 - uint32_t fpm = IRFPM_LOG; -#else - uint32_t fpm = IRFPM_LOG2; -#endif - TRef trb = lj_ir_tonum(J, J->base[1]); - tr = emitir(IRTN(IR_FPMATH), tr, fpm); - trb = emitir(IRTN(IR_FPMATH), trb, fpm); - trb = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), trb); - tr = emitir(IRTN(IR_MUL), tr, trb); - } else { - tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_LOG); - } - J->base[0] = tr; - UNUSED(rd); -} - -/* Record math.atan2. */ -static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonum(J, J->base[0]); - TRef tr2 = lj_ir_tonum(J, J->base[1]); - J->base[0] = emitir(IRTN(IR_ATAN2), tr, tr2); - UNUSED(rd); -} - -/* Record math.ldexp. */ -static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonum(J, J->base[0]); -#if LJ_TARGET_X86ORX64 - TRef tr2 = lj_ir_tonum(J, J->base[1]); -#else - TRef tr2 = lj_opt_narrow_toint(J, J->base[1]); -#endif - J->base[0] = emitir(IRTN(IR_LDEXP), tr, tr2); - UNUSED(rd); -} - -/* Record math.asin, math.acos, math.atan. */ -static void LJ_FASTCALL recff_math_atrig(jit_State *J, RecordFFData *rd) -{ - TRef y = lj_ir_tonum(J, J->base[0]); - TRef x = lj_ir_knum_one(J); - uint32_t ffid = rd->data; - if (ffid != FF_math_atan) { - TRef tmp = emitir(IRTN(IR_MUL), y, y); - tmp = emitir(IRTN(IR_SUB), x, tmp); - tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_SQRT); - if (ffid == FF_math_asin) { x = tmp; } else { x = y; y = tmp; } - } - J->base[0] = emitir(IRTN(IR_ATAN2), y, x); -} - -static void LJ_FASTCALL recff_math_htrig(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonum(J, J->base[0]); - J->base[0] = emitir(IRTN(IR_CALLN), tr, rd->data); -} - -static void LJ_FASTCALL recff_math_modf(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (tref_isinteger(tr)) { - J->base[0] = tr; - J->base[1] = lj_ir_kint(J, 0); - } else { - TRef trt; - tr = lj_ir_tonum(J, tr); - trt = emitir(IRTN(IR_FPMATH), tr, IRFPM_TRUNC); - J->base[0] = trt; - J->base[1] = emitir(IRTN(IR_SUB), tr, trt); - } - rd->nres = 2; -} - -static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonum(J, J->base[0]); - if (!tref_isnumber_str(J->base[1])) - lj_trace_err(J, LJ_TRERR_BADTYPE); - J->base[0] = lj_opt_narrow_pow(J, tr, J->base[1], &rd->argv[1]); - UNUSED(rd); -} - -static void LJ_FASTCALL recff_math_minmax(jit_State *J, RecordFFData *rd) -{ - TRef tr = lj_ir_tonumber(J, J->base[0]); - uint32_t op = rd->data; - BCReg i; - for (i = 1; J->base[i] != 0; i++) { - TRef tr2 = lj_ir_tonumber(J, J->base[i]); - IRType t = IRT_INT; - if (!(tref_isinteger(tr) && tref_isinteger(tr2))) { - if (tref_isinteger(tr)) tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); - if (tref_isinteger(tr2)) tr2 = emitir(IRTN(IR_CONV), tr2, IRCONV_NUM_INT); - t = IRT_NUM; - } - tr = emitir(IRT(op, t), tr, tr2); - } - J->base[0] = tr; -} - -static void 
LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd) -{ - GCudata *ud = udataV(&J->fn->c.upvalue[0]); - TRef tr, one; - lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */ - tr = lj_ir_call(J, IRCALL_lj_math_random_step, lj_ir_kptr(J, uddata(ud))); - one = lj_ir_knum_one(J); - tr = emitir(IRTN(IR_SUB), tr, one); - if (J->base[0]) { - TRef tr1 = lj_ir_tonum(J, J->base[0]); - if (J->base[1]) { /* d = floor(d*(r2-r1+1.0)) + r1 */ - TRef tr2 = lj_ir_tonum(J, J->base[1]); - tr2 = emitir(IRTN(IR_SUB), tr2, tr1); - tr2 = emitir(IRTN(IR_ADD), tr2, one); - tr = emitir(IRTN(IR_MUL), tr, tr2); - tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR); - tr = emitir(IRTN(IR_ADD), tr, tr1); - } else { /* d = floor(d*r1) + 1.0 */ - tr = emitir(IRTN(IR_MUL), tr, tr1); - tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR); - tr = emitir(IRTN(IR_ADD), tr, one); - } - } - J->base[0] = tr; - UNUSED(rd); -} - -/* -- Bit library fast functions ------------------------------------------ */ - -/* Record bit.tobit. */ -static void LJ_FASTCALL recff_bit_tobit(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; -#if LJ_HASFFI - if (tref_iscdata(tr)) { recff_bit64_tobit(J, rd); return; } -#endif - J->base[0] = lj_opt_narrow_tobit(J, tr); - UNUSED(rd); -} - -/* Record unary bit.bnot, bit.bswap. */ -static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd) -{ -#if LJ_HASFFI - if (recff_bit64_unary(J, rd)) - return; -#endif - J->base[0] = emitir(IRTI(rd->data), lj_opt_narrow_tobit(J, J->base[0]), 0); -} - -/* Record N-ary bit.band, bit.bor, bit.bxor. */ -static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd) -{ -#if LJ_HASFFI - if (recff_bit64_nary(J, rd)) - return; -#endif - { - TRef tr = lj_opt_narrow_tobit(J, J->base[0]); - uint32_t ot = IRTI(rd->data); - BCReg i; - for (i = 1; J->base[i] != 0; i++) - tr = emitir(ot, tr, lj_opt_narrow_tobit(J, J->base[i])); - J->base[0] = tr; - } -} - -/* Record bit shifts. */ -static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd) -{ -#if LJ_HASFFI - if (recff_bit64_shift(J, rd)) - return; -#endif - { - TRef tr = lj_opt_narrow_tobit(J, J->base[0]); - TRef tsh = lj_opt_narrow_tobit(J, J->base[1]); - IROp op = (IROp)rd->data; - if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && - !tref_isk(tsh)) - tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31)); -#ifdef LJ_TARGET_UNIFYROT - if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) { - op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR; - tsh = emitir(IRTI(IR_NEG), tsh, tsh); - } -#endif - J->base[0] = emitir(IRTI(op), tr, tsh); - } -} - -static void LJ_FASTCALL recff_bit_tohex(jit_State *J, RecordFFData *rd) -{ -#if LJ_HASFFI - TRef hdr = recff_bufhdr(J); - TRef tr = recff_bit64_tohex(J, rd, hdr); - J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); -#else - recff_nyiu(J, rd); /* Don't bother working around this NYI. */ -#endif -} - -/* -- String library fast functions --------------------------------------- */ - -/* Specialize to relative starting position for string. */ -static TRef recff_string_start(jit_State *J, GCstr *s, int32_t *st, TRef tr, - TRef trlen, TRef tr0) -{ - int32_t start = *st; - if (start < 0) { - emitir(IRTGI(IR_LT), tr, tr0); - tr = emitir(IRTI(IR_ADD), trlen, tr); - start = start + (int32_t)s->len; - emitir(start < 0 ? 
IRTGI(IR_LT) : IRTGI(IR_GE), tr, tr0); - if (start < 0) { - tr = tr0; - start = 0; - } - } else if (start == 0) { - emitir(IRTGI(IR_EQ), tr, tr0); - tr = tr0; - } else { - tr = emitir(IRTI(IR_ADD), tr, lj_ir_kint(J, -1)); - emitir(IRTGI(IR_GE), tr, tr0); - start--; - } - *st = start; - return tr; -} - -/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */ -static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd) -{ - TRef trstr = lj_ir_tostr(J, J->base[0]); - TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN); - TRef tr0 = lj_ir_kint(J, 0); - TRef trstart, trend; - GCstr *str = argv2str(J, &rd->argv[0]); - int32_t start, end; - if (rd->data) { /* string.sub(str, start [,end]) */ - start = argv2int(J, &rd->argv[1]); - trstart = lj_opt_narrow_toint(J, J->base[1]); - trend = J->base[2]; - if (tref_isnil(trend)) { - trend = lj_ir_kint(J, -1); - end = -1; - } else { - trend = lj_opt_narrow_toint(J, trend); - end = argv2int(J, &rd->argv[2]); - } - } else { /* string.byte(str, [,start [,end]]) */ - if (tref_isnil(J->base[1])) { - start = 1; - trstart = lj_ir_kint(J, 1); - } else { - start = argv2int(J, &rd->argv[1]); - trstart = lj_opt_narrow_toint(J, J->base[1]); - } - if (J->base[1] && !tref_isnil(J->base[2])) { - trend = lj_opt_narrow_toint(J, J->base[2]); - end = argv2int(J, &rd->argv[2]); - } else { - trend = trstart; - end = start; - } - } - if (end < 0) { - emitir(IRTGI(IR_LT), trend, tr0); - trend = emitir(IRTI(IR_ADD), emitir(IRTI(IR_ADD), trlen, trend), - lj_ir_kint(J, 1)); - end = end+(int32_t)str->len+1; - } else if ((MSize)end <= str->len) { - emitir(IRTGI(IR_ULE), trend, trlen); - } else { - emitir(IRTGI(IR_UGT), trend, trlen); - end = (int32_t)str->len; - trend = trlen; - } - trstart = recff_string_start(J, str, &start, trstart, trlen, tr0); - if (rd->data) { /* Return string.sub result. */ - if (end - start >= 0) { - /* Also handle empty range here, to avoid extra traces. */ - TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart); - emitir(IRTGI(IR_GE), trslen, tr0); - trptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart); - J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen); - } else { /* Range underflow: return empty string. */ - emitir(IRTGI(IR_LT), trend, trstart); - J->base[0] = lj_ir_kstr(J, &J2G(J)->strempty); - } - } else { /* Return string.byte result(s). */ - ptrdiff_t i, len = end - start; - if (len > 0) { - TRef trslen = emitir(IRTI(IR_SUB), trend, trstart); - emitir(IRTGI(IR_EQ), trslen, lj_ir_kint(J, (int32_t)len)); - if (J->baseslot + len > LJ_MAX_JSLOTS) - lj_trace_err_info(J, LJ_TRERR_STACKOV); - rd->nres = len; - for (i = 0; i < len; i++) { - TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i)); - tmp = emitir(IRT(IR_STRREF, IRT_P32), trstr, tmp); - J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY); - } - } else { /* Empty range or range underflow: return no results. */ - emitir(IRTGI(IR_LE), trend, trstart); - rd->nres = 0; - } - } -} - -static void LJ_FASTCALL recff_string_char(jit_State *J, RecordFFData *rd) -{ - TRef k255 = lj_ir_kint(J, 255); - BCReg i; - for (i = 0; J->base[i] != 0; i++) { /* Convert char values to strings. */ - TRef tr = lj_opt_narrow_toint(J, J->base[i]); - emitir(IRTGI(IR_ULE), tr, k255); - J->base[i] = emitir(IRT(IR_TOSTR, IRT_STR), tr, IRTOSTR_CHAR); - } - if (i > 1) { /* Concatenate the strings, if there's more than one. 
*/ - TRef hdr = recff_bufhdr(J), tr = hdr; - for (i = 0; J->base[i] != 0; i++) - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, J->base[i]); - J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); - } - UNUSED(rd); -} - -static void LJ_FASTCALL recff_string_rep(jit_State *J, RecordFFData *rd) -{ - TRef str = lj_ir_tostr(J, J->base[0]); - TRef rep = lj_opt_narrow_toint(J, J->base[1]); - TRef hdr, tr, str2 = 0; - if (!tref_isnil(J->base[2])) { - TRef sep = lj_ir_tostr(J, J->base[2]); - int32_t vrep = argv2int(J, &rd->argv[1]); - emitir(IRTGI(vrep > 1 ? IR_GT : IR_LE), rep, lj_ir_kint(J, 1)); - if (vrep > 1) { - TRef hdr2 = recff_bufhdr(J); - TRef tr2 = emitir(IRT(IR_BUFPUT, IRT_P32), hdr2, sep); - tr2 = emitir(IRT(IR_BUFPUT, IRT_P32), tr2, str); - str2 = emitir(IRT(IR_BUFSTR, IRT_STR), tr2, hdr2); - } - } - tr = hdr = recff_bufhdr(J); - if (str2) { - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, str); - str = str2; - rep = emitir(IRTI(IR_ADD), rep, lj_ir_kint(J, -1)); - } - tr = lj_ir_call(J, IRCALL_lj_buf_putstr_rep, tr, str, rep); - J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); -} - -static void LJ_FASTCALL recff_string_op(jit_State *J, RecordFFData *rd) -{ - TRef str = lj_ir_tostr(J, J->base[0]); - TRef hdr = recff_bufhdr(J); - TRef tr = lj_ir_call(J, rd->data, hdr, str); - J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); -} - -static void LJ_FASTCALL recff_string_find(jit_State *J, RecordFFData *rd) -{ - TRef trstr = lj_ir_tostr(J, J->base[0]); - TRef trpat = lj_ir_tostr(J, J->base[1]); - TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN); - TRef tr0 = lj_ir_kint(J, 0); - TRef trstart; - GCstr *str = argv2str(J, &rd->argv[0]); - GCstr *pat = argv2str(J, &rd->argv[1]); - int32_t start; - J->needsnap = 1; - if (tref_isnil(J->base[2])) { - trstart = lj_ir_kint(J, 1); - start = 1; - } else { - trstart = lj_opt_narrow_toint(J, J->base[2]); - start = argv2int(J, &rd->argv[2]); - } - trstart = recff_string_start(J, str, &start, trstart, trlen, tr0); - if ((MSize)start <= str->len) { - emitir(IRTGI(IR_ULE), trstart, trlen); - } else { - emitir(IRTGI(IR_UGT), trstart, trlen); -#if LJ_52 - J->base[0] = TREF_NIL; - return; -#else - trstart = trlen; - start = str->len; -#endif - } - /* Fixed arg or no pattern matching chars? (Specialized to pattern string.) */ - if ((J->base[2] && tref_istruecond(J->base[3])) || - (emitir(IRTG(IR_EQ, IRT_STR), trpat, lj_ir_kstr(J, pat)), - !lj_str_haspattern(pat))) { /* Search for fixed string. */ - TRef trsptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart); - TRef trpptr = emitir(IRT(IR_STRREF, IRT_P32), trpat, tr0); - TRef trslen = emitir(IRTI(IR_SUB), trlen, trstart); - TRef trplen = emitir(IRTI(IR_FLOAD), trpat, IRFL_STR_LEN); - TRef tr = lj_ir_call(J, IRCALL_lj_str_find, trsptr, trpptr, trslen, trplen); - TRef trp0 = lj_ir_kkptr(J, NULL); - if (lj_str_find(strdata(str)+(MSize)start, strdata(pat), - str->len-(MSize)start, pat->len)) { - TRef pos; - emitir(IRTG(IR_NE, IRT_P32), tr, trp0); - pos = emitir(IRTI(IR_SUB), tr, emitir(IRT(IR_STRREF, IRT_P32), trstr, tr0)); - J->base[0] = emitir(IRTI(IR_ADD), pos, lj_ir_kint(J, 1)); - J->base[1] = emitir(IRTI(IR_ADD), pos, trplen); - rd->nres = 2; - } else { - emitir(IRTG(IR_EQ, IRT_P32), tr, trp0); - J->base[0] = TREF_NIL; - } - } else { /* Search for pattern. 
*/ - recff_nyiu(J, rd); - return; - } -} - -static void LJ_FASTCALL recff_string_format(jit_State *J, RecordFFData *rd) -{ - TRef trfmt = lj_ir_tostr(J, J->base[0]); - GCstr *fmt = argv2str(J, &rd->argv[0]); - int arg = 1; - TRef hdr, tr; - FormatState fs; - SFormat sf; - /* Specialize to the format string. */ - emitir(IRTG(IR_EQ, IRT_STR), trfmt, lj_ir_kstr(J, fmt)); - tr = hdr = recff_bufhdr(J); - lj_strfmt_init(&fs, strdata(fmt), fmt->len); - while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { /* Parse format. */ - TRef tra = sf == STRFMT_LIT ? 0 : J->base[arg++]; - TRef trsf = lj_ir_kint(J, (int32_t)sf); - IRCallID id; - switch (STRFMT_TYPE(sf)) { - case STRFMT_LIT: - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, - lj_ir_kstr(J, lj_str_new(J->L, fs.str, fs.len))); - break; - case STRFMT_INT: - id = IRCALL_lj_strfmt_putfnum_int; - handle_int: - if (!tref_isinteger(tra)) - goto handle_num; - if (sf == STRFMT_INT) { /* Shortcut for plain %d. */ - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, - emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_INT)); - } else { -#if LJ_HASFFI - tra = emitir(IRT(IR_CONV, IRT_U64), tra, - (IRT_INT|(IRT_U64<<5)|IRCONV_SEXT)); - tr = lj_ir_call(J, IRCALL_lj_strfmt_putfxint, tr, trsf, tra); - lj_needsplit(J); -#else - recff_nyiu(J, rd); /* Don't bother working around this NYI. */ - return; -#endif - } - break; - case STRFMT_UINT: - id = IRCALL_lj_strfmt_putfnum_uint; - goto handle_int; - case STRFMT_NUM: - id = IRCALL_lj_strfmt_putfnum; - handle_num: - tra = lj_ir_tonum(J, tra); - tr = lj_ir_call(J, id, tr, trsf, tra); - if (LJ_SOFTFP) lj_needsplit(J); - break; - case STRFMT_STR: - if (!tref_isstr(tra)) { - recff_nyiu(J, rd); /* NYI: __tostring and non-string types for %s. */ - return; - } - if (sf == STRFMT_STR) /* Shortcut for plain %s. */ - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, tra); - else if ((sf & STRFMT_T_QUOTED)) - tr = lj_ir_call(J, IRCALL_lj_strfmt_putquoted, tr, tra); - else - tr = lj_ir_call(J, IRCALL_lj_strfmt_putfstr, tr, trsf, tra); - break; - case STRFMT_CHAR: - tra = lj_opt_narrow_toint(J, tra); - if (sf == STRFMT_CHAR) /* Shortcut for plain %c. */ - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, - emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_CHAR)); - else - tr = lj_ir_call(J, IRCALL_lj_strfmt_putfchar, tr, trsf, tra); - break; - case STRFMT_PTR: /* NYI */ - case STRFMT_ERR: - default: - recff_nyiu(J, rd); - return; - } - } - J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); -} - -/* -- Table library fast functions ---------------------------------------- */ - -static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd) -{ - RecordIndex ix; - ix.tab = J->base[0]; - ix.val = J->base[1]; - rd->nres = 0; - if (tref_istab(ix.tab) && ix.val) { - if (!J->base[2]) { /* Simple push: t[#t+1] = v */ - TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, ix.tab); - GCtab *t = tabV(&rd->argv[0]); - ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); - settabV(J->L, &ix.tabv, t); - setintV(&ix.keyv, lj_tab_len(t) + 1); - ix.idxchain = 0; - lj_record_idx(J, &ix); /* Set new value. */ - } else { /* Complex case: insert in the middle. */ - recff_nyiu(J, rd); - return; - } - } /* else: Interpreter will throw. */ -} - -static void LJ_FASTCALL recff_table_concat(jit_State *J, RecordFFData *rd) -{ - TRef tab = J->base[0]; - if (tref_istab(tab)) { - TRef sep = !tref_isnil(J->base[1]) ? - lj_ir_tostr(J, J->base[1]) : lj_ir_knull(J, IRT_STR); - TRef tri = (J->base[1] && !tref_isnil(J->base[2])) ? 
- lj_opt_narrow_toint(J, J->base[2]) : lj_ir_kint(J, 1); - TRef tre = (J->base[1] && J->base[2] && !tref_isnil(J->base[3])) ? - lj_opt_narrow_toint(J, J->base[3]) : - lj_ir_call(J, IRCALL_lj_tab_len, tab); - TRef hdr = recff_bufhdr(J); - TRef tr = lj_ir_call(J, IRCALL_lj_buf_puttab, hdr, tab, sep, tri, tre); - emitir(IRTG(IR_NE, IRT_PTR), tr, lj_ir_kptr(J, NULL)); - J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); - } /* else: Interpreter will throw. */ - UNUSED(rd); -} - -static void LJ_FASTCALL recff_table_new(jit_State *J, RecordFFData *rd) -{ - TRef tra = lj_opt_narrow_toint(J, J->base[0]); - TRef trh = lj_opt_narrow_toint(J, J->base[1]); - J->base[0] = lj_ir_call(J, IRCALL_lj_tab_new_ah, tra, trh); - UNUSED(rd); -} - -static void LJ_FASTCALL recff_table_clear(jit_State *J, RecordFFData *rd) -{ - TRef tr = J->base[0]; - if (tref_istab(tr)) { - rd->nres = 0; - lj_ir_call(J, IRCALL_lj_tab_clear, tr); - J->needsnap = 1; - } /* else: Interpreter will throw. */ -} - -/* -- I/O library fast functions ------------------------------------------ */ - -/* Get FILE* for I/O function. Any I/O error aborts recording, so there's -** no need to encode the alternate cases for any of the guards. -*/ -static TRef recff_io_fp(jit_State *J, TRef *udp, int32_t id) -{ - TRef tr, ud, fp; - if (id) { /* io.func() */ - tr = lj_ir_kptr(J, &J2G(J)->gcroot[id]); - ud = emitir(IRT(IR_XLOAD, IRT_UDATA), tr, 0); - } else { /* fp:method() */ - ud = J->base[0]; - if (!tref_isudata(ud)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - tr = emitir(IRT(IR_FLOAD, IRT_U8), ud, IRFL_UDATA_UDTYPE); - emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE)); - } - *udp = ud; - fp = emitir(IRT(IR_FLOAD, IRT_PTR), ud, IRFL_UDATA_FILE); - emitir(IRTG(IR_NE, IRT_PTR), fp, lj_ir_knull(J, IRT_PTR)); - return fp; -} - -static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd) -{ - TRef ud, fp = recff_io_fp(J, &ud, rd->data); - TRef zero = lj_ir_kint(J, 0); - TRef one = lj_ir_kint(J, 1); - ptrdiff_t i = rd->data == 0 ? 1 : 0; - for (; J->base[i]; i++) { - TRef str = lj_ir_tostr(J, J->base[i]); - TRef buf = emitir(IRT(IR_STRREF, IRT_P32), str, zero); - TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN); - if (tref_isk(len) && IR(tref_ref(len))->i == 1) { - IRIns *irs = IR(tref_ref(str)); - TRef tr = (irs->o == IR_TOSTR && irs->op2 == IRTOSTR_CHAR) ? - irs->op1 : - emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY); - tr = lj_ir_call(J, IRCALL_fputc, tr, fp); - if (results_wanted(J) != 0) /* Check result only if not ignored. */ - emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1)); - } else { - TRef tr = lj_ir_call(J, IRCALL_fwrite, buf, one, len, fp); - if (results_wanted(J) != 0) /* Check result only if not ignored. */ - emitir(IRTGI(IR_EQ), tr, len); - } - } - J->base[0] = LJ_52 ? ud : TREF_TRUE; -} - -static void LJ_FASTCALL recff_io_flush(jit_State *J, RecordFFData *rd) -{ - TRef ud, fp = recff_io_fp(J, &ud, rd->data); - TRef tr = lj_ir_call(J, IRCALL_fflush, fp); - if (results_wanted(J) != 0) /* Check result only if not ignored. 
*/ - emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0)); - J->base[0] = TREF_TRUE; -} - -/* -- Debug library fast functions ---------------------------------------- */ - -static void LJ_FASTCALL recff_debug_getmetatable(jit_State *J, RecordFFData *rd) -{ - GCtab *mt; - TRef mtref; - TRef tr = J->base[0]; - if (tref_istab(tr)) { - mt = tabref(tabV(&rd->argv[0])->metatable); - mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_TAB_META); - } else if (tref_isudata(tr)) { - mt = tabref(udataV(&rd->argv[0])->metatable); - mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_UDATA_META); - } else { - mt = tabref(basemt_obj(J2G(J), &rd->argv[0])); - J->base[0] = mt ? lj_ir_ktab(J, mt) : TREF_NIL; - return; - } - emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB)); - J->base[0] = mt ? mtref : TREF_NIL; -} - -/* -- Record calls to fast functions -------------------------------------- */ - -#include "lj_recdef.h" - -static uint32_t recdef_lookup(GCfunc *fn) -{ - if (fn->c.ffid < sizeof(recff_idmap)/sizeof(recff_idmap[0])) - return recff_idmap[fn->c.ffid]; - else - return 0; -} - -/* Record entry to a fast function or C function. */ -void lj_ffrecord_func(jit_State *J) -{ - RecordFFData rd; - uint32_t m = recdef_lookup(J->fn); - rd.data = m & 0xff; - rd.nres = 1; /* Default is one result. */ - rd.argv = J->L->base; - J->base[J->maxslot] = 0; /* Mark end of arguments. */ - (recff_func[m >> 8])(J, &rd); /* Call recff_* handler. */ - if (rd.nres >= 0) { - if (J->postproc == LJ_POST_NONE) J->postproc = LJ_POST_FFRETRY; - lj_record_ret(J, 0, rd.nres); - } -} - -#undef IR -#undef emitir - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.h deleted file mode 100644 index f858ca29cfc..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ffrecord.h +++ /dev/null @@ -1,24 +0,0 @@ -/* -** Fast function call recorder. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_FFRECORD_H -#define _LJ_FFRECORD_H - -#include "lj_obj.h" -#include "lj_jit.h" - -#if LJ_HASJIT -/* Data used by handlers to record a fast function. */ -typedef struct RecordFFData { - TValue *argv; /* Runtime argument values. */ - ptrdiff_t nres; /* Number of returned results (defaults to 1). */ - uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */ -} RecordFFData; - -LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv); -LJ_FUNC void lj_ffrecord_func(jit_State *J); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_frame.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_frame.h deleted file mode 100644 index a86c36be7e5..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_frame.h +++ /dev/null @@ -1,259 +0,0 @@ -/* -** Stack frames. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_FRAME_H -#define _LJ_FRAME_H - -#include "lj_obj.h" -#include "lj_bc.h" - -/* -- Lua stack frame ----------------------------------------------------- */ - -/* Frame type markers in LSB of PC (4-byte aligned) or delta (8-byte aligned: -** -** PC 00 Lua frame -** delta 001 C frame -** delta 010 Continuation frame -** delta 011 Lua vararg frame -** delta 101 cpcall() frame -** delta 110 ff pcall() frame -** delta 111 ff pcall() frame with active hook -*/ -enum { - FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG, - FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH -}; -#define FRAME_TYPE 3 -#define FRAME_P 4 -#define FRAME_TYPEP (FRAME_TYPE|FRAME_P) - -/* Macros to access and modify Lua frames. */ -#if LJ_FR2 -/* Two-slot frame info, required for 64 bit PC/GCRef: -** -** base-2 base-1 | base base+1 ... -** [func PC/delta/ft] | [slots ...] -** ^-- frame | ^-- base ^-- top -** -** Continuation frames: -** -** base-4 base-3 base-2 base-1 | base base+1 ... -** [cont PC ] [func PC/delta/ft] | [slots ...] -** ^-- frame | ^-- base ^-- top -*/ -#define frame_gc(f) (gcval((f)-1)) -#define frame_ftsz(f) ((ptrdiff_t)(f)->ftsz) -#define frame_pc(f) ((const BCIns *)frame_ftsz(f)) -#define setframe_gc(f, p, tp) (setgcVraw((f)-1, (p), (tp))) -#define setframe_ftsz(f, sz) ((f)->ftsz = (sz)) -#define setframe_pc(f, pc) ((f)->ftsz = (int64_t)(intptr_t)(pc)) -#else -/* One-slot frame info, sufficient for 32 bit PC/GCRef: -** -** base-1 | base base+1 ... -** lo hi | -** [func | PC/delta/ft] | [slots ...] -** ^-- frame | ^-- base ^-- top -** -** Continuation frames: -** -** base-2 base-1 | base base+1 ... -** lo hi lo hi | -** [cont | PC] [func | PC/delta/ft] | [slots ...] -** ^-- frame | ^-- base ^-- top -*/ -#define frame_gc(f) (gcref((f)->fr.func)) -#define frame_ftsz(f) ((ptrdiff_t)(f)->fr.tp.ftsz) -#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns)) -#define setframe_gc(f, p, tp) (setgcref((f)->fr.func, (p)), UNUSED(tp)) -#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (int32_t)(sz)) -#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc))) -#endif - -#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE) -#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP) -#define frame_islua(f) (frame_type(f) == FRAME_LUA) -#define frame_isc(f) (frame_type(f) == FRAME_C) -#define frame_iscont(f) (frame_typep(f) == FRAME_CONT) -#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG) -#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL) - -#define frame_func(f) (&frame_gc(f)->fn) -#define frame_delta(f) (frame_ftsz(f) >> 3) -#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP) - -enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. 
*/ - -#if LJ_FR2 -#define frame_contpc(f) (frame_pc((f)-2)) -#define frame_contv(f) (((f)-3)->u64) -#else -#define frame_contpc(f) (frame_pc((f)-1)) -#define frame_contv(f) (((f)-1)->u32.lo) -#endif -#if LJ_FR2 -#define frame_contf(f) ((ASMFunction)(uintptr_t)((f)-3)->u64) -#elif LJ_64 -#define frame_contf(f) \ - ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \ - (intptr_t)(int32_t)((f)-1)->u32.lo)) -#else -#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void)) -#endif -#define frame_iscont_fficb(f) \ - (LJ_HASFFI && frame_contv(f) == LJ_CONT_FFI_CALLBACK) - -#define frame_prevl(f) ((f) - (1+LJ_FR2+bc_a(frame_pc(f)[-1]))) -#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f))) -#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f)) -/* Note: this macro does not skip over FRAME_VARG. */ - -/* -- C stack frame ------------------------------------------------------- */ - -/* Macros to access and modify the C stack frame chain. */ - -/* These definitions must match with the arch-specific *.dasc files. */ -#if LJ_TARGET_X86 -#define CFRAME_OFS_ERRF (15*4) -#define CFRAME_OFS_NRES (14*4) -#define CFRAME_OFS_PREV (13*4) -#define CFRAME_OFS_L (12*4) -#define CFRAME_OFS_PC (6*4) -#define CFRAME_OFS_MULTRES (5*4) -#define CFRAME_SIZE (12*4) -#define CFRAME_SHIFT_MULTRES 0 -#elif LJ_TARGET_X64 -#if LJ_ABI_WIN -#define CFRAME_OFS_PREV (13*8) -#if LJ_GC64 -#define CFRAME_OFS_PC (12*8) -#define CFRAME_OFS_L (11*8) -#define CFRAME_OFS_ERRF (21*4) -#define CFRAME_OFS_NRES (20*4) -#define CFRAME_OFS_MULTRES (8*4) -#else -#define CFRAME_OFS_PC (25*4) -#define CFRAME_OFS_L (24*4) -#define CFRAME_OFS_ERRF (23*4) -#define CFRAME_OFS_NRES (22*4) -#define CFRAME_OFS_MULTRES (21*4) -#endif -#define CFRAME_SIZE (10*8) -#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8) -#define CFRAME_SHIFT_MULTRES 0 -#else -#define CFRAME_OFS_PREV (4*8) -#if LJ_GC64 -#define CFRAME_OFS_PC (3*8) -#define CFRAME_OFS_L (2*8) -#define CFRAME_OFS_ERRF (3*4) -#define CFRAME_OFS_NRES (2*4) -#define CFRAME_OFS_MULTRES (0*4) -#else -#define CFRAME_OFS_PC (7*4) -#define CFRAME_OFS_L (6*4) -#define CFRAME_OFS_ERRF (5*4) -#define CFRAME_OFS_NRES (4*4) -#define CFRAME_OFS_MULTRES (1*4) -#endif -#if LJ_NO_UNWIND -#define CFRAME_SIZE (12*8) -#else -#define CFRAME_SIZE (10*8) -#endif -#define CFRAME_SIZE_JIT (CFRAME_SIZE + 16) -#define CFRAME_SHIFT_MULTRES 0 -#endif -#elif LJ_TARGET_ARM -#define CFRAME_OFS_ERRF 24 -#define CFRAME_OFS_NRES 20 -#define CFRAME_OFS_PREV 16 -#define CFRAME_OFS_L 12 -#define CFRAME_OFS_PC 8 -#define CFRAME_OFS_MULTRES 4 -#if LJ_ARCH_HASFPU -#define CFRAME_SIZE 128 -#else -#define CFRAME_SIZE 64 -#endif -#define CFRAME_SHIFT_MULTRES 3 -#elif LJ_TARGET_ARM64 -#define CFRAME_OFS_ERRF 196 -#define CFRAME_OFS_NRES 200 -#define CFRAME_OFS_PREV 160 -#define CFRAME_OFS_L 176 -#define CFRAME_OFS_PC 168 -#define CFRAME_OFS_MULTRES 192 -#define CFRAME_SIZE 208 -#define CFRAME_SHIFT_MULTRES 3 -#elif LJ_TARGET_PPC -#if LJ_TARGET_XBOX360 -#define CFRAME_OFS_ERRF 424 -#define CFRAME_OFS_NRES 420 -#define CFRAME_OFS_PREV 400 -#define CFRAME_OFS_L 416 -#define CFRAME_OFS_PC 412 -#define CFRAME_OFS_MULTRES 408 -#define CFRAME_SIZE 384 -#define CFRAME_SHIFT_MULTRES 3 -#elif LJ_ARCH_PPC32ON64 -#define CFRAME_OFS_ERRF 472 -#define CFRAME_OFS_NRES 468 -#define CFRAME_OFS_PREV 448 -#define CFRAME_OFS_L 464 -#define CFRAME_OFS_PC 460 -#define CFRAME_OFS_MULTRES 456 -#define CFRAME_SIZE 400 -#define CFRAME_SHIFT_MULTRES 3 -#else -#define CFRAME_OFS_ERRF 48 -#define CFRAME_OFS_NRES 44 -#define 
CFRAME_OFS_PREV 40 -#define CFRAME_OFS_L 36 -#define CFRAME_OFS_PC 32 -#define CFRAME_OFS_MULTRES 28 -#define CFRAME_SIZE 272 -#define CFRAME_SHIFT_MULTRES 3 -#endif -#elif LJ_TARGET_MIPS -#define CFRAME_OFS_ERRF 124 -#define CFRAME_OFS_NRES 120 -#define CFRAME_OFS_PREV 116 -#define CFRAME_OFS_L 112 -#define CFRAME_OFS_PC 20 -#define CFRAME_OFS_MULTRES 16 -#define CFRAME_SIZE 112 -#define CFRAME_SHIFT_MULTRES 3 -#else -#error "Missing CFRAME_* definitions for this architecture" -#endif - -#ifndef CFRAME_SIZE_JIT -#define CFRAME_SIZE_JIT CFRAME_SIZE -#endif - -#define CFRAME_RESUME 1 -#define CFRAME_UNWIND_FF 2 /* Only used in unwinder. */ -#define CFRAME_RAWMASK (~(intptr_t)(CFRAME_RESUME|CFRAME_UNWIND_FF)) - -#define cframe_errfunc(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_ERRF)) -#define cframe_nres(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_NRES)) -#define cframe_prev(cf) (*(void **)(((char *)(cf))+CFRAME_OFS_PREV)) -#define cframe_multres(cf) (*(uint32_t *)(((char *)(cf))+CFRAME_OFS_MULTRES)) -#define cframe_multres_n(cf) (cframe_multres((cf)) >> CFRAME_SHIFT_MULTRES) -#define cframe_L(cf) \ - (&gcref(*(GCRef *)(((char *)(cf))+CFRAME_OFS_L))->th) -#define cframe_pc(cf) \ - (mref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), const BCIns)) -#define setcframe_L(cf, L) \ - (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_L), (L))) -#define setcframe_pc(cf, pc) \ - (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), (pc))) -#define cframe_canyield(cf) ((intptr_t)(cf) & CFRAME_RESUME) -#define cframe_unwind_ff(cf) ((intptr_t)(cf) & CFRAME_UNWIND_FF) -#define cframe_raw(cf) ((void *)((intptr_t)(cf) & CFRAME_RAWMASK)) -#define cframe_Lpc(L) cframe_pc(cframe_raw(L->cframe)) - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.c deleted file mode 100644 index eb8a9dbbfd8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.c +++ /dev/null @@ -1,185 +0,0 @@ -/* -** Function handling (prototypes, functions and upvalues). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_func_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_func.h" -#include "lj_trace.h" -#include "lj_vm.h" - -/* -- Prototypes ---------------------------------------------------------- */ - -void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt) -{ - lj_mem_free(g, pt, pt->sizept); -} - -/* -- Upvalues ------------------------------------------------------------ */ - -static void unlinkuv(GCupval *uv) -{ - lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); - setgcrefr(uvnext(uv)->prev, uv->prev); - setgcrefr(uvprev(uv)->next, uv->next); -} - -/* Find existing open upvalue for a stack slot or create a new one. */ -static GCupval *func_finduv(lua_State *L, TValue *slot) -{ - global_State *g = G(L); - GCRef *pp = &L->openupval; - GCupval *p; - GCupval *uv; - /* Search the sorted list of open upvalues. */ - while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) { - lua_assert(!p->closed && uvval(p) != &p->tv); - if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */ - if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */ - flipwhite(obj2gco(p)); - return p; - } - pp = &p->nextgc; - } - /* No matching upvalue found. Create a new one. 
*/ - uv = lj_mem_newt(L, sizeof(GCupval), GCupval); - newwhite(g, uv); - uv->gct = ~LJ_TUPVAL; - uv->closed = 0; /* Still open. */ - setmref(uv->v, slot); /* Pointing to the stack slot. */ - /* NOBARRIER: The GCupval is new (marked white) and open. */ - setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */ - setgcref(*pp, obj2gco(uv)); - setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */ - setgcrefr(uv->next, g->uvhead.next); - setgcref(uvnext(uv)->prev, obj2gco(uv)); - setgcref(g->uvhead.next, obj2gco(uv)); - lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); - return uv; -} - -/* Create an empty and closed upvalue. */ -static GCupval *func_emptyuv(lua_State *L) -{ - GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval)); - uv->gct = ~LJ_TUPVAL; - uv->closed = 1; - setnilV(&uv->tv); - setmref(uv->v, &uv->tv); - return uv; -} - -/* Close all open upvalues pointing to some stack level or above. */ -void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level) -{ - GCupval *uv; - global_State *g = G(L); - while (gcref(L->openupval) != NULL && - uvval((uv = gco2uv(gcref(L->openupval)))) >= level) { - GCobj *o = obj2gco(uv); - lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv); - setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */ - if (isdead(g, o)) { - lj_func_freeuv(g, uv); - } else { - unlinkuv(uv); - lj_gc_closeuv(g, uv); - } - } -} - -void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv) -{ - if (!uv->closed) - unlinkuv(uv); - lj_mem_freet(g, uv); -} - -/* -- Functions (closures) ------------------------------------------------ */ - -GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env) -{ - GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems)); - fn->c.gct = ~LJ_TFUNC; - fn->c.ffid = FF_C; - fn->c.nupvalues = (uint8_t)nelems; - /* NOBARRIER: The GCfunc is new (marked white). */ - setmref(fn->c.pc, &G(L)->bc_cfunc_ext); - setgcref(fn->c.env, obj2gco(env)); - return fn; -} - -static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env) -{ - uint32_t count; - GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv)); - fn->l.gct = ~LJ_TFUNC; - fn->l.ffid = FF_LUA; - fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */ - /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */ - setmref(fn->l.pc, proto_bc(pt)); - setgcref(fn->l.env, obj2gco(env)); - /* Saturating 3 bit counter (0..7) for created closures. */ - count = (uint32_t)pt->flags + PROTO_CLCOUNT; - pt->flags = (uint8_t)(count - ((count >> PROTO_CLC_BITS) & PROTO_CLCOUNT)); - return fn; -} - -/* Create a new Lua function with empty upvalues. */ -GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env) -{ - GCfunc *fn = func_newL(L, pt, env); - MSize i, nuv = pt->sizeuv; - /* NOBARRIER: The GCfunc is new (marked white). */ - for (i = 0; i < nuv; i++) { - GCupval *uv = func_emptyuv(L); - uv->dhash = (uint32_t)(uintptr_t)pt ^ ((uint32_t)proto_uv(pt)[i] << 24); - setgcref(fn->l.uvptr[i], obj2gco(uv)); - } - fn->l.nupvalues = (uint8_t)nuv; - return fn; -} - -/* Do a GC check and create a new Lua function with inherited upvalues. */ -GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent) -{ - GCfunc *fn; - GCRef *puv; - MSize i, nuv; - TValue *base; - lj_gc_check_fixtop(L); - fn = func_newL(L, pt, tabref(parent->env)); - /* NOBARRIER: The GCfunc is new (marked white). 
*/ - puv = parent->uvptr; - nuv = pt->sizeuv; - base = L->base; - for (i = 0; i < nuv; i++) { - uint32_t v = proto_uv(pt)[i]; - GCupval *uv; - if ((v & PROTO_UV_LOCAL)) { - uv = func_finduv(L, base + (v & 0xff)); - uv->immutable = ((v / PROTO_UV_IMMUTABLE) & 1); - uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24); - } else { - uv = &gcref(puv[v])->uv; - } - setgcref(fn->l.uvptr[i], obj2gco(uv)); - } - fn->l.nupvalues = (uint8_t)nuv; - return fn; -} - -void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn) -{ - MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : - sizeCfunc((MSize)fn->c.nupvalues); - lj_mem_free(g, fn, size); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.h deleted file mode 100644 index a6e534e7739..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_func.h +++ /dev/null @@ -1,24 +0,0 @@ -/* -** Function handling (prototypes, functions and upvalues). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_FUNC_H -#define _LJ_FUNC_H - -#include "lj_obj.h" - -/* Prototypes. */ -LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt); - -/* Upvalues. */ -LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level); -LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv); - -/* Functions (closures). */ -LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env); -LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env); -LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent); -LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.c deleted file mode 100644 index 99d664aa2a1..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.c +++ /dev/null @@ -1,845 +0,0 @@ -/* -** Garbage collector. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_gc_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_func.h" -#include "lj_udata.h" -#include "lj_meta.h" -#include "lj_state.h" -#include "lj_frame.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_cdata.h" -#endif -#include "lj_trace.h" -#include "lj_vm.h" - -#define GCSTEPSIZE 1024u -#define GCSWEEPMAX 40 -#define GCSWEEPCOST 10 -#define GCFINALIZECOST 100 - -/* Macros to set GCobj colors and flags. */ -#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES) -#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK) -#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED) - -/* -- Mark phase ---------------------------------------------------------- */ - -/* Mark a TValue (if needed). */ -#define gc_marktv(g, tv) \ - { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \ - if (tviswhite(tv)) gc_mark(g, gcV(tv)); } - -/* Mark a GCobj (if needed). */ -#define gc_markobj(g, o) \ - { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); } - -/* Mark a string object. */ -#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES) - -/* Mark a white GCobj. 
*/ -static void gc_mark(global_State *g, GCobj *o) -{ - int gct = o->gch.gct; - lua_assert(iswhite(o) && !isdead(g, o)); - white2gray(o); - if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) { - GCtab *mt = tabref(gco2ud(o)->metatable); - gray2black(o); /* Userdata are never gray. */ - if (mt) gc_markobj(g, mt); - gc_markobj(g, tabref(gco2ud(o)->env)); - } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) { - GCupval *uv = gco2uv(o); - gc_marktv(g, uvval(uv)); - if (uv->closed) - gray2black(o); /* Closed upvalues are never gray. */ - } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) { - lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB || - gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO); - setgcrefr(o->gch.gclist, g->gc.gray); - setgcref(g->gc.gray, o); - } -} - -/* Mark GC roots. */ -static void gc_mark_gcroot(global_State *g) -{ - ptrdiff_t i; - for (i = 0; i < GCROOT_MAX; i++) - if (gcref(g->gcroot[i]) != NULL) - gc_markobj(g, gcref(g->gcroot[i])); -} - -/* Start a GC cycle and mark the root set. */ -static void gc_mark_start(global_State *g) -{ - setgcrefnull(g->gc.gray); - setgcrefnull(g->gc.grayagain); - setgcrefnull(g->gc.weak); - gc_markobj(g, mainthread(g)); - gc_markobj(g, tabref(mainthread(g)->env)); - gc_marktv(g, &g->registrytv); - gc_mark_gcroot(g); - g->gc.state = GCSpropagate; -} - -/* Mark open upvalues. */ -static void gc_mark_uv(global_State *g) -{ - GCupval *uv; - for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { - lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); - if (isgray(obj2gco(uv))) - gc_marktv(g, uvval(uv)); - } -} - -/* Mark userdata in mmudata list. */ -static void gc_mark_mmudata(global_State *g) -{ - GCobj *root = gcref(g->gc.mmudata); - GCobj *u = root; - if (u) { - do { - u = gcnext(u); - makewhite(g, u); /* Could be from previous GC. */ - gc_mark(g, u); - } while (u != root); - } -} - -/* Separate userdata objects to be finalized to mmudata list. */ -size_t lj_gc_separateudata(global_State *g, int all) -{ - size_t m = 0; - GCRef *p = &mainthread(g)->nextgc; - GCobj *o; - while ((o = gcref(*p)) != NULL) { - if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) { - p = &o->gch.nextgc; /* Nothing to do. */ - } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) { - markfinalized(o); /* Done, as there's no __gc metamethod. */ - p = &o->gch.nextgc; - } else { /* Otherwise move userdata to be finalized to mmudata list. */ - m += sizeudata(gco2ud(o)); - markfinalized(o); - *p = o->gch.nextgc; - if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */ - GCobj *root = gcref(g->gc.mmudata); - setgcrefr(o->gch.nextgc, root->gch.nextgc); - setgcref(root->gch.nextgc, o); - setgcref(g->gc.mmudata, o); - } else { /* Create circular list. */ - setgcref(o->gch.nextgc, o); - setgcref(g->gc.mmudata, o); - } - } - } - return m; -} - -/* -- Propagation phase --------------------------------------------------- */ - -/* Traverse a table. */ -static int gc_traverse_tab(global_State *g, GCtab *t) -{ - int weak = 0; - cTValue *mode; - GCtab *mt = tabref(t->metatable); - if (mt) - gc_markobj(g, mt); - mode = lj_meta_fastg(g, mt, MM_mode); - if (mode && tvisstr(mode)) { /* Valid __mode field? */ - const char *modestr = strVdata(mode); - int c; - while ((c = *modestr++)) { - if (c == 'k') weak |= LJ_GC_WEAKKEY; - else if (c == 'v') weak |= LJ_GC_WEAKVAL; - else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL); - } - if (weak > 0) { /* Weak tables are cleared in the atomic phase. 
*/ - t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak); - setgcrefr(t->gclist, g->gc.weak); - setgcref(g->gc.weak, obj2gco(t)); - } - } - if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */ - return 1; - if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */ - MSize i, asize = t->asize; - for (i = 0; i < asize; i++) - gc_marktv(g, arrayslot(t, i)); - } - if (t->hmask > 0) { /* Mark hash part. */ - Node *node = noderef(t->node); - MSize i, hmask = t->hmask; - for (i = 0; i <= hmask; i++) { - Node *n = &node[i]; - if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ - lua_assert(!tvisnil(&n->key)); - if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); - if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); - } - } - } - return weak; -} - -/* Traverse a function. */ -static void gc_traverse_func(global_State *g, GCfunc *fn) -{ - gc_markobj(g, tabref(fn->c.env)); - if (isluafunc(fn)) { - uint32_t i; - lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv); - gc_markobj(g, funcproto(fn)); - for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ - gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); - } else { - uint32_t i; - for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */ - gc_marktv(g, &fn->c.upvalue[i]); - } -} - -#if LJ_HASJIT -/* Mark a trace. */ -static void gc_marktrace(global_State *g, TraceNo traceno) -{ - GCobj *o = obj2gco(traceref(G2J(g), traceno)); - lua_assert(traceno != G2J(g)->cur.traceno); - if (iswhite(o)) { - white2gray(o); - setgcrefr(o->gch.gclist, g->gc.gray); - setgcref(g->gc.gray, o); - } -} - -/* Traverse a trace. */ -static void gc_traverse_trace(global_State *g, GCtrace *T) -{ - IRRef ref; - if (T->traceno == 0) return; - for (ref = T->nk; ref < REF_TRUE; ref++) { - IRIns *ir = &T->ir[ref]; - if (ir->o == IR_KGC) - gc_markobj(g, ir_kgc(ir)); - } - if (T->link) gc_marktrace(g, T->link); - if (T->nextroot) gc_marktrace(g, T->nextroot); - if (T->nextside) gc_marktrace(g, T->nextside); - gc_markobj(g, gcref(T->startpt)); -} - -/* The current trace is a GC root while not anchored in the prototype (yet). */ -#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur) -#else -#define gc_traverse_curtrace(g) UNUSED(g) -#endif - -/* Traverse a prototype. */ -static void gc_traverse_proto(global_State *g, GCproto *pt) -{ - ptrdiff_t i; - gc_mark_str(proto_chunkname(pt)); - for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */ - gc_markobj(g, proto_kgc(pt, i)); -#if LJ_HASJIT - if (pt->trace) gc_marktrace(g, pt->trace); -#endif -} - -/* Traverse the frame structure of a stack. */ -static MSize gc_traverse_frames(global_State *g, lua_State *th) -{ - TValue *frame, *top = th->top-1, *bot = tvref(th->stack); - /* Note: extra vararg frame not skipped, marks function twice (harmless). */ - for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) { - GCfunc *fn = frame_func(frame); - TValue *ftop = frame; - if (isluafunc(fn)) ftop += funcproto(fn)->framesize; - if (ftop > top) top = ftop; - if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */ - } - top++; /* Correct bias of -1 (frame == base-1). */ - if (top > tvref(th->maxstack)) top = tvref(th->maxstack); - return (MSize)(top - bot); /* Return minimum needed stack size. */ -} - -/* Traverse a thread object. 
*/ -static void gc_traverse_thread(global_State *g, lua_State *th) -{ - TValue *o, *top = th->top; - for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++) - gc_marktv(g, o); - if (g->gc.state == GCSatomic) { - top = tvref(th->stack) + th->stacksize; - for (; o < top; o++) /* Clear unmarked slots. */ - setnilV(o); - } - gc_markobj(g, tabref(th->env)); - lj_state_shrinkstack(th, gc_traverse_frames(g, th)); -} - -/* Propagate one gray object. Traverse it and turn it black. */ -static size_t propagatemark(global_State *g) -{ - GCobj *o = gcref(g->gc.gray); - int gct = o->gch.gct; - lua_assert(isgray(o)); - gray2black(o); - setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ - if (LJ_LIKELY(gct == ~LJ_TTAB)) { - GCtab *t = gco2tab(o); - if (gc_traverse_tab(g, t) > 0) - black2gray(o); /* Keep weak tables gray. */ - return sizeof(GCtab) + sizeof(TValue) * t->asize + - sizeof(Node) * (t->hmask + 1); - } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) { - GCfunc *fn = gco2func(o); - gc_traverse_func(g, fn); - return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : - sizeCfunc((MSize)fn->c.nupvalues); - } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) { - GCproto *pt = gco2pt(o); - gc_traverse_proto(g, pt); - return pt->sizept; - } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) { - lua_State *th = gco2th(o); - setgcrefr(th->gclist, g->gc.grayagain); - setgcref(g->gc.grayagain, o); - black2gray(o); /* Threads are never black. */ - gc_traverse_thread(g, th); - return sizeof(lua_State) + sizeof(TValue) * th->stacksize; - } else { -#if LJ_HASJIT - GCtrace *T = gco2trace(o); - gc_traverse_trace(g, T); - return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + - T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); -#else - lua_assert(0); - return 0; -#endif - } -} - -/* Propagate all gray objects. */ -static size_t gc_propagate_gray(global_State *g) -{ - size_t m = 0; - while (gcref(g->gc.gray) != NULL) - m += propagatemark(g); - return m; -} - -/* -- Sweep phase --------------------------------------------------------- */ - -/* Type of GC free functions. */ -typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o); - -/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */ -static const GCFreeFunc gc_freefunc[] = { - (GCFreeFunc)lj_str_free, - (GCFreeFunc)lj_func_freeuv, - (GCFreeFunc)lj_state_free, - (GCFreeFunc)lj_func_freeproto, - (GCFreeFunc)lj_func_free, -#if LJ_HASJIT - (GCFreeFunc)lj_trace_free, -#else - (GCFreeFunc)0, -#endif -#if LJ_HASFFI - (GCFreeFunc)lj_cdata_free, -#else - (GCFreeFunc)0, -#endif - (GCFreeFunc)lj_tab_free, - (GCFreeFunc)lj_udata_free -}; - -/* Full sweep of a GC list. */ -#define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0) - -/* Partial sweep of a GC list. */ -static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim) -{ - /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */ - int ow = otherwhite(g); - GCobj *o; - while ((o = gcref(*p)) != NULL && lim-- > 0) { - if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ - gc_fullsweep(g, &gco2th(o)->openupval); - if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ - lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED)); - makewhite(g, o); /* Value is alive, change to the current white. */ - p = &o->gch.nextgc; - } else { /* Otherwise value is dead, free it. */ - lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED); - setgcrefr(*p, o->gch.nextgc); - if (o == gcref(g->gc.root)) - setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. 
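The liveness test in the sweep above relies on the two-white scheme: an object survives if it is black or carries the current white, and it dies if it only carries the other white. A standalone check using the same bit values that lj_gc.h assigns to LJ_GC_WHITE0, LJ_GC_WHITE1 and LJ_GC_BLACK:

#include <stdio.h>

enum { WHITE0 = 0x01, WHITE1 = 0x02, BLACK = 0x04, WHITES = WHITE0 | WHITE1 };

// The sweep test above: keep the object if it is black or carries the
// *current* white; free it if it only carries the *other* white.
static int survives(int marked, int currentwhite)
{
  int ow = currentwhite ^ WHITES;        // otherwhite(g)
  return ((marked ^ WHITES) & ow) != 0;  // black or current white?
}

int main(void)
{
  int cw = WHITE0;                       // suppose WHITE0 is the current white
  printf("%d\n", survives(WHITE0, cw));  // 1: current white, keep
  printf("%d\n", survives(WHITE1, cw));  // 0: other white, dead, free it
  printf("%d\n", survives(BLACK,  cw));  // 1: black, keep
  return 0;
}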
*/ - gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o); - } - } - return p; -} - -/* Check whether we can clear a key or a value slot from a table. */ -static int gc_mayclear(cTValue *o, int val) -{ - if (tvisgcv(o)) { /* Only collectable objects can be weak references. */ - if (tvisstr(o)) { /* But strings cannot be used as weak references. */ - gc_mark_str(strV(o)); /* And need to be marked. */ - return 0; - } - if (iswhite(gcV(o))) - return 1; /* Object is about to be collected. */ - if (tvisudata(o) && val && isfinalized(udataV(o))) - return 1; /* Finalized userdata is dropped only from values. */ - } - return 0; /* Cannot clear. */ -} - -/* Clear collected entries from weak tables. */ -static void gc_clearweak(GCobj *o) -{ - while (o) { - GCtab *t = gco2tab(o); - lua_assert((t->marked & LJ_GC_WEAK)); - if ((t->marked & LJ_GC_WEAKVAL)) { - MSize i, asize = t->asize; - for (i = 0; i < asize; i++) { - /* Clear array slot when value is about to be collected. */ - TValue *tv = arrayslot(t, i); - if (gc_mayclear(tv, 1)) - setnilV(tv); - } - } - if (t->hmask > 0) { - Node *node = noderef(t->node); - MSize i, hmask = t->hmask; - for (i = 0; i <= hmask; i++) { - Node *n = &node[i]; - /* Clear hash slot when key or value is about to be collected. */ - if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) || - gc_mayclear(&n->val, 1))) - setnilV(&n->val); - } - } - o = gcref(t->gclist); - } -} - -/* Call a userdata or cdata finalizer. */ -static void gc_call_finalizer(global_State *g, lua_State *L, - cTValue *mo, GCobj *o) -{ - /* Save and restore lots of state around the __gc callback. */ - uint8_t oldh = hook_save(g); - GCSize oldt = g->gc.threshold; - int errcode; - TValue *top; - lj_trace_abort(g); - hook_entergc(g); /* Disable hooks and new traces during __gc. */ - g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */ - top = L->top; - copyTV(L, top++, mo); - if (LJ_FR2) setnilV(top++); - setgcV(L, top, o, ~o->gch.gct); - L->top = top+1; - errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */ - hook_restore(g, oldh); - g->gc.threshold = oldt; /* Restore GC threshold. */ - if (errcode) - lj_err_throw(L, errcode); /* Propagate errors. */ -} - -/* Finalize one userdata or cdata object from the mmudata list. */ -static void gc_finalize(lua_State *L) -{ - global_State *g = G(L); - GCobj *o = gcnext(gcref(g->gc.mmudata)); - cTValue *mo; - lua_assert(tvref(g->jit_base) == NULL); /* Must not be called on trace. */ - /* Unchain from list of userdata to be finalized. */ - if (o == gcref(g->gc.mmudata)) - setgcrefnull(g->gc.mmudata); - else - setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc); -#if LJ_HASFFI - if (o->gch.gct == ~LJ_TCDATA) { - TValue tmp, *tv; - /* Add cdata back to the GC list and make it white. */ - setgcrefr(o->gch.nextgc, g->gc.root); - setgcref(g->gc.root, o); - makewhite(g, o); - o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; - /* Resolve finalizer. */ - setcdataV(L, &tmp, gco2cd(o)); - tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp); - if (!tvisnil(tv)) { - g->gc.nocdatafin = 0; - copyTV(L, &tmp, tv); - setnilV(tv); /* Clear entry in finalizer table. */ - gc_call_finalizer(g, L, &tmp, o); - } - return; - } -#endif - /* Add userdata back to the main userdata list and make it white. */ - setgcrefr(o->gch.nextgc, mainthread(g)->nextgc); - setgcref(mainthread(g)->nextgc, o); - makewhite(g, o); - /* Resolve the __gc metamethod. 
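Expressed with the public Lua/LuaJIT C API, the call that gc_call_finalizer() above arranges is roughly the following; the helper name and stack indices are purely illustrative, and the real code additionally disables hooks, blocks further GC steps and rethrows any error:

#include "lua.h"

// Push the __gc handler, push the object as its single argument, and run it
// in protected mode so an error cannot escape the collector unchecked.
static int call_gc_handler(lua_State *L, int handler_idx, int obj_idx)
{
  lua_pushvalue(L, handler_idx);   // the __gc metamethod
  lua_pushvalue(L, obj_idx);       // the userdata or cdata being finalized
  return lua_pcall(L, 1, 0, 0);    // stack: |mo|o| -> | ; returns an error code
}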
*/ - mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc); - if (mo) - gc_call_finalizer(g, L, mo, o); -} - -/* Finalize all userdata objects from mmudata list. */ -void lj_gc_finalize_udata(lua_State *L) -{ - while (gcref(G(L)->gc.mmudata) != NULL) - gc_finalize(L); -} - -#if LJ_HASFFI -/* Finalize all cdata objects from finalizer table. */ -void lj_gc_finalize_cdata(lua_State *L) -{ - global_State *g = G(L); - CTState *cts = ctype_ctsG(g); - if (cts) { - GCtab *t = cts->finalizer; - Node *node = noderef(t->node); - ptrdiff_t i; - setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */ - for (i = (ptrdiff_t)t->hmask; i >= 0; i--) - if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) { - GCobj *o = gcV(&node[i].key); - TValue tmp; - makewhite(g, o); - o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; - copyTV(L, &tmp, &node[i].val); - setnilV(&node[i].val); - gc_call_finalizer(g, L, &tmp, o); - } - } -} -#endif - -/* Free all remaining GC objects. */ -void lj_gc_freeall(global_State *g) -{ - MSize i, strmask; - /* Free everything, except super-fixed objects (the main thread). */ - g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED; - gc_fullsweep(g, &g->gc.root); - strmask = g->strmask; - for (i = 0; i <= strmask; i++) /* Free all string hash chains. */ - gc_fullsweep(g, &g->strhash[i]); -} - -/* -- Collector ----------------------------------------------------------- */ - -/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */ -static void atomic(global_State *g, lua_State *L) -{ - size_t udsize; - - gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */ - gc_propagate_gray(g); /* Propagate any left-overs. */ - - setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ - setgcrefnull(g->gc.weak); - lua_assert(!iswhite(obj2gco(mainthread(g)))); - gc_markobj(g, L); /* Mark running thread. */ - gc_traverse_curtrace(g); /* Traverse current trace. */ - gc_mark_gcroot(g); /* Mark GC roots (again). */ - gc_propagate_gray(g); /* Propagate all of the above. */ - - setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */ - setgcrefnull(g->gc.grayagain); - gc_propagate_gray(g); /* Propagate it. */ - - udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */ - gc_mark_mmudata(g); /* Mark them. */ - udsize += gc_propagate_gray(g); /* And propagate the marks. */ - - /* All marking done, clear weak tables. */ - gc_clearweak(gcref(g->gc.weak)); - - lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */ - - /* Prepare for sweep phase. */ - g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */ - g->strempty.marked = g->gc.currentwhite; - setmref(g->gc.sweep, &g->gc.root); - g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */ -} - -/* GC state machine. Returns a cost estimate for each step performed. */ -static size_t gc_onestep(lua_State *L) -{ - global_State *g = G(L); - switch (g->gc.state) { - case GCSpause: - gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */ - return 0; - case GCSpropagate: - if (gcref(g->gc.gray) != NULL) - return propagatemark(g); /* Propagate one gray object. */ - g->gc.state = GCSatomic; /* End of mark phase. */ - return 0; - case GCSatomic: - if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */ - return LJ_MAX_MEM; - atomic(g, L); - g->gc.state = GCSsweepstring; /* Start of sweep phase. 
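The white flip at the end of atomic() above is what arms the following sweep: otherwhite() is just an XOR with both white bits, so the roles of the two whites swap in a single operation. A tiny standalone illustration with the lj_gc.h bit values:

#include <stdio.h>

enum { WHITE0 = 0x01, WHITE1 = 0x02, WHITES = WHITE0 | WHITE1 };

int main(void)
{
  int currentwhite = WHITE0;
  currentwhite ^= WHITES;            // the flip: WHITE0 <-> WHITE1
  printf("0x%02x\n", currentwhite);  // 0x02: WHITE1 is now the current white
  // Objects still marked WHITE0 now carry the "other" white and get freed by
  // the sweep; objects allocated from here on start out as WHITE1.
  return 0;
}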
*/ - g->gc.sweepstr = 0; - return 0; - case GCSsweepstring: { - GCSize old = g->gc.total; - gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */ - if (g->gc.sweepstr > g->strmask) - g->gc.state = GCSsweep; /* All string hash chains sweeped. */ - lua_assert(old >= g->gc.total); - g->gc.estimate -= old - g->gc.total; - return GCSWEEPCOST; - } - case GCSsweep: { - GCSize old = g->gc.total; - setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); - lua_assert(old >= g->gc.total); - g->gc.estimate -= old - g->gc.total; - if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { - if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1) - lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */ - if (gcref(g->gc.mmudata)) { /* Need any finalizations? */ - g->gc.state = GCSfinalize; -#if LJ_HASFFI - g->gc.nocdatafin = 1; -#endif - } else { /* Otherwise skip this phase to help the JIT. */ - g->gc.state = GCSpause; /* End of GC cycle. */ - g->gc.debt = 0; - } - } - return GCSWEEPMAX*GCSWEEPCOST; - } - case GCSfinalize: - if (gcref(g->gc.mmudata) != NULL) { - if (tvref(g->jit_base)) /* Don't call finalizers on trace. */ - return LJ_MAX_MEM; - gc_finalize(L); /* Finalize one userdata object. */ - if (g->gc.estimate > GCFINALIZECOST) - g->gc.estimate -= GCFINALIZECOST; - return GCFINALIZECOST; - } -#if LJ_HASFFI - if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer); -#endif - g->gc.state = GCSpause; /* End of GC cycle. */ - g->gc.debt = 0; - return 0; - default: - lua_assert(0); - return 0; - } -} - -/* Perform a limited amount of incremental GC steps. */ -int LJ_FASTCALL lj_gc_step(lua_State *L) -{ - global_State *g = G(L); - GCSize lim; - int32_t ostate = g->vmstate; - setvmstate(g, GC); - lim = (GCSTEPSIZE/100) * g->gc.stepmul; - if (lim == 0) - lim = LJ_MAX_MEM; - if (g->gc.total > g->gc.threshold) - g->gc.debt += g->gc.total - g->gc.threshold; - do { - lim -= (GCSize)gc_onestep(L); - if (g->gc.state == GCSpause) { - g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; - g->vmstate = ostate; - return 1; /* Finished a GC cycle. */ - } - } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0)); - if (g->gc.debt < GCSTEPSIZE) { - g->gc.threshold = g->gc.total + GCSTEPSIZE; - g->vmstate = ostate; - return -1; - } else { - g->gc.debt -= GCSTEPSIZE; - g->gc.threshold = g->gc.total; - g->vmstate = ostate; - return 0; - } -} - -/* Ditto, but fix the stack top first. */ -void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L) -{ - if (curr_funcisL(L)) L->top = curr_topL(L); - lj_gc_step(L); -} - -#if LJ_HASJIT -/* Perform multiple GC steps. Called from JIT-compiled code. */ -int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps) -{ - lua_State *L = gco2th(gcref(g->cur_L)); - L->base = tvref(G(L)->jit_base); - L->top = curr_topL(L); - while (steps-- > 0 && lj_gc_step(L) == 0) - ; - /* Return 1 to force a trace exit. */ - return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize); -} -#endif - -/* Perform a full GC cycle. */ -void lj_gc_fullgc(lua_State *L) -{ - global_State *g = G(L); - int32_t ostate = g->vmstate; - setvmstate(g, GC); - if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */ - setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */ - setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */ - setgcrefnull(g->gc.grayagain); - setgcrefnull(g->gc.weak); - g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. 
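The pause setting decides how far the heap may grow before the next cycle starts: once a cycle finishes, lj_gc_step() above (and lj_gc_fullgc() below) set the threshold to estimate/100 * pause. A worked example with illustrative numbers, a 4 MB live estimate and a pause of 200%, which are assumptions rather than values taken from this file:

#include <stdio.h>

int main(void)
{
  unsigned long estimate = 4u << 20;   // ~4 MB assumed live after the cycle
  unsigned long pause = 200;           // a pause of 200%, purely illustrative
  unsigned long threshold = (estimate / 100) * pause;
  printf("next GC at ~%lu bytes\n", threshold);  // ~8 MB: twice the live size
  return 0;
}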
*/ - g->gc.sweepstr = 0; - } - while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) - gc_onestep(L); /* Finish sweep. */ - lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause); - /* Now perform a full GC. */ - g->gc.state = GCSpause; - do { gc_onestep(L); } while (g->gc.state != GCSpause); - g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; - g->vmstate = ostate; -} - -/* -- Write barriers ------------------------------------------------------ */ - -/* Move the GC propagation frontier forward. */ -void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) -{ - lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); - lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); - lua_assert(o->gch.gct != ~LJ_TTAB); - /* Preserve invariant during propagation. Otherwise it doesn't matter. */ - if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) - gc_mark(g, v); /* Move frontier forward. */ - else - makewhite(g, o); /* Make it white to avoid the following barrier. */ -} - -/* Specialized barrier for closed upvalue. Pass &uv->tv. */ -void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv) -{ -#define TV2MARKED(x) \ - (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked))) - if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) - gc_mark(g, gcV(tv)); - else - TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g); -#undef TV2MARKED -} - -/* Close upvalue. Also needs a write barrier. */ -void lj_gc_closeuv(global_State *g, GCupval *uv) -{ - GCobj *o = obj2gco(uv); - /* Copy stack slot to upvalue itself and point to the copy. */ - copyTV(mainthread(g), &uv->tv, uvval(uv)); - setmref(uv->v, &uv->tv); - uv->closed = 1; - setgcrefr(o->gch.nextgc, g->gc.root); - setgcref(g->gc.root, o); - if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */ - if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) { - gray2black(o); /* Make it black and preserve invariant. */ - if (tviswhite(&uv->tv)) - lj_gc_barrierf(g, o, gcV(&uv->tv)); - } else { - makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */ - lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); - } - } -} - -#if LJ_HASJIT -/* Mark a trace if it's saved during the propagation phase. */ -void lj_gc_barriertrace(global_State *g, uint32_t traceno) -{ - if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) - gc_marktrace(g, traceno); -} -#endif - -/* -- Allocator ----------------------------------------------------------- */ - -/* Call pluggable memory allocator to allocate or resize a fragment. */ -void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz) -{ - global_State *g = G(L); - lua_assert((osz == 0) == (p == NULL)); - p = g->allocf(g->allocd, p, osz, nsz); - if (p == NULL && nsz > 0) - lj_err_mem(L); - lua_assert((nsz == 0) == (p == NULL)); - lua_assert(checkptrGC(p)); - g->gc.total = (g->gc.total - osz) + nsz; - return p; -} - -/* Allocate new GC object and link it to the root set. */ -void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size) -{ - global_State *g = G(L); - GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size); - if (o == NULL) - lj_err_mem(L); - lua_assert(checkptrGC(o)); - g->gc.total += size; - setgcrefr(o->gch.nextgc, g->gc.root); - setgcref(g->gc.root, o); - newwhite(g, o); - return o; -} - -/* Resize growable vector. 
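The policy implemented by lj_mem_grow() below is simply: double the current element count, but never return less than a small minimum and never more than the caller's limit. A standalone sketch of that policy (the minimum of 8 is an illustrative stand-in for LJ_MIN_VECSZ):

#include <stdio.h>

#define MIN_VECSZ 8   // stand-in for LJ_MIN_VECSZ, illustrative value

// Double the element count, clamped between a small minimum and the limit.
static unsigned grow_size(unsigned cur, unsigned lim)
{
  unsigned sz = cur << 1;
  if (sz < MIN_VECSZ) sz = MIN_VECSZ;
  if (sz > lim) sz = lim;
  return sz;
}

int main(void)
{
  printf("%u %u %u\n", grow_size(0, 65535), grow_size(32, 65535),
         grow_size(40000, 65535));   // prints 8 64 65535
  return 0;
}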
*/ -void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz) -{ - MSize sz = (*szp) << 1; - if (sz < LJ_MIN_VECSZ) - sz = LJ_MIN_VECSZ; - if (sz > lim) - sz = lim; - p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz); - *szp = sz; - return p; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.h deleted file mode 100644 index 847eb7835d9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gc.h +++ /dev/null @@ -1,134 +0,0 @@ -/* -** Garbage collector. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_GC_H -#define _LJ_GC_H - -#include "lj_obj.h" - -/* Garbage collector states. Order matters. */ -enum { - GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize -}; - -/* Bitmasks for marked field of GCobj. */ -#define LJ_GC_WHITE0 0x01 -#define LJ_GC_WHITE1 0x02 -#define LJ_GC_BLACK 0x04 -#define LJ_GC_FINALIZED 0x08 -#define LJ_GC_WEAKKEY 0x08 -#define LJ_GC_WEAKVAL 0x10 -#define LJ_GC_CDATA_FIN 0x10 -#define LJ_GC_FIXED 0x20 -#define LJ_GC_SFIXED 0x40 - -#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1) -#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK) -#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL) - -/* Macros to test and set GCobj colors. */ -#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES) -#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK) -#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES))) -#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x))) -#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES) -#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES) - -#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES) -#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g)) -#define makewhite(g, x) \ - ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g)) -#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES) -#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK) -#define fixstring(s) ((s)->marked |= LJ_GC_FIXED) -#define markfinalized(x) ((x)->gch.marked |= LJ_GC_FINALIZED) - -/* Collector. */ -LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all); -LJ_FUNC void lj_gc_finalize_udata(lua_State *L); -#if LJ_HASFFI -LJ_FUNC void lj_gc_finalize_cdata(lua_State *L); -#else -#define lj_gc_finalize_cdata(L) UNUSED(L) -#endif -LJ_FUNC void lj_gc_freeall(global_State *g); -LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L); -LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L); -#if LJ_HASJIT -LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps); -#endif -LJ_FUNC void lj_gc_fullgc(lua_State *L); - -/* GC check: drive collector forward if the GC threshold has been reached. */ -#define lj_gc_check(L) \ - { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \ - lj_gc_step(L); } -#define lj_gc_check_fixtop(L) \ - { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \ - lj_gc_step_fixtop(L); } - -/* Write barriers. */ -LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v); -LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv); -LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv); -#if LJ_HASJIT -LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno); -#endif - -/* Move the GC propagation frontier back for tables (make it gray again). 
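All of the barrier macros below guard the same invariant: a black object must never be left pointing at a white one. Tables repair a violation by turning gray again (lj_gc_barrierback below), while most other objects repair it by marking the stored value instead (lj_gc_barrierf). A standalone sketch of the trigger condition, using the color bits defined above:

#include <stdio.h>

enum { WHITE0 = 0x01, WHITE1 = 0x02, BLACK = 0x04 };

// A store needs a barrier only when a black parent receives a white child,
// mirroring the tviswhite()/isblack() tests in the macros below.
static int needs_barrier(int parent_marked, int child_marked)
{
  return (parent_marked & BLACK) && (child_marked & (WHITE0 | WHITE1));
}

int main(void)
{
  printf("%d\n", needs_barrier(BLACK, WHITE0));  // 1: black -> white, barrier
  printf("%d\n", needs_barrier(BLACK, BLACK));   // 0: black -> black, fine
  printf("%d\n", needs_barrier(WHITE0, WHITE0)); // 0: parent not black yet
  return 0;
}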
*/ -static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t) -{ - GCobj *o = obj2gco(t); - lua_assert(isblack(o) && !isdead(g, o)); - lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); - black2gray(o); - setgcrefr(t->gclist, g->gc.grayagain); - setgcref(g->gc.grayagain, o); -} - -/* Barrier for stores to table objects. TValue and GCobj variant. */ -#define lj_gc_anybarriert(L, t) \ - { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); } -#define lj_gc_barriert(L, t, tv) \ - { if (tviswhite(tv) && isblack(obj2gco(t))) \ - lj_gc_barrierback(G(L), (t)); } -#define lj_gc_objbarriert(L, t, o) \ - { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \ - lj_gc_barrierback(G(L), (t)); } - -/* Barrier for stores to any other object. TValue and GCobj variant. */ -#define lj_gc_barrier(L, p, tv) \ - { if (tviswhite(tv) && isblack(obj2gco(p))) \ - lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); } -#define lj_gc_objbarrier(L, p, o) \ - { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \ - lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); } - -/* Allocator. */ -LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz); -LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size); -LJ_FUNC void *lj_mem_grow(lua_State *L, void *p, - MSize *szp, MSize lim, MSize esz); - -#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s)) - -static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize) -{ - g->gc.total -= (GCSize)osize; - g->allocf(g->allocd, p, osize, 0); -} - -#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (GCSize)((n)*sizeof(t)))) -#define lj_mem_reallocvec(L, p, on, n, t) \ - ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (GCSize)((n)*sizeof(t)))) -#define lj_mem_growvec(L, p, n, m, t) \ - ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t))) -#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t)) - -#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t))) -#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s))) -#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p))) - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.c deleted file mode 100644 index 9b95e525840..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.c +++ /dev/null @@ -1,787 +0,0 @@ -/* -** Client for the GDB JIT API. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_gdbjit_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_frame.h" -#include "lj_buf.h" -#include "lj_strfmt.h" -#include "lj_jit.h" -#include "lj_dispatch.h" - -/* This is not compiled in by default. -** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything. -*/ -#ifdef LUAJIT_USE_GDBJIT - -/* The GDB JIT API allows JIT compilers to pass debug information about -** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher -** to see it in action. -** -** This is a passive API, so it works even when not running under GDB -** or when attaching to an already running process. Alas, this implies -** enabling it always has a non-negligible overhead -- do not use in -** release mode! -** -** The LuaJIT GDB JIT client is rather minimal at the moment. It gives -** each trace a symbol name and adds a source location and frame unwind -** information. 
Obviously LuaJIT itself and any embedding C application -** should be compiled with debug symbols, too (see the Makefile). -** -** Traces are named TRACE_1, TRACE_2, ... these correspond to the trace -** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc. -** to set breakpoints on specific traces (even ahead of their creation). -** -** The source location for each trace allows listing the corresponding -** source lines with the GDB command "list" (but only if the Lua source -** has been loaded from a file). Currently this is always set to the -** location where the trace has been started. -** -** Frame unwind information can be inspected with the GDB command -** "info frame". This also allows proper backtraces across JIT-compiled -** code with the GDB command "bt". -** -** You probably want to add the following settings to a .gdbinit file -** (or add them to ~/.gdbinit): -** set disassembly-flavor intel -** set breakpoint pending on -** -** Here's a sample GDB session: -** ------------------------------------------------------------------------ - -$ cat >x.lua -for outer=1,100 do - for inner=1,100 do end -end -^D - -$ luajit -jv x.lua -[TRACE 1 x.lua:2] -[TRACE 2 (1/3) x.lua:1 -> 1] - -$ gdb --quiet --args luajit x.lua -(gdb) tbreak TRACE_1 -Function "TRACE_1" not defined. -Temporary breakpoint 1 (TRACE_1) pending. -(gdb) run -Starting program: luajit x.lua - -Temporary breakpoint 1, TRACE_1 () at x.lua:2 -2 for inner=1,100 do end -(gdb) list -1 for outer=1,100 do -2 for inner=1,100 do end -3 end -(gdb) bt -#0 TRACE_1 () at x.lua:2 -#1 0x08053690 in lua_pcall [...] -[...] -#7 0x0806ff90 in main [...] -(gdb) disass TRACE_1 -Dump of assembler code for function TRACE_1: -0xf7fd9fba : mov DWORD PTR ds:0xf7e0e2a0,0x1 -0xf7fd9fc4 : movsd xmm7,QWORD PTR [edx+0x20] -[...] -0xf7fd9ff8 : jmp 0xf7fd2014 -End of assembler dump. -(gdb) tbreak TRACE_2 -Function "TRACE_2" not defined. -Temporary breakpoint 2 (TRACE_2) pending. -(gdb) cont -Continuing. - -Temporary breakpoint 2, TRACE_2 () at x.lua:1 -1 for outer=1,100 do -(gdb) info frame -Stack level 0, frame at 0xffffd7c0: - eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690 - called by frame at 0xffffd7e0 - source language unknown. - Arglist at 0xffffd78c, args: - Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0 - Saved registers: - ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4, - eip at 0xffffd7bc -(gdb) - -** ------------------------------------------------------------------------ -*/ - -/* -- GDB JIT API --------------------------------------------------------- */ - -/* GDB JIT actions. */ -enum { - GDBJIT_NOACTION = 0, - GDBJIT_REGISTER, - GDBJIT_UNREGISTER -}; - -/* GDB JIT entry. */ -typedef struct GDBJITentry { - struct GDBJITentry *next_entry; - struct GDBJITentry *prev_entry; - const char *symfile_addr; - uint64_t symfile_size; -} GDBJITentry; - -/* GDB JIT descriptor. */ -typedef struct GDBJITdesc { - uint32_t version; - uint32_t action_flag; - GDBJITentry *relevant_entry; - GDBJITentry *first_entry; -} GDBJITdesc; - -GDBJITdesc __jit_debug_descriptor = { - 1, GDBJIT_NOACTION, NULL, NULL -}; - -/* GDB sets a breakpoint at this function. */ -void LJ_NOINLINE __jit_debug_register_code() -{ - __asm__ __volatile__(""); -}; - -/* -- In-memory ELF object definitions ------------------------------------ */ - -/* ELF definitions. 
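Before the ELF layout below, it is worth seeing how small the registration handshake with GDB really is: chain an entry describing the in-memory object file, point relevant_entry at it, set the action flag and call the intentionally empty hook function that GDB breakpoints. This is a condensed sketch of what gdbjit_newentry() does further down, shown without the allocation and trace bookkeeping; the helper name and parameters are illustrative:

// Register one in-memory ELF image with the GDB JIT interface declared above.
static void register_with_gdb(GDBJITentry *entry, const char *elf, uint64_t sz)
{
  entry->symfile_addr = elf;
  entry->symfile_size = sz;
  entry->prev_entry = NULL;
  entry->next_entry = __jit_debug_descriptor.first_entry;
  if (entry->next_entry)
    entry->next_entry->prev_entry = entry;
  __jit_debug_descriptor.first_entry = entry;
  __jit_debug_descriptor.relevant_entry = entry;
  __jit_debug_descriptor.action_flag = GDBJIT_REGISTER;
  __jit_debug_register_code();   // GDB's breakpoint fires here
}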
*/ -typedef struct ELFheader { - uint8_t emagic[4]; - uint8_t eclass; - uint8_t eendian; - uint8_t eversion; - uint8_t eosabi; - uint8_t eabiversion; - uint8_t epad[7]; - uint16_t type; - uint16_t machine; - uint32_t version; - uintptr_t entry; - uintptr_t phofs; - uintptr_t shofs; - uint32_t flags; - uint16_t ehsize; - uint16_t phentsize; - uint16_t phnum; - uint16_t shentsize; - uint16_t shnum; - uint16_t shstridx; -} ELFheader; - -typedef struct ELFsectheader { - uint32_t name; - uint32_t type; - uintptr_t flags; - uintptr_t addr; - uintptr_t ofs; - uintptr_t size; - uint32_t link; - uint32_t info; - uintptr_t align; - uintptr_t entsize; -} ELFsectheader; - -#define ELFSECT_IDX_ABS 0xfff1 - -enum { - ELFSECT_TYPE_PROGBITS = 1, - ELFSECT_TYPE_SYMTAB = 2, - ELFSECT_TYPE_STRTAB = 3, - ELFSECT_TYPE_NOBITS = 8 -}; - -#define ELFSECT_FLAGS_WRITE 1 -#define ELFSECT_FLAGS_ALLOC 2 -#define ELFSECT_FLAGS_EXEC 4 - -typedef struct ELFsymbol { -#if LJ_64 - uint32_t name; - uint8_t info; - uint8_t other; - uint16_t sectidx; - uintptr_t value; - uint64_t size; -#else - uint32_t name; - uintptr_t value; - uint32_t size; - uint8_t info; - uint8_t other; - uint16_t sectidx; -#endif -} ELFsymbol; - -enum { - ELFSYM_TYPE_FUNC = 2, - ELFSYM_TYPE_FILE = 4, - ELFSYM_BIND_LOCAL = 0 << 4, - ELFSYM_BIND_GLOBAL = 1 << 4, -}; - -/* DWARF definitions. */ -#define DW_CIE_VERSION 1 - -enum { - DW_CFA_nop = 0x0, - DW_CFA_offset_extended = 0x5, - DW_CFA_def_cfa = 0xc, - DW_CFA_def_cfa_offset = 0xe, - DW_CFA_offset_extended_sf = 0x11, - DW_CFA_advance_loc = 0x40, - DW_CFA_offset = 0x80 -}; - -enum { - DW_EH_PE_udata4 = 3, - DW_EH_PE_textrel = 0x20 -}; - -enum { - DW_TAG_compile_unit = 0x11 -}; - -enum { - DW_children_no = 0, - DW_children_yes = 1 -}; - -enum { - DW_AT_name = 0x03, - DW_AT_stmt_list = 0x10, - DW_AT_low_pc = 0x11, - DW_AT_high_pc = 0x12 -}; - -enum { - DW_FORM_addr = 0x01, - DW_FORM_data4 = 0x06, - DW_FORM_string = 0x08 -}; - -enum { - DW_LNS_extended_op = 0, - DW_LNS_copy = 1, - DW_LNS_advance_pc = 2, - DW_LNS_advance_line = 3 -}; - -enum { - DW_LNE_end_sequence = 1, - DW_LNE_set_address = 2 -}; - -enum { -#if LJ_TARGET_X86 - DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX, - DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI, - DW_REG_RA, -#elif LJ_TARGET_X64 - /* Yes, the order is strange, but correct. */ - DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX, - DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP, - DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11, - DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15, - DW_REG_RA, -#elif LJ_TARGET_ARM - DW_REG_SP = 13, - DW_REG_RA = 14, -#elif LJ_TARGET_PPC - DW_REG_SP = 1, - DW_REG_RA = 65, - DW_REG_CR = 70, -#elif LJ_TARGET_MIPS - DW_REG_SP = 29, - DW_REG_RA = 31, -#else -#error "Unsupported target architecture" -#endif -}; - -/* Minimal list of sections for the in-memory ELF object. */ -enum { - GDBJIT_SECT_NULL, - GDBJIT_SECT_text, - GDBJIT_SECT_eh_frame, - GDBJIT_SECT_shstrtab, - GDBJIT_SECT_strtab, - GDBJIT_SECT_symtab, - GDBJIT_SECT_debug_info, - GDBJIT_SECT_debug_abbrev, - GDBJIT_SECT_debug_line, - GDBJIT_SECT__MAX -}; - -enum { - GDBJIT_SYM_UNDEF, - GDBJIT_SYM_FILE, - GDBJIT_SYM_FUNC, - GDBJIT_SYM__MAX -}; - -/* In-memory ELF object. */ -typedef struct GDBJITobj { - ELFheader hdr; /* ELF header. */ - ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */ - ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */ - uint8_t space[4096]; /* Space for various section data. */ -} GDBJITobj; - -/* Combined structure for GDB JIT entry and ELF object. 
*/ -typedef struct GDBJITentryobj { - GDBJITentry entry; - size_t sz; - GDBJITobj obj; -} GDBJITentryobj; - -/* Template for in-memory ELF header. */ -static const ELFheader elfhdr_template = { - .emagic = { 0x7f, 'E', 'L', 'F' }, - .eclass = LJ_64 ? 2 : 1, - .eendian = LJ_ENDIAN_SELECT(1, 2), - .eversion = 1, -#if LJ_TARGET_LINUX - .eosabi = 0, /* Nope, it's not 3. */ -#elif defined(__FreeBSD__) - .eosabi = 9, -#elif defined(__NetBSD__) - .eosabi = 2, -#elif defined(__OpenBSD__) - .eosabi = 12, -#elif defined(__DragonFly__) - .eosabi = 0, -#elif (defined(__sun__) && defined(__svr4__)) - .eosabi = 6, -#else - .eosabi = 0, -#endif - .eabiversion = 0, - .epad = { 0, 0, 0, 0, 0, 0, 0 }, - .type = 1, -#if LJ_TARGET_X86 - .machine = 3, -#elif LJ_TARGET_X64 - .machine = 62, -#elif LJ_TARGET_ARM - .machine = 40, -#elif LJ_TARGET_PPC - .machine = 20, -#elif LJ_TARGET_MIPS - .machine = 8, -#else -#error "Unsupported target architecture" -#endif - .version = 1, - .entry = 0, - .phofs = 0, - .shofs = offsetof(GDBJITobj, sect), - .flags = 0, - .ehsize = sizeof(ELFheader), - .phentsize = 0, - .phnum = 0, - .shentsize = sizeof(ELFsectheader), - .shnum = GDBJIT_SECT__MAX, - .shstridx = GDBJIT_SECT_shstrtab -}; - -/* -- In-memory ELF object generation ------------------------------------- */ - -/* Context for generating the ELF object for the GDB JIT API. */ -typedef struct GDBJITctx { - uint8_t *p; /* Pointer to next address in obj.space. */ - uint8_t *startp; /* Pointer to start address in obj.space. */ - GCtrace *T; /* Generate symbols for this trace. */ - uintptr_t mcaddr; /* Machine code address. */ - MSize szmcode; /* Size of machine code. */ - MSize spadjp; /* Stack adjustment for parent trace or interpreter. */ - MSize spadj; /* Stack adjustment for trace itself. */ - BCLine lineno; /* Starting line number. */ - const char *filename; /* Starting file name. */ - size_t objsize; /* Final size of ELF object. */ - GDBJITobj obj; /* In-memory ELF object. */ -} GDBJITctx; - -/* Add a zero-terminated string. */ -static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str) -{ - uint8_t *p = ctx->p; - uint32_t ofs = (uint32_t)(p - ctx->startp); - do { - *p++ = (uint8_t)*str; - } while (*str++); - ctx->p = p; - return ofs; -} - -/* Append a decimal number. */ -static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n) -{ - if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); } - *ctx->p++ = '0' + n; -} - -/* Add a SLEB128 value. */ -static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v) -{ - uint8_t *p = ctx->p; - for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7) - *p++ = (uint8_t)((v & 0x7f) | 0x80); - *p++ = (uint8_t)(v & 0x7f); - ctx->p = p; -} - -/* Shortcuts to generate DWARF structures. */ -#define DB(x) (*p++ = (x)) -#define DI8(x) (*(int8_t *)p = (x), p++) -#define DU16(x) (*(uint16_t *)p = (x), p += 2) -#define DU32(x) (*(uint32_t *)p = (x), p += 4) -#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t)) -#define DUV(x) (p = (uint8_t *)lj_strfmt_wuleb128((char *)p, (x))) -#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p) -#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p) -#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop -#define DSECT(name, stmt) \ - { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \ - *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); } \ - -/* Initialize ELF section headers. 
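The DUV() and DSV() shortcuts above emit DWARF's LEB128 variable-length integers: 7 payload bits per byte, with the high bit set on every byte except the last. A standalone encoder for the unsigned form, together with the classic worked example:

#include <stdio.h>
#include <stdint.h>

// Unsigned LEB128, the encoding DUV() produces via lj_strfmt_wuleb128().
static size_t uleb128(uint8_t *p, uint32_t v)
{
  size_t n = 0;
  for (; v >= 0x80; v >>= 7)
    p[n++] = (uint8_t)((v & 0x7f) | 0x80);
  p[n++] = (uint8_t)v;
  return n;
}

int main(void)
{
  uint8_t buf[8];
  size_t n = uleb128(buf, 624485);
  for (size_t i = 0; i < n; i++)
    printf("%02x ", (unsigned)buf[i]);   // prints: e5 8e 26
  printf("\n");
  return 0;
}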
*/ -static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx) -{ - ELFsectheader *sect; - - *ctx->p++ = '\0'; /* Empty string at start of string table. */ - -#define SECTDEF(id, tp, al) \ - sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \ - sect->name = gdbjit_strz(ctx, "." #id); \ - sect->type = ELFSECT_TYPE_##tp; \ - sect->align = (al) - - SECTDEF(text, NOBITS, 16); - sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC; - sect->addr = ctx->mcaddr; - sect->ofs = 0; - sect->size = ctx->szmcode; - - SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t)); - sect->flags = ELFSECT_FLAGS_ALLOC; - - SECTDEF(shstrtab, STRTAB, 1); - SECTDEF(strtab, STRTAB, 1); - - SECTDEF(symtab, SYMTAB, sizeof(uintptr_t)); - sect->ofs = offsetof(GDBJITobj, sym); - sect->size = sizeof(ctx->obj.sym); - sect->link = GDBJIT_SECT_strtab; - sect->entsize = sizeof(ELFsymbol); - sect->info = GDBJIT_SYM_FUNC; - - SECTDEF(debug_info, PROGBITS, 1); - SECTDEF(debug_abbrev, PROGBITS, 1); - SECTDEF(debug_line, PROGBITS, 1); - -#undef SECTDEF -} - -/* Initialize symbol table. */ -static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx) -{ - ELFsymbol *sym; - - *ctx->p++ = '\0'; /* Empty string at start of string table. */ - - sym = &ctx->obj.sym[GDBJIT_SYM_FILE]; - sym->name = gdbjit_strz(ctx, "JIT mcode"); - sym->sectidx = ELFSECT_IDX_ABS; - sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL; - - sym = &ctx->obj.sym[GDBJIT_SYM_FUNC]; - sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--; - gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0'; - sym->sectidx = GDBJIT_SECT_text; - sym->value = 0; - sym->size = ctx->szmcode; - sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL; -} - -/* Initialize .eh_frame section. */ -static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx) -{ - uint8_t *p = ctx->p; - uint8_t *framep = p; - - /* Emit DWARF EH CIE. */ - DSECT(CIE, - DU32(0); /* Offset to CIE itself. */ - DB(DW_CIE_VERSION); - DSTR("zR"); /* Augmentation. */ - DUV(1); /* Code alignment factor. */ - DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */ - DB(DW_REG_RA); /* Return address register. */ - DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */ - DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t)); -#if LJ_TARGET_PPC - DB(DW_CFA_offset_extended_sf); DB(DW_REG_RA); DSV(-1); -#else - DB(DW_CFA_offset|DW_REG_RA); DUV(1); -#endif - DALIGNNOP(sizeof(uintptr_t)); - ) - - /* Emit DWARF EH FDE. */ - DSECT(FDE, - DU32((uint32_t)(p-framep)); /* Offset to CIE. */ - DU32(0); /* Machine code offset relative to .text. */ - DU32(ctx->szmcode); /* Machine code length. */ - DB(0); /* Augmentation data. */ - /* Registers saved in CFRAME. */ -#if LJ_TARGET_X86 - DB(DW_CFA_offset|DW_REG_BP); DUV(2); - DB(DW_CFA_offset|DW_REG_DI); DUV(3); - DB(DW_CFA_offset|DW_REG_SI); DUV(4); - DB(DW_CFA_offset|DW_REG_BX); DUV(5); -#elif LJ_TARGET_X64 - DB(DW_CFA_offset|DW_REG_BP); DUV(2); - DB(DW_CFA_offset|DW_REG_BX); DUV(3); - DB(DW_CFA_offset|DW_REG_15); DUV(4); - DB(DW_CFA_offset|DW_REG_14); DUV(5); - /* Extra registers saved for JIT-compiled code. */ - DB(DW_CFA_offset|DW_REG_13); DUV(LJ_GC64 ? 10 : 9); - DB(DW_CFA_offset|DW_REG_12); DUV(LJ_GC64 ? 
11 : 10); -#elif LJ_TARGET_ARM - { - int i; - for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); } - } -#elif LJ_TARGET_PPC - { - int i; - DB(DW_CFA_offset_extended); DB(DW_REG_CR); DUV(55); - for (i = 14; i <= 31; i++) { - DB(DW_CFA_offset|i); DUV(37+(31-i)); - DB(DW_CFA_offset|32|i); DUV(2+2*(31-i)); - } - } -#elif LJ_TARGET_MIPS - { - int i; - DB(DW_CFA_offset|30); DUV(2); - for (i = 23; i >= 16; i--) { DB(DW_CFA_offset|i); DUV(26-i); } - for (i = 30; i >= 20; i -= 2) { DB(DW_CFA_offset|32|i); DUV(42-i); } - } -#else -#error "Unsupported target architecture" -#endif - if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */ - DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp); - DB(DW_CFA_advance_loc|1); /* Only an approximation. */ - } - DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */ - DALIGNNOP(sizeof(uintptr_t)); - ) - - ctx->p = p; -} - -/* Initialize .debug_info section. */ -static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx) -{ - uint8_t *p = ctx->p; - - DSECT(info, - DU16(2); /* DWARF version. */ - DU32(0); /* Abbrev offset. */ - DB(sizeof(uintptr_t)); /* Pointer size. */ - - DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */ - DSTR(ctx->filename); /* DW_AT_name. */ - DADDR(ctx->mcaddr); /* DW_AT_low_pc. */ - DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */ - DU32(0); /* DW_AT_stmt_list. */ - ) - - ctx->p = p; -} - -/* Initialize .debug_abbrev section. */ -static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx) -{ - uint8_t *p = ctx->p; - - /* Abbrev #1: DW_TAG_compile_unit. */ - DUV(1); DUV(DW_TAG_compile_unit); - DB(DW_children_no); - DUV(DW_AT_name); DUV(DW_FORM_string); - DUV(DW_AT_low_pc); DUV(DW_FORM_addr); - DUV(DW_AT_high_pc); DUV(DW_FORM_addr); - DUV(DW_AT_stmt_list); DUV(DW_FORM_data4); - DB(0); DB(0); - - ctx->p = p; -} - -#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op))) - -/* Initialize .debug_line section. */ -static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx) -{ - uint8_t *p = ctx->p; - - DSECT(line, - DU16(2); /* DWARF version. */ - DSECT(header, - DB(1); /* Minimum instruction length. */ - DB(1); /* is_stmt. */ - DI8(0); /* Line base for special opcodes. */ - DB(2); /* Line range for special opcodes. */ - DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */ - DB(0); DB(1); DB(1); /* Standard opcode lengths. */ - /* Directory table. */ - DB(0); - /* File name table. */ - DSTR(ctx->filename); DUV(0); DUV(0); DUV(0); - DB(0); - ) - - DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr); - if (ctx->lineno) { - DB(DW_LNS_advance_line); DSV(ctx->lineno-1); - } - DB(DW_LNS_copy); - DB(DW_LNS_advance_pc); DUV(ctx->szmcode); - DLNE(DW_LNE_end_sequence, 0); - ) - - ctx->p = p; -} - -#undef DLNE - -/* Undef shortcuts. */ -#undef DB -#undef DI8 -#undef DU16 -#undef DU32 -#undef DADDR -#undef DUV -#undef DSV -#undef DSTR -#undef DALIGNNOP -#undef DSECT - -/* Type of a section initializer callback. */ -typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx); - -/* Call section initializer and set the section offset and size. */ -static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf) -{ - ctx->startp = ctx->p; - ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj); - initf(ctx); - ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp); -} - -#define SECTALIGN(p, a) \ - ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1))) - -/* Build in-memory ELF object. 
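SECTALIGN above uses the usual round-up trick for power-of-two alignments: add alignment minus one, then mask the low bits away. A standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

// Round p up to the next multiple of a power-of-two alignment a.
static uintptr_t align_up(uintptr_t p, uintptr_t a)
{
  return (p + (a - 1)) & ~(a - 1);
}

int main(void)
{
  printf("%lu %lu %lu\n",
         (unsigned long)align_up(13, 8),    // 16
         (unsigned long)align_up(16, 8),    // 16: already aligned
         (unsigned long)align_up(17, 16));  // 32
  return 0;
}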
*/ -static void gdbjit_buildobj(GDBJITctx *ctx) -{ - GDBJITobj *obj = &ctx->obj; - /* Fill in ELF header and clear structures. */ - memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader)); - memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX); - memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX); - /* Initialize sections. */ - ctx->p = obj->space; - gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr); - gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab); - gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo); - gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev); - gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline); - SECTALIGN(ctx->p, sizeof(uintptr_t)); - gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe); - ctx->objsize = (size_t)((char *)ctx->p - (char *)obj); - lua_assert(ctx->objsize < sizeof(GDBJITobj)); -} - -#undef SECTALIGN - -/* -- Interface to GDB JIT API -------------------------------------------- */ - -/* Add new entry to GDB JIT symbol chain. */ -static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx) -{ - /* Allocate memory for GDB JIT entry and ELF object. */ - MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize); - GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj); - memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */ - eo->sz = sz; - ctx->T->gdbjit_entry = (void *)eo; - /* Link new entry to chain and register it. */ - eo->entry.prev_entry = NULL; - eo->entry.next_entry = __jit_debug_descriptor.first_entry; - if (eo->entry.next_entry) - eo->entry.next_entry->prev_entry = &eo->entry; - eo->entry.symfile_addr = (const char *)&eo->obj; - eo->entry.symfile_size = ctx->objsize; - __jit_debug_descriptor.first_entry = &eo->entry; - __jit_debug_descriptor.relevant_entry = &eo->entry; - __jit_debug_descriptor.action_flag = GDBJIT_REGISTER; - __jit_debug_register_code(); -} - -/* Add debug info for newly compiled trace and notify GDB. */ -void lj_gdbjit_addtrace(jit_State *J, GCtrace *T) -{ - GDBJITctx ctx; - GCproto *pt = &gcref(T->startpt)->pt; - TraceNo parent = T->ir[REF_BASE].op1; - const BCIns *startpc = mref(T->startpc, const BCIns); - ctx.T = T; - ctx.mcaddr = (uintptr_t)T->mcode; - ctx.szmcode = T->szmcode; - ctx.spadjp = CFRAME_SIZE_JIT + - (MSize)(parent ? traceref(J, parent)->spadjust : 0); - ctx.spadj = CFRAME_SIZE_JIT + T->spadjust; - lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); - ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); - ctx.filename = proto_chunknamestr(pt); - if (*ctx.filename == '@' || *ctx.filename == '=') - ctx.filename++; - else - ctx.filename = "(string)"; - gdbjit_buildobj(&ctx); - gdbjit_newentry(J->L, &ctx); -} - -/* Delete debug info for trace and notify GDB. 
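The filename choice in lj_gdbjit_addtrace() above follows the usual Lua chunk-name convention: names beginning with '@' (loaded from a file) or '=' (special sources) have the prefix stripped, and everything else is presented as a plain string chunk. A standalone model of that decision (the helper name is illustrative):

#include <stdio.h>

// Turn a chunk name into the file name handed to GDB, as addtrace() does.
static const char *debug_filename(const char *chunkname)
{
  if (*chunkname == '@' || *chunkname == '=')
    return chunkname + 1;
  return "(string)";
}

int main(void)
{
  printf("%s\n", debug_filename("@x.lua"));    // x.lua
  printf("%s\n", debug_filename("print(1)"));  // (string)
  return 0;
}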
*/ -void lj_gdbjit_deltrace(jit_State *J, GCtrace *T) -{ - GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry; - if (eo) { - if (eo->entry.prev_entry) - eo->entry.prev_entry->next_entry = eo->entry.next_entry; - else - __jit_debug_descriptor.first_entry = eo->entry.next_entry; - if (eo->entry.next_entry) - eo->entry.next_entry->prev_entry = eo->entry.prev_entry; - __jit_debug_descriptor.relevant_entry = &eo->entry; - __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER; - __jit_debug_register_code(); - lj_mem_free(J2G(J), eo, eo->sz); - } -} - -#endif -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.h deleted file mode 100644 index 49c586301a8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_gdbjit.h +++ /dev/null @@ -1,22 +0,0 @@ -/* -** Client for the GDB JIT API. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_GDBJIT_H -#define _LJ_GDBJIT_H - -#include "lj_obj.h" -#include "lj_jit.h" - -#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT) - -LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T); -LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T); - -#else -#define lj_gdbjit_addtrace(J, T) UNUSED(T) -#define lj_gdbjit_deltrace(J, T) UNUSED(T) -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.c deleted file mode 100644 index 9682e05e577..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.c +++ /dev/null @@ -1,505 +0,0 @@ -/* -** SSA IR (Intermediate Representation) emitter. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_ir_c -#define LUA_CORE - -/* For pointers to libc/libm functions. */ -#include -#include - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_gc.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_cdata.h" -#include "lj_carith.h" -#endif -#include "lj_vm.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" -#include "lj_lib.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) -#define fins (&J->fold.ins) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -/* -- IR tables ----------------------------------------------------------- */ - -/* IR instruction modes. */ -LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = { -IRDEF(IRMODE) - 0 -}; - -/* IR type sizes. */ -LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = { -#define IRTSIZE(name, size) size, -IRTDEF(IRTSIZE) -#undef IRTSIZE - 0 -}; - -/* C call info for CALL* instructions. 
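The emitir() shortcut defined above is how the rest of this file creates instructions: lj_ir_set() stashes opcode and operands in J->fold.ins, and lj_opt_fold() either folds the instruction away or emits it. A minimal sketch of its intended use, assuming the internal headers and a live jit_State; the helper name is made up, and the emitted conversion is the same one lj_ir_tonum() issues further down:

static TRef convert_to_num(jit_State *J, TRef tr)
{
  // Request a CONV instruction of type NUM; the literal operand selects the
  // int->num conversion mode.
  return emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
}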
*/ -LJ_DATADEF const CCallInfo lj_ir_callinfo[] = { -#define IRCALLCI(cond, name, nargs, kind, type, flags) \ - { (ASMFunction)IRCALLCOND_##cond(name), \ - (nargs)|(CCI_CALL_##kind)|(IRT_##type<irbuf + J->irbotlim; - MSize szins = J->irtoplim - J->irbotlim; - if (szins) { - baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns), - 2*szins*sizeof(IRIns)); - J->irtoplim = J->irbotlim + 2*szins; - } else { - baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns)); - J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4; - J->irtoplim = J->irbotlim + LJ_MIN_IRSZ; - } - J->cur.ir = J->irbuf = baseir - J->irbotlim; -} - -/* Grow IR buffer at the bottom or shift it up. */ -static void lj_ir_growbot(jit_State *J) -{ - IRIns *baseir = J->irbuf + J->irbotlim; - MSize szins = J->irtoplim - J->irbotlim; - lua_assert(szins != 0); - lua_assert(J->cur.nk == J->irbotlim); - if (J->cur.nins + (szins >> 1) < J->irtoplim) { - /* More than half of the buffer is free on top: shift up by a quarter. */ - MSize ofs = szins >> 2; - memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns)); - J->irbotlim -= ofs; - J->irtoplim -= ofs; - J->cur.ir = J->irbuf = baseir - J->irbotlim; - } else { - /* Double the buffer size, but split the growth amongst top/bottom. */ - IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns); - MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */ - memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns)); - lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns)); - J->irbotlim -= ofs; - J->irtoplim = J->irbotlim + 2*szins; - J->cur.ir = J->irbuf = newbase - J->irbotlim; - } -} - -/* Emit IR without any optimizations. */ -TRef LJ_FASTCALL lj_ir_emit(jit_State *J) -{ - IRRef ref = lj_ir_nextins(J); - IRIns *ir = IR(ref); - IROp op = fins->o; - ir->prev = J->chain[op]; - J->chain[op] = (IRRef1)ref; - ir->o = op; - ir->op1 = fins->op1; - ir->op2 = fins->op2; - J->guardemit.irt |= fins->t.irt; - return TREF(ref, irt_t((ir->t = fins->t))); -} - -/* Emit call to a C function. */ -TRef lj_ir_call(jit_State *J, IRCallID id, ...) -{ - const CCallInfo *ci = &lj_ir_callinfo[id]; - uint32_t n = CCI_NARGS(ci); - TRef tr = TREF_NIL; - va_list argp; - va_start(argp, id); - if ((ci->flags & CCI_L)) n--; - if (n > 0) - tr = va_arg(argp, IRRef); - while (n-- > 1) - tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef)); - va_end(argp); - if (CCI_OP(ci) == IR_CALLS) - J->needsnap = 1; /* Need snapshot after call with side effect. */ - return emitir(CCI_OPTYPE(ci), tr, id); -} - -/* -- Interning of constants ---------------------------------------------- */ - -/* -** IR instructions for constants are kept between J->cur.nk >= ref < REF_BIAS. -** They are chained like all other instructions, but grow downwards. -** The are interned (like strings in the VM) to facilitate reference -** comparisons. The same constant must get the same reference. -*/ - -/* Get ref of next IR constant and optionally grow IR. -** Note: this may invalidate all IRIns *! -*/ -static LJ_AINLINE IRRef ir_nextk(jit_State *J) -{ - IRRef ref = J->cur.nk; - if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J); - J->cur.nk = --ref; - return ref; -} - -/* Intern int32_t constant. 
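Because constants are interned, as the comment above explains, requesting the same constant twice must hand back the same reference. A sketch of that property, assuming the internal headers and a live jit_State; the helper name is made up:

static void demo_interning(jit_State *J)
{
  TRef a = lj_ir_kint(J, 42);
  TRef b = lj_ir_kint(J, 42);   // found in the IR_KINT chain, not added again
  lua_assert(a == b);           // same reference and type, hence equal TRefs
}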
*/ -TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k) -{ - IRIns *ir, *cir = J->cur.ir; - IRRef ref; - for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev) - if (cir[ref].i == k) - goto found; - ref = ir_nextk(J); - ir = IR(ref); - ir->i = k; - ir->t.irt = IRT_INT; - ir->o = IR_KINT; - ir->prev = J->chain[IR_KINT]; - J->chain[IR_KINT] = (IRRef1)ref; -found: - return TREF(ref, IRT_INT); -} - -/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the -** 64 bit constant. The constants themselves are stored in a chained array -** and shared across traces. -** -** Rationale for choosing this data structure: -** - The address of the constants is embedded in the generated machine code -** and must never move. A resizable array or hash table wouldn't work. -** - Most apps need very few non-32 bit integer constants (less than a dozen). -** - Linear search is hard to beat in terms of speed and low complexity. -*/ -typedef struct K64Array { - MRef next; /* Pointer to next list. */ - MSize numk; /* Number of used elements in this array. */ - TValue k[LJ_MIN_K64SZ]; /* Array of constants. */ -} K64Array; - -/* Free all chained arrays. */ -void lj_ir_k64_freeall(jit_State *J) -{ - K64Array *k; - for (k = mref(J->k64, K64Array); k; ) { - K64Array *next = mref(k->next, K64Array); - lj_mem_free(J2G(J), k, sizeof(K64Array)); - k = next; - } -} - -/* Find 64 bit constant in chained array or add it. */ -cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64) -{ - K64Array *k, *kp = NULL; - TValue *ntv; - MSize idx; - /* Search for the constant in the whole chain of arrays. */ - for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) { - kp = k; /* Remember previous element in list. */ - for (idx = 0; idx < k->numk; idx++) { /* Search one array. */ - TValue *tv = &k->k[idx]; - if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */ - return tv; - } - } - /* Constant was not found, need to add it. */ - if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */ - K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array); - setmref(kn->next, NULL); - kn->numk = 0; - if (kp) - setmref(kp->next, kn); /* Chain to the end of the list. */ - else - setmref(J->k64, kn); /* Link first array. */ - kp = kn; - } - ntv = &kp->k[kp->numk++]; /* Add to current array. */ - ntv->u64 = u64; - return ntv; -} - -/* Intern 64 bit constant, given by its address. */ -TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv) -{ - IRIns *ir, *cir = J->cur.ir; - IRRef ref; - IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64; - for (ref = J->chain[op]; ref; ref = cir[ref].prev) - if (ir_k64(&cir[ref]) == tv) - goto found; - ref = ir_nextk(J); - ir = IR(ref); - lua_assert(checkptrGC(tv)); - setmref(ir->ptr, tv); - ir->t.irt = t; - ir->o = op; - ir->prev = J->chain[op]; - J->chain[op] = (IRRef1)ref; -found: - return TREF(ref, t); -} - -/* Intern FP constant, given by its 64 bit pattern. */ -TRef lj_ir_knum_u64(jit_State *J, uint64_t u64) -{ - return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64)); -} - -/* Intern 64 bit integer constant. */ -TRef lj_ir_kint64(jit_State *J, uint64_t u64) -{ - return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64)); -} - -/* Check whether a number is int and return it. -0 is NOT considered an int. */ -static int numistrueint(lua_Number n, int32_t *kp) -{ - int32_t k = lj_num2int(n); - if (n == (lua_Number)k) { - if (kp) *kp = k; - if (k == 0) { /* Special check for -0. 
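The extra test below exists because -0.0 compares equal to 0.0, so the n == (lua_Number)k round-trip alone cannot tell them apart; only the sign bit in the high word can, and folding -0 into the integer 0 would be observable. A standalone demonstration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
  double zero = 0.0, negzero = -0.0;
  uint64_t a, b;
  memcpy(&a, &zero, 8);
  memcpy(&b, &negzero, 8);
  // Equal as numbers, different as bit patterns: only the sign bit differs.
  printf("equal: %d\n", zero == negzero);            // equal: 1
  printf("hi words: %08x %08x\n",
         (unsigned)(a >> 32), (unsigned)(b >> 32));  // 00000000 80000000
  // Collapsing -0 to the integer 0 would be observable, e.g. through division.
  printf("%g %g\n", 1.0 / zero, 1.0 / negzero);      // inf -inf
  return 0;
}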
*/ - TValue tv; - setnumV(&tv, n); - if (tv.u32.hi != 0) - return 0; - } - return 1; - } - return 0; -} - -/* Intern number as int32_t constant if possible, otherwise as FP constant. */ -TRef lj_ir_knumint(jit_State *J, lua_Number n) -{ - int32_t k; - if (numistrueint(n, &k)) - return lj_ir_kint(J, k); - else - return lj_ir_knum(J, n); -} - -/* Intern GC object "constant". */ -TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t) -{ - IRIns *ir, *cir = J->cur.ir; - IRRef ref; - lua_assert(!LJ_GC64); /* TODO_GC64: major changes required. */ - lua_assert(!isdead(J2G(J), o)); - for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) - if (ir_kgc(&cir[ref]) == o) - goto found; - ref = ir_nextk(J); - ir = IR(ref); - /* NOBARRIER: Current trace is a GC root. */ - setgcref(ir->gcr, o); - ir->t.irt = (uint8_t)t; - ir->o = IR_KGC; - ir->prev = J->chain[IR_KGC]; - J->chain[IR_KGC] = (IRRef1)ref; -found: - return TREF(ref, t); -} - -/* Intern 32 bit pointer constant. */ -TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr) -{ - IRIns *ir, *cir = J->cur.ir; - IRRef ref; - lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr); - for (ref = J->chain[op]; ref; ref = cir[ref].prev) - if (mref(cir[ref].ptr, void) == ptr) - goto found; - ref = ir_nextk(J); - ir = IR(ref); - setmref(ir->ptr, ptr); - ir->t.irt = IRT_P32; - ir->o = op; - ir->prev = J->chain[op]; - J->chain[op] = (IRRef1)ref; -found: - return TREF(ref, IRT_P32); -} - -/* Intern typed NULL constant. */ -TRef lj_ir_knull(jit_State *J, IRType t) -{ - IRIns *ir, *cir = J->cur.ir; - IRRef ref; - for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev) - if (irt_t(cir[ref].t) == t) - goto found; - ref = ir_nextk(J); - ir = IR(ref); - ir->i = 0; - ir->t.irt = (uint8_t)t; - ir->o = IR_KNULL; - ir->prev = J->chain[IR_KNULL]; - J->chain[IR_KNULL] = (IRRef1)ref; -found: - return TREF(ref, t); -} - -/* Intern key slot. */ -TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot) -{ - IRIns *ir, *cir = J->cur.ir; - IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot); - IRRef ref; - /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */ - lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot); - for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev) - if (cir[ref].op12 == op12) - goto found; - ref = ir_nextk(J); - ir = IR(ref); - ir->op12 = op12; - ir->t.irt = IRT_P32; - ir->o = IR_KSLOT; - ir->prev = J->chain[IR_KSLOT]; - J->chain[IR_KSLOT] = (IRRef1)ref; -found: - return TREF(ref, IRT_P32); -} - -/* -- Access to IR constants ---------------------------------------------- */ - -/* Copy value of IR constant. */ -void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) -{ - UNUSED(L); - lua_assert(ir->o != IR_KSLOT); /* Common mistake. */ - switch (ir->o) { - case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break; - case IR_KINT: setintV(tv, ir->i); break; - case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break; - case IR_KPTR: case IR_KKPTR: case IR_KNULL: - setlightudV(tv, mref(ir->ptr, void)); - break; - case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break; -#if LJ_HASFFI - case IR_KINT64: { - GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8); - *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64; - setcdataV(L, tv, cd); - break; - } -#endif - default: lua_assert(0); break; - } -} - -/* -- Convert IR operand types -------------------------------------------- */ - -/* Convert from string to number. 
*/ -TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr) -{ - if (!tref_isnumber(tr)) { - if (tref_isstr(tr)) - tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); - else - lj_trace_err(J, LJ_TRERR_BADTYPE); - } - return tr; -} - -/* Convert from integer or string to number. */ -TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr) -{ - if (!tref_isnum(tr)) { - if (tref_isinteger(tr)) - tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); - else if (tref_isstr(tr)) - tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); - else - lj_trace_err(J, LJ_TRERR_BADTYPE); - } - return tr; -} - -/* Convert from integer or number to string. */ -TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr) -{ - if (!tref_isstr(tr)) { - if (!tref_isnumber(tr)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, - tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT); - } - return tr; -} - -/* -- Miscellaneous IR ops ------------------------------------------------ */ - -/* Evaluate numeric comparison. */ -int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op) -{ - switch (op) { - case IR_EQ: return (a == b); - case IR_NE: return (a != b); - case IR_LT: return (a < b); - case IR_GE: return (a >= b); - case IR_LE: return (a <= b); - case IR_GT: return (a > b); - case IR_ULT: return !(a >= b); - case IR_UGE: return !(a < b); - case IR_ULE: return !(a > b); - case IR_UGT: return !(a <= b); - default: lua_assert(0); return 0; - } -} - -/* Evaluate string comparison. */ -int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op) -{ - int res = lj_str_cmp(a, b); - switch (op) { - case IR_LT: return (res < 0); - case IR_GE: return (res >= 0); - case IR_LE: return (res <= 0); - case IR_GT: return (res > 0); - default: lua_assert(0); return 0; - } -} - -/* Rollback IR to previous state. */ -void lj_ir_rollback(jit_State *J, IRRef ref) -{ - IRRef nins = J->cur.nins; - while (nins > ref) { - IRIns *ir; - nins--; - ir = IR(nins); - J->chain[ir->o] = ir->prev; - } - J->cur.nins = nins; -} - -#undef IR -#undef fins -#undef emitir - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.h deleted file mode 100644 index 56e19774c97..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ir.h +++ /dev/null @@ -1,577 +0,0 @@ -/* -** SSA IR (Intermediate Representation) format. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_IR_H -#define _LJ_IR_H - -#include "lj_obj.h" - -/* -- IR instructions ----------------------------------------------------- */ - -/* IR instruction definition. Order matters, see below. ORDER IR */ -#define IRDEF(_) \ - /* Guarded assertions. */ \ - /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \ - _(LT, N , ref, ref) \ - _(GE, N , ref, ref) \ - _(LE, N , ref, ref) \ - _(GT, N , ref, ref) \ - \ - _(ULT, N , ref, ref) \ - _(UGE, N , ref, ref) \ - _(ULE, N , ref, ref) \ - _(UGT, N , ref, ref) \ - \ - _(EQ, C , ref, ref) \ - _(NE, C , ref, ref) \ - \ - _(ABC, N , ref, ref) \ - _(RETF, S , ref, ref) \ - \ - /* Miscellaneous ops. */ \ - _(NOP, N , ___, ___) \ - _(BASE, N , lit, lit) \ - _(PVAL, N , lit, ___) \ - _(GCSTEP, S , ___, ___) \ - _(HIOP, S , ref, ref) \ - _(LOOP, S , ___, ___) \ - _(USE, S , ref, ___) \ - _(PHI, S , ref, ref) \ - _(RENAME, S , ref, lit) \ - _(PROF, S , ___, ___) \ - \ - /* Constants. 
*/ \ - _(KPRI, N , ___, ___) \ - _(KINT, N , cst, ___) \ - _(KGC, N , cst, ___) \ - _(KPTR, N , cst, ___) \ - _(KKPTR, N , cst, ___) \ - _(KNULL, N , cst, ___) \ - _(KNUM, N , cst, ___) \ - _(KINT64, N , cst, ___) \ - _(KSLOT, N , ref, lit) \ - \ - /* Bit ops. */ \ - _(BNOT, N , ref, ___) \ - _(BSWAP, N , ref, ___) \ - _(BAND, C , ref, ref) \ - _(BOR, C , ref, ref) \ - _(BXOR, C , ref, ref) \ - _(BSHL, N , ref, ref) \ - _(BSHR, N , ref, ref) \ - _(BSAR, N , ref, ref) \ - _(BROL, N , ref, ref) \ - _(BROR, N , ref, ref) \ - \ - /* Arithmetic ops. ORDER ARITH */ \ - _(ADD, C , ref, ref) \ - _(SUB, N , ref, ref) \ - _(MUL, C , ref, ref) \ - _(DIV, N , ref, ref) \ - _(MOD, N , ref, ref) \ - _(POW, N , ref, ref) \ - _(NEG, N , ref, ref) \ - \ - _(ABS, N , ref, ref) \ - _(ATAN2, N , ref, ref) \ - _(LDEXP, N , ref, ref) \ - _(MIN, C , ref, ref) \ - _(MAX, C , ref, ref) \ - _(FPMATH, N , ref, lit) \ - \ - /* Overflow-checking arithmetic ops. */ \ - _(ADDOV, CW, ref, ref) \ - _(SUBOV, NW, ref, ref) \ - _(MULOV, CW, ref, ref) \ - \ - /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \ - \ - /* Memory references. */ \ - _(AREF, R , ref, ref) \ - _(HREFK, R , ref, ref) \ - _(HREF, L , ref, ref) \ - _(NEWREF, S , ref, ref) \ - _(UREFO, LW, ref, lit) \ - _(UREFC, LW, ref, lit) \ - _(FREF, R , ref, lit) \ - _(STRREF, N , ref, ref) \ - _(LREF, L , ___, ___) \ - \ - /* Loads and Stores. These must be in the same order. */ \ - _(ALOAD, L , ref, ___) \ - _(HLOAD, L , ref, ___) \ - _(ULOAD, L , ref, ___) \ - _(FLOAD, L , ref, lit) \ - _(XLOAD, L , ref, lit) \ - _(SLOAD, L , lit, lit) \ - _(VLOAD, L , ref, ___) \ - \ - _(ASTORE, S , ref, ref) \ - _(HSTORE, S , ref, ref) \ - _(USTORE, S , ref, ref) \ - _(FSTORE, S , ref, ref) \ - _(XSTORE, S , ref, ref) \ - \ - /* Allocations. */ \ - _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \ - _(XSNEW, A , ref, ref) \ - _(TNEW, AW, lit, lit) \ - _(TDUP, AW, ref, ___) \ - _(CNEW, AW, ref, ref) \ - _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \ - \ - /* Buffer operations. */ \ - _(BUFHDR, L , ref, lit) \ - _(BUFPUT, L , ref, ref) \ - _(BUFSTR, A , ref, ref) \ - \ - /* Barriers. */ \ - _(TBAR, S , ref, ___) \ - _(OBAR, S , ref, ref) \ - _(XBAR, S , ___, ___) \ - \ - /* Type conversions. */ \ - _(CONV, NW, ref, lit) \ - _(TOBIT, N , ref, ref) \ - _(TOSTR, N , ref, lit) \ - _(STRTO, N , ref, ___) \ - \ - /* Calls. */ \ - _(CALLN, N , ref, lit) \ - _(CALLA, A , ref, lit) \ - _(CALLL, L , ref, lit) \ - _(CALLS, S , ref, lit) \ - _(CALLXS, S , ref, ref) \ - _(CARG, N , ref, ref) \ - \ - /* End of list. */ - -/* IR opcodes (max. 256). */ -typedef enum { -#define IRENUM(name, m, m1, m2) IR_##name, -IRDEF(IRENUM) -#undef IRENUM - IR__MAX -} IROp; - -/* Stored opcode. */ -typedef uint8_t IROp1; - -LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE); -LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE); -LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT); -LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT); -LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT); - -/* Delta between xLOAD and xSTORE. */ -#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD) - -LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE); -LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE); -LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE); -LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE); - -/* -- Named IR literals --------------------------------------------------- */ - -/* FPMATH sub-functions. ORDER FPM. 
*/ -#define IRFPMDEF(_) \ - _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \ - _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \ - _(SIN) _(COS) _(TAN) \ - _(OTHER) - -typedef enum { -#define FPMENUM(name) IRFPM_##name, -IRFPMDEF(FPMENUM) -#undef FPMENUM - IRFPM__MAX -} IRFPMathOp; - -/* FLOAD fields. */ -#define IRFLDEF(_) \ - _(STR_LEN, offsetof(GCstr, len)) \ - _(FUNC_ENV, offsetof(GCfunc, l.env)) \ - _(FUNC_PC, offsetof(GCfunc, l.pc)) \ - _(FUNC_FFID, offsetof(GCfunc, l.ffid)) \ - _(THREAD_ENV, offsetof(lua_State, env)) \ - _(TAB_META, offsetof(GCtab, metatable)) \ - _(TAB_ARRAY, offsetof(GCtab, array)) \ - _(TAB_NODE, offsetof(GCtab, node)) \ - _(TAB_ASIZE, offsetof(GCtab, asize)) \ - _(TAB_HMASK, offsetof(GCtab, hmask)) \ - _(TAB_NOMM, offsetof(GCtab, nomm)) \ - _(UDATA_META, offsetof(GCudata, metatable)) \ - _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \ - _(UDATA_FILE, sizeof(GCudata)) \ - _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \ - _(CDATA_PTR, sizeof(GCcdata)) \ - _(CDATA_INT, sizeof(GCcdata)) \ - _(CDATA_INT64, sizeof(GCcdata)) \ - _(CDATA_INT64_4, sizeof(GCcdata) + 4) - -typedef enum { -#define FLENUM(name, ofs) IRFL_##name, -IRFLDEF(FLENUM) -#undef FLENUM - IRFL__MAX -} IRFieldID; - -/* SLOAD mode bits, stored in op2. */ -#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */ -#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */ -#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */ -#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */ -#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */ -#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */ - -/* XLOAD mode, stored in op2. */ -#define IRXLOAD_READONLY 1 /* Load from read-only data. */ -#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */ -#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */ - -/* BUFHDR mode, stored in op2. */ -#define IRBUFHDR_RESET 0 /* Reset buffer. */ -#define IRBUFHDR_APPEND 1 /* Append to buffer. */ - -/* CONV mode, stored in op2. */ -#define IRCONV_SRCMASK 0x001f /* Source IRType. */ -#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */ -#define IRCONV_DSH 5 -#define IRCONV_NUM_INT ((IRT_NUM<>2)&3)) -#define irm_iscomm(m) ((m) & IRM_C) -#define irm_kind(m) ((m) & IRM_S) - -#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W), - -LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1]; - -/* -- IR instruction types ------------------------------------------------ */ - -/* Map of itypes to non-negative numbers. ORDER LJ_T. -** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for -** IRT_P32 and IRT_P64, which never escape the IR. -** The various integers are only used in the IR and can only escape to -** a TValue after implicit or explicit conversion. Their types must be -** contiguous and next to IRT_NUM (see the typerange macros below). -*/ -#define IRTDEF(_) \ - _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \ - _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \ - _(TAB, 4) _(UDATA, 4) \ - _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \ - _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \ - _(SOFTFP, 4) /* There is room for 9 more types. */ - -/* IR result type and flags (8 bit). */ -typedef enum { -#define IRTENUM(name, size) IRT_##name, -IRTDEF(IRTENUM) -#undef IRTENUM - IRT__MAX, - - /* Native pointer type and the corresponding integer type. */ - IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32, - IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT, - IRT_UINTP = LJ_64 ? 
IRT_U64 : IRT_U32, - /* TODO_GC64: major changes required for all uses of IRT_P32. */ - - /* Additional flags. */ - IRT_MARK = 0x20, /* Marker for misc. purposes. */ - IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */ - IRT_GUARD = 0x80, /* Instruction is a guard. */ - - /* Masks. */ - IRT_TYPE = 0x1f, - IRT_T = 0xff -} IRType; - -#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE) - -/* Stored IRType. */ -typedef struct IRType1 { uint8_t irt; } IRType1; - -#define IRT(o, t) ((uint32_t)(((o)<<8) | (t))) -#define IRTI(o) (IRT((o), IRT_INT)) -#define IRTN(o) (IRT((o), IRT_NUM)) -#define IRTG(o, t) (IRT((o), IRT_GUARD|(t))) -#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT)) - -#define irt_t(t) ((IRType)(t).irt) -#define irt_type(t) ((IRType)((t).irt & IRT_TYPE)) -#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0) -#define irt_typerange(t, first, last) \ - ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first)) - -#define irt_isnil(t) (irt_type(t) == IRT_NIL) -#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE) -#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD) -#define irt_isstr(t) (irt_type(t) == IRT_STR) -#define irt_istab(t) (irt_type(t) == IRT_TAB) -#define irt_iscdata(t) (irt_type(t) == IRT_CDATA) -#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT) -#define irt_isnum(t) (irt_type(t) == IRT_NUM) -#define irt_isint(t) (irt_type(t) == IRT_INT) -#define irt_isi8(t) (irt_type(t) == IRT_I8) -#define irt_isu8(t) (irt_type(t) == IRT_U8) -#define irt_isi16(t) (irt_type(t) == IRT_I16) -#define irt_isu16(t) (irt_type(t) == IRT_U16) -#define irt_isu32(t) (irt_type(t) == IRT_U32) -#define irt_isi64(t) (irt_type(t) == IRT_I64) -#define irt_isu64(t) (irt_type(t) == IRT_U64) - -#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t)) -#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT)) -#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA)) -#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA)) -#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64)) - -#if LJ_GC64 -#define IRT_IS64 \ - ((1u<> irt_type(t)) & 1) -#define irt_is64orfp(t) (((IRT_IS64|(1u<>irt_type(t)) & 1) - -#define irt_size(t) (lj_ir_type_size[irt_t((t))]) - -LJ_DATA const uint8_t lj_ir_type_size[]; - -static LJ_AINLINE IRType itype2irt(const TValue *tv) -{ - if (tvisint(tv)) - return IRT_INT; - else if (tvisnum(tv)) - return IRT_NUM; -#if LJ_64 && !LJ_GC64 - else if (tvislightud(tv)) - return IRT_LIGHTUD; -#endif - else - return (IRType)~itype(tv); -} - -static LJ_AINLINE uint32_t irt_toitype_(IRType t) -{ - lua_assert(!LJ_64 || t != IRT_LIGHTUD); - if (LJ_DUALNUM && t > IRT_NUM) { - return LJ_TISNUM; - } else { - lua_assert(t <= IRT_NUM); - return ~(uint32_t)t; - } -} - -#define irt_toitype(t) irt_toitype_(irt_type((t))) - -#define irt_isguard(t) ((t).irt & IRT_GUARD) -#define irt_ismarked(t) ((t).irt & IRT_MARK) -#define irt_setmark(t) ((t).irt |= IRT_MARK) -#define irt_clearmark(t) ((t).irt &= ~IRT_MARK) -#define irt_isphi(t) ((t).irt & IRT_ISPHI) -#define irt_setphi(t) ((t).irt |= IRT_ISPHI) -#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI) - -/* Stored combined IR opcode and type. */ -typedef uint16_t IROpT; - -/* -- IR references ------------------------------------------------------- */ - -/* IR references. */ -typedef uint16_t IRRef1; /* One stored reference. */ -typedef uint32_t IRRef2; /* Two stored references. */ -typedef uint32_t IRRef; /* Used to pass around references. */ - -/* Fixed references. 
*/ -enum { - REF_BIAS = 0x8000, - REF_TRUE = REF_BIAS-3, - REF_FALSE = REF_BIAS-2, - REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */ - REF_BASE = REF_BIAS, /* /--- IR grows upwards. */ - REF_FIRST = REF_BIAS+1, - REF_DROP = 0xffff -}; - -/* Note: IRMlit operands must be < REF_BIAS, too! -** This allows for fast and uniform manipulation of all operands -** without looking up the operand mode in lj_ir_mode: -** - CSE calculates the maximum reference of two operands. -** This must work with mixed reference/literal operands, too. -** - DCE marking only checks for operand >= REF_BIAS. -** - LOOP needs to substitute reference operands. -** Constant references and literals must not be modified. -*/ - -#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16)) - -#define irref_isk(ref) ((ref) < REF_BIAS) - -/* Tagged IR references (32 bit). -** -** +-------+-------+---------------+ -** | irt | flags | ref | -** +-------+-------+---------------+ -** -** The tag holds a copy of the IRType and speeds up IR type checks. -*/ -typedef uint32_t TRef; - -#define TREF_REFMASK 0x0000ffff -#define TREF_FRAME 0x00010000 -#define TREF_CONT 0x00020000 - -#define TREF(ref, t) ((TRef)((ref) + ((t)<<24))) - -#define tref_ref(tr) ((IRRef1)(tr)) -#define tref_t(tr) ((IRType)((tr)>>24)) -#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE)) -#define tref_typerange(tr, first, last) \ - ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first)) - -#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24)) -#define tref_isnil(tr) (tref_istype((tr), IRT_NIL)) -#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE)) -#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE)) -#define tref_islightud(tr) (tref_istype((tr), IRT_LIGHTUD)) -#define tref_isstr(tr) (tref_istype((tr), IRT_STR)) -#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC)) -#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA)) -#define tref_istab(tr) (tref_istype((tr), IRT_TAB)) -#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA)) -#define tref_isnum(tr) (tref_istype((tr), IRT_NUM)) -#define tref_isint(tr) (tref_istype((tr), IRT_INT)) - -#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE)) -#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE)) -#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE)) -#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT)) -#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT)) -#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr))) -#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA)) - -#define tref_isk(tr) (irref_isk(tref_ref((tr)))) -#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2)))) - -#define TREF_PRI(t) (TREF(REF_NIL-(t), (t))) -#define TREF_NIL (TREF_PRI(IRT_NIL)) -#define TREF_FALSE (TREF_PRI(IRT_FALSE)) -#define TREF_TRUE (TREF_PRI(IRT_TRUE)) - -/* -- IR format ----------------------------------------------------------- */ - -/* IR instruction format (64 bit). -** -** 16 16 8 8 8 8 -** +-------+-------+---+---+---+---+ -** | op1 | op2 | t | o | r | s | -** +-------+-------+---+---+---+---+ -** | op12/i/gco | ot | prev | (alternative fields in union) -** +---------------+-------+-------+ -** 32 16 16 -** -** prev is only valid prior to register allocation and then reused for r + s. -*/ - -typedef union IRIns { - struct { - LJ_ENDIAN_LOHI( - IRRef1 op1; /* IR operand 1. */ - , IRRef1 op2; /* IR operand 2. */ - ) - IROpT ot; /* IR opcode and type (overlaps t and o). 
*/ - IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */ - }; - struct { - IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */ - LJ_ENDIAN_LOHI( - IRType1 t; /* IR type. */ - , IROp1 o; /* IR opcode. */ - ) - LJ_ENDIAN_LOHI( - uint8_t r; /* Register allocation (overlaps prev). */ - , uint8_t s; /* Spill slot allocation (overlaps prev). */ - ) - }; - int32_t i; /* 32 bit signed integer literal (overlaps op12). */ - GCRef gcr; /* GCobj constant (overlaps op12). */ - MRef ptr; /* Pointer constant (overlaps op12). */ -} IRIns; - -/* TODO_GC64: major changes required. */ -#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr)) -#define ir_kstr(ir) (gco2str(ir_kgc((ir)))) -#define ir_ktab(ir) (gco2tab(ir_kgc((ir)))) -#define ir_kfunc(ir) (gco2func(ir_kgc((ir)))) -#define ir_kcdata(ir) (gco2cd(ir_kgc((ir)))) -#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue)) -#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) -#define ir_k64(ir) \ - check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) -#define ir_kptr(ir) \ - check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void)) - -/* A store or any other op with a non-weak guard has a side-effect. */ -static LJ_AINLINE int ir_sideeff(IRIns *ir) -{ - return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S); -} - -LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ircall.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ircall.h deleted file mode 100644 index 84e41ecfccb..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_ircall.h +++ /dev/null @@ -1,321 +0,0 @@ -/* -** IR CALL* instruction definitions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_IRCALL_H -#define _LJ_IRCALL_H - -#include "lj_obj.h" -#include "lj_ir.h" -#include "lj_jit.h" - -/* C call info for CALL* instructions. */ -typedef struct CCallInfo { - ASMFunction func; /* Function pointer. */ - uint32_t flags; /* Number of arguments and flags. */ -} CCallInfo; - -#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* # of args. */ -#define CCI_NARGS_MAX 32 /* Max. # of args. */ - -#define CCI_OTSHIFT 16 -#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */ -#define CCI_OPSHIFT 24 -#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */ - -#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT) -#define CCI_CALL_A (IR_CALLA << CCI_OPSHIFT) -#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT) -#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT) -#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL) -#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL) -#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL) - -/* C call info flags. */ -#define CCI_L 0x0100 /* Implicit L arg. */ -#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */ -#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */ -#define CCI_VARARG 0x0800 /* Vararg function. */ - -#define CCI_CC_MASK 0x3000 /* Calling convention mask. */ -#define CCI_CC_SHIFT 12 -/* ORDER CC */ -#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */ -#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */ -#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */ -#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */ - -/* Extra args for SOFTFP, SPLIT 64 bit. 
*/ -#define CCI_XARGS_SHIFT 14 -#define CCI_XARGS(ci) (((ci)->flags >> CCI_XARGS_SHIFT) & 3) -#define CCI_XA (1u << CCI_XARGS_SHIFT) - -#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) -#define CCI_XNARGS(ci) (CCI_NARGS((ci)) + CCI_XARGS((ci))) -#else -#define CCI_XNARGS(ci) CCI_NARGS((ci)) -#endif - -/* Helpers for conditional function definitions. */ -#define IRCALLCOND_ANY(x) x - -#if LJ_TARGET_X86ORX64 -#define IRCALLCOND_FPMATH(x) NULL -#else -#define IRCALLCOND_FPMATH(x) x -#endif - -#if LJ_SOFTFP -#define IRCALLCOND_SOFTFP(x) x -#if LJ_HASFFI -#define IRCALLCOND_SOFTFP_FFI(x) x -#else -#define IRCALLCOND_SOFTFP_FFI(x) NULL -#endif -#else -#define IRCALLCOND_SOFTFP(x) NULL -#define IRCALLCOND_SOFTFP_FFI(x) NULL -#endif - -#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS) - -#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64) -#define IRCALLCOND_FP64_FFI(x) x -#else -#define IRCALLCOND_FP64_FFI(x) NULL -#endif - -#if LJ_HASFFI -#define IRCALLCOND_FFI(x) x -#if LJ_32 -#define IRCALLCOND_FFI32(x) x -#else -#define IRCALLCOND_FFI32(x) NULL -#endif -#else -#define IRCALLCOND_FFI(x) NULL -#define IRCALLCOND_FFI32(x) NULL -#endif - -#if LJ_TARGET_X86 -#define CCI_RANDFPR 0 /* Clang on OSX/x86 is overzealous. */ -#else -#define CCI_RANDFPR CCI_NOFPRCLOBBER -#endif - -#if LJ_SOFTFP -#define XA_FP CCI_XA -#define XA2_FP (CCI_XA+CCI_XA) -#else -#define XA_FP 0 -#define XA2_FP 0 -#endif - -#if LJ_32 -#define XA_64 CCI_XA -#define XA2_64 (CCI_XA+CCI_XA) -#else -#define XA_64 0 -#define XA2_64 0 -#endif - -/* Function definitions for CALL* instructions. */ -#define IRCALLDEF(_) \ - _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \ - _(ANY, lj_str_find, 4, N, P32, 0) \ - _(ANY, lj_str_new, 3, S, STR, CCI_L) \ - _(ANY, lj_strscan_num, 2, FN, INT, 0) \ - _(ANY, lj_strfmt_int, 2, FN, STR, CCI_L) \ - _(ANY, lj_strfmt_num, 2, FN, STR, CCI_L) \ - _(ANY, lj_strfmt_char, 2, FN, STR, CCI_L) \ - _(ANY, lj_strfmt_putint, 2, FL, P32, 0) \ - _(ANY, lj_strfmt_putnum, 2, FL, P32, 0) \ - _(ANY, lj_strfmt_putquoted, 2, FL, P32, 0) \ - _(ANY, lj_strfmt_putfxint, 3, L, P32, XA_64) \ - _(ANY, lj_strfmt_putfnum_int, 3, L, P32, XA_FP) \ - _(ANY, lj_strfmt_putfnum_uint, 3, L, P32, XA_FP) \ - _(ANY, lj_strfmt_putfnum, 3, L, P32, XA_FP) \ - _(ANY, lj_strfmt_putfstr, 3, L, P32, 0) \ - _(ANY, lj_strfmt_putfchar, 3, L, P32, 0) \ - _(ANY, lj_buf_putmem, 3, S, P32, 0) \ - _(ANY, lj_buf_putstr, 2, FL, P32, 0) \ - _(ANY, lj_buf_putchar, 2, FL, P32, 0) \ - _(ANY, lj_buf_putstr_reverse, 2, FL, P32, 0) \ - _(ANY, lj_buf_putstr_lower, 2, FL, P32, 0) \ - _(ANY, lj_buf_putstr_upper, 2, FL, P32, 0) \ - _(ANY, lj_buf_putstr_rep, 3, L, P32, 0) \ - _(ANY, lj_buf_puttab, 5, L, P32, 0) \ - _(ANY, lj_buf_tostr, 1, FL, STR, 0) \ - _(ANY, lj_tab_new_ah, 3, A, TAB, CCI_L) \ - _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \ - _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \ - _(ANY, lj_tab_clear, 1, FS, NIL, 0) \ - _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \ - _(ANY, lj_tab_len, 1, FL, INT, 0) \ - _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \ - _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \ - _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \ - _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_RANDFPR)\ - _(ANY, lj_vm_modi, 2, FN, INT, 0) \ - _(ANY, sinh, 1, N, NUM, XA_FP) \ - _(ANY, cosh, 1, N, NUM, XA_FP) \ - _(ANY, tanh, 1, N, NUM, XA_FP) \ - _(ANY, fputc, 2, S, INT, 0) \ - _(ANY, fwrite, 4, S, INT, 0) \ - _(ANY, fflush, 1, S, INT, 0) \ - /* ORDER FPM */ \ - _(FPMATH, lj_vm_floor, 1, N, NUM, XA_FP) \ - _(FPMATH, lj_vm_ceil, 1, N, NUM, XA_FP) \ - _(FPMATH, 
lj_vm_trunc, 1, N, NUM, XA_FP) \ - _(FPMATH, sqrt, 1, N, NUM, XA_FP) \ - _(ANY, exp, 1, N, NUM, XA_FP) \ - _(ANY, lj_vm_exp2, 1, N, NUM, XA_FP) \ - _(ANY, log, 1, N, NUM, XA_FP) \ - _(ANY, lj_vm_log2, 1, N, NUM, XA_FP) \ - _(ANY, log10, 1, N, NUM, XA_FP) \ - _(ANY, sin, 1, N, NUM, XA_FP) \ - _(ANY, cos, 1, N, NUM, XA_FP) \ - _(ANY, tan, 1, N, NUM, XA_FP) \ - _(ANY, lj_vm_powi, 2, N, NUM, XA_FP) \ - _(ANY, pow, 2, N, NUM, XA2_FP) \ - _(ANY, atan2, 2, N, NUM, XA2_FP) \ - _(ANY, ldexp, 2, N, NUM, XA_FP) \ - _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \ - _(SOFTFP, softfp_add, 4, N, NUM, 0) \ - _(SOFTFP, softfp_sub, 4, N, NUM, 0) \ - _(SOFTFP, softfp_mul, 4, N, NUM, 0) \ - _(SOFTFP, softfp_div, 4, N, NUM, 0) \ - _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \ - _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \ - _(SOFTFP, softfp_d2i, 2, N, INT, 0) \ - _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \ - _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \ - _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \ - _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \ - _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \ - _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \ - _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \ - _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \ - _(FP64_FFI, fp64_l2d, 1, N, NUM, XA_64) \ - _(FP64_FFI, fp64_ul2d, 1, N, NUM, XA_64) \ - _(FP64_FFI, fp64_l2f, 1, N, FLOAT, XA_64) \ - _(FP64_FFI, fp64_ul2f, 1, N, FLOAT, XA_64) \ - _(FP64_FFI, fp64_d2l, 1, N, I64, XA_FP) \ - _(FP64_FFI, fp64_d2ul, 1, N, U64, XA_FP) \ - _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \ - _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \ - _(FFI, lj_carith_divi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI, lj_carith_divu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI, lj_carith_modi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI, lj_carith_modu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI, lj_carith_powi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI, lj_carith_powu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI, lj_cdata_newv, 4, S, CDATA, CCI_L) \ - _(FFI, lj_cdata_setfin, 4, S, NIL, CCI_L) \ - _(FFI, strlen, 1, L, INTP, 0) \ - _(FFI, memcpy, 3, S, PTR, 0) \ - _(FFI, memset, 3, S, PTR, 0) \ - _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \ - _(FFI32, lj_carith_mul64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ - _(FFI32, lj_carith_shl64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ - _(FFI32, lj_carith_shr64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ - _(FFI32, lj_carith_sar64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ - _(FFI32, lj_carith_rol64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ - _(FFI32, lj_carith_ror64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ - \ - /* End of list. */ - -typedef enum { -#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name, -IRCALLDEF(IRCALLENUM) -#undef IRCALLENUM - IRCALL__MAX -} IRCallID; - -LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...); - -LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1]; - -/* Soft-float declarations. 
*/ -#if LJ_SOFTFP -#if LJ_TARGET_ARM -#define softfp_add __aeabi_dadd -#define softfp_sub __aeabi_dsub -#define softfp_mul __aeabi_dmul -#define softfp_div __aeabi_ddiv -#define softfp_cmp __aeabi_cdcmple -#define softfp_i2d __aeabi_i2d -#define softfp_d2i __aeabi_d2iz -#define softfp_ui2d __aeabi_ui2d -#define softfp_f2d __aeabi_f2d -#define softfp_d2ui __aeabi_d2uiz -#define softfp_d2f __aeabi_d2f -#define softfp_i2f __aeabi_i2f -#define softfp_ui2f __aeabi_ui2f -#define softfp_f2i __aeabi_f2iz -#define softfp_f2ui __aeabi_f2uiz -#define fp64_l2d __aeabi_l2d -#define fp64_ul2d __aeabi_ul2d -#define fp64_l2f __aeabi_l2f -#define fp64_ul2f __aeabi_ul2f -#if LJ_TARGET_IOS -#define fp64_d2l __fixdfdi -#define fp64_d2ul __fixunsdfdi -#define fp64_f2l __fixsfdi -#define fp64_f2ul __fixunssfdi -#else -#define fp64_d2l __aeabi_d2lz -#define fp64_d2ul __aeabi_d2ulz -#define fp64_f2l __aeabi_f2lz -#define fp64_f2ul __aeabi_f2ulz -#endif -#else -#error "Missing soft-float definitions for target architecture" -#endif -extern double softfp_add(double a, double b); -extern double softfp_sub(double a, double b); -extern double softfp_mul(double a, double b); -extern double softfp_div(double a, double b); -extern void softfp_cmp(double a, double b); -extern double softfp_i2d(int32_t a); -extern int32_t softfp_d2i(double a); -#if LJ_HASFFI -extern double softfp_ui2d(uint32_t a); -extern double softfp_f2d(float a); -extern uint32_t softfp_d2ui(double a); -extern float softfp_d2f(double a); -extern float softfp_i2f(int32_t a); -extern float softfp_ui2f(uint32_t a); -extern int32_t softfp_f2i(float a); -extern uint32_t softfp_f2ui(float a); -#endif -#endif - -#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP) -#ifdef __GNUC__ -#define fp64_l2d __floatdidf -#define fp64_ul2d __floatundidf -#define fp64_l2f __floatdisf -#define fp64_ul2f __floatundisf -#define fp64_d2l __fixdfdi -#define fp64_d2ul __fixunsdfdi -#define fp64_f2l __fixsfdi -#define fp64_f2ul __fixunssfdi -#else -#error "Missing fp64 helper definitions for this compiler" -#endif -#endif - -#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64) -extern double fp64_l2d(int64_t a); -extern double fp64_ul2d(uint64_t a); -extern float fp64_l2f(int64_t a); -extern float fp64_ul2f(uint64_t a); -extern int64_t fp64_d2l(double a); -extern uint64_t fp64_d2ul(double a); -extern int64_t fp64_f2l(float a); -extern uint64_t fp64_f2ul(float a); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_iropt.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_iropt.h deleted file mode 100644 index 4e424e70045..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_iropt.h +++ /dev/null @@ -1,161 +0,0 @@ -/* -** Common header for IR emitter and optimizations. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_IROPT_H -#define _LJ_IROPT_H - -#include - -#include "lj_obj.h" -#include "lj_jit.h" - -#if LJ_HASJIT -/* IR emitter. */ -LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J); - -/* Save current IR in J->fold.ins, but do not emit it (yet). */ -static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b) -{ - J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b; -} - -#define lj_ir_set(J, ot, a, b) \ - lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b)) - -/* Get ref of next IR instruction and optionally grow IR. -** Note: this may invalidate all IRIns*! 
-*/ -static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J) -{ - IRRef ref = J->cur.nins; - if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J); - J->cur.nins = ref + 1; - return ref; -} - -/* Interning of constants. */ -LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k); -LJ_FUNC void lj_ir_k64_freeall(jit_State *J); -LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv); -LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64); -LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64); -LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n); -LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64); -LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t); -LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr); -LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t); -LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot); - -#if LJ_64 -#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k)) -#else -#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k)) -#endif - -static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n) -{ - TValue tv; - tv.n = n; - return lj_ir_knum_u64(J, tv.u64); -} - -#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR) -#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB) -#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC) -#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr)) -#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr)) - -/* Special FP constants. */ -#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000)) -#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000)) -#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000)) - -/* Special 128 bit SIMD constants. */ -#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS)) -#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG)) - -/* Access to constants. */ -LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir); - -/* Convert IR operand types. */ -LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr); -LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr); -LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr); - -/* Miscellaneous IR ops. */ -LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op); -LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op); -LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref); - -/* Emit IR instructions with on-the-fly optimizations. */ -LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim); - -/* Special return values for the fold functions. */ -enum { - NEXTFOLD, /* Couldn't fold, pass on. */ - RETRYFOLD, /* Retry fold with modified fins. */ - KINTFOLD, /* Return ref for int constant in fins->i. */ - FAILFOLD, /* Guard would always fail. */ - DROPFOLD, /* Guard eliminated. */ - MAX_FOLD -}; - -#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD) -#define INT64FOLD(k) (lj_ir_kint64(J, (k))) -#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond)) -#define LEFTFOLD (J->fold.ins.op1) -#define RIGHTFOLD (J->fold.ins.op2) -#define CSEFOLD (lj_opt_cse(J)) -#define EMITFOLD (lj_ir_emit(J)) - -/* Load/store forwarding. 
*/ -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J); -LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J); -LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim); -LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref); - -/* Dead-store elimination. */ -LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J); - -/* Narrowing. */ -LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J); -LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key); -LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr); -LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr); -#if LJ_HASFFI -LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key); -#endif -LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc, - TValue *vb, TValue *vc, IROp op); -LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc); -LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc); -LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc); -LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase); - -/* Optimization passes. */ -LJ_FUNC void lj_opt_dce(jit_State *J); -LJ_FUNC int lj_opt_loop(jit_State *J); -#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) -LJ_FUNC void lj_opt_split(jit_State *J); -#else -#define lj_opt_split(J) UNUSED(J) -#endif -LJ_FUNC void lj_opt_sink(jit_State *J); - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_jit.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_jit.h deleted file mode 100644 index 1df56cae55e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_jit.h +++ /dev/null @@ -1,440 +0,0 @@ -/* -** Common definitions for the JIT compiler. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_JIT_H -#define _LJ_JIT_H - -#include "lj_obj.h" -#include "lj_ir.h" - -/* JIT engine flags. */ -#define JIT_F_ON 0x00000001 - -/* CPU-specific JIT engine flags. */ -#if LJ_TARGET_X86ORX64 -#define JIT_F_SSE2 0x00000010 -#define JIT_F_SSE3 0x00000020 -#define JIT_F_SSE4_1 0x00000040 -#define JIT_F_PREFER_IMUL 0x00000080 -#define JIT_F_LEA_AGU 0x00000100 - -/* Names for the CPU-specific flags. Must match the order above. */ -#define JIT_F_CPU_FIRST JIT_F_SSE2 -#define JIT_F_CPUSTRING "\4SSE2\4SSE3\6SSE4.1\3AMD\4ATOM" -#elif LJ_TARGET_ARM -#define JIT_F_ARMV6_ 0x00000010 -#define JIT_F_ARMV6T2_ 0x00000020 -#define JIT_F_ARMV7 0x00000040 -#define JIT_F_VFPV2 0x00000080 -#define JIT_F_VFPV3 0x00000100 - -#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7) -#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7) -#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3) - -/* Names for the CPU-specific flags. Must match the order above. */ -#define JIT_F_CPU_FIRST JIT_F_ARMV6_ -#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3" -#elif LJ_TARGET_PPC -#define JIT_F_SQRT 0x00000010 -#define JIT_F_ROUND 0x00000020 - -/* Names for the CPU-specific flags. 
Must match the order above. */ -#define JIT_F_CPU_FIRST JIT_F_SQRT -#define JIT_F_CPUSTRING "\4SQRT\5ROUND" -#elif LJ_TARGET_MIPS -#define JIT_F_MIPS32R2 0x00000010 - -/* Names for the CPU-specific flags. Must match the order above. */ -#define JIT_F_CPU_FIRST JIT_F_MIPS32R2 -#define JIT_F_CPUSTRING "\010MIPS32R2" -#else -#define JIT_F_CPU_FIRST 0 -#define JIT_F_CPUSTRING "" -#endif - -/* Optimization flags. */ -#define JIT_F_OPT_MASK 0x0fff0000 - -#define JIT_F_OPT_FOLD 0x00010000 -#define JIT_F_OPT_CSE 0x00020000 -#define JIT_F_OPT_DCE 0x00040000 -#define JIT_F_OPT_FWD 0x00080000 -#define JIT_F_OPT_DSE 0x00100000 -#define JIT_F_OPT_NARROW 0x00200000 -#define JIT_F_OPT_LOOP 0x00400000 -#define JIT_F_OPT_ABC 0x00800000 -#define JIT_F_OPT_SINK 0x01000000 -#define JIT_F_OPT_FUSE 0x02000000 - -/* Optimizations names for -O. Must match the order above. */ -#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD -#define JIT_F_OPTSTRING \ - "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse" - -/* Optimization levels set a fixed combination of flags. */ -#define JIT_F_OPT_0 0 -#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE) -#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP) -#define JIT_F_OPT_3 (JIT_F_OPT_2|\ - JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE) -#define JIT_F_OPT_DEFAULT JIT_F_OPT_3 - -#if LJ_TARGET_WINDOWS || LJ_64 -/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */ -#define JIT_P_sizemcode_DEFAULT 64 -#else -/* Could go as low as 4K, but the mmap() overhead would be rather high. */ -#define JIT_P_sizemcode_DEFAULT 32 -#endif - -/* Optimization parameters and their defaults. Length is a char in octal! */ -#define JIT_PARAMDEF(_) \ - _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \ - _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \ - _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \ - _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \ - _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \ - _(\011, minstitch, 0) /* Min. # of IR ins for a stitched trace. */ \ - \ - _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \ - _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \ - _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \ - \ - _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \ - _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \ - _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \ - _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \ - \ - /* Size of each machine code area (in KBytes). */ \ - _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \ - /* Max. total size of all machine code areas (in KBytes). */ \ - _(\010, maxmcode, 512) \ - /* End of list. */ - -enum { -#define JIT_PARAMENUM(len, name, value) JIT_P_##name, -JIT_PARAMDEF(JIT_PARAMENUM) -#undef JIT_PARAMENUM - JIT_P__MAX -}; - -#define JIT_PARAMSTR(len, name, value) #len #name -#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR) - -/* Trace compiler state. */ -typedef enum { - LJ_TRACE_IDLE, /* Trace compiler idle. */ - LJ_TRACE_ACTIVE = 0x10, - LJ_TRACE_RECORD, /* Bytecode recording active. */ - LJ_TRACE_START, /* New trace started. */ - LJ_TRACE_END, /* End of trace. */ - LJ_TRACE_ASM, /* Assemble trace. */ - LJ_TRACE_ERR /* Trace aborted with error. */ -} TraceState; - -/* Post-processing action. */ -typedef enum { - LJ_POST_NONE, /* No action. 
*/ - LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */ - LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */ - LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */ - LJ_POST_FIXBOOL, /* Fixup boolean result. */ - LJ_POST_FIXCONST, /* Fixup constant results. */ - LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */ -} PostProc; - -/* Machine code type. */ -#if LJ_TARGET_X86ORX64 -typedef uint8_t MCode; -#else -typedef uint32_t MCode; -#endif - -/* Stack snapshot header. */ -typedef struct SnapShot { - uint16_t mapofs; /* Offset into snapshot map. */ - IRRef1 ref; /* First IR ref for this snapshot. */ - uint8_t nslots; /* Number of valid slots. */ - uint8_t topslot; /* Maximum frame extent. */ - uint8_t nent; /* Number of compressed entries. */ - uint8_t count; /* Count of taken exits for this snapshot. */ -} SnapShot; - -#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */ - -/* Compressed snapshot entry. */ -typedef uint32_t SnapEntry; - -#define SNAP_FRAME 0x010000 /* Frame slot. */ -#define SNAP_CONT 0x020000 /* Continuation slot. */ -#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */ -#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */ -LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME); -LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT); - -#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref)) -#define SNAP_TR(slot, tr) \ - (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK))) -#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc)) -#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz)) -#define snap_ref(sn) ((sn) & 0xffff) -#define snap_slot(sn) ((BCReg)((sn) >> 24)) -#define snap_isframe(sn) ((sn) & SNAP_FRAME) -#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn)) -#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref)) - -/* Snapshot and exit numbers. */ -typedef uint32_t SnapNo; -typedef uint32_t ExitNo; - -/* Trace number. */ -typedef uint32_t TraceNo; /* Used to pass around trace numbers. */ -typedef uint16_t TraceNo1; /* Stored trace number. */ - -/* Type of link. ORDER LJ_TRLINK */ -typedef enum { - LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */ - LJ_TRLINK_ROOT, /* Link to other root trace. */ - LJ_TRLINK_LOOP, /* Loop to same trace. */ - LJ_TRLINK_TAILREC, /* Tail-recursion. */ - LJ_TRLINK_UPREC, /* Up-recursion. */ - LJ_TRLINK_DOWNREC, /* Down-recursion. */ - LJ_TRLINK_INTERP, /* Fallback to interpreter. */ - LJ_TRLINK_RETURN, /* Return to interpreter. */ - LJ_TRLINK_STITCH /* Trace stitching. */ -} TraceLink; - -/* Trace object. */ -typedef struct GCtrace { - GCHeader; - uint8_t topslot; /* Top stack slot already checked to be allocated. */ - uint8_t linktype; /* Type of link. */ - IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */ -#if LJ_GC64 - uint32_t unused_gc64; -#endif - GCRef gclist; - IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */ - IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */ - uint16_t nsnap; /* Number of snapshots. */ - uint16_t nsnapmap; /* Number of snapshot map elements. */ - SnapShot *snap; /* Snapshot array. */ - SnapEntry *snapmap; /* Snapshot map. */ - GCRef startpt; /* Starting prototype. */ - MRef startpc; /* Bytecode PC of starting instruction. */ - BCIns startins; /* Original bytecode of starting instruction. */ - MSize szmcode; /* Size of machine code. */ - MCode *mcode; /* Start of machine code. */ - MSize mcloop; /* Offset of loop start in machine code. 
*/ - uint16_t nchild; /* Number of child traces (root trace only). */ - uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */ - TraceNo1 traceno; /* Trace number. */ - TraceNo1 link; /* Linked trace (or self for loops). */ - TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */ - TraceNo1 nextroot; /* Next root trace for same prototype. */ - TraceNo1 nextside; /* Next side trace of same root trace. */ - uint8_t sinktags; /* Trace has SINK tags. */ - uint8_t unused1; -#ifdef LUAJIT_USE_GDBJIT - void *gdbjit_entry; /* GDB JIT entry. */ -#endif -} GCtrace; - -#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o)) -#define traceref(J, n) \ - check_exp((n)>0 && (MSize)(n)sizetrace, (GCtrace *)gcref(J->trace[(n)])) - -LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist)); - -static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap) -{ - if (snap+1 == &T->snap[T->nsnap]) - return T->nsnapmap; - else - return (snap+1)->mapofs; -} - -/* Round-robin penalty cache for bytecodes leading to aborted traces. */ -typedef struct HotPenalty { - MRef pc; /* Starting bytecode PC. */ - uint16_t val; /* Penalty value, i.e. hotcount start. */ - uint16_t reason; /* Abort reason (really TraceErr). */ -} HotPenalty; - -#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */ -#define PENALTY_MIN (36*2) /* Minimum penalty value. */ -#define PENALTY_MAX 60000 /* Maximum penalty value. */ -#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */ - -/* Round-robin backpropagation cache for narrowing conversions. */ -typedef struct BPropEntry { - IRRef1 key; /* Key: original reference. */ - IRRef1 val; /* Value: reference after conversion. */ - IRRef mode; /* Mode for this entry (currently IRCONV_*). */ -} BPropEntry; - -/* Number of slots for the backpropagation cache. Must be a power of 2. */ -#define BPROP_SLOTS 16 - -/* Scalar evolution analysis cache. */ -typedef struct ScEvEntry { - MRef pc; /* Bytecode PC of FORI. */ - IRRef1 idx; /* Index reference. */ - IRRef1 start; /* Constant start reference. */ - IRRef1 stop; /* Constant stop reference. */ - IRRef1 step; /* Constant step reference. */ - IRType1 t; /* Scalar type. */ - uint8_t dir; /* Direction. 1: +, 0: -. */ -} ScEvEntry; - -/* Reverse bytecode map (IRRef -> PC). Only for selected instructions. */ -typedef struct RBCHashEntry { - MRef pc; /* Bytecode PC. */ - GCRef pt; /* Prototype. */ - IRRef ref; /* IR reference. */ -} RBCHashEntry; - -/* Number of slots in the reverse bytecode hash table. Must be a power of 2. */ -#define RBCHASH_SLOTS 8 - -/* 128 bit SIMD constants. */ -enum { - LJ_KSIMD_ABS, - LJ_KSIMD_NEG, - LJ_KSIMD__MAX -}; - -/* Get 16 byte aligned pointer to SIMD constant. */ -#define LJ_KSIMD(J, n) \ - ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15)) - -/* Set/reset flag to activate the SPLIT pass for the current trace. */ -#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) -#define lj_needsplit(J) (J->needsplit = 1) -#define lj_resetsplit(J) (J->needsplit = 0) -#else -#define lj_needsplit(J) UNUSED(J) -#define lj_resetsplit(J) UNUSED(J) -#endif - -/* Fold state is used to fold instructions on-the-fly. */ -typedef struct FoldState { - IRIns ins; /* Currently emitted instruction. */ - IRIns left; /* Instruction referenced by left operand. */ - IRIns right; /* Instruction referenced by right operand. */ -} FoldState; - -/* JIT compiler state. */ -typedef struct jit_State { - GCtrace cur; /* Current trace. 
*/ - - lua_State *L; /* Current Lua state. */ - const BCIns *pc; /* Current PC. */ - GCfunc *fn; /* Current function. */ - GCproto *pt; /* Current prototype. */ - TRef *base; /* Current frame base, points into J->slots. */ - - uint32_t flags; /* JIT engine flags. */ - BCReg maxslot; /* Relative to baseslot. */ - BCReg baseslot; /* Current frame base, offset into J->slots. */ - - uint8_t mergesnap; /* Allowed to merge with next snapshot. */ - uint8_t needsnap; /* Need snapshot before recording next bytecode. */ - IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */ - uint8_t bcskip; /* Number of bytecode instructions to skip. */ - - FoldState fold; /* Fold state. */ - - const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */ - MSize bc_extent; /* Extent of the range. */ - - TraceState state; /* Trace compiler state. */ - - int32_t instunroll; /* Unroll counter for instable loops. */ - int32_t loopunroll; /* Unroll counter for loop ops in side traces. */ - int32_t tailcalled; /* Number of successive tailcalls. */ - int32_t framedepth; /* Current frame depth. */ - int32_t retdepth; /* Return frame depth (count of RETF). */ - - MRef k64; /* Pointer to chained array of 64 bit constants. */ - TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */ - - IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */ - IRRef irtoplim; /* Upper limit of instuction buffer (biased). */ - IRRef irbotlim; /* Lower limit of instuction buffer (biased). */ - IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */ - - MSize sizesnap; /* Size of temp. snapshot buffer. */ - SnapShot *snapbuf; /* Temp. snapshot buffer. */ - SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */ - MSize sizesnapmap; /* Size of temp. snapshot map buffer. */ - - PostProc postproc; /* Required post-processing after execution. */ -#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) - uint8_t needsplit; /* Need SPLIT pass. */ -#endif - uint8_t retryrec; /* Retry recording. */ - - GCRef *trace; /* Array of traces. */ - TraceNo freetrace; /* Start of scan for next free trace. */ - MSize sizetrace; /* Size of trace array. */ - - IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */ - TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */ - - int32_t param[JIT_P__MAX]; /* JIT engine parameters. */ - - MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */ - - HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */ - uint32_t penaltyslot; /* Round-robin index into penalty slots. */ - uint32_t prngstate; /* PRNG state. */ - -#ifdef LUAJIT_ENABLE_TABLE_BUMP - RBCHashEntry rbchash[RBCHASH_SLOTS]; /* Reverse bytecode map. */ -#endif - - BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */ - uint32_t bpropslot; /* Round-robin index into bpropcache slots. */ - - ScEvEntry scev; /* Scalar evolution analysis cache slots. */ - - const BCIns *startpc; /* Bytecode PC of starting instruction. */ - TraceNo parent; /* Parent of current side trace (0 for root traces). */ - ExitNo exitno; /* Exit number in parent of current side trace. */ - - BCIns *patchpc; /* PC for pending re-patch. */ - BCIns patchins; /* Instruction for pending re-patch. */ - - int mcprot; /* Protection of current mcode area. */ - MCode *mcarea; /* Base of current mcode area. */ - MCode *mctop; /* Top of current mcode area. */ - MCode *mcbot; /* Bottom of current mcode area. */ - size_t szmcarea; /* Size of current mcode area. 
*/ - size_t szallmcarea; /* Total size of all allocated mcode areas. */ - - TValue errinfo; /* Additional info element for trace errors. */ - -#if LJ_HASPROFILE - GCproto *prev_pt; /* Previous prototype. */ - BCLine prev_line; /* Previous line. */ - int prof_mode; /* Profiling mode: 0, 'f', 'l'. */ -#endif -} -#if LJ_TARGET_ARM -LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */ -#endif -jit_State; - -/* Trivial PRNG e.g. used for penalty randomization. */ -static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) -{ - /* Yes, this LCG is very weak, but that doesn't matter for our use case. */ - J->prngstate = J->prngstate * 1103515245 + 12345; - return J->prngstate >> (32-bits); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.c deleted file mode 100644 index 8409cd78a71..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.c +++ /dev/null @@ -1,482 +0,0 @@ -/* -** Lexical analyzer. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_lex_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#if LJ_HASFFI -#include "lj_tab.h" -#include "lj_ctype.h" -#include "lj_cdata.h" -#include "lualib.h" -#endif -#include "lj_state.h" -#include "lj_lex.h" -#include "lj_parse.h" -#include "lj_char.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" - -/* Lua lexer token names. */ -static const char *const tokennames[] = { -#define TKSTR1(name) #name, -#define TKSTR2(name, sym) #sym, -TKDEF(TKSTR1, TKSTR2) -#undef TKSTR1 -#undef TKSTR2 - NULL -}; - -/* -- Buffer handling ----------------------------------------------------- */ - -#define LEX_EOF (-1) -#define lex_iseol(ls) (ls->c == '\n' || ls->c == '\r') - -/* Get more input from reader. */ -static LJ_NOINLINE LexChar lex_more(LexState *ls) -{ - size_t sz; - const char *p = ls->rfunc(ls->L, ls->rdata, &sz); - if (p == NULL || sz == 0) return LEX_EOF; - ls->pe = p + sz; - ls->p = p + 1; - return (LexChar)(uint8_t)p[0]; -} - -/* Get next character. */ -static LJ_AINLINE LexChar lex_next(LexState *ls) -{ - return (ls->c = ls->p < ls->pe ? (LexChar)(uint8_t)*ls->p++ : lex_more(ls)); -} - -/* Save character. */ -static LJ_AINLINE void lex_save(LexState *ls, LexChar c) -{ - lj_buf_putb(&ls->sb, c); -} - -/* Save previous character and get next character. */ -static LJ_AINLINE LexChar lex_savenext(LexState *ls) -{ - lex_save(ls, ls->c); - return lex_next(ls); -} - -/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */ -static void lex_newline(LexState *ls) -{ - LexChar old = ls->c; - lua_assert(lex_iseol(ls)); - lex_next(ls); /* Skip "\n" or "\r". */ - if (lex_iseol(ls) && ls->c != old) lex_next(ls); /* Skip "\n\r" or "\r\n". */ - if (++ls->linenumber >= LJ_MAX_LINE) - lj_lex_error(ls, ls->tok, LJ_ERR_XLINES); -} - -/* -- Scanner for terminals ----------------------------------------------- */ - -/* Parse a number literal. */ -static void lex_number(LexState *ls, TValue *tv) -{ - StrScanFmt fmt; - LexChar c, xp = 'e'; - lua_assert(lj_char_isdigit(ls->c)); - if ((c = ls->c) == '0' && (lex_savenext(ls) | 0x20) == 'x') - xp = 'p'; - while (lj_char_isident(ls->c) || ls->c == '.' 
|| - ((ls->c == '-' || ls->c == '+') && (c | 0x20) == xp)) { - c = ls->c; - lex_savenext(ls); - } - lex_save(ls, '\0'); - fmt = lj_strscan_scan((const uint8_t *)sbufB(&ls->sb), tv, - (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) | - (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0)); - if (LJ_DUALNUM && fmt == STRSCAN_INT) { - setitype(tv, LJ_TISNUM); - } else if (fmt == STRSCAN_NUM) { - /* Already in correct format. */ -#if LJ_HASFFI - } else if (fmt != STRSCAN_ERROR) { - lua_State *L = ls->L; - GCcdata *cd; - lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG); - if (!ctype_ctsG(G(L))) { - ptrdiff_t oldtop = savestack(L, L->top); - luaopen_ffi(L); /* Load FFI library on-demand. */ - L->top = restorestack(L, oldtop); - } - if (fmt == STRSCAN_IMAG) { - cd = lj_cdata_new_(L, CTID_COMPLEX_DOUBLE, 2*sizeof(double)); - ((double *)cdataptr(cd))[0] = 0; - ((double *)cdataptr(cd))[1] = numV(tv); - } else { - cd = lj_cdata_new_(L, fmt==STRSCAN_I64 ? CTID_INT64 : CTID_UINT64, 8); - *(uint64_t *)cdataptr(cd) = tv->u64; - } - lj_parse_keepcdata(ls, tv, cd); -#endif - } else { - lua_assert(fmt == STRSCAN_ERROR); - lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER); - } -} - -/* Skip equal signs for "[=...=[" and "]=...=]" and return their count. */ -static int lex_skipeq(LexState *ls) -{ - int count = 0; - LexChar s = ls->c; - lua_assert(s == '[' || s == ']'); - while (lex_savenext(ls) == '=') - count++; - return (ls->c == s) ? count : (-count) - 1; -} - -/* Parse a long string or long comment (tv set to NULL). */ -static void lex_longstring(LexState *ls, TValue *tv, int sep) -{ - lex_savenext(ls); /* Skip second '['. */ - if (lex_iseol(ls)) /* Skip initial newline. */ - lex_newline(ls); - for (;;) { - switch (ls->c) { - case LEX_EOF: - lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM); - break; - case ']': - if (lex_skipeq(ls) == sep) { - lex_savenext(ls); /* Skip second ']'. */ - goto endloop; - } - break; - case '\n': - case '\r': - lex_save(ls, '\n'); - lex_newline(ls); - if (!tv) lj_buf_reset(&ls->sb); /* Don't waste space for comments. */ - break; - default: - lex_savenext(ls); - break; - } - } endloop: - if (tv) { - GCstr *str = lj_parse_keepstr(ls, sbufB(&ls->sb) + (2 + (MSize)sep), - sbuflen(&ls->sb) - 2*(2 + (MSize)sep)); - setstrV(ls->L, tv, str); - } -} - -/* Parse a string. */ -static void lex_string(LexState *ls, TValue *tv) -{ - LexChar delim = ls->c; /* Delimiter is '\'' or '"'. */ - lex_savenext(ls); - while (ls->c != delim) { - switch (ls->c) { - case LEX_EOF: - lj_lex_error(ls, TK_eof, LJ_ERR_XSTR); - continue; - case '\n': - case '\r': - lj_lex_error(ls, TK_string, LJ_ERR_XSTR); - continue; - case '\\': { - LexChar c = lex_next(ls); /* Skip the '\\'. */ - switch (c) { - case 'a': c = '\a'; break; - case 'b': c = '\b'; break; - case 'f': c = '\f'; break; - case 'n': c = '\n'; break; - case 'r': c = '\r'; break; - case 't': c = '\t'; break; - case 'v': c = '\v'; break; - case 'x': /* Hexadecimal escape '\xXX'. */ - c = (lex_next(ls) & 15u) << 4; - if (!lj_char_isdigit(ls->c)) { - if (!lj_char_isxdigit(ls->c)) goto err_xesc; - c += 9 << 4; - } - c += (lex_next(ls) & 15u); - if (!lj_char_isdigit(ls->c)) { - if (!lj_char_isxdigit(ls->c)) goto err_xesc; - c += 9; - } - break; - case 'z': /* Skip whitespace. 
*/ - lex_next(ls); - while (lj_char_isspace(ls->c)) - if (lex_iseol(ls)) lex_newline(ls); else lex_next(ls); - continue; - case '\n': case '\r': lex_save(ls, '\n'); lex_newline(ls); continue; - case '\\': case '\"': case '\'': break; - case LEX_EOF: continue; - default: - if (!lj_char_isdigit(c)) - goto err_xesc; - c -= '0'; /* Decimal escape '\ddd'. */ - if (lj_char_isdigit(lex_next(ls))) { - c = c*10 + (ls->c - '0'); - if (lj_char_isdigit(lex_next(ls))) { - c = c*10 + (ls->c - '0'); - if (c > 255) { - err_xesc: - lj_lex_error(ls, TK_string, LJ_ERR_XESC); - } - lex_next(ls); - } - } - lex_save(ls, c); - continue; - } - lex_save(ls, c); - lex_next(ls); - continue; - } - default: - lex_savenext(ls); - break; - } - } - lex_savenext(ls); /* Skip trailing delimiter. */ - setstrV(ls->L, tv, - lj_parse_keepstr(ls, sbufB(&ls->sb)+1, sbuflen(&ls->sb)-2)); -} - -/* -- Main lexical scanner ------------------------------------------------ */ - -/* Get next lexical token. */ -static LexToken lex_scan(LexState *ls, TValue *tv) -{ - lj_buf_reset(&ls->sb); - for (;;) { - if (lj_char_isident(ls->c)) { - GCstr *s; - if (lj_char_isdigit(ls->c)) { /* Numeric literal. */ - lex_number(ls, tv); - return TK_number; - } - /* Identifier or reserved word. */ - do { - lex_savenext(ls); - } while (lj_char_isident(ls->c)); - s = lj_parse_keepstr(ls, sbufB(&ls->sb), sbuflen(&ls->sb)); - setstrV(ls->L, tv, s); - if (s->reserved > 0) /* Reserved word? */ - return TK_OFS + s->reserved; - return TK_name; - } - switch (ls->c) { - case '\n': - case '\r': - lex_newline(ls); - continue; - case ' ': - case '\t': - case '\v': - case '\f': - lex_next(ls); - continue; - case '-': - lex_next(ls); - if (ls->c != '-') return '-'; - lex_next(ls); - if (ls->c == '[') { /* Long comment "--[=*[...]=*]". */ - int sep = lex_skipeq(ls); - lj_buf_reset(&ls->sb); /* `lex_skipeq' may dirty the buffer */ - if (sep >= 0) { - lex_longstring(ls, NULL, sep); - lj_buf_reset(&ls->sb); - continue; - } - } - /* Short comment "--.*\n". */ - while (!lex_iseol(ls) && ls->c != LEX_EOF) - lex_next(ls); - continue; - case '[': { - int sep = lex_skipeq(ls); - if (sep >= 0) { - lex_longstring(ls, tv, sep); - return TK_string; - } else if (sep == -1) { - return '['; - } else { - lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM); - continue; - } - } - case '=': - lex_next(ls); - if (ls->c != '=') return '='; else { lex_next(ls); return TK_eq; } - case '<': - lex_next(ls); - if (ls->c != '=') return '<'; else { lex_next(ls); return TK_le; } - case '>': - lex_next(ls); - if (ls->c != '=') return '>'; else { lex_next(ls); return TK_ge; } - case '~': - lex_next(ls); - if (ls->c != '=') return '~'; else { lex_next(ls); return TK_ne; } - case ':': - lex_next(ls); - if (ls->c != ':') return ':'; else { lex_next(ls); return TK_label; } - case '"': - case '\'': - lex_string(ls, tv); - return TK_string; - case '.': - if (lex_savenext(ls) == '.') { - lex_next(ls); - if (ls->c == '.') { - lex_next(ls); - return TK_dots; /* ... */ - } - return TK_concat; /* .. */ - } else if (!lj_char_isdigit(ls->c)) { - return '.'; - } else { - lex_number(ls, tv); - return TK_number; - } - case LEX_EOF: - return TK_eof; - default: { - LexChar c = ls->c; - lex_next(ls); - return c; /* Single-char tokens (+ - / ...). */ - } - } - } -} - -/* -- Lexer API ----------------------------------------------------------- */ - -/* Setup lexer state. 
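
The '\xXX' escape in lex_string above decodes hex digits with the classic (c & 15) + 9 trick rather than a lookup table. A minimal standalone sketch of the same idea follows; hex_digit_value is an illustrative name, not from the patch, and the caller is assumed to have validated the character as a hex digit first, as the lexer does with lj_char_isxdigit.

#include <ctype.h>

/* Sketch: value of a character already known to be a hex digit.
** '0'..'9' have low nibble 0..9; 'a'..'f' and 'A'..'F' have low nibble
** 1..6, so adding 9 maps them to 10..15. */
static int hex_digit_value(int c)
{
  int v = c & 15;
  if (!isdigit((unsigned char)c))
    v += 9;
  return v;
}
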
*/ -int lj_lex_setup(lua_State *L, LexState *ls) -{ - int header = 0; - ls->L = L; - ls->fs = NULL; - ls->pe = ls->p = NULL; - ls->vstack = NULL; - ls->sizevstack = 0; - ls->vtop = 0; - ls->bcstack = NULL; - ls->sizebcstack = 0; - ls->tok = 0; - ls->lookahead = TK_eof; /* No look-ahead token. */ - ls->linenumber = 1; - ls->lastline = 1; - lex_next(ls); /* Read-ahead first char. */ - if (ls->c == 0xef && ls->p + 2 <= ls->pe && (uint8_t)ls->p[0] == 0xbb && - (uint8_t)ls->p[1] == 0xbf) { /* Skip UTF-8 BOM (if buffered). */ - ls->p += 2; - lex_next(ls); - header = 1; - } - if (ls->c == '#') { /* Skip POSIX #! header line. */ - do { - lex_next(ls); - if (ls->c == LEX_EOF) return 0; - } while (!lex_iseol(ls)); - lex_newline(ls); - header = 1; - } - if (ls->c == LUA_SIGNATURE[0]) { /* Bytecode dump. */ - if (header) { - /* - ** Loading bytecode with an extra header is disabled for security - ** reasons. This may circumvent the usual check for bytecode vs. - ** Lua code by looking at the first char. Since this is a potential - ** security violation no attempt is made to echo the chunkname either. - */ - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCBAD)); - lj_err_throw(L, LUA_ERRSYNTAX); - } - return 1; - } - return 0; -} - -/* Cleanup lexer state. */ -void lj_lex_cleanup(lua_State *L, LexState *ls) -{ - global_State *g = G(L); - lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine); - lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo); - lj_buf_free(g, &ls->sb); -} - -/* Return next lexical token. */ -void lj_lex_next(LexState *ls) -{ - ls->lastline = ls->linenumber; - if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */ - ls->tok = lex_scan(ls, &ls->tokval); /* Get next token. */ - } else { /* Otherwise return lookahead token. */ - ls->tok = ls->lookahead; - ls->lookahead = TK_eof; - ls->tokval = ls->lookaheadval; - } -} - -/* Look ahead for the next token. */ -LexToken lj_lex_lookahead(LexState *ls) -{ - lua_assert(ls->lookahead == TK_eof); - ls->lookahead = lex_scan(ls, &ls->lookaheadval); - return ls->lookahead; -} - -/* Convert token to string. */ -const char *lj_lex_token2str(LexState *ls, LexToken tok) -{ - if (tok > TK_OFS) - return tokennames[tok-TK_OFS-1]; - else if (!lj_char_iscntrl(tok)) - return lj_strfmt_pushf(ls->L, "%c", tok); - else - return lj_strfmt_pushf(ls->L, "char(%d)", tok); -} - -/* Lexer error. */ -void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...) -{ - const char *tokstr; - va_list argp; - if (tok == 0) { - tokstr = NULL; - } else if (tok == TK_name || tok == TK_string || tok == TK_number) { - lex_save(ls, '\0'); - tokstr = sbufB(&ls->sb); - } else { - tokstr = lj_lex_token2str(ls, tok); - } - va_start(argp, em); - lj_err_lex(ls->L, ls->chunkname, tokstr, ls->linenumber, em, argp); - va_end(argp); -} - -/* Initialize strings for reserved words. */ -void lj_lex_init(lua_State *L) -{ - uint32_t i; - for (i = 0; i < TK_RESERVED; i++) { - GCstr *s = lj_str_newz(L, tokennames[i]); - fixstring(s); /* Reserved words are never collected. */ - s->reserved = (uint8_t)(i+1); - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.h deleted file mode 100644 index acd2285d789..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lex.h +++ /dev/null @@ -1,86 +0,0 @@ -/* -** Lexical analyzer. -** Copyright (C) 2005-2015 Mike Pall. 
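
lj_lex_next() and lj_lex_lookahead() above implement a single-slot lookahead: the parser may peek at most one token ahead, and the buffered token is consumed before any new scanning happens. Below is a minimal sketch of that protocol with illustrative names and a stub scanner; note that the real lj_lex_lookahead asserts the slot is empty rather than refilling it.

typedef int Token;
#define TOK_EOF (-1)

typedef struct MiniLex {
  Token tok;        /* Current token. */
  Token lookahead;  /* Buffered lookahead token, TOK_EOF if empty. */
} MiniLex;

static Token scan(MiniLex *ls)
{
  (void)ls;          /* Stub scanner; a real one reads the input here. */
  return TOK_EOF;
}

static void next_token(MiniLex *ls)
{
  if (ls->lookahead == TOK_EOF) {  /* No buffered token? Scan a new one. */
    ls->tok = scan(ls);
  } else {                         /* Otherwise consume the buffered token. */
    ls->tok = ls->lookahead;
    ls->lookahead = TOK_EOF;
  }
}

static Token peek_token(MiniLex *ls)
{
  if (ls->lookahead == TOK_EOF)
    ls->lookahead = scan(ls);      /* Fill the single lookahead slot. */
  return ls->lookahead;
}
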
See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_LEX_H -#define _LJ_LEX_H - -#include - -#include "lj_obj.h" -#include "lj_err.h" - -/* Lua lexer tokens. */ -#define TKDEF(_, __) \ - _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \ - _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \ - _(repeat) _(return) _(then) _(true) _(until) _(while) \ - __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \ - __(label, ::) __(number, ) __(name, ) __(string, ) \ - __(eof, ) - -enum { - TK_OFS = 256, -#define TKENUM1(name) TK_##name, -#define TKENUM2(name, sym) TK_##name, -TKDEF(TKENUM1, TKENUM2) -#undef TKENUM1 -#undef TKENUM2 - TK_RESERVED = TK_while - TK_OFS -}; - -typedef int LexChar; /* Lexical character. Unsigned ext. from char. */ -typedef int LexToken; /* Lexical token. */ - -/* Combined bytecode ins/line. Only used during bytecode generation. */ -typedef struct BCInsLine { - BCIns ins; /* Bytecode instruction. */ - BCLine line; /* Line number for this bytecode. */ -} BCInsLine; - -/* Info for local variables. Only used during bytecode generation. */ -typedef struct VarInfo { - GCRef name; /* Local variable name or goto/label name. */ - BCPos startpc; /* First point where the local variable is active. */ - BCPos endpc; /* First point where the local variable is dead. */ - uint8_t slot; /* Variable slot. */ - uint8_t info; /* Variable/goto/label info. */ -} VarInfo; - -/* Lua lexer state. */ -typedef struct LexState { - struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */ - struct lua_State *L; /* Lua state. */ - TValue tokval; /* Current token value. */ - TValue lookaheadval; /* Lookahead token value. */ - const char *p; /* Current position in input buffer. */ - const char *pe; /* End of input buffer. */ - LexChar c; /* Current character. */ - LexToken tok; /* Current token. */ - LexToken lookahead; /* Lookahead token. */ - SBuf sb; /* String buffer for tokens. */ - lua_Reader rfunc; /* Reader callback. */ - void *rdata; /* Reader callback data. */ - BCLine linenumber; /* Input line counter. */ - BCLine lastline; /* Line of last token. */ - GCstr *chunkname; /* Current chunk name (interned string). */ - const char *chunkarg; /* Chunk name argument. */ - const char *mode; /* Allow loading bytecode (b) and/or source text (t). */ - VarInfo *vstack; /* Stack for names and extents of local variables. */ - MSize sizevstack; /* Size of variable stack. */ - MSize vtop; /* Top of variable stack. */ - BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */ - MSize sizebcstack; /* Size of bytecode stack. */ - uint32_t level; /* Syntactical nesting level. */ -} LexState; - -LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls); -LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls); -LJ_FUNC void lj_lex_next(LexState *ls); -LJ_FUNC LexToken lj_lex_lookahead(LexState *ls); -LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken tok); -LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...); -LJ_FUNC void lj_lex_init(lua_State *L); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.c deleted file mode 100644 index b16d0564fa7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.c +++ /dev/null @@ -1,303 +0,0 @@ -/* -** Library function support. -** Copyright (C) 2005-2015 Mike Pall. 
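
The TKDEF list above is an X-macro: the token list is written once and expanded twice, once into the TK_* enum and once into the tokennames[] table, so the two can never drift apart. A minimal sketch of the technique with an unrelated, illustrative list:

#define COLORDEF(_) _(red) _(green) _(blue)

enum {
#define COLORENUM(name) COLOR_##name,
COLORDEF(COLORENUM)
#undef COLORENUM
  COLOR__MAX
};

static const char *const colornames[] = {
#define COLORSTR(name) #name,
COLORDEF(COLORSTR)
#undef COLORSTR
  NULL
};
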
See Copyright Notice in luajit.h -*/ - -#define lj_lib_c -#define LUA_CORE - -#include "lauxlib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_func.h" -#include "lj_bc.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" -#include "lj_lex.h" -#include "lj_bcdump.h" -#include "lj_lib.h" - -/* -- Library initialization ---------------------------------------------- */ - -static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize) -{ - if (libname) { - luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); - lua_getfield(L, -1, libname); - if (!tvistab(L->top-1)) { - L->top--; - if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL) - lj_err_callerv(L, LJ_ERR_BADMODN, libname); - settabV(L, L->top, tabV(L->top-1)); - L->top++; - lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ - } - L->top--; - settabV(L, L->top-1, tabV(L->top)); - } else { - lua_createtable(L, 0, hsize); - } - return tabV(L->top-1); -} - -static const uint8_t *lib_read_lfunc(lua_State *L, const uint8_t *p, GCtab *tab) -{ - int len = *p++; - GCstr *name = lj_str_new(L, (const char *)p, len); - LexState ls; - GCproto *pt; - GCfunc *fn; - memset(&ls, 0, sizeof(ls)); - ls.L = L; - ls.p = (const char *)(p+len); - ls.pe = (const char *)~(uintptr_t)0; - ls.c = -1; - ls.level = (BCDUMP_F_STRIP|(LJ_BE*BCDUMP_F_BE)); - ls.chunkname = name; - pt = lj_bcread_proto(&ls); - pt->firstline = ~(BCLine)0; - fn = lj_func_newL_empty(L, pt, tabref(L->env)); - /* NOBARRIER: See below for common barrier. */ - setfuncV(L, lj_tab_setstr(L, tab, name), fn); - return (const uint8_t *)ls.p; -} - -void lj_lib_register(lua_State *L, const char *libname, - const uint8_t *p, const lua_CFunction *cf) -{ - GCtab *env = tabref(L->env); - GCfunc *ofn = NULL; - int ffid = *p++; - BCIns *bcff = &L2GG(L)->bcff[*p++]; - GCtab *tab = lib_create_table(L, libname, *p++); - ptrdiff_t tpos = L->top - L->base; - - /* Avoid barriers further down. */ - lj_gc_anybarriert(L, tab); - tab->nomm = 0; - - for (;;) { - uint32_t tag = *p++; - MSize len = tag & LIBINIT_LENMASK; - tag &= LIBINIT_TAGMASK; - if (tag != LIBINIT_STRING) { - const char *name; - MSize nuv = (MSize)(L->top - L->base - tpos); - GCfunc *fn = lj_func_newC(L, nuv, env); - if (nuv) { - L->top = L->base + tpos; - memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv); - } - fn->c.ffid = (uint8_t)(ffid++); - name = (const char *)p; - p += len; - if (tag == LIBINIT_CF) - setmref(fn->c.pc, &G(L)->bc_cfunc_int); - else - setmref(fn->c.pc, bcff++); - if (tag == LIBINIT_ASM_) - fn->c.f = ofn->c.f; /* Copy handler from previous function. */ - else - fn->c.f = *cf++; /* Get cf or handler from C function table. */ - if (len) { - /* NOBARRIER: See above for common barrier. */ - setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn); - } - ofn = fn; - } else { - switch (tag | len) { - case LIBINIT_LUA: - p = lib_read_lfunc(L, p, tab); - break; - case LIBINIT_SET: - L->top -= 2; - if (tvisstr(L->top+1) && strV(L->top+1)->len == 0) - env = tabV(L->top); - else /* NOBARRIER: See above for common barrier. 
*/ - copyTV(L, lj_tab_set(L, tab, L->top+1), L->top); - break; - case LIBINIT_NUMBER: - memcpy(&L->top->n, p, sizeof(double)); - L->top++; - p += sizeof(double); - break; - case LIBINIT_COPY: - copyTV(L, L->top, L->top - *p++); - L->top++; - break; - case LIBINIT_LASTCL: - setfuncV(L, L->top++, ofn); - break; - case LIBINIT_FFID: - ffid++; - break; - case LIBINIT_END: - return; - default: - setstrV(L, L->top++, lj_str_new(L, (const char *)p, len)); - p += len; - break; - } - } - } -} - -/* Push internal function on the stack. */ -GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n) -{ - GCfunc *fn; - lua_pushcclosure(L, f, n); - fn = funcV(L->top-1); - fn->c.ffid = (uint8_t)id; - setmref(fn->c.pc, &G(L)->bc_cfunc_int); - return fn; -} - -void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f, GCtab *env) -{ - luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4); - lua_pushcfunction(L, f); - /* NOBARRIER: The function is new (marked white). */ - setgcref(funcV(L->top-1)->c.env, obj2gco(env)); - lua_setfield(L, -2, name); - L->top--; -} - -int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id, const char *name) -{ - GCfunc *fn = lj_lib_pushcf(L, cf, id); - GCtab *t = tabref(curr_func(L)->c.env); /* Reference to parent table. */ - setfuncV(L, lj_tab_setstr(L, t, lj_str_newz(L, name)), fn); - lj_gc_anybarriert(L, t); - setfuncV(L, L->top++, fn); - return 1; -} - -/* -- Type checks --------------------------------------------------------- */ - -TValue *lj_lib_checkany(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (o >= L->top) - lj_err_arg(L, narg, LJ_ERR_NOVAL); - return o; -} - -GCstr *lj_lib_checkstr(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (o < L->top) { - if (LJ_LIKELY(tvisstr(o))) { - return strV(o); - } else if (tvisnumber(o)) { - GCstr *s = lj_strfmt_number(L, o); - setstrV(L, o, s); - return s; - } - } - lj_err_argt(L, narg, LUA_TSTRING); - return NULL; /* unreachable */ -} - -GCstr *lj_lib_optstr(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL; -} - -#if LJ_DUALNUM -void lj_lib_checknumber(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && lj_strscan_numberobj(o))) - lj_err_argt(L, narg, LUA_TNUMBER); -} -#endif - -lua_Number lj_lib_checknum(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && - (tvisnumber(o) || (tvisstr(o) && lj_strscan_num(strV(o), o))))) - lj_err_argt(L, narg, LUA_TNUMBER); - if (LJ_UNLIKELY(tvisint(o))) { - lua_Number n = (lua_Number)intV(o); - setnumV(o, n); - return n; - } else { - return numV(o); - } -} - -int32_t lj_lib_checkint(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && lj_strscan_numberobj(o))) - lj_err_argt(L, narg, LUA_TNUMBER); - if (LJ_LIKELY(tvisint(o))) { - return intV(o); - } else { - int32_t i = lj_num2int(numV(o)); - if (LJ_DUALNUM) setintV(o, i); - return i; - } -} - -int32_t lj_lib_optint(lua_State *L, int narg, int32_t def) -{ - TValue *o = L->base + narg-1; - return (o < L->top && !tvisnil(o)) ? 
lj_lib_checkint(L, narg) : def; -} - -GCfunc *lj_lib_checkfunc(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && tvisfunc(o))) - lj_err_argt(L, narg, LUA_TFUNCTION); - return funcV(o); -} - -GCtab *lj_lib_checktab(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (!(o < L->top && tvistab(o))) - lj_err_argt(L, narg, LUA_TTABLE); - return tabV(o); -} - -GCtab *lj_lib_checktabornil(lua_State *L, int narg) -{ - TValue *o = L->base + narg-1; - if (o < L->top) { - if (tvistab(o)) - return tabV(o); - else if (tvisnil(o)) - return NULL; - } - lj_err_arg(L, narg, LJ_ERR_NOTABN); - return NULL; /* unreachable */ -} - -int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst) -{ - GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg); - if (s) { - const char *opt = strdata(s); - MSize len = s->len; - int i; - for (i = 0; *(const uint8_t *)lst; i++) { - if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0) - return i; - lst += 1+*(const uint8_t *)lst; - } - lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt); - } - return def; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.h deleted file mode 100644 index 3fa7aa17eff..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_lib.h +++ /dev/null @@ -1,115 +0,0 @@ -/* -** Library function support. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_LIB_H -#define _LJ_LIB_H - -#include "lj_obj.h" - -/* -** A fallback handler is called by the assembler VM if the fast path fails: -** -** - too few arguments: unrecoverable. -** - wrong argument type: recoverable, if coercion succeeds. -** - bad argument value: unrecoverable. -** - stack overflow: recoverable, if stack reallocation succeeds. -** - extra handling: recoverable. -** -** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(), -** lj_err_caller() or lj_err_callermsg(). -** The recoverable cases return 0 or the number of results + 1. -** The assembler VM retries the fast path only if 0 is returned. -** This time the fallback must not be called again or it gets stuck in a loop. -*/ - -/* Return values from fallback handler. */ -#define FFH_RETRY 0 -#define FFH_UNREACHABLE FFH_RETRY -#define FFH_RES(n) ((n)+1) -#define FFH_TAILCALL (-1) - -LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg); -LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg); -LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg); -#if LJ_DUALNUM -LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg); -#else -#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg)) -#endif -LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg); -LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg); -LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def); -LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg); -LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg); -LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg); -LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst); - -/* Avoid including lj_frame.h. 
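
lj_lib_checkopt above scans a compact option list in which every entry is one length byte followed by that many characters, and a zero length byte (the string's own terminator) ends the list. A minimal standalone sketch of the same encoding and scan; find_opt and opts are illustrative names.

#include <stddef.h>
#include <string.h>

/* Sketch: length-prefixed option list "stop", "step", "run".
** find_opt("step", 4, opts) returns 1; an unknown option returns -1. */
static const char *const opts = "\4stop\4step\3run";

static int find_opt(const char *opt, size_t len, const char *lst)
{
  int i;
  size_t n;
  for (i = 0; *(const unsigned char *)lst; i++) {
    n = *(const unsigned char *)lst;
    if (n == len && memcmp(opt, lst + 1, len) == 0)
      return i;
    lst += 1 + n;  /* Skip length byte plus entry. */
  }
  return -1;
}
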
*/ -#if LJ_GC64 -#define lj_lib_upvalue(L, n) \ - (&gcval(L->base-2)->fn.c.upvalue[(n)-1]) -#elif LJ_FR2 -#define lj_lib_upvalue(L, n) \ - (&gcref((L->base-2)->gcr)->fn.c.upvalue[(n)-1]) -#else -#define lj_lib_upvalue(L, n) \ - (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1]) -#endif - -#if LJ_TARGET_WINDOWS -#define lj_lib_checkfpu(L) \ - do { setnumV(L->top++, (lua_Number)1437217655); \ - if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \ - L->top--; } while (0) -#else -#define lj_lib_checkfpu(L) UNUSED(L) -#endif - -LJ_FUNC GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n); -#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0)) - -/* Library function declarations. Scanned by buildvm. */ -#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L) -#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L) -#define LJLIB_ASM_(name) -#define LJLIB_LUA(name) -#define LJLIB_SET(name) -#define LJLIB_PUSH(arg) -#define LJLIB_REC(handler) -#define LJLIB_NOREGUV -#define LJLIB_NOREG - -#define LJ_LIB_REG(L, regname, name) \ - lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name) - -LJ_FUNC void lj_lib_register(lua_State *L, const char *libname, - const uint8_t *init, const lua_CFunction *cf); -LJ_FUNC void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f, - GCtab *env); -LJ_FUNC int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id, - const char *name); - -/* Library init data tags. */ -#define LIBINIT_LENMASK 0x3f -#define LIBINIT_TAGMASK 0xc0 -#define LIBINIT_CF 0x00 -#define LIBINIT_ASM 0x40 -#define LIBINIT_ASM_ 0x80 -#define LIBINIT_STRING 0xc0 -#define LIBINIT_MAXSTR 0x38 -#define LIBINIT_LUA 0xf9 -#define LIBINIT_SET 0xfa -#define LIBINIT_NUMBER 0xfb -#define LIBINIT_COPY 0xfc -#define LIBINIT_LASTCL 0xfd -#define LIBINIT_FFID 0xfe -#define LIBINIT_END 0xff - -/* Exported library functions. */ - -typedef struct RandomState RandomState; -LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_load.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_load.c deleted file mode 100644 index 95a6ab0d4de..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_load.c +++ /dev/null @@ -1,168 +0,0 @@ -/* -** Load and dump code. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include -#include - -#define lj_load_c -#define LUA_CORE - -#include "lua.h" -#include "lauxlib.h" - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_func.h" -#include "lj_frame.h" -#include "lj_vm.h" -#include "lj_lex.h" -#include "lj_bcdump.h" -#include "lj_parse.h" - -/* -- Load Lua source code and bytecode ----------------------------------- */ - -static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud) -{ - LexState *ls = (LexState *)ud; - GCproto *pt; - GCfunc *fn; - int bc; - UNUSED(dummy); - cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ - bc = lj_lex_setup(L, ls); - if (ls->mode && !strchr(ls->mode, bc ? 'b' : 't')) { - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_XMODE)); - lj_err_throw(L, LUA_ERRSYNTAX); - } - pt = bc ? lj_bcread(ls) : lj_parse(ls); - fn = lj_func_newL_empty(L, pt, tabref(L->env)); - /* Don't combine above/below into one statement. 
*/ - setfuncV(L, L->top++, fn); - return NULL; -} - -LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data, - const char *chunkname, const char *mode) -{ - LexState ls; - int status; - ls.rfunc = reader; - ls.rdata = data; - ls.chunkarg = chunkname ? chunkname : "?"; - ls.mode = mode; - lj_buf_init(L, &ls.sb); - status = lj_vm_cpcall(L, NULL, &ls, cpparser); - lj_lex_cleanup(L, &ls); - lj_gc_check(L); - return status; -} - -LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data, - const char *chunkname) -{ - return lua_loadx(L, reader, data, chunkname, NULL); -} - -typedef struct FileReaderCtx { - FILE *fp; - char buf[LUAL_BUFFERSIZE]; -} FileReaderCtx; - -static const char *reader_file(lua_State *L, void *ud, size_t *size) -{ - FileReaderCtx *ctx = (FileReaderCtx *)ud; - UNUSED(L); - if (feof(ctx->fp)) return NULL; - *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp); - return *size > 0 ? ctx->buf : NULL; -} - -LUALIB_API int luaL_loadfilex(lua_State *L, const char *filename, - const char *mode) -{ - FileReaderCtx ctx; - int status; - const char *chunkname; - if (filename) { - ctx.fp = fopen(filename, "rb"); - if (ctx.fp == NULL) { - lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno)); - return LUA_ERRFILE; - } - chunkname = lua_pushfstring(L, "@%s", filename); - } else { - ctx.fp = stdin; - chunkname = "=stdin"; - } - status = lua_loadx(L, reader_file, &ctx, chunkname, mode); - if (ferror(ctx.fp)) { - L->top -= filename ? 2 : 1; - lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno)); - if (filename) - fclose(ctx.fp); - return LUA_ERRFILE; - } - if (filename) { - L->top--; - copyTV(L, L->top-1, L->top); - fclose(ctx.fp); - } - return status; -} - -LUALIB_API int luaL_loadfile(lua_State *L, const char *filename) -{ - return luaL_loadfilex(L, filename, NULL); -} - -typedef struct StringReaderCtx { - const char *str; - size_t size; -} StringReaderCtx; - -static const char *reader_string(lua_State *L, void *ud, size_t *size) -{ - StringReaderCtx *ctx = (StringReaderCtx *)ud; - UNUSED(L); - if (ctx->size == 0) return NULL; - *size = ctx->size; - ctx->size = 0; - return ctx->str; -} - -LUALIB_API int luaL_loadbufferx(lua_State *L, const char *buf, size_t size, - const char *name, const char *mode) -{ - StringReaderCtx ctx; - ctx.str = buf; - ctx.size = size; - return lua_loadx(L, reader_string, &ctx, name, mode); -} - -LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size, - const char *name) -{ - return luaL_loadbufferx(L, buf, size, name, NULL); -} - -LUALIB_API int luaL_loadstring(lua_State *L, const char *s) -{ - return luaL_loadbuffer(L, s, strlen(s), s); -} - -/* -- Dump bytecode ------------------------------------------------------- */ - -LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data) -{ - cTValue *o = L->top-1; - api_check(L, L->top > L->base); - if (tvisfunc(o) && isluafunc(funcV(o))) - return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0); - else - return 1; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.c deleted file mode 100644 index d95ebeb1fe7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.c +++ /dev/null @@ -1,386 +0,0 @@ -/* -** Machine code management. -** Copyright (C) 2005-2015 Mike Pall. 
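
A short usage sketch for the loader API above: load a chunk from a memory buffer with the mode string "t", which restricts input to source text so precompiled bytecode is rejected, then run it in protected mode. The chunk name and code are placeholders.

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

int main(void)
{
  static const char code[] = "print('hello from a memory buffer')";
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  if (luaL_loadbufferx(L, code, sizeof(code)-1, "@demo", "t") != 0 ||
      lua_pcall(L, 0, 0, 0) != 0) {
    fprintf(stderr, "error: %s\n", lua_tostring(L, -1));  /* Load or runtime error. */
    lua_close(L);
    return 1;
  }
  lua_close(L);
  return 0;
}
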
See Copyright Notice in luajit.h -*/ - -#define lj_mcode_c -#define LUA_CORE - -#include "lj_obj.h" -#if LJ_HASJIT -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_jit.h" -#include "lj_mcode.h" -#include "lj_trace.h" -#include "lj_dispatch.h" -#endif -#if LJ_HASJIT || LJ_HASFFI -#include "lj_vm.h" -#endif - -/* -- OS-specific functions ----------------------------------------------- */ - -#if LJ_HASJIT || LJ_HASFFI - -/* Define this if you want to run LuaJIT with Valgrind. */ -#ifdef LUAJIT_USE_VALGRIND -#include -#endif - -#if LJ_TARGET_IOS -void sys_icache_invalidate(void *start, size_t len); -#endif - -/* Synchronize data/instruction cache. */ -void lj_mcode_sync(void *start, void *end) -{ -#ifdef LUAJIT_USE_VALGRIND - VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start); -#endif -#if LJ_TARGET_X86ORX64 - UNUSED(start); UNUSED(end); -#elif LJ_TARGET_IOS - sys_icache_invalidate(start, (char *)end-(char *)start); -#elif LJ_TARGET_PPC - lj_vm_cachesync(start, end); -#elif defined(__GNUC__) - __clear_cache(start, end); -#else -#error "Missing builtin to flush instruction cache" -#endif -} - -#endif - -#if LJ_HASJIT - -#if LJ_TARGET_WINDOWS - -#define WIN32_LEAN_AND_MEAN -#include - -#define MCPROT_RW PAGE_READWRITE -#define MCPROT_RX PAGE_EXECUTE_READ -#define MCPROT_RWX PAGE_EXECUTE_READWRITE - -static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot) -{ - void *p = VirtualAlloc((void *)hint, sz, - MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot); - if (!p && !hint) - lj_trace_err(J, LJ_TRERR_MCODEAL); - return p; -} - -static void mcode_free(jit_State *J, void *p, size_t sz) -{ - UNUSED(J); UNUSED(sz); - VirtualFree(p, 0, MEM_RELEASE); -} - -static int mcode_setprot(void *p, size_t sz, DWORD prot) -{ - DWORD oprot; - return !VirtualProtect(p, sz, prot, &oprot); -} - -#elif LJ_TARGET_POSIX - -#include - -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif - -#define MCPROT_RW (PROT_READ|PROT_WRITE) -#define MCPROT_RX (PROT_READ|PROT_EXEC) -#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC) - -static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) -{ - void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (p == MAP_FAILED) { - if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL); - p = NULL; - } - return p; -} - -static void mcode_free(jit_State *J, void *p, size_t sz) -{ - UNUSED(J); - munmap(p, sz); -} - -static int mcode_setprot(void *p, size_t sz, int prot) -{ - return mprotect(p, sz, prot); -} - -#elif LJ_64 - -#error "Missing OS support for explicit placement of executable memory" - -#else - -/* Fallback allocator. This will fail if memory is not executable by default. */ -#define LUAJIT_UNPROTECT_MCODE -#define MCPROT_RW 0 -#define MCPROT_RX 0 -#define MCPROT_RWX 0 - -static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) -{ - UNUSED(hint); UNUSED(prot); - return lj_mem_new(J->L, sz); -} - -static void mcode_free(jit_State *J, void *p, size_t sz) -{ - lj_mem_free(J2G(J), p, sz); -} - -#endif - -/* -- MCode area protection ----------------------------------------------- */ - -/* Define this ONLY if page protection twiddling becomes a bottleneck. */ -#ifdef LUAJIT_UNPROTECT_MCODE - -/* It's generally considered to be a potential security risk to have -** pages with simultaneous write *and* execute access in a process. -** -** Do not even think about using this mode for server processes or -** apps handling untrusted external data (such as a browser). 
-** -** The security risk is not in LuaJIT itself -- but if an adversary finds -** any *other* flaw in your C application logic, then any RWX memory page -** simplifies writing an exploit considerably. -*/ -#define MCPROT_GEN MCPROT_RWX -#define MCPROT_RUN MCPROT_RWX - -static void mcode_protect(jit_State *J, int prot) -{ - UNUSED(J); UNUSED(prot); -} - -#else - -/* This is the default behaviour and much safer: -** -** Most of the time the memory pages holding machine code are executable, -** but NONE of them is writable. -** -** The current memory area is marked read-write (but NOT executable) only -** during the short time window while the assembler generates machine code. -*/ -#define MCPROT_GEN MCPROT_RW -#define MCPROT_RUN MCPROT_RX - -/* Protection twiddling failed. Probably due to kernel security. */ -static LJ_NOINLINE void mcode_protfail(jit_State *J) -{ - lua_CFunction panic = J2G(J)->panic; - if (panic) { - lua_State *L = J->L; - setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT)); - panic(L); - } -} - -/* Change protection of MCode area. */ -static void mcode_protect(jit_State *J, int prot) -{ - if (J->mcprot != prot) { - if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot))) - mcode_protfail(J); - J->mcprot = prot; - } -} - -#endif - -/* -- MCode area allocation ----------------------------------------------- */ - -#if LJ_TARGET_X64 -#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47) -#else -#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000) -#endif - -#ifdef LJ_TARGET_JUMPRANGE - -/* Get memory within relative jump distance of our code in 64 bit mode. */ -static void *mcode_alloc(jit_State *J, size_t sz) -{ - /* Target an address in the static assembler code (64K aligned). - ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB. - ** Use half the jump range so every address in the range can reach any other. - */ -#if LJ_TARGET_MIPS - /* Use the middle of the 256MB-aligned region. */ - uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) + - 0x08000000u; -#else - uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff; -#endif - const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21); - /* First try a contiguous area below the last one. */ - uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0; - int i; - for (i = 0; i < 32; i++) { /* 32 attempts ought to be enough ... */ - if (mcode_validptr(hint)) { - void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN); - - if (mcode_validptr(p) && - ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range)) - return p; - if (p) mcode_free(J, p, sz); /* Free badly placed area. */ - } - /* Next try probing pseudo-random addresses. */ - do { - hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16; /* 64K aligned. */ - } while (!(hint + sz < range)); - hint = target + hint - (range>>1); - } - lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */ - return NULL; -} - -#else - -/* All memory addresses are reachable by relative jumps. */ -static void *mcode_alloc(jit_State *J, size_t sz) -{ -#ifdef __OpenBSD__ - /* Allow better executable memory allocation for OpenBSD W^X mode. 
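
The MCPROT_GEN/MCPROT_RUN scheme above keeps machine-code pages writable only while code is being emitted, then flips them to read+execute before running. Below is a minimal POSIX sketch of that flip with illustrative names and trimmed error handling; MAP_ANONYMOUS may be spelled MAP_ANON on some systems (as the #ifndef above handles), and a real JIT must also synchronize the instruction cache on non-x86 targets, as lj_mcode_sync does.

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

/* Sketch: copy code into a fresh RW mapping, then flip it to RX. */
static void *emit_code(const uint8_t *code, size_t len, size_t mapsz)
{
  void *p = mmap(NULL, mapsz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return NULL;
  memcpy(p, code, len);                             /* "Assemble" into the RW page. */
  if (mprotect(p, mapsz, PROT_READ | PROT_EXEC)) {  /* RW -> RX. */
    munmap(p, mapsz);
    return NULL;
  }
  return p;
}
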
*/ - void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN); - if (p && mcode_setprot(p, sz, MCPROT_GEN)) { - mcode_free(J, p, sz); - return NULL; - } - return p; -#else - return mcode_alloc_at(J, 0, sz, MCPROT_GEN); -#endif -} - -#endif - -/* -- MCode area management ----------------------------------------------- */ - -/* Linked list of MCode areas. */ -typedef struct MCLink { - MCode *next; /* Next area. */ - size_t size; /* Size of current area. */ -} MCLink; - -/* Allocate a new MCode area. */ -static void mcode_allocarea(jit_State *J) -{ - MCode *oldarea = J->mcarea; - size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10; - sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); - J->mcarea = (MCode *)mcode_alloc(J, sz); - J->szmcarea = sz; - J->mcprot = MCPROT_GEN; - J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea); - J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink)); - ((MCLink *)J->mcarea)->next = oldarea; - ((MCLink *)J->mcarea)->size = sz; - J->szallmcarea += sz; -} - -/* Free all MCode areas. */ -void lj_mcode_free(jit_State *J) -{ - MCode *mc = J->mcarea; - J->mcarea = NULL; - J->szallmcarea = 0; - while (mc) { - MCode *next = ((MCLink *)mc)->next; - mcode_free(J, mc, ((MCLink *)mc)->size); - mc = next; - } -} - -/* -- MCode transactions -------------------------------------------------- */ - -/* Reserve the remainder of the current MCode area. */ -MCode *lj_mcode_reserve(jit_State *J, MCode **lim) -{ - if (!J->mcarea) - mcode_allocarea(J); - else - mcode_protect(J, MCPROT_GEN); - *lim = J->mcbot; - return J->mctop; -} - -/* Commit the top part of the current MCode area. */ -void lj_mcode_commit(jit_State *J, MCode *top) -{ - J->mctop = top; - mcode_protect(J, MCPROT_RUN); -} - -/* Abort the reservation. */ -void lj_mcode_abort(jit_State *J) -{ - if (J->mcarea) - mcode_protect(J, MCPROT_RUN); -} - -/* Set/reset protection to allow patching of MCode areas. */ -MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish) -{ -#ifdef LUAJIT_UNPROTECT_MCODE - UNUSED(J); UNUSED(ptr); UNUSED(finish); - return NULL; -#else - if (finish) { - if (J->mcarea == ptr) - mcode_protect(J, MCPROT_RUN); - else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN))) - mcode_protfail(J); - return NULL; - } else { - MCode *mc = J->mcarea; - /* Try current area first to use the protection cache. */ - if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) { - mcode_protect(J, MCPROT_GEN); - return mc; - } - /* Otherwise search through the list of MCode areas. */ - for (;;) { - mc = ((MCLink *)mc)->next; - lua_assert(mc != NULL); - if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) { - if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN))) - mcode_protfail(J); - return mc; - } - } - } -#endif -} - -/* Limit of MCode reservation reached. */ -void lj_mcode_limiterr(jit_State *J, size_t need) -{ - size_t sizemcode, maxmcode; - lj_mcode_abort(J); - sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10; - sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); - maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10; - if ((size_t)need > sizemcode) - lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */ - if (J->szallmcarea + sizemcode > maxmcode) - lj_trace_err(J, LJ_TRERR_MCODEAL); - mcode_allocarea(J); - lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. 
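
Each MCode area above begins with an MCLink header, so the areas form an intrusive linked list that lj_mcode_free can walk and release without any separate bookkeeping allocation. A small sketch of the same pattern using malloc; AreaLink and the function names are illustrative.

#include <stdlib.h>

typedef struct AreaLink {
  struct AreaLink *next;  /* Next (older) area. */
  size_t size;            /* Size of this area, including the header. */
} AreaLink;

static void *area_alloc(AreaLink **head, size_t payload)
{
  size_t sz = sizeof(AreaLink) + payload;
  AreaLink *a = (AreaLink *)malloc(sz);
  if (!a) return NULL;
  a->next = *head;        /* Link the new area in front of the list. */
  a->size = sz;
  *head = a;
  return (void *)(a + 1); /* Usable memory starts after the header. */
}

static void area_free_all(AreaLink **head)
{
  AreaLink *a = *head;
  *head = NULL;
  while (a) {             /* Walk the intrusive list and free each area. */
    AreaLink *next = a->next;
    free(a);
    a = next;
  }
}
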
*/ -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.h deleted file mode 100644 index ee604523496..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_mcode.h +++ /dev/null @@ -1,30 +0,0 @@ -/* -** Machine code management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_MCODE_H -#define _LJ_MCODE_H - -#include "lj_obj.h" - -#if LJ_HASJIT || LJ_HASFFI -LJ_FUNC void lj_mcode_sync(void *start, void *end); -#endif - -#if LJ_HASJIT - -#include "lj_jit.h" - -LJ_FUNC void lj_mcode_free(jit_State *J); -LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim); -LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m); -LJ_FUNC void lj_mcode_abort(jit_State *J); -LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish); -LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need); - -#define lj_mcode_commitbot(J, m) (J->mcbot = (m)) - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.c deleted file mode 100644 index 104ecf07c3d..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.c +++ /dev/null @@ -1,477 +0,0 @@ -/* -** Metamethod handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_meta_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_meta.h" -#include "lj_frame.h" -#include "lj_bc.h" -#include "lj_vm.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" -#include "lj_lib.h" - -/* -- Metamethod handling ------------------------------------------------- */ - -/* String interning of metamethod names for fast indexing. */ -void lj_meta_init(lua_State *L) -{ -#define MMNAME(name) "__" #name - const char *metanames = MMDEF(MMNAME); -#undef MMNAME - global_State *g = G(L); - const char *p, *q; - uint32_t mm; - for (mm = 0, p = metanames; *p; mm++, p = q) { - GCstr *s; - for (q = p+2; *q && *q != '_'; q++) ; - s = lj_str_new(L, p, (size_t)(q-p)); - /* NOBARRIER: g->gcroot[] is a GC root. */ - setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s)); - } -} - -/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */ -cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name) -{ - cTValue *mo = lj_tab_getstr(mt, name); - lua_assert(mm <= MM_FAST); - if (!mo || tvisnil(mo)) { /* No metamethod? */ - mt->nomm |= (uint8_t)(1u<metatable); - else if (tvisudata(o)) - mt = tabref(udataV(o)->metatable); - else - mt = tabref(basemt_obj(G(L), o)); - if (mt) { - cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm)); - if (mo) - return mo; - } - return niltv(L); -} - -#if LJ_HASFFI -/* Tailcall from C function. */ -int lj_meta_tailcall(lua_State *L, cTValue *tv) -{ - TValue *base = L->base; - TValue *top = L->top; - const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */ - copyTV(L, base-1-LJ_FR2, tv); /* Replace frame with new object. */ - if (LJ_FR2) - (top++)->u64 = LJ_CONT_TAILCALL; - else - top->u32.lo = LJ_CONT_TAILCALL; - setframe_pc(top++, pc); - if (LJ_FR2) top++; - setframe_gc(top, obj2gco(L), LJ_TTHREAD); /* Dummy frame object. 
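
lj_meta_cache above implements negative caching: when a fast metamethod turns out to be absent, a bit for it is set in the metatable's nomm byte so later lookups skip the hash probe entirely, and the interpreter clears nomm whenever the table is written to. A compact sketch of the idea; MiniTab, meta_fast and the enum are illustrative, and the real lookup is a string hash probe rather than an array index.

#include <stdint.h>

enum { MM_index, MM_newindex, MM_len, MM__FAST };  /* Illustrative subset. */

typedef struct MiniTab {
  uint8_t nomm;              /* Bit i set => metamethod i is known absent. */
  const void *mm[MM__FAST];  /* Stand-in for the real hash-table lookup. */
} MiniTab;

static const void *meta_fast(MiniTab *mt, int mm)
{
  if (!mt || (mt->nomm & (1u << mm)))
    return NULL;                      /* Negative cache hit: skip the lookup. */
  if (mt->mm[mm] == NULL) {
    mt->nomm |= (uint8_t)(1u << mm);  /* Cache the miss for next time. */
    return NULL;
  }
  return mt->mm[mm];
}
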
*/ - setframe_ftsz(top, ((char *)(top+1) - (char *)base) + FRAME_CONT); - L->base = L->top = top+1; - /* - ** before: [old_mo|PC] [... ...] - ** ^base ^top - ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta] - ** ^base/top - ** tailcall: [new_mo|PC] [... ...] - ** ^base ^top - */ - return 0; -} -#endif - -/* Setup call to metamethod to be run by Assembler VM. */ -static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo, - cTValue *a, cTValue *b) -{ - /* - ** |-- framesize -> top top+1 top+2 top+3 - ** before: [func slots ...] - ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b] - ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b] - ** ^-- func base ^-- mm base - ** after mm: [func slots ...] [result] - ** ^-- copy to base[PC_RA] --/ for lj_cont_ra - ** istruecond + branch for lj_cont_cond* - ** ignore for lj_cont_nop - ** next PC: [func slots ...] - */ - TValue *top = L->top; - if (curr_funcisL(L)) top = curr_topL(L); - setcont(top++, cont); /* Assembler VM stores PC in upper word or FR2. */ - if (LJ_FR2) setnilV(top++); - copyTV(L, top++, mo); /* Store metamethod and two arguments. */ - if (LJ_FR2) setnilV(top++); - copyTV(L, top, a); - copyTV(L, top+1, b); - return top; /* Return new base. */ -} - -/* -- C helpers for some instructions, called from assembler VM ----------- */ - -/* Helper for TGET*. __index chain and metamethod. */ -cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k) -{ - int loop; - for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) { - cTValue *mo; - if (LJ_LIKELY(tvistab(o))) { - GCtab *t = tabV(o); - cTValue *tv = lj_tab_get(L, t, k); - if (!tvisnil(tv) || - !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index))) - return tv; - } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) { - lj_err_optype(L, o, LJ_ERR_OPINDEX); - return NULL; /* unreachable */ - } - if (tvisfunc(mo)) { - L->top = mmcall(L, lj_cont_ra, mo, o, k); - return NULL; /* Trigger metamethod call. */ - } - o = mo; - } - lj_err_msg(L, LJ_ERR_GETLOOP); - return NULL; /* unreachable */ -} - -/* Helper for TSET*. __newindex chain and metamethod. */ -TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k) -{ - TValue tmp; - int loop; - for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) { - cTValue *mo; - if (LJ_LIKELY(tvistab(o))) { - GCtab *t = tabV(o); - cTValue *tv = lj_tab_get(L, t, k); - if (LJ_LIKELY(!tvisnil(tv))) { - t->nomm = 0; /* Invalidate negative metamethod cache. */ - lj_gc_anybarriert(L, t); - return (TValue *)tv; - } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) { - t->nomm = 0; /* Invalidate negative metamethod cache. */ - lj_gc_anybarriert(L, t); - if (tv != niltv(L)) - return (TValue *)tv; - if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX); - else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; } - else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX); - return lj_tab_newkey(L, t, k); - } - } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) { - lj_err_optype(L, o, LJ_ERR_OPINDEX); - return NULL; /* unreachable */ - } - if (tvisfunc(mo)) { - L->top = mmcall(L, lj_cont_nop, mo, o, k); - /* L->top+2 = v filled in by caller. */ - return NULL; /* Trigger metamethod call. 
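
lj_meta_tget above walks the __index chain with a bounded loop (LJ_MAX_IDXCHAIN), so cyclic metatables produce a "loop in gettable" error instead of hanging. A very reduced sketch of that shape, with placeholder types standing in for tables and metamethod lookups:

#define MAX_IDXCHAIN 100   /* Illustrative bound, mirroring LJ_MAX_IDXCHAIN. */

typedef struct Node {
  const void *value;       /* Stand-in for "value stored under the key". */
  struct Node *index_mm;   /* Stand-in for a table-valued __index. */
} Node;

/* Returns the value, or NULL with *err set if the chain is too deep. */
static const void *chained_get(const Node *n, int *err)
{
  int loop;
  *err = 0;
  for (loop = 0; loop < MAX_IDXCHAIN; loop++) {
    if (n->value || !n->index_mm)
      return n->value;     /* Found a value, or nothing left to follow. */
    n = n->index_mm;       /* Retry the lookup on the __index table. */
  }
  *err = 1;                /* Chain too long: likely a metatable cycle. */
  return NULL;
}
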
*/ - } - copyTV(L, &tmp, mo); - o = &tmp; - } - lj_err_msg(L, LJ_ERR_SETLOOP); - return NULL; /* unreachable */ -} - -static cTValue *str2num(cTValue *o, TValue *n) -{ - if (tvisnum(o)) - return o; - else if (tvisint(o)) - return (setnumV(n, (lua_Number)intV(o)), n); - else if (tvisstr(o) && lj_strscan_num(strV(o), n)) - return n; - else - return NULL; -} - -/* Helper for arithmetic instructions. Coercion, metamethod. */ -TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc, - BCReg op) -{ - MMS mm = bcmode_mm(op); - TValue tempb, tempc; - cTValue *b, *c; - if ((b = str2num(rb, &tempb)) != NULL && - (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */ - setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add)); - return NULL; - } else { - cTValue *mo = lj_meta_lookup(L, rb, mm); - if (tvisnil(mo)) { - mo = lj_meta_lookup(L, rc, mm); - if (tvisnil(mo)) { - if (str2num(rb, &tempb) == NULL) rc = rb; - lj_err_optype(L, rc, LJ_ERR_OPARITH); - return NULL; /* unreachable */ - } - } - return mmcall(L, lj_cont_ra, mo, rb, rc); - } -} - -/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */ -TValue *lj_meta_cat(lua_State *L, TValue *top, int left) -{ - int fromc = 0; - if (left < 0) { left = -left; fromc = 1; } - do { - if (!(tvisstr(top) || tvisnumber(top)) || - !(tvisstr(top-1) || tvisnumber(top-1))) { - cTValue *mo = lj_meta_lookup(L, top-1, MM_concat); - if (tvisnil(mo)) { - mo = lj_meta_lookup(L, top, MM_concat); - if (tvisnil(mo)) { - if (tvisstr(top-1) || tvisnumber(top-1)) top++; - lj_err_optype(L, top-1, LJ_ERR_OPCAT); - return NULL; /* unreachable */ - } - } - /* One of the top two elements is not a string, call __cat metamethod: - ** - ** before: [...][CAT stack .........................] - ** top-1 top top+1 top+2 - ** pick two: [...][CAT stack ...] [o1] [o2] - ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2] - ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2] - ** ^-- func base ^-- mm base - ** after mm: [...][CAT stack ...] <--push-- [result] - ** next step: [...][CAT stack .............] - */ - copyTV(L, top+2*LJ_FR2+2, top); /* Carefully ordered stack copies! */ - copyTV(L, top+2*LJ_FR2+1, top-1); - copyTV(L, top+LJ_FR2, mo); - setcont(top-1, lj_cont_cat); - if (LJ_FR2) { setnilV(top); setnilV(top+2); top += 2; } - return top+1; /* Trigger metamethod call. */ - } else { - /* Pick as many strings as possible from the top and concatenate them: - ** - ** before: [...][CAT stack ...........................] - ** pick str: [...][CAT stack ...] [...... strings ......] - ** concat: [...][CAT stack ...] [result] - ** next step: [...][CAT stack ............] - */ - TValue *e, *o = top; - uint64_t tlen = tvisstr(o) ? strV(o)->len : STRFMT_MAXBUF_NUM; - char *p, *buf; - do { - o--; tlen += tvisstr(o) ? strV(o)->len : STRFMT_MAXBUF_NUM; - } while (--left > 0 && (tvisstr(o-1) || tvisnumber(o-1))); - if (tlen >= LJ_MAX_STR) lj_err_msg(L, LJ_ERR_STROV); - p = buf = lj_buf_tmp(L, (MSize)tlen); - for (e = top, top = o; o <= e; o++) { - if (tvisstr(o)) { - GCstr *s = strV(o); - MSize len = s->len; - p = lj_buf_wmem(p, strdata(s), len); - } else if (tvisint(o)) { - p = lj_strfmt_wint(p, intV(o)); - } else { - lua_assert(tvisnum(o)); - p = lj_strfmt_wnum(p, o); - } - } - setstrV(L, top, lj_str_new(L, buf, (size_t)(p-buf))); - } - } while (left >= 1); - if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) { - if (!fromc) L->top = curr_topL(L); - lj_gc_step(L); - } - return NULL; -} - -/* Helper for LEN. __len metamethod. 
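
lj_meta_cat above concatenates a run of string/number operands in two passes: it first sums the lengths to size one temporary buffer, then copies each piece exactly once. A generic sketch of that two-pass shape; concat_run is an illustrative helper, and the real code additionally reserves space for numbers that still need formatting.

#include <stdlib.h>
#include <string.h>

static char *concat_run(const char *const *parts, const size_t *lens, size_t n,
                        size_t *outlen)
{
  size_t total = 0, i;
  char *buf, *p;
  for (i = 0; i < n; i++)        /* Pass 1: compute the total length. */
    total += lens[i];
  buf = (char *)malloc(total + 1);
  if (!buf) return NULL;
  for (p = buf, i = 0; i < n; i++) {  /* Pass 2: copy each piece once. */
    memcpy(p, parts[i], lens[i]);
    p += lens[i];
  }
  *p = '\0';
  *outlen = total;
  return buf;
}
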
*/ -TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o) -{ - cTValue *mo = lj_meta_lookup(L, o, MM_len); - if (tvisnil(mo)) { - if (LJ_52 && tvistab(o)) - tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<gch.metatable), MM_eq); - if (mo) { - TValue *top; - uint32_t it; - if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) { - cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq); - if (mo2 == NULL || !lj_obj_equal(mo, mo2)) - return (TValue *)(intptr_t)ne; - } - top = curr_top(L); - setcont(top++, ne ? lj_cont_condf : lj_cont_condt); - if (LJ_FR2) setnilV(top++); - copyTV(L, top++, mo); - if (LJ_FR2) setnilV(top++); - it = ~(uint32_t)o1->gch.gct; - setgcV(L, top, o1, it); - setgcV(L, top+1, o2, it); - return top; /* Trigger metamethod call. */ - } - return (TValue *)(intptr_t)ne; -} - -#if LJ_HASFFI -TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins) -{ - ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt; - int op = (int)bc_op(ins) & ~1; - TValue tv; - cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)]; - cTValue *o1mm = o1; - if (op == BC_ISEQV) { - o2 = &L->base[bc_d(ins)]; - if (!tviscdata(o1mm)) o1mm = o2; - } else if (op == BC_ISEQS) { - setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins)))); - o2 = &tv; - } else if (op == BC_ISEQN) { - o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)]; - } else { - lua_assert(op == BC_ISEQP); - setpriV(&tv, ~bc_d(ins)); - o2 = &tv; - } - mo = lj_meta_lookup(L, o1mm, MM_eq); - if (LJ_LIKELY(!tvisnil(mo))) - return mmcall(L, cont, mo, o1, o2); - else - return (TValue *)(intptr_t)(bc_op(ins) & 1); -} -#endif - -/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */ -TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op) -{ - if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) { - ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt; - MMS mm = (op & 2) ? MM_le : MM_lt; - cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm); - if (LJ_UNLIKELY(tvisnil(mo))) goto err; - return mmcall(L, cont, mo, o1, o2); - } else if (LJ_52 || itype(o1) == itype(o2)) { - /* Never called with two numbers. */ - if (tvisstr(o1) && tvisstr(o2)) { - int32_t res = lj_str_cmp(strV(o1), strV(o2)); - return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1)); - } else { - trymt: - while (1) { - ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt; - MMS mm = (op & 2) ? MM_le : MM_lt; - cTValue *mo = lj_meta_lookup(L, o1, mm); -#if LJ_52 - if (tvisnil(mo) && tvisnil((mo = lj_meta_lookup(L, o2, mm)))) -#else - cTValue *mo2 = lj_meta_lookup(L, o2, mm); - if (tvisnil(mo) || !lj_obj_equal(mo, mo2)) -#endif - { - if (op & 2) { /* MM_le not found: retry with MM_lt. */ - cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */ - op ^= 3; /* Use LT and flip condition. */ - continue; - } - goto err; - } - return mmcall(L, cont, mo, o1, o2); - } - } - } else if (tvisbool(o1) && tvisbool(o2)) { - goto trymt; - } else { - err: - lj_err_comp(L, o1, o2); - return NULL; - } -} - -/* Helper for ISTYPE and ISNUM. Implicit coercion or error. */ -void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp) -{ - L->top = curr_topL(L); - ra++; tp--; - lua_assert(LJ_DUALNUM || tp != ~LJ_TNUMX); /* ISTYPE -> ISNUM broken. */ - if (LJ_DUALNUM && tp == ~LJ_TNUMX) lj_lib_checkint(L, ra); - else if (tp == ~LJ_TNUMX+1) lj_lib_checknum(L, ra); - else if (tp == ~LJ_TSTR) lj_lib_checkstr(L, ra); - else lj_err_argtype(L, ra, lj_obj_itypename[tp]); -} - -/* Helper for calls. 
__call metamethod. */ -void lj_meta_call(lua_State *L, TValue *func, TValue *top) -{ - cTValue *mo = lj_meta_lookup(L, func, MM_call); - TValue *p; - if (!tvisfunc(mo)) - lj_err_optype_call(L, func); - for (p = top; p > func+2*LJ_FR2; p--) copyTV(L, p, p-1); - if (LJ_FR2) copyTV(L, func+2, func); - copyTV(L, func, mo); -} - -/* Helper for FORI. Coercion. */ -void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o) -{ - if (!lj_strscan_numberobj(o)) lj_err_msg(L, LJ_ERR_FORINIT); - if (!lj_strscan_numberobj(o+1)) lj_err_msg(L, LJ_ERR_FORLIM); - if (!lj_strscan_numberobj(o+2)) lj_err_msg(L, LJ_ERR_FORSTEP); - if (LJ_DUALNUM) { - /* Ensure all slots are integers or all slots are numbers. */ - int32_t k[3]; - int nint = 0; - ptrdiff_t i; - for (i = 0; i <= 2; i++) { - if (tvisint(o+i)) { - k[i] = intV(o+i); nint++; - } else { - k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i)); - } - } - if (nint == 3) { /* Narrow to integers. */ - setintV(o, k[0]); - setintV(o+1, k[1]); - setintV(o+2, k[2]); - } else if (nint != 0) { /* Widen to numbers. */ - if (tvisint(o)) setnumV(o, (lua_Number)intV(o)); - if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1)); - if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2)); - } - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.h deleted file mode 100644 index 7f71633362b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_meta.h +++ /dev/null @@ -1,38 +0,0 @@ -/* -** Metamethod handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_META_H -#define _LJ_META_H - -#include "lj_obj.h" - -/* Metamethod handling */ -LJ_FUNC void lj_meta_init(lua_State *L); -LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name); -LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm); -#if LJ_HASFFI -LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv); -#endif - -#define lj_meta_fastg(g, mt, mm) \ - ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \ - lj_meta_cache(mt, mm, mmname_str(g, mm))) -#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm) - -/* C helpers for some instructions, called from assembler VM. */ -LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k); -LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k); -LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, - cTValue *rc, BCReg op); -LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left); -LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o); -LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne); -LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins); -LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op); -LJ_FUNCA void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp); -LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top); -LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.c deleted file mode 100644 index b78d2c8d835..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.c +++ /dev/null @@ -1,50 +0,0 @@ -/* -** Miscellaneous object handling. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#define lj_obj_c -#define LUA_CORE - -#include "lj_obj.h" - -/* Object type names. */ -LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */ - "no value", "nil", "boolean", "userdata", "number", "string", - "table", "function", "userdata", "thread", "proto", "cdata" -}; - -LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */ - "nil", "boolean", "boolean", "userdata", "string", "upval", "thread", - "proto", "function", "trace", "cdata", "table", "userdata", "number" -}; - -/* Compare two objects without calling metamethods. */ -int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2) -{ - if (itype(o1) == itype(o2)) { - if (tvispri(o1)) - return 1; - if (!tvisnum(o1)) - return gcrefeq(o1->gcr, o2->gcr); - } else if (!tvisnumber(o1) || !tvisnumber(o2)) { - return 0; - } - return numberVnum(o1) == numberVnum(o2); -} - -/* Return pointer to object or its object data. */ -const void * LJ_FASTCALL lj_obj_ptr(cTValue *o) -{ - if (tvisudata(o)) - return uddata(udataV(o)); - else if (tvislightud(o)) - return lightudV(o); - else if (LJ_HASFFI && tviscdata(o)) - return cdataptr(cdataV(o)); - else if (tvisgcv(o)) - return gcV(o); - else - return NULL; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.h deleted file mode 100644 index 74ed59bc726..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_obj.h +++ /dev/null @@ -1,976 +0,0 @@ -/* -** LuaJIT VM tags, values and objects. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#ifndef _LJ_OBJ_H -#define _LJ_OBJ_H - -#include "lua.h" -#include "lj_def.h" -#include "lj_arch.h" - -/* -- Memory references (32 bit address space) ---------------------------- */ - -/* Memory and GC object sizes. */ -typedef uint32_t MSize; -#if LJ_GC64 -typedef uint64_t GCSize; -#else -typedef uint32_t GCSize; -#endif - -/* Memory reference */ -typedef struct MRef { -#if LJ_GC64 - uint64_t ptr64; /* True 64 bit pointer. */ -#else - uint32_t ptr32; /* Pseudo 32 bit pointer. */ -#endif -} MRef; - -#if LJ_GC64 -#define mref(r, t) ((t *)(void *)(r).ptr64) - -#define setmref(r, p) ((r).ptr64 = (uint64_t)(void *)(p)) -#define setmrefr(r, v) ((r).ptr64 = (v).ptr64) -#else -#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32) - -#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p)) -#define setmrefr(r, v) ((r).ptr32 = (v).ptr32) -#endif - -/* -- GC object references (32 bit address space) ------------------------- */ - -/* GCobj reference */ -typedef struct GCRef { -#if LJ_GC64 - uint64_t gcptr64; /* True 64 bit pointer. */ -#else - uint32_t gcptr32; /* Pseudo 32 bit pointer. */ -#endif -} GCRef; - -/* Common GC header for all collectable objects. */ -#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct -/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. 
*/ - -#if LJ_GC64 -#define gcref(r) ((GCobj *)(r).gcptr64) -#define gcrefp(r, t) ((t *)(void *)(r).gcptr64) -#define gcrefu(r) ((r).gcptr64) -#define gcrefeq(r1, r2) ((r1).gcptr64 == (r2).gcptr64) - -#define setgcref(r, gc) ((r).gcptr64 = (uint64_t)&(gc)->gch) -#define setgcreft(r, gc, it) \ - (r).gcptr64 = (uint64_t)&(gc)->gch | (((uint64_t)(it)) << 47) -#define setgcrefp(r, p) ((r).gcptr64 = (uint64_t)(p)) -#define setgcrefnull(r) ((r).gcptr64 = 0) -#define setgcrefr(r, v) ((r).gcptr64 = (v).gcptr64) -#else -#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32) -#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32) -#define gcrefu(r) ((r).gcptr32) -#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32) - -#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch) -#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p)) -#define setgcrefnull(r) ((r).gcptr32 = 0) -#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32) -#endif - -#define gcnext(gc) (gcref((gc)->gch.nextgc)) - -/* IMPORTANT NOTE: -** -** All uses of the setgcref* macros MUST be accompanied with a write barrier. -** -** This is to ensure the integrity of the incremental GC. The invariant -** to preserve is that a black object never points to a white object. -** I.e. never store a white object into a field of a black object. -** -** It's ok to LEAVE OUT the write barrier ONLY in the following cases: -** - The source is not a GC object (NULL). -** - The target is a GC root. I.e. everything in global_State. -** - The target is a lua_State field (threads are never black). -** - The target is a stack slot, see setgcV et al. -** - The target is an open upvalue, i.e. pointing to a stack slot. -** - The target is a newly created object (i.e. marked white). But make -** sure nothing invokes the GC inbetween. -** - The target and the source are the same object (self-reference). -** - The target already contains the object (e.g. moving elements around). -** -** The most common case is a store to a stack slot. All other cases where -** a barrier has been omitted are annotated with a NOBARRIER comment. -** -** The same logic applies for stores to table slots (array part or hash -** part). ALL uses of lj_tab_set* require a barrier for the stored value -** *and* the stored key, based on the above rules. In practice this means -** a barrier is needed if *either* of the key or value are a GC object. -** -** It's ok to LEAVE OUT the write barrier in the following special cases: -** - The stored value is nil. The key doesn't matter because it's either -** not resurrected or lj_tab_newkey() will take care of the key barrier. -** - The key doesn't matter if the *previously* stored value is guaranteed -** to be non-nil (because the key is kept alive in the table). -** - The key doesn't matter if it's guaranteed not to be part of the table, -** since lj_tab_newkey() takes care of the key barrier. This applies -** trivially to new tables, but watch out for resurrected keys. Storing -** a nil value leaves the key in the table! -** -** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used -** by the interpreter for all table stores. -** -** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark -** dead keys in tables. The reference is left in, but it's guaranteed to -** be never dereferenced as long as the value is nil. It's ok if the key is -** freed or if any object subsequently gets the same address. -** -** Not destroying dead keys helps to keep key hash slots stable. 
This avoids -** specialization back-off for HREFK when a value flips between nil and -** non-nil and the GC gets in the way. It also allows safely hoisting -** HREF/HREFK across GC steps. Dead keys are only removed if a table is -** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize. -** -** The trade-off is that a write barrier for tables must take the key into -** account, too. Implicitly resurrecting the key by storing a non-nil value -** may invalidate the incremental GC invariant. -*/ - -/* -- Common type definitions --------------------------------------------- */ - -/* Types for handling bytecodes. Need this here, details in lj_bc.h. */ -typedef uint32_t BCIns; /* Bytecode instruction. */ -typedef uint32_t BCPos; /* Bytecode position. */ -typedef uint32_t BCReg; /* Bytecode register. */ -typedef int32_t BCLine; /* Bytecode line number. */ - -/* Internal assembler functions. Never call these directly from C. */ -typedef void (*ASMFunction)(void); - -/* Resizable string buffer. Need this here, details in lj_buf.h. */ -typedef struct SBuf { - MRef p; /* String buffer pointer. */ - MRef e; /* String buffer end pointer. */ - MRef b; /* String buffer base. */ - MRef L; /* lua_State, used for buffer resizing. */ -} SBuf; - -/* -- Tags and values ----------------------------------------------------- */ - -/* Frame link. */ -typedef union { - int32_t ftsz; /* Frame type and size of previous frame. */ - MRef pcr; /* Or PC for Lua frames. */ -} FrameLink; - -/* Tagged value. */ -typedef LJ_ALIGN(8) union TValue { - uint64_t u64; /* 64 bit pattern overlaps number. */ - lua_Number n; /* Number object overlaps split tag/value object. */ -#if LJ_GC64 - GCRef gcr; /* GCobj reference with tag. */ - int64_t it64; - struct { - LJ_ENDIAN_LOHI( - int32_t i; /* Integer value. */ - , uint32_t it; /* Internal object tag. Must overlap MSW of number. */ - ) - }; -#else - struct { - LJ_ENDIAN_LOHI( - union { - GCRef gcr; /* GCobj reference (if any). */ - int32_t i; /* Integer value. */ - }; - , uint32_t it; /* Internal object tag. Must overlap MSW of number. */ - ) - }; -#endif -#if LJ_FR2 - int64_t ftsz; /* Frame type and size of previous frame, or PC. */ -#else - struct { - LJ_ENDIAN_LOHI( - GCRef func; /* Function for next frame (or dummy L). */ - , FrameLink tp; /* Link to previous frame. */ - ) - } fr; -#endif - struct { - LJ_ENDIAN_LOHI( - uint32_t lo; /* Lower 32 bits of number. */ - , uint32_t hi; /* Upper 32 bits of number. */ - ) - } u32; -} TValue; - -typedef const TValue cTValue; - -#define tvref(r) (mref(r, TValue)) - -/* More external and GCobj tags for internal objects. */ -#define LAST_TT LUA_TTHREAD -#define LUA_TPROTO (LAST_TT+1) -#define LUA_TCDATA (LAST_TT+2) - -/* Internal object tags. -** -** Format for 32 bit GC references (!LJ_GC64): -** -** Internal tags overlap the MSW of a number object (must be a double). -** Interpreted as a double these are special NaNs. The FPU only generates -** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available -** for use as internal tags. Small negative numbers are used to shorten the -** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate). 
-** -** ---MSW---.---LSW--- -** primitive types | itype | | -** lightuserdata | itype | void * | (32 bit platforms) -** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers) -** GC objects | itype | GCRef | -** int (LJ_DUALNUM)| itype | int | -** number -------double------ -** -** Format for 64 bit GC references (LJ_GC64): -** -** The upper 13 bits must be 1 (0xfff8...) for a special NaN. The next -** 4 bits hold the internal tag. The lowest 47 bits either hold a pointer, -** a zero-extended 32 bit integer or all bits set to 1 for primitive types. -** -** ------MSW------.------LSW------ -** primitive types |1..1|itype|1..................1| -** GC objects/lightud |1..1|itype|-------GCRef--------| -** int (LJ_DUALNUM) |1..1|itype|0..0|-----int-------| -** number ------------double------------- -** -** ORDER LJ_T -** Primitive types nil/false/true must be first, lightuserdata next. -** GC objects are at the end, table/userdata must be lowest. -** Also check lj_ir.h for similar ordering constraints. -*/ -#define LJ_TNIL (~0u) -#define LJ_TFALSE (~1u) -#define LJ_TTRUE (~2u) -#define LJ_TLIGHTUD (~3u) -#define LJ_TSTR (~4u) -#define LJ_TUPVAL (~5u) -#define LJ_TTHREAD (~6u) -#define LJ_TPROTO (~7u) -#define LJ_TFUNC (~8u) -#define LJ_TTRACE (~9u) -#define LJ_TCDATA (~10u) -#define LJ_TTAB (~11u) -#define LJ_TUDATA (~12u) -/* This is just the canonical number type used in some places. */ -#define LJ_TNUMX (~13u) - -/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */ -#if LJ_64 && !LJ_GC64 -#define LJ_TISNUM 0xfffeffffu -#else -#define LJ_TISNUM LJ_TNUMX -#endif -#define LJ_TISTRUECOND LJ_TFALSE -#define LJ_TISPRI LJ_TTRUE -#define LJ_TISGCV (LJ_TSTR+1) -#define LJ_TISTABUD LJ_TTAB - -#if LJ_GC64 -#define LJ_GCVMASK (((uint64_t)1 << 47) - 1) -#endif - -/* -- String object ------------------------------------------------------- */ - -/* String object header. String payload follows. */ -typedef struct GCstr { - GCHeader; - uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */ - uint8_t unused; - MSize hash; /* Hash of string. */ - MSize len; /* Size of string. */ -} GCstr; - -#define strref(r) (&gcref((r))->str) -#define strdata(s) ((const char *)((s)+1)) -#define strdatawr(s) ((char *)((s)+1)) -#define strVdata(o) strdata(strV(o)) -#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1) - -/* -- Userdata object ----------------------------------------------------- */ - -/* Userdata object. Payload follows. */ -typedef struct GCudata { - GCHeader; - uint8_t udtype; /* Userdata type. */ - uint8_t unused2; - GCRef env; /* Should be at same offset in GCfunc. */ - MSize len; /* Size of payload. */ - GCRef metatable; /* Must be at same offset in GCtab. */ - uint32_t align1; /* To force 8 byte alignment of the payload. */ -} GCudata; - -/* Userdata types. */ -enum { - UDTYPE_USERDATA, /* Regular userdata. */ - UDTYPE_IO_FILE, /* I/O library FILE. */ - UDTYPE_FFI_CLIB, /* FFI C library namespace. */ - UDTYPE__MAX -}; - -#define uddata(u) ((void *)((u)+1)) -#define sizeudata(u) (sizeof(struct GCudata)+(u)->len) - -/* -- C data object ------------------------------------------------------- */ - -/* C data object. Payload follows. */ -typedef struct GCcdata { - GCHeader; - uint16_t ctypeid; /* C type ID. */ -} GCcdata; - -/* Prepended to variable-sized or realigned C data objects. */ -typedef struct GCcdataVar { - uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */ - uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). 
*/ - MSize len; /* Size of payload. */ -} GCcdataVar; - -#define cdataptr(cd) ((void *)((cd)+1)) -#define cdataisv(cd) ((cd)->marked & 0x80) -#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar))) -#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len) -#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra) -#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset)) - -/* -- Prototype object ---------------------------------------------------- */ - -#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef)) -#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1)) - -typedef struct GCproto { - GCHeader; - uint8_t numparams; /* Number of parameters. */ - uint8_t framesize; /* Fixed frame size. */ - MSize sizebc; /* Number of bytecode instructions. */ -#if LJ_GC64 - uint32_t unused_gc64; -#endif - GCRef gclist; - MRef k; /* Split constant array (points to the middle). */ - MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */ - MSize sizekgc; /* Number of collectable constants. */ - MSize sizekn; /* Number of lua_Number constants. */ - MSize sizept; /* Total size including colocated arrays. */ - uint8_t sizeuv; /* Number of upvalues. */ - uint8_t flags; /* Miscellaneous flags (see below). */ - uint16_t trace; /* Anchor for chain of root traces. */ - /* ------ The following fields are for debugging/tracebacks only ------ */ - GCRef chunkname; /* Name of the chunk this function was defined in. */ - BCLine firstline; /* First line of the function definition. */ - BCLine numline; /* Number of lines for the function definition. */ - MRef lineinfo; /* Compressed map from bytecode ins. to source line. */ - MRef uvinfo; /* Upvalue names. */ - MRef varinfo; /* Names and compressed extents of local variables. */ -} GCproto; - -/* Flags for prototype. */ -#define PROTO_CHILD 0x01 /* Has child prototypes. */ -#define PROTO_VARARG 0x02 /* Vararg function. */ -#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */ -#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */ -#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */ -/* Only used during parsing. */ -#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */ -#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */ -/* Top bits used for counting created closures. */ -#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */ -#define PROTO_CLC_BITS 3 -#define PROTO_CLC_POLY (3*PROTO_CLCOUNT) /* Polymorphic threshold. */ - -#define PROTO_UV_LOCAL 0x8000 /* Upvalue for local slot. */ -#define PROTO_UV_IMMUTABLE 0x4000 /* Immutable upvalue. */ - -#define proto_kgc(pt, idx) \ - check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \ - gcref(mref((pt)->k, GCRef)[(idx)])) -#define proto_knumtv(pt, idx) \ - check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)]) -#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto))) -#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt))) -#define proto_uv(pt) (mref((pt)->uv, uint16_t)) - -#define proto_chunkname(pt) (strref((pt)->chunkname)) -#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt)))) -#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void)) -#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t)) -#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t)) - -/* -- Upvalue object ------------------------------------------------------ */ - -typedef struct GCupval { - GCHeader; - uint8_t closed; /* Set if closed (i.e. 
uv->v == &uv->u.value). */ - uint8_t immutable; /* Immutable value. */ - union { - TValue tv; /* If closed: the value itself. */ - struct { /* If open: double linked list, anchored at thread. */ - GCRef prev; - GCRef next; - }; - }; - MRef v; /* Points to stack slot (open) or above (closed). */ - uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */ -} GCupval; - -#define uvprev(uv_) (&gcref((uv_)->prev)->uv) -#define uvnext(uv_) (&gcref((uv_)->next)->uv) -#define uvval(uv_) (mref((uv_)->v, TValue)) - -/* -- Function object (closures) ------------------------------------------ */ - -/* Common header for functions. env should be at same offset in GCudata. */ -#define GCfuncHeader \ - GCHeader; uint8_t ffid; uint8_t nupvalues; \ - GCRef env; GCRef gclist; MRef pc - -typedef struct GCfuncC { - GCfuncHeader; - lua_CFunction f; /* C function to be called. */ - TValue upvalue[1]; /* Array of upvalues (TValue). */ -} GCfuncC; - -typedef struct GCfuncL { - GCfuncHeader; - GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */ -} GCfuncL; - -typedef union GCfunc { - GCfuncC c; - GCfuncL l; -} GCfunc; - -#define FF_LUA 0 -#define FF_C 1 -#define isluafunc(fn) ((fn)->c.ffid == FF_LUA) -#define iscfunc(fn) ((fn)->c.ffid == FF_C) -#define isffunc(fn) ((fn)->c.ffid > FF_C) -#define funcproto(fn) \ - check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto))) -#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n)) -#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n)) - -/* -- Table object -------------------------------------------------------- */ - -/* Hash node. */ -typedef struct Node { - TValue val; /* Value object. Must be first field. */ - TValue key; /* Key object. */ - MRef next; /* Hash chain. */ -#if !LJ_GC64 - MRef freetop; /* Top of free elements (stored in t->node[0]). */ -#endif -} Node; - -LJ_STATIC_ASSERT(offsetof(Node, val) == 0); - -typedef struct GCtab { - GCHeader; - uint8_t nomm; /* Negative cache for fast metamethods. */ - int8_t colo; /* Array colocation. */ - MRef array; /* Array part. */ - GCRef gclist; - GCRef metatable; /* Must be at same offset in GCudata. */ - MRef node; /* Hash part. */ - uint32_t asize; /* Size of array part (keys [0, asize-1]). */ - uint32_t hmask; /* Hash part mask (size of hash part - 1). */ -#if LJ_GC64 - MRef freetop; /* Top of free elements. */ -#endif -} GCtab; - -#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab)) -#define tabref(r) (&gcref((r))->tab) -#define noderef(r) (mref((r), Node)) -#define nextnode(n) (mref((n)->next, Node)) -#if LJ_GC64 -#define getfreetop(t, n) (noderef((t)->freetop)) -#define setfreetop(t, n, v) (setmref((t)->freetop, (v))) -#else -#define getfreetop(t, n) (noderef((n)->freetop)) -#define setfreetop(t, n, v) (setmref((n)->freetop, (v))) -#endif - -/* -- State objects ------------------------------------------------------- */ - -/* VM states. */ -enum { - LJ_VMST_INTERP, /* Interpreter. */ - LJ_VMST_C, /* C function. */ - LJ_VMST_GC, /* Garbage collector. */ - LJ_VMST_EXIT, /* Trace exit handler. */ - LJ_VMST_RECORD, /* Trace recorder. */ - LJ_VMST_OPT, /* Optimizer. */ - LJ_VMST_ASM, /* Assembler. */ - LJ_VMST__MAX -}; - -#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st) - -/* Metamethods. 
ORDER MM */ -#ifdef LJ_HASFFI -#define MMDEF_FFI(_) _(new) -#else -#define MMDEF_FFI(_) -#endif - -#if LJ_52 || LJ_HASFFI -#define MMDEF_PAIRS(_) _(pairs) _(ipairs) -#else -#define MMDEF_PAIRS(_) -#define MM_pairs 255 -#define MM_ipairs 255 -#endif - -#define MMDEF(_) \ - _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \ - /* Only the above (fast) metamethods are negative cached (max. 8). */ \ - _(lt) _(le) _(concat) _(call) \ - /* The following must be in ORDER ARITH. */ \ - _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \ - /* The following are used in the standard libraries. */ \ - _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_) - -typedef enum { -#define MMENUM(name) MM_##name, -MMDEF(MMENUM) -#undef MMENUM - MM__MAX, - MM____ = MM__MAX, - MM_FAST = MM_len -} MMS; - -/* GC root IDs. */ -typedef enum { - GCROOT_MMNAME, /* Metamethod names. */ - GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1, - GCROOT_BASEMT, /* Metatables for base types. */ - GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX, - GCROOT_IO_INPUT, /* Userdata for default I/O input file. */ - GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */ - GCROOT_MAX -} GCRootID; - -#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)]) -#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)]) -#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)])) - -typedef struct GCState { - GCSize total; /* Memory currently allocated. */ - GCSize threshold; /* Memory threshold. */ - uint8_t currentwhite; /* Current white color. */ - uint8_t state; /* GC state. */ - uint8_t nocdatafin; /* No cdata finalizer called. */ - uint8_t unused2; - MSize sweepstr; /* Sweep position in string table. */ - GCRef root; /* List of all collectable objects. */ - MRef sweep; /* Sweep position in root list. */ - GCRef gray; /* List of gray objects. */ - GCRef grayagain; /* List of objects for atomic traversal. */ - GCRef weak; /* List of weak tables (to be cleared). */ - GCRef mmudata; /* List of userdata (to be finalized). */ - GCSize debt; /* Debt (how much GC is behind schedule). */ - GCSize estimate; /* Estimate of memory actually in use. */ - MSize stepmul; /* Incremental GC step granularity. */ - MSize pause; /* Pause between successive GC cycles. */ -} GCState; - -/* Global state, shared by all threads of a Lua universe. */ -typedef struct global_State { - GCRef *strhash; /* String hash table (hash chain anchors). */ - MSize strmask; /* String hash mask (size of hash table - 1). */ - MSize strnum; /* Number of strings in hash table. */ - lua_Alloc allocf; /* Memory allocator. */ - void *allocd; /* Memory allocator data. */ - GCState gc; /* Garbage collector. */ - volatile int32_t vmstate; /* VM state or current JIT code trace number. */ - SBuf tmpbuf; /* Temporary string buffer. */ - GCstr strempty; /* Empty string. */ - uint8_t stremptyz; /* Zero terminator of empty string. */ - uint8_t hookmask; /* Hook mask. */ - uint8_t dispatchmode; /* Dispatch mode. */ - uint8_t vmevmask; /* VM event mask. */ - GCRef mainthref; /* Link to main thread. */ - TValue registrytv; /* Anchor for registry. */ - TValue tmptv, tmptv2; /* Temporary TValues. */ - Node nilnode; /* Fallback 1-element hash part (nil key and value). */ - GCupval uvhead; /* Head of double-linked list of all open upvalues. */ - int32_t hookcount; /* Instruction hook countdown. */ - int32_t hookcstart; /* Start count for instruction hook counter. */ - lua_Hook hookf; /* Hook function. */ - lua_CFunction wrapf; /* Wrapper for C function calls. 
*/ - lua_CFunction panic; /* Called as a last resort for errors. */ - BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */ - BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */ - GCRef cur_L; /* Currently executing lua_State. */ - MRef jit_base; /* Current JIT code L->base or NULL. */ - MRef ctype_state; /* Pointer to C type state. */ - GCRef gcroot[GCROOT_MAX]; /* GC roots. */ -} global_State; - -#define mainthread(g) (&gcref(g->mainthref)->th) -#define niltv(L) \ - check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val) -#define niltvg(g) \ - check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val) - -/* Hook management. Hook event masks are defined in lua.h. */ -#define HOOK_EVENTMASK 0x0f -#define HOOK_ACTIVE 0x10 -#define HOOK_ACTIVE_SHIFT 4 -#define HOOK_VMEVENT 0x20 -#define HOOK_GC 0x40 -#define HOOK_PROFILE 0x80 -#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE) -#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE) -#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC)) -#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT)) -#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE) -#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK) -#define hook_restore(g, h) \ - ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h)) - -/* Per-thread state object. */ -struct lua_State { - GCHeader; - uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */ - uint8_t status; /* Thread status. */ - MRef glref; /* Link to global state. */ - GCRef gclist; /* GC chain. */ - TValue *base; /* Base of currently executing function. */ - TValue *top; /* First free slot in the stack. */ - MRef maxstack; /* Last free slot in the stack. */ - MRef stack; /* Stack base. */ - GCRef openupval; /* List of open upvalues in the stack. */ - GCRef env; /* Thread environment (table of globals). */ - void *cframe; /* End of C stack frame chain. */ - MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */ -}; - -#define G(L) (mref(L->glref, global_State)) -#define registry(L) (&G(L)->registrytv) - -/* Macros to access the currently executing (Lua) function. */ -#if LJ_GC64 -#define curr_func(L) (&gcval(L->base-2)->fn) -#elif LJ_FR2 -#define curr_func(L) (&gcref((L->base-2)->gcr)->fn) -#else -#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn) -#endif -#define curr_funcisL(L) (isluafunc(curr_func(L))) -#define curr_proto(L) (funcproto(curr_func(L))) -#define curr_topL(L) (L->base + curr_proto(L)->framesize) -#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top) - -/* -- GC object definition and conversions -------------------------------- */ - -/* GC header for generic access to common fields of GC objects. */ -typedef struct GChead { - GCHeader; - uint8_t unused1; - uint8_t unused2; - GCRef env; - GCRef gclist; - GCRef metatable; -} GChead; - -/* The env field SHOULD be at the same offset for all GC objects. */ -LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env)); -LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env)); - -/* The metatable field MUST be at the same offset for all GC objects. */ -LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable)); -LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable)); - -/* The gclist field MUST be at the same offset for all GC objects. 
*/ -LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist)); -LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist)); -LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist)); -LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist)); - -typedef union GCobj { - GChead gch; - GCstr str; - GCupval uv; - lua_State th; - GCproto pt; - GCfunc fn; - GCcdata cd; - GCtab tab; - GCudata ud; -} GCobj; - -/* Macros to convert a GCobj pointer into a specific value. */ -#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str) -#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv) -#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th) -#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt) -#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn) -#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd) -#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab) -#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud) - -/* Macro to convert any collectable object into a GCobj pointer. */ -#define obj2gco(v) ((GCobj *)(v)) - -/* -- TValue getters/setters ---------------------------------------------- */ - -#ifdef LUA_USE_ASSERT -#include "lj_gc.h" -#endif - -/* Macros to test types. */ -#if LJ_GC64 -#define itype(o) ((uint32_t)((o)->it64 >> 47)) -#define tvisnil(o) ((o)->it64 == -1) -#else -#define itype(o) ((o)->it) -#define tvisnil(o) (itype(o) == LJ_TNIL) -#endif -#define tvisfalse(o) (itype(o) == LJ_TFALSE) -#define tvistrue(o) (itype(o) == LJ_TTRUE) -#define tvisbool(o) (tvisfalse(o) || tvistrue(o)) -#if LJ_64 && !LJ_GC64 -#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2) -#else -#define tvislightud(o) (itype(o) == LJ_TLIGHTUD) -#endif -#define tvisstr(o) (itype(o) == LJ_TSTR) -#define tvisfunc(o) (itype(o) == LJ_TFUNC) -#define tvisthread(o) (itype(o) == LJ_TTHREAD) -#define tvisproto(o) (itype(o) == LJ_TPROTO) -#define tviscdata(o) (itype(o) == LJ_TCDATA) -#define tvistab(o) (itype(o) == LJ_TTAB) -#define tvisudata(o) (itype(o) == LJ_TUDATA) -#define tvisnumber(o) (itype(o) <= LJ_TISNUM) -#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM) -#define tvisnum(o) (itype(o) < LJ_TISNUM) - -#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND) -#define tvispri(o) (itype(o) >= LJ_TISPRI) -#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */ -#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV)) - -/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */ -#define tvisnan(o) ((o)->n != (o)->n) -#if LJ_64 -#define tviszero(o) (((o)->u64 << 1) == 0) -#else -#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0) -#endif -#define tvispzero(o) ((o)->u64 == 0) -#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000)) -#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000)) -#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64) - -/* Macros to convert type ids. */ -#if LJ_64 && !LJ_GC64 -#define itypemap(o) \ - (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o)) -#else -#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o)) -#endif - -/* Macros to get tagged values. 
*/ -#if LJ_GC64 -#define gcval(o) ((GCobj *)(gcrefu((o)->gcr) & LJ_GCVMASK)) -#else -#define gcval(o) (gcref((o)->gcr)) -#endif -#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - itype(o))) -#if LJ_64 -#define lightudV(o) \ - check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff))) -#else -#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void)) -#endif -#define gcV(o) check_exp(tvisgcv(o), gcval(o)) -#define strV(o) check_exp(tvisstr(o), &gcval(o)->str) -#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn) -#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th) -#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt) -#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd) -#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab) -#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud) -#define numV(o) check_exp(tvisnum(o), (o)->n) -#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i) - -/* Macros to set tagged values. */ -#if LJ_GC64 -#define setitype(o, i) ((o)->it = ((i) << 15)) -#define setnilV(o) ((o)->it64 = -1) -#define setpriV(o, x) ((o)->it64 = (int64_t)~((uint64_t)~(x)<<47)) -#define setboolV(o, x) ((o)->it64 = (int64_t)~((uint64_t)((x)+1)<<47)) -#else -#define setitype(o, i) ((o)->it = (i)) -#define setnilV(o) ((o)->it = LJ_TNIL) -#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x)) -#define setpriV(o, i) (setitype((o), (i))) -#endif - -static LJ_AINLINE void setlightudV(TValue *o, void *p) -{ -#if LJ_GC64 - o->u64 = (uint64_t)p | (((uint64_t)LJ_TLIGHTUD) << 47); -#elif LJ_64 - o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48); -#else - setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD); -#endif -} - -#if LJ_64 -#define checklightudptr(L, p) \ - (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p)) -#else -#define checklightudptr(L, p) (p) -#endif - -#if LJ_FR2 -#define setcont(o, f) ((o)->u64 = (uint64_t)(uintptr_t)(void *)(f)) -#elif LJ_64 -#define setcont(o, f) \ - ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin) -#else -#define setcont(o, f) setlightudV((o), (void *)(f)) -#endif - -#define tvchecklive(L, o) \ - UNUSED(L), lua_assert(!tvisgcv(o) || \ - ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o)))) - -static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype) -{ -#if LJ_GC64 - setgcreft(o->gcr, v, itype); -#else - setgcref(o->gcr, v); setitype(o, itype); -#endif -} - -static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t it) -{ - setgcVraw(o, v, it); tvchecklive(L, o); -} - -#define define_setV(name, type, tag) \ -static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \ -{ \ - setgcV(L, o, obj2gco(v), tag); \ -} -define_setV(setstrV, GCstr, LJ_TSTR) -define_setV(setthreadV, lua_State, LJ_TTHREAD) -define_setV(setprotoV, GCproto, LJ_TPROTO) -define_setV(setfuncV, GCfunc, LJ_TFUNC) -define_setV(setcdataV, GCcdata, LJ_TCDATA) -define_setV(settabV, GCtab, LJ_TTAB) -define_setV(setudataV, GCudata, LJ_TUDATA) - -#define setnumV(o, x) ((o)->n = (x)) -#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000)) -#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000)) -#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000)) - -static LJ_AINLINE void setintV(TValue *o, int32_t i) -{ -#if LJ_DUALNUM - o->i = (uint32_t)i; setitype(o, LJ_TISNUM); -#else - o->n = (lua_Number)i; -#endif -} - -static LJ_AINLINE void setint64V(TValue *o, int64_t i) -{ - if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i)) - setintV(o, (int32_t)i); - else - setnumV(o, (lua_Number)i); -} - 
-#if LJ_64 -#define setintptrV(o, i) setint64V((o), (i)) -#else -#define setintptrV(o, i) setintV((o), (i)) -#endif - -/* Copy tagged values. */ -static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2) -{ - *o1 = *o2; tvchecklive(L, o1); -} - -/* -- Number to integer conversion ---------------------------------------- */ - -#if LJ_SOFTFP -LJ_ASMF int32_t lj_vm_tobit(double x); -#endif - -static LJ_AINLINE int32_t lj_num2bit(lua_Number n) -{ -#if LJ_SOFTFP - return lj_vm_tobit(n); -#else - TValue o; - o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */ - return (int32_t)o.u32.lo; -#endif -} - -#define lj_num2int(n) ((int32_t)(n)) - -static LJ_AINLINE uint64_t lj_num2u64(lua_Number n) -{ -#ifdef _MSC_VER - if (n >= 9223372036854775808.0) /* They think it's a feature. */ - return (uint64_t)(int64_t)(n - 18446744073709551616.0); - else -#endif - return (uint64_t)n; -} - -static LJ_AINLINE int32_t numberVint(cTValue *o) -{ - if (LJ_LIKELY(tvisint(o))) - return intV(o); - else - return lj_num2int(numV(o)); -} - -static LJ_AINLINE lua_Number numberVnum(cTValue *o) -{ - if (LJ_UNLIKELY(tvisint(o))) - return (lua_Number)intV(o); - else - return numV(o); -} - -/* -- Miscellaneous object handling --------------------------------------- */ - -/* Names and maps for internal and external object tags. */ -LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1]; -LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1]; - -#define lj_typename(o) (lj_obj_itypename[itypemap(o)]) - -/* Compare two objects without calling metamethods. */ -LJ_FUNC int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2); -LJ_FUNC const void * LJ_FASTCALL lj_obj_ptr(cTValue *o); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_dce.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_dce.c deleted file mode 100644 index 7f1faafe647..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_dce.c +++ /dev/null @@ -1,78 +0,0 @@ -/* -** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_dce_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Scan through all snapshots and mark all referenced instructions. */ -static void dce_marksnap(jit_State *J) -{ - SnapNo i, nsnap = J->cur.nsnap; - for (i = 0; i < nsnap; i++) { - SnapShot *snap = &J->cur.snap[i]; - SnapEntry *map = &J->cur.snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - for (n = 0; n < nent; n++) { - IRRef ref = snap_ref(map[n]); - if (ref >= REF_FIRST) - irt_setmark(IR(ref)->t); - } - } -} - -/* Backwards propagate marks. Replace unused instructions with NOPs. */ -static void dce_propagate(jit_State *J) -{ - IRRef1 *pchain[IR__MAX]; - IRRef ins; - uint32_t i; - for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i]; - for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) { - IRIns *ir = IR(ins); - if (irt_ismarked(ir->t)) { - irt_clearmark(ir->t); - pchain[ir->o] = &ir->prev; - } else if (!ir_sideeff(ir)) { - *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */ - ir->t.irt = IRT_NIL; - ir->o = IR_NOP; /* Replace instruction with NOP. 
*/ - ir->op1 = ir->op2 = 0; - ir->prev = 0; - continue; - } - if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t); - if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t); - } -} - -/* Dead Code Elimination. -** -** First backpropagate marks for all used instructions. Then replace -** the unused ones with a NOP. Note that compressing the IR to eliminate -** the NOPs does not pay off. -*/ -void lj_opt_dce(jit_State *J) -{ - if ((J->flags & JIT_F_OPT_DCE)) { - dce_marksnap(J); - dce_propagate(J); - memset(J->bpropcache, 0, sizeof(J->bpropcache)); /* Invalidate cache. */ - } -} - -#undef IR - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_fold.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_fold.c deleted file mode 100644 index f809a991a73..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_fold.c +++ /dev/null @@ -1,2488 +0,0 @@ -/* -** FOLD: Constant Folding, Algebraic Simplifications and Reassociation. -** ABCelim: Array Bounds Check Elimination. -** CSE: Common-Subexpression Elimination. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_fold_c -#define LUA_CORE - -#include <math.h> - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_carith.h" -#endif -#include "lj_vm.h" -#include "lj_strscan.h" -#include "lj_strfmt.h" - -/* Here's a short description how the FOLD engine processes instructions: -** -** The FOLD engine receives a single instruction stored in fins (J->fold.ins). -** The instruction and its operands are used to select matching fold rules. -** These are applied iteratively until a fixed point is reached. -** -** The 8 bit opcode of the instruction itself plus the opcodes of the -** two instructions referenced by its operands form a 24 bit key -** 'ins left right' (unused operands -> 0, literals -> lowest 8 bits). -** -** This key is used for partial matching against the fold rules. The -** left/right operand fields of the key are successively masked with -** the 'any' wildcard, from most specific to least specific: -** -** ins left right -** ins any right -** ins left any -** ins any any -** -** The masked key is used to lookup a matching fold rule in a semi-perfect -** hash table. If a matching rule is found, the related fold function is run. -** Multiple rules can share the same fold function. A fold rule may return -** one of several special values: -** -** - NEXTFOLD means no folding was applied, because an additional test -** inside the fold function failed. Matching continues against less -** specific fold rules. Finally the instruction is passed on to CSE. -** -** - RETRYFOLD means the instruction was modified in-place. Folding is -** retried as if this instruction had just been received. -** -** All other return values are terminal actions -- no further folding is -** applied: -** -** - INTFOLD(i) returns a reference to the integer constant i. -** -** - LEFTFOLD and RIGHTFOLD return the left/right operand reference -** without emitting an instruction. -** -** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit -** it without passing through any further optimizations. -** -** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have -** no result (e.g. guarded assertions): FAILFOLD means the guard would -** always fail, i.e. 
the current trace is pointless. DROPFOLD means -** the guard is always true and has been eliminated. CONDFOLD is a -** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail). -** -** - Any other return value is interpreted as an IRRef or TRef. This -** can be a reference to an existing or a newly created instruction. -** Only the least-significant 16 bits (IRRef1) are used to form a TRef -** which is finally returned to the caller. -** -** The FOLD engine receives instructions both from the trace recorder and -** substituted instructions from LOOP unrolling. This means all types -** of instructions may end up here, even though the recorder bypasses -** FOLD in some cases. Thus all loads, stores and allocations must have -** an any/any rule to avoid being passed on to CSE. -** -** Carefully read the following requirements before adding or modifying -** any fold rules: -** -** Requirement #1: All fold rules must preserve their destination type. -** -** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result). -** Never use lj_ir_knumint() which can have either a KINT or KNUM result. -** -** Requirement #2: Fold rules should not create *new* instructions which -** reference operands *across* PHIs. -** -** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the -** left operand is a PHI. Then fleft->op1 would point across the PHI -** frontier to an invariant instruction. Adding a PHI for this instruction -** would be counterproductive. The solution is to add a barrier which -** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case. -** The only exception is for recurrences with high latencies like -** repeated int->num->int conversions. -** -** One could relax this condition a bit if the referenced instruction is -** a PHI, too. But this often leads to worse code due to excessive -** register shuffling. -** -** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though. -** Even returning fleft->op1 would be ok, because a new PHI will added, -** if needed. But again, this leads to excessive register shuffling and -** should be avoided. -** -** Requirement #3: The set of all fold rules must be monotonic to guarantee -** termination. -** -** The goal is optimization, so one primarily wants to add strength-reducing -** rules. This means eliminating an instruction or replacing an instruction -** with one or more simpler instructions. Don't add fold rules which point -** into the other direction. -** -** Some rules (like commutativity) do not directly reduce the strength of -** an instruction, but enable other fold rules (e.g. by moving constants -** to the right operand). These rules must be made unidirectional to avoid -** cycles. -** -** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it. -*/ - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) -#define fins (&J->fold.ins) -#define fleft (&J->fold.left) -#define fright (&J->fold.right) -#define knumleft (ir_knum(fleft)->n) -#define knumright (ir_knum(fright)->n) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -/* Fold function type. Fastcall on x86 significantly reduces their size. */ -typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J); - -/* Macros for the fold specs, so buildvm can recognize them. 
*/ -#define LJFOLD(x) -#define LJFOLDX(x) -#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J) -/* Note: They must be at the start of a line or buildvm ignores them! */ - -/* Barrier to prevent using operands across PHIs. */ -#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD - -/* Barrier to prevent folding across a GC step. -** GC steps can only happen at the head of a trace and at LOOP. -** And the GC is only driven forward if there's at least one allocation. -*/ -#define gcstep_barrier(J, ref) \ - ((ref) < J->chain[IR_LOOP] && \ - (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \ - J->chain[IR_TNEW] || J->chain[IR_TDUP] || \ - J->chain[IR_CNEW] || J->chain[IR_CNEWI] || \ - J->chain[IR_BUFSTR] || J->chain[IR_TOSTR] || J->chain[IR_CALLA])) - -/* -- Constant folding for FP numbers ------------------------------------- */ - -LJFOLD(ADD KNUM KNUM) -LJFOLD(SUB KNUM KNUM) -LJFOLD(MUL KNUM KNUM) -LJFOLD(DIV KNUM KNUM) -LJFOLD(NEG KNUM KNUM) -LJFOLD(ABS KNUM KNUM) -LJFOLD(ATAN2 KNUM KNUM) -LJFOLD(LDEXP KNUM KNUM) -LJFOLD(MIN KNUM KNUM) -LJFOLD(MAX KNUM KNUM) -LJFOLDF(kfold_numarith) -{ - lua_Number a = knumleft; - lua_Number b = knumright; - lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD); - return lj_ir_knum(J, y); -} - -LJFOLD(LDEXP KNUM KINT) -LJFOLDF(kfold_ldexp) -{ -#if LJ_TARGET_X86ORX64 - UNUSED(J); - return NEXTFOLD; -#else - return lj_ir_knum(J, ldexp(knumleft, fright->i)); -#endif -} - -LJFOLD(FPMATH KNUM any) -LJFOLDF(kfold_fpmath) -{ - lua_Number a = knumleft; - lua_Number y = lj_vm_foldfpm(a, fins->op2); - return lj_ir_knum(J, y); -} - -LJFOLD(POW KNUM KINT) -LJFOLDF(kfold_numpow) -{ - lua_Number a = knumleft; - lua_Number b = (lua_Number)fright->i; - lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD); - return lj_ir_knum(J, y); -} - -/* Must not use kfold_kref for numbers (could be NaN). */ -LJFOLD(EQ KNUM KNUM) -LJFOLD(NE KNUM KNUM) -LJFOLD(LT KNUM KNUM) -LJFOLD(GE KNUM KNUM) -LJFOLD(LE KNUM KNUM) -LJFOLD(GT KNUM KNUM) -LJFOLD(ULT KNUM KNUM) -LJFOLD(UGE KNUM KNUM) -LJFOLD(ULE KNUM KNUM) -LJFOLD(UGT KNUM KNUM) -LJFOLDF(kfold_numcomp) -{ - return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o)); -} - -/* -- Constant folding for 32 bit integers -------------------------------- */ - -static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op) -{ - switch (op) { - case IR_ADD: k1 += k2; break; - case IR_SUB: k1 -= k2; break; - case IR_MUL: k1 *= k2; break; - case IR_MOD: k1 = lj_vm_modi(k1, k2); break; - case IR_NEG: k1 = -k1; break; - case IR_BAND: k1 &= k2; break; - case IR_BOR: k1 |= k2; break; - case IR_BXOR: k1 ^= k2; break; - case IR_BSHL: k1 <<= (k2 & 31); break; - case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break; - case IR_BSAR: k1 >>= (k2 & 31); break; - case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break; - case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break; - case IR_MIN: k1 = k1 < k2 ? k1 : k2; break; - case IR_MAX: k1 = k1 > k2 ? 
k1 : k2; break; - default: lua_assert(0); break; - } - return k1; -} - -LJFOLD(ADD KINT KINT) -LJFOLD(SUB KINT KINT) -LJFOLD(MUL KINT KINT) -LJFOLD(MOD KINT KINT) -LJFOLD(NEG KINT KINT) -LJFOLD(BAND KINT KINT) -LJFOLD(BOR KINT KINT) -LJFOLD(BXOR KINT KINT) -LJFOLD(BSHL KINT KINT) -LJFOLD(BSHR KINT KINT) -LJFOLD(BSAR KINT KINT) -LJFOLD(BROL KINT KINT) -LJFOLD(BROR KINT KINT) -LJFOLD(MIN KINT KINT) -LJFOLD(MAX KINT KINT) -LJFOLDF(kfold_intarith) -{ - return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o)); -} - -LJFOLD(ADDOV KINT KINT) -LJFOLD(SUBOV KINT KINT) -LJFOLD(MULOV KINT KINT) -LJFOLDF(kfold_intovarith) -{ - lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i, - fins->o - IR_ADDOV); - int32_t k = lj_num2int(n); - if (n != (lua_Number)k) - return FAILFOLD; - return INTFOLD(k); -} - -LJFOLD(BNOT KINT) -LJFOLDF(kfold_bnot) -{ - return INTFOLD(~fleft->i); -} - -LJFOLD(BSWAP KINT) -LJFOLDF(kfold_bswap) -{ - return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i)); -} - -LJFOLD(LT KINT KINT) -LJFOLD(GE KINT KINT) -LJFOLD(LE KINT KINT) -LJFOLD(GT KINT KINT) -LJFOLD(ULT KINT KINT) -LJFOLD(UGE KINT KINT) -LJFOLD(ULE KINT KINT) -LJFOLD(UGT KINT KINT) -LJFOLD(ABC KINT KINT) -LJFOLDF(kfold_intcomp) -{ - int32_t a = fleft->i, b = fright->i; - switch ((IROp)fins->o) { - case IR_LT: return CONDFOLD(a < b); - case IR_GE: return CONDFOLD(a >= b); - case IR_LE: return CONDFOLD(a <= b); - case IR_GT: return CONDFOLD(a > b); - case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b); - case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b); - case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b); - case IR_ABC: - case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b); - default: lua_assert(0); return FAILFOLD; - } -} - -LJFOLD(UGE any KINT) -LJFOLDF(kfold_intcomp0) -{ - if (fright->i == 0) - return DROPFOLD; - return NEXTFOLD; -} - -/* -- Constant folding for 64 bit integers -------------------------------- */ - -static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) -{ - switch (op) { -#if LJ_HASFFI - case IR_ADD: k1 += k2; break; - case IR_SUB: k1 -= k2; break; - case IR_MUL: k1 *= k2; break; - case IR_BAND: k1 &= k2; break; - case IR_BOR: k1 |= k2; break; - case IR_BXOR: k1 ^= k2; break; -#endif - default: UNUSED(k2); lua_assert(0); break; - } - return k1; -} - -LJFOLD(ADD KINT64 KINT64) -LJFOLD(SUB KINT64 KINT64) -LJFOLD(MUL KINT64 KINT64) -LJFOLD(BAND KINT64 KINT64) -LJFOLD(BOR KINT64 KINT64) -LJFOLD(BXOR KINT64 KINT64) -LJFOLDF(kfold_int64arith) -{ - return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64, - ir_k64(fright)->u64, (IROp)fins->o)); -} - -LJFOLD(DIV KINT64 KINT64) -LJFOLD(MOD KINT64 KINT64) -LJFOLD(POW KINT64 KINT64) -LJFOLDF(kfold_int64arith2) -{ -#if LJ_HASFFI - uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64; - if (irt_isi64(fins->t)) { - k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) : - fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) : - lj_carith_powi64((int64_t)k1, (int64_t)k2); - } else { - k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) : - fins->o == IR_MOD ? 
lj_carith_modu64(k1, k2) : - lj_carith_powu64(k1, k2); - } - return INT64FOLD(k1); -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(BSHL KINT64 KINT) -LJFOLD(BSHR KINT64 KINT) -LJFOLD(BSAR KINT64 KINT) -LJFOLD(BROL KINT64 KINT) -LJFOLD(BROR KINT64 KINT) -LJFOLDF(kfold_int64shift) -{ -#if LJ_HASFFI - uint64_t k = ir_k64(fleft)->u64; - int32_t sh = (fright->i & 63); - return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL)); -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(BNOT KINT64) -LJFOLDF(kfold_bnot64) -{ -#if LJ_HASFFI - return INT64FOLD(~ir_k64(fleft)->u64); -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(BSWAP KINT64) -LJFOLDF(kfold_bswap64) -{ -#if LJ_HASFFI - return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64)); -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(LT KINT64 KINT64) -LJFOLD(GE KINT64 KINT64) -LJFOLD(LE KINT64 KINT64) -LJFOLD(GT KINT64 KINT64) -LJFOLD(ULT KINT64 KINT64) -LJFOLD(UGE KINT64 KINT64) -LJFOLD(ULE KINT64 KINT64) -LJFOLD(UGT KINT64 KINT64) -LJFOLDF(kfold_int64comp) -{ -#if LJ_HASFFI - uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64; - switch ((IROp)fins->o) { - case IR_LT: return CONDFOLD(a < b); - case IR_GE: return CONDFOLD(a >= b); - case IR_LE: return CONDFOLD(a <= b); - case IR_GT: return CONDFOLD(a > b); - case IR_ULT: return CONDFOLD((uint64_t)a < (uint64_t)b); - case IR_UGE: return CONDFOLD((uint64_t)a >= (uint64_t)b); - case IR_ULE: return CONDFOLD((uint64_t)a <= (uint64_t)b); - case IR_UGT: return CONDFOLD((uint64_t)a > (uint64_t)b); - default: lua_assert(0); return FAILFOLD; - } -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(UGE any KINT64) -LJFOLDF(kfold_int64comp0) -{ -#if LJ_HASFFI - if (ir_k64(fright)->u64 == 0) - return DROPFOLD; - return NEXTFOLD; -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -/* -- Constant folding for strings ---------------------------------------- */ - -LJFOLD(SNEW KKPTR KINT) -LJFOLDF(kfold_snew_kptr) -{ - GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i); - return lj_ir_kstr(J, s); -} - -LJFOLD(SNEW any KINT) -LJFOLDF(kfold_snew_empty) -{ - if (fright->i == 0) - return lj_ir_kstr(J, &J2G(J)->strempty); - return NEXTFOLD; -} - -LJFOLD(STRREF KGC KINT) -LJFOLDF(kfold_strref) -{ - GCstr *str = ir_kstr(fleft); - lua_assert((MSize)fright->i <= str->len); - return lj_ir_kkptr(J, (char *)strdata(str) + fright->i); -} - -LJFOLD(STRREF SNEW any) -LJFOLDF(kfold_strref_snew) -{ - PHIBARRIER(fleft); - if (irref_isk(fins->op2) && fright->i == 0) { - return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */ - } else { - /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */ - IRIns *ir = IR(fleft->op1); - if (ir->o == IR_STRREF) { - IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */ - PHIBARRIER(ir); - fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */ - fins->op1 = str; - fins->ot = IRT(IR_STRREF, IRT_P32); - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(CALLN CARG IRCALL_lj_str_cmp) -LJFOLDF(kfold_strcmp) -{ - if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) { - GCstr *a = ir_kstr(IR(fleft->op1)); - GCstr *b = ir_kstr(IR(fleft->op2)); - return INTFOLD(lj_str_cmp(a, b)); - } - return NEXTFOLD; -} - -/* -- Constant folding and forwarding for buffers ------------------------- */ - -/* -** Buffer ops perform stores, but their effect is limited to the buffer -** itself. 
Also, buffer ops are chained: a use of an op implies a use of -** all other ops up the chain. Conversely, if an op is unused, all ops -** up the chain can go unsed. This largely eliminates the need to treat -** them as stores. -** -** Alas, treating them as normal (IRM_N) ops doesn't work, because they -** cannot be CSEd in isolation. CSE for IRM_N is implicitly done in LOOP -** or if FOLD is disabled. -** -** The compromise is to declare them as loads, emit them like stores and -** CSE whole chains manually when the BUFSTR is to be emitted. Any chain -** fragments left over from CSE are eliminated by DCE. -*/ - -/* BUFHDR is emitted like a store, see below. */ - -LJFOLD(BUFPUT BUFHDR BUFSTR) -LJFOLDF(bufput_append) -{ - /* New buffer, no other buffer op inbetween and same buffer? */ - if ((J->flags & JIT_F_OPT_FWD) && - !(fleft->op2 & IRBUFHDR_APPEND) && - fleft->prev == fright->op2 && - fleft->op1 == IR(fright->op2)->op1) { - IRRef ref = fins->op1; - IR(ref)->op2 = (fleft->op2 | IRBUFHDR_APPEND); /* Modify BUFHDR. */ - IR(ref)->op1 = fright->op1; - return ref; - } - return EMITFOLD; /* Always emit, CSE later. */ -} - -LJFOLD(BUFPUT any any) -LJFOLDF(bufput_kgc) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fright->o == IR_KGC) { - GCstr *s2 = ir_kstr(fright); - if (s2->len == 0) { /* Empty string? */ - return LEFTFOLD; - } else { - if (fleft->o == IR_BUFPUT && irref_isk(fleft->op2) && - !irt_isphi(fleft->t)) { /* Join two constant string puts in a row. */ - GCstr *s1 = ir_kstr(IR(fleft->op2)); - IRRef kref = lj_ir_kstr(J, lj_buf_cat2str(J->L, s1, s2)); - /* lj_ir_kstr() may realloc the IR and invalidates any IRIns *. */ - IR(fins->op1)->op2 = kref; /* Modify previous BUFPUT. */ - return fins->op1; - } - } - } - return EMITFOLD; /* Always emit, CSE later. */ -} - -LJFOLD(BUFSTR any any) -LJFOLDF(bufstr_kfold_cse) -{ - lua_assert(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT || - fleft->o == IR_CALLL); - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { - if (fleft->o == IR_BUFHDR) { /* No put operations? */ - if (!(fleft->op2 & IRBUFHDR_APPEND)) /* Empty buffer? */ - return lj_ir_kstr(J, &J2G(J)->strempty); - fins->op1 = fleft->op1; - fins->op2 = fleft->prev; /* Relies on checks in bufput_append. */ - return CSEFOLD; - } else if (fleft->o == IR_BUFPUT) { - IRIns *irb = IR(fleft->op1); - if (irb->o == IR_BUFHDR && !(irb->op2 & IRBUFHDR_APPEND)) - return fleft->op2; /* Shortcut for a single put operation. */ - } - } - /* Try to CSE the whole chain. */ - if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { - IRRef ref = J->chain[IR_BUFSTR]; - while (ref) { - IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1); - while (ira->o == irb->o && ira->op2 == irb->op2) { - lua_assert(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT || - ira->o == IR_CALLL || ira->o == IR_CARG); - if (ira->o == IR_BUFHDR && !(ira->op2 & IRBUFHDR_APPEND)) - return ref; /* CSE succeeded. */ - if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab) - break; - ira = IR(ira->op1); - irb = IR(irb->op1); - } - ref = irs->prev; - } - } - return EMITFOLD; /* No CSE possible. 
*/ -} - -LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_reverse) -LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_upper) -LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_lower) -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putquoted) -LJFOLDF(bufput_kfold_op) -{ - if (irref_isk(fleft->op2)) { - const CCallInfo *ci = &lj_ir_callinfo[fins->op2]; - SBuf *sb = lj_buf_tmp_(J->L); - sb = ((SBuf * (LJ_FASTCALL *)(SBuf *, GCstr *))ci->func)(sb, - ir_kstr(IR(fleft->op2))); - fins->o = IR_BUFPUT; - fins->op1 = fleft->op1; - fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb)); - return RETRYFOLD; - } - return EMITFOLD; /* Always emit, CSE later. */ -} - -LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_rep) -LJFOLDF(bufput_kfold_rep) -{ - if (irref_isk(fleft->op2)) { - IRIns *irc = IR(fleft->op1); - if (irref_isk(irc->op2)) { - SBuf *sb = lj_buf_tmp_(J->L); - sb = lj_buf_putstr_rep(sb, ir_kstr(IR(irc->op2)), IR(fleft->op2)->i); - fins->o = IR_BUFPUT; - fins->op1 = irc->op1; - fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb)); - return RETRYFOLD; - } - } - return EMITFOLD; /* Always emit, CSE later. */ -} - -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfxint) -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_int) -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_uint) -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum) -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfstr) -LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfchar) -LJFOLDF(bufput_kfold_fmt) -{ - IRIns *irc = IR(fleft->op1); - lua_assert(irref_isk(irc->op2)); /* SFormat must be const. */ - if (irref_isk(fleft->op2)) { - SFormat sf = (SFormat)IR(irc->op2)->i; - IRIns *ira = IR(fleft->op2); - SBuf *sb = lj_buf_tmp_(J->L); - switch (fins->op2) { - case IRCALL_lj_strfmt_putfxint: - sb = lj_strfmt_putfxint(sb, sf, ir_k64(ira)->u64); - break; - case IRCALL_lj_strfmt_putfstr: - sb = lj_strfmt_putfstr(sb, sf, ir_kstr(ira)); - break; - case IRCALL_lj_strfmt_putfchar: - sb = lj_strfmt_putfchar(sb, sf, ira->i); - break; - case IRCALL_lj_strfmt_putfnum_int: - case IRCALL_lj_strfmt_putfnum_uint: - case IRCALL_lj_strfmt_putfnum: - default: { - const CCallInfo *ci = &lj_ir_callinfo[fins->op2]; - sb = ((SBuf * (*)(SBuf *, SFormat, lua_Number))ci->func)(sb, sf, - ir_knum(ira)->n); - break; - } - } - fins->o = IR_BUFPUT; - fins->op1 = irc->op1; - fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb)); - return RETRYFOLD; - } - return EMITFOLD; /* Always emit, CSE later. 
*/ -} - -/* -- Constant folding of pointer arithmetic ------------------------------ */ - -LJFOLD(ADD KGC KINT) -LJFOLD(ADD KGC KINT64) -LJFOLDF(kfold_add_kgc) -{ - GCobj *o = ir_kgc(fleft); -#if LJ_64 - ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64; -#else - ptrdiff_t ofs = fright->i; -#endif -#if LJ_HASFFI - if (irt_iscdata(fleft->t)) { - CType *ct = ctype_raw(ctype_ctsG(J2G(J)), gco2cd(o)->ctypeid); - if (ctype_isnum(ct->info) || ctype_isenum(ct->info) || - ctype_isptr(ct->info) || ctype_isfunc(ct->info) || - ctype_iscomplex(ct->info) || ctype_isvector(ct->info)) - return lj_ir_kkptr(J, (char *)o + ofs); - } -#endif - return lj_ir_kptr(J, (char *)o + ofs); -} - -LJFOLD(ADD KPTR KINT) -LJFOLD(ADD KPTR KINT64) -LJFOLD(ADD KKPTR KINT) -LJFOLD(ADD KKPTR KINT64) -LJFOLDF(kfold_add_kptr) -{ - void *p = ir_kptr(fleft); -#if LJ_64 - ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64; -#else - ptrdiff_t ofs = fright->i; -#endif - return lj_ir_kptr_(J, fleft->o, (char *)p + ofs); -} - -LJFOLD(ADD any KGC) -LJFOLD(ADD any KPTR) -LJFOLD(ADD any KKPTR) -LJFOLDF(kfold_add_kright) -{ - if (fleft->o == IR_KINT || fleft->o == IR_KINT64) { - IRRef1 tmp = fins->op1; fins->op1 = fins->op2; fins->op2 = tmp; - return RETRYFOLD; - } - return NEXTFOLD; -} - -/* -- Constant folding of conversions ------------------------------------- */ - -LJFOLD(TOBIT KNUM KNUM) -LJFOLDF(kfold_tobit) -{ - return INTFOLD(lj_num2bit(knumleft)); -} - -LJFOLD(CONV KINT IRCONV_NUM_INT) -LJFOLDF(kfold_conv_kint_num) -{ - return lj_ir_knum(J, (lua_Number)fleft->i); -} - -LJFOLD(CONV KINT IRCONV_NUM_U32) -LJFOLDF(kfold_conv_kintu32_num) -{ - return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i); -} - -LJFOLD(CONV KINT IRCONV_INT_I8) -LJFOLD(CONV KINT IRCONV_INT_U8) -LJFOLD(CONV KINT IRCONV_INT_I16) -LJFOLD(CONV KINT IRCONV_INT_U16) -LJFOLDF(kfold_conv_kint_ext) -{ - int32_t k = fleft->i; - if ((fins->op2 & IRCONV_SRCMASK) == IRT_I8) k = (int8_t)k; - else if ((fins->op2 & IRCONV_SRCMASK) == IRT_U8) k = (uint8_t)k; - else if ((fins->op2 & IRCONV_SRCMASK) == IRT_I16) k = (int16_t)k; - else k = (uint16_t)k; - return INTFOLD(k); -} - -LJFOLD(CONV KINT IRCONV_I64_INT) -LJFOLD(CONV KINT IRCONV_U64_INT) -LJFOLD(CONV KINT IRCONV_I64_U32) -LJFOLD(CONV KINT IRCONV_U64_U32) -LJFOLDF(kfold_conv_kint_i64) -{ - if ((fins->op2 & IRCONV_SEXT)) - return INT64FOLD((uint64_t)(int64_t)fleft->i); - else - return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i); -} - -LJFOLD(CONV KINT64 IRCONV_NUM_I64) -LJFOLDF(kfold_conv_kint64_num_i64) -{ - return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64); -} - -LJFOLD(CONV KINT64 IRCONV_NUM_U64) -LJFOLDF(kfold_conv_kint64_num_u64) -{ - return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64); -} - -LJFOLD(CONV KINT64 IRCONV_INT_I64) -LJFOLD(CONV KINT64 IRCONV_U32_I64) -LJFOLDF(kfold_conv_kint64_int_i64) -{ - return INTFOLD((int32_t)ir_kint64(fleft)->u64); -} - -LJFOLD(CONV KNUM IRCONV_INT_NUM) -LJFOLDF(kfold_conv_knum_int_num) -{ - lua_Number n = knumleft; - int32_t k = lj_num2int(n); - if (irt_isguard(fins->t) && n != (lua_Number)k) { - /* We're about to create a guard which always fails, like CONV +1.5. - ** Some pathological loops cause this during LICM, e.g.: - ** local x,k,t = 0,1.5,{1,[1.5]=2} - ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end - ** assert(x == 300) - */ - return FAILFOLD; - } - return INTFOLD(k); -} - -LJFOLD(CONV KNUM IRCONV_U32_NUM) -LJFOLDF(kfold_conv_knum_u32_num) -{ -#ifdef _MSC_VER - { /* Workaround for MSVC bug. 
*/ - volatile uint32_t u = (uint32_t)knumleft; - return INTFOLD((int32_t)u); - } -#else - return INTFOLD((int32_t)(uint32_t)knumleft); -#endif -} - -LJFOLD(CONV KNUM IRCONV_I64_NUM) -LJFOLDF(kfold_conv_knum_i64_num) -{ - return INT64FOLD((uint64_t)(int64_t)knumleft); -} - -LJFOLD(CONV KNUM IRCONV_U64_NUM) -LJFOLDF(kfold_conv_knum_u64_num) -{ - return INT64FOLD(lj_num2u64(knumleft)); -} - -LJFOLD(TOSTR KNUM any) -LJFOLDF(kfold_tostr_knum) -{ - return lj_ir_kstr(J, lj_strfmt_num(J->L, ir_knum(fleft))); -} - -LJFOLD(TOSTR KINT any) -LJFOLDF(kfold_tostr_kint) -{ - return lj_ir_kstr(J, fins->op2 == IRTOSTR_INT ? - lj_strfmt_int(J->L, fleft->i) : - lj_strfmt_char(J->L, fleft->i)); -} - -LJFOLD(STRTO KGC) -LJFOLDF(kfold_strto) -{ - TValue n; - if (lj_strscan_num(ir_kstr(fleft), &n)) - return lj_ir_knum(J, numV(&n)); - return FAILFOLD; -} - -/* -- Constant folding of equality checks --------------------------------- */ - -/* Don't constant-fold away FLOAD checks against KNULL. */ -LJFOLD(EQ FLOAD KNULL) -LJFOLD(NE FLOAD KNULL) -LJFOLDX(lj_opt_cse) - -/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */ -LJFOLD(EQ any KNULL) -LJFOLD(NE any KNULL) -LJFOLD(EQ KNULL any) -LJFOLD(NE KNULL any) -LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */ -LJFOLD(NE KINT KINT) -LJFOLD(EQ KINT64 KINT64) -LJFOLD(NE KINT64 KINT64) -LJFOLD(EQ KGC KGC) -LJFOLD(NE KGC KGC) -LJFOLDF(kfold_kref) -{ - return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE)); -} - -/* -- Algebraic shortcuts ------------------------------------------------- */ - -LJFOLD(FPMATH FPMATH IRFPM_FLOOR) -LJFOLD(FPMATH FPMATH IRFPM_CEIL) -LJFOLD(FPMATH FPMATH IRFPM_TRUNC) -LJFOLDF(shortcut_round) -{ - IRFPMathOp op = (IRFPMathOp)fleft->op2; - if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC) - return LEFTFOLD; /* round(round_left(x)) = round_left(x) */ - return NEXTFOLD; -} - -LJFOLD(ABS ABS KNUM) -LJFOLDF(shortcut_left) -{ - return LEFTFOLD; /* f(g(x)) ==> g(x) */ -} - -LJFOLD(ABS NEG KNUM) -LJFOLDF(shortcut_dropleft) -{ - PHIBARRIER(fleft); - fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */ - return RETRYFOLD; -} - -/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */ -LJFOLD(NEG NEG any) -LJFOLD(BNOT BNOT) -LJFOLD(BSWAP BSWAP) -LJFOLDF(shortcut_leftleft) -{ - PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */ - return fleft->op1; /* f(g(x)) ==> x */ -} - -/* -- FP algebraic simplifications ---------------------------------------- */ - -/* FP arithmetic is tricky -- there's not much to simplify. 
-** Please note the following common pitfalls before sending "improvements": -** x+0 ==> x is INVALID for x=-0 -** 0-x ==> -x is INVALID for x=+0 -** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN -*/ - -LJFOLD(ADD NEG any) -LJFOLDF(simplify_numadd_negx) -{ - PHIBARRIER(fleft); - fins->o = IR_SUB; /* (-a) + b ==> b - a */ - fins->op1 = fins->op2; - fins->op2 = fleft->op1; - return RETRYFOLD; -} - -LJFOLD(ADD any NEG) -LJFOLDF(simplify_numadd_xneg) -{ - PHIBARRIER(fright); - fins->o = IR_SUB; /* a + (-b) ==> a - b */ - fins->op2 = fright->op1; - return RETRYFOLD; -} - -LJFOLD(SUB any KNUM) -LJFOLDF(simplify_numsub_k) -{ - lua_Number n = knumright; - if (n == 0.0) /* x - (+-0) ==> x */ - return LEFTFOLD; - return NEXTFOLD; -} - -LJFOLD(SUB NEG KNUM) -LJFOLDF(simplify_numsub_negk) -{ - PHIBARRIER(fleft); - fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */ - fins->op1 = (IRRef1)lj_ir_knum(J, -knumright); - return RETRYFOLD; -} - -LJFOLD(SUB any NEG) -LJFOLDF(simplify_numsub_xneg) -{ - PHIBARRIER(fright); - fins->o = IR_ADD; /* a - (-b) ==> a + b */ - fins->op2 = fright->op1; - return RETRYFOLD; -} - -LJFOLD(MUL any KNUM) -LJFOLD(DIV any KNUM) -LJFOLDF(simplify_nummuldiv_k) -{ - lua_Number n = knumright; - if (n == 1.0) { /* x o 1 ==> x */ - return LEFTFOLD; - } else if (n == -1.0) { /* x o -1 ==> -x */ - fins->o = IR_NEG; - fins->op2 = (IRRef1)lj_ir_knum_neg(J); - return RETRYFOLD; - } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */ - fins->o = IR_ADD; - fins->op2 = fins->op1; - return RETRYFOLD; - } else if (fins->o == IR_DIV) { /* x / 2^k ==> x * 2^-k */ - uint64_t u = ir_knum(fright)->u64; - uint32_t ex = ((uint32_t)(u >> 52) & 0x7ff); - if ((u & U64x(000fffff,ffffffff)) == 0 && ex - 1 < 0x7fd) { - u = (u & ((uint64_t)1 << 63)) | ((uint64_t)(0x7fe - ex) << 52); - fins->o = IR_MUL; /* Multiply by exact reciprocal. */ - fins->op2 = lj_ir_knum_u64(J, u); - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(MUL NEG KNUM) -LJFOLD(DIV NEG KNUM) -LJFOLDF(simplify_nummuldiv_negk) -{ - PHIBARRIER(fleft); - fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */ - fins->op2 = (IRRef1)lj_ir_knum(J, -knumright); - return RETRYFOLD; -} - -LJFOLD(MUL NEG NEG) -LJFOLD(DIV NEG NEG) -LJFOLDF(simplify_nummuldiv_negneg) -{ - PHIBARRIER(fleft); - PHIBARRIER(fright); - fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */ - fins->op2 = fright->op1; - return RETRYFOLD; -} - -LJFOLD(POW any KINT) -LJFOLDF(simplify_numpow_xk) -{ - int32_t k = fright->i; - TRef ref = fins->op1; - if (k == 0) /* x ^ 0 ==> 1 */ - return lj_ir_knum_one(J); /* Result must be a number, not an int. */ - if (k == 1) /* x ^ 1 ==> x */ - return LEFTFOLD; - if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */ - return NEXTFOLD; - if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */ - ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref); - k = -k; - } - /* Unroll x^k for 1 <= k <= 65536. */ - for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */ - ref = emitir(IRTN(IR_MUL), ref, ref); - if ((k >>= 1) != 0) { /* Handle trailing bits. 
*/ - TRef tmp = emitir(IRTN(IR_MUL), ref, ref); - for (; k != 1; k >>= 1) { - if (k & 1) - ref = emitir(IRTN(IR_MUL), ref, tmp); - tmp = emitir(IRTN(IR_MUL), tmp, tmp); - } - ref = emitir(IRTN(IR_MUL), ref, tmp); - } - return ref; -} - -LJFOLD(POW KNUM any) -LJFOLDF(simplify_numpow_kx) -{ - lua_Number n = knumleft; - if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */ - fins->o = IR_CONV; -#if LJ_TARGET_X86ORX64 - fins->op1 = fins->op2; - fins->op2 = IRCONV_NUM_INT; - fins->op2 = (IRRef1)lj_opt_fold(J); -#endif - fins->op1 = (IRRef1)lj_ir_knum_one(J); - fins->o = IR_LDEXP; - return RETRYFOLD; - } - return NEXTFOLD; -} - -/* -- Simplify conversions ------------------------------------------------ */ - -LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */ -LJFOLDF(shortcut_conv_num_int) -{ - PHIBARRIER(fleft); - /* Only safe with a guarded conversion to int. */ - if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t)) - return fleft->op1; /* f(g(x)) ==> x */ - return NEXTFOLD; -} - -LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */ -LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32*/ -LJFOLDF(simplify_conv_int_num) -{ - /* Fold even across PHI to avoid expensive num->int conversions in loop. */ - if ((fleft->op2 & IRCONV_SRCMASK) == - ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) - return fleft->op1; - return NEXTFOLD; -} - -LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32 */ -LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32 */ -LJFOLDF(simplify_conv_i64_num) -{ - PHIBARRIER(fleft); - if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { - /* Reduce to a sign-extension. */ - fins->op1 = fleft->op1; - fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT); - return RETRYFOLD; - } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { -#if LJ_TARGET_X64 - return fleft->op1; -#else - /* Reduce to a zero-extension. */ - fins->op1 = fleft->op1; - fins->op2 = (IRT_I64<<5)|IRT_U32; - return RETRYFOLD; -#endif - } - return NEXTFOLD; -} - -LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT or _U32 */ -LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT or _U32 */ -LJFOLD(CONV CONV IRCONV_U32_I64) /* _INT or _U32 */ -LJFOLD(CONV CONV IRCONV_U32_U64) /* _INT or _U32 */ -LJFOLDF(simplify_conv_int_i64) -{ - int src; - PHIBARRIER(fleft); - src = (fleft->op2 & IRCONV_SRCMASK); - if (src == IRT_INT || src == IRT_U32) { - if (src == ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) { - return fleft->op1; - } else { - fins->op2 = ((fins->op2 & IRCONV_DSTMASK) | src); - fins->op1 = fleft->op1; - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(CONV CONV IRCONV_FLOAT_NUM) /* _FLOAT */ -LJFOLDF(simplify_conv_flt_num) -{ - PHIBARRIER(fleft); - if ((fleft->op2 & IRCONV_SRCMASK) == IRT_FLOAT) - return fleft->op1; - return NEXTFOLD; -} - -/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */ -LJFOLD(TOBIT CONV KNUM) -LJFOLDF(simplify_tobit_conv) -{ - /* Fold even across PHI to avoid expensive num->int conversions in loop. */ - if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { - lua_assert(irt_isnum(fleft->t)); - return fleft->op1; - } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { - lua_assert(irt_isnum(fleft->t)); - fins->o = IR_CONV; - fins->op1 = fleft->op1; - fins->op2 = (IRT_INT<<5)|IRT_U32; - return RETRYFOLD; - } - return NEXTFOLD; -} - -/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. 
*/ -LJFOLD(FPMATH CONV IRFPM_FLOOR) -LJFOLD(FPMATH CONV IRFPM_CEIL) -LJFOLD(FPMATH CONV IRFPM_TRUNC) -LJFOLDF(simplify_floor_conv) -{ - if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT || - (fleft->op2 & IRCONV_SRCMASK) == IRT_U32) - return LEFTFOLD; - return NEXTFOLD; -} - -/* Strength reduction of widening. */ -LJFOLD(CONV any IRCONV_I64_INT) -LJFOLD(CONV any IRCONV_U64_INT) -LJFOLDF(simplify_conv_sext) -{ - IRRef ref = fins->op1; - int64_t ofs = 0; - if (!(fins->op2 & IRCONV_SEXT)) - return NEXTFOLD; - PHIBARRIER(fleft); - if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t))) - goto ok_reduce; - if (fleft->o == IR_ADD && irref_isk(fleft->op2)) { - ofs = (int64_t)IR(fleft->op2)->i; - ref = fleft->op1; - } - /* Use scalar evolution analysis results to strength-reduce sign-extension. */ - if (ref == J->scev.idx) { - IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop; - lua_assert(irt_isint(J->scev.t)); - if (lo && IR(lo)->i + ofs >= 0) { - ok_reduce: -#if LJ_TARGET_X64 - /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */ - return LEFTFOLD; -#else - /* Reduce to a (cheaper) zero-extension. */ - fins->op2 &= ~IRCONV_SEXT; - return RETRYFOLD; -#endif - } - } - return NEXTFOLD; -} - -/* Strength reduction of narrowing. */ -LJFOLD(CONV ADD IRCONV_INT_I64) -LJFOLD(CONV SUB IRCONV_INT_I64) -LJFOLD(CONV MUL IRCONV_INT_I64) -LJFOLD(CONV ADD IRCONV_INT_U64) -LJFOLD(CONV SUB IRCONV_INT_U64) -LJFOLD(CONV MUL IRCONV_INT_U64) -LJFOLD(CONV ADD IRCONV_U32_I64) -LJFOLD(CONV SUB IRCONV_U32_I64) -LJFOLD(CONV MUL IRCONV_U32_I64) -LJFOLD(CONV ADD IRCONV_U32_U64) -LJFOLD(CONV SUB IRCONV_U32_U64) -LJFOLD(CONV MUL IRCONV_U32_U64) -LJFOLDF(simplify_conv_narrow) -{ - IROp op = (IROp)fleft->o; - IRType t = irt_type(fins->t); - IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2; - PHIBARRIER(fleft); - op1 = emitir(IRTI(IR_CONV), op1, mode); - op2 = emitir(IRTI(IR_CONV), op2, mode); - fins->ot = IRT(op, t); - fins->op1 = op1; - fins->op2 = op2; - return RETRYFOLD; -} - -/* Special CSE rule for CONV. */ -LJFOLD(CONV any any) -LJFOLDF(cse_conv) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { - IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK); - uint8_t guard = irt_isguard(fins->t); - IRRef ref = J->chain[IR_CONV]; - while (ref > op1) { - IRIns *ir = IR(ref); - /* Commoning with stronger checks is ok. */ - if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 && - irt_isguard(ir->t) >= guard) - return ref; - ref = ir->prev; - } - } - return EMITFOLD; /* No fallthrough to regular CSE. */ -} - -/* FP conversion narrowing. */ -LJFOLD(TOBIT ADD KNUM) -LJFOLD(TOBIT SUB KNUM) -LJFOLD(CONV ADD IRCONV_INT_NUM) -LJFOLD(CONV SUB IRCONV_INT_NUM) -LJFOLD(CONV ADD IRCONV_I64_NUM) -LJFOLD(CONV SUB IRCONV_I64_NUM) -LJFOLDF(narrow_convert) -{ - PHIBARRIER(fleft); - /* Narrowing ignores PHIs and repeating it inside the loop is not useful. 
*/ - if (J->chain[IR_LOOP]) - return NEXTFOLD; - lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT); - return lj_opt_narrow_convert(J); -} - -/* -- Integer algebraic simplifications ----------------------------------- */ - -LJFOLD(ADD any KINT) -LJFOLD(ADDOV any KINT) -LJFOLD(SUBOV any KINT) -LJFOLDF(simplify_intadd_k) -{ - if (fright->i == 0) /* i o 0 ==> i */ - return LEFTFOLD; - return NEXTFOLD; -} - -LJFOLD(MULOV any KINT) -LJFOLDF(simplify_intmul_k) -{ - if (fright->i == 0) /* i * 0 ==> 0 */ - return RIGHTFOLD; - if (fright->i == 1) /* i * 1 ==> i */ - return LEFTFOLD; - if (fright->i == 2) { /* i * 2 ==> i + i */ - fins->o = IR_ADDOV; - fins->op2 = fins->op1; - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(SUB any KINT) -LJFOLDF(simplify_intsub_k) -{ - if (fright->i == 0) /* i - 0 ==> i */ - return LEFTFOLD; - fins->o = IR_ADD; /* i - k ==> i + (-k) */ - fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */ - return RETRYFOLD; -} - -LJFOLD(SUB KINT any) -LJFOLD(SUB KINT64 any) -LJFOLDF(simplify_intsub_kleft) -{ - if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) { - fins->o = IR_NEG; /* 0 - i ==> -i */ - fins->op1 = fins->op2; - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(ADD any KINT64) -LJFOLDF(simplify_intadd_k64) -{ - if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */ - return LEFTFOLD; - return NEXTFOLD; -} - -LJFOLD(SUB any KINT64) -LJFOLDF(simplify_intsub_k64) -{ - uint64_t k = ir_kint64(fright)->u64; - if (k == 0) /* i - 0 ==> i */ - return LEFTFOLD; - fins->o = IR_ADD; /* i - k ==> i + (-k) */ - fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k); - return RETRYFOLD; -} - -static TRef simplify_intmul_k(jit_State *J, int32_t k) -{ - /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2. - ** But this is mainly intended for simple address arithmetic. - ** Also it's easier for the backend to optimize the original multiplies. - */ - if (k == 0) { /* i * 0 ==> 0 */ - return RIGHTFOLD; - } else if (k == 1) { /* i * 1 ==> i */ - return LEFTFOLD; - } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */ - fins->o = IR_BSHL; - fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k)); - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(MUL any KINT) -LJFOLDF(simplify_intmul_k32) -{ - if (fright->i >= 0) - return simplify_intmul_k(J, fright->i); - return NEXTFOLD; -} - -LJFOLD(MUL any KINT64) -LJFOLDF(simplify_intmul_k64) -{ -#if LJ_HASFFI - if (ir_kint64(fright)->u64 < 0x80000000u) - return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); - return NEXTFOLD; -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(MOD any KINT) -LJFOLDF(simplify_intmod_k) -{ - int32_t k = fright->i; - lua_assert(k != 0); - if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */ - fins->o = IR_BAND; - fins->op2 = lj_ir_kint(J, k-1); - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(MOD KINT any) -LJFOLDF(simplify_intmod_kleft) -{ - if (fleft->i == 0) - return INTFOLD(0); - return NEXTFOLD; -} - -LJFOLD(SUB any any) -LJFOLD(SUBOV any any) -LJFOLDF(simplify_intsub) -{ - if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */ - return irt_is64(fins->t) ? 
INT64FOLD(0) : INTFOLD(0); - return NEXTFOLD; -} - -LJFOLD(SUB ADD any) -LJFOLDF(simplify_intsubadd_leftcancel) -{ - if (!irt_isnum(fins->t)) { - PHIBARRIER(fleft); - if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */ - return fleft->op2; - if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */ - return fleft->op1; - } - return NEXTFOLD; -} - -LJFOLD(SUB SUB any) -LJFOLDF(simplify_intsubsub_leftcancel) -{ - if (!irt_isnum(fins->t)) { - PHIBARRIER(fleft); - if (fins->op2 == fleft->op1) { /* (i - j) - i ==> 0 - j */ - fins->op1 = (IRRef1)lj_ir_kint(J, 0); - fins->op2 = fleft->op2; - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(SUB any SUB) -LJFOLDF(simplify_intsubsub_rightcancel) -{ - if (!irt_isnum(fins->t)) { - PHIBARRIER(fright); - if (fins->op1 == fright->op1) /* i - (i - j) ==> j */ - return fright->op2; - } - return NEXTFOLD; -} - -LJFOLD(SUB any ADD) -LJFOLDF(simplify_intsubadd_rightcancel) -{ - if (!irt_isnum(fins->t)) { - PHIBARRIER(fright); - if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */ - fins->op2 = fright->op2; - fins->op1 = (IRRef1)lj_ir_kint(J, 0); - return RETRYFOLD; - } - if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */ - fins->op2 = fright->op1; - fins->op1 = (IRRef1)lj_ir_kint(J, 0); - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(SUB ADD ADD) -LJFOLDF(simplify_intsubaddadd_cancel) -{ - if (!irt_isnum(fins->t)) { - PHIBARRIER(fleft); - PHIBARRIER(fright); - if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */ - fins->op1 = fleft->op2; - fins->op2 = fright->op2; - return RETRYFOLD; - } - if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */ - fins->op1 = fleft->op2; - fins->op2 = fright->op1; - return RETRYFOLD; - } - if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */ - fins->op1 = fleft->op1; - fins->op2 = fright->op2; - return RETRYFOLD; - } - if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */ - fins->op1 = fleft->op1; - fins->op2 = fright->op1; - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(BAND any KINT) -LJFOLD(BAND any KINT64) -LJFOLDF(simplify_band_k) -{ - int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : - (int64_t)ir_k64(fright)->u64; - if (k == 0) /* i & 0 ==> 0 */ - return RIGHTFOLD; - if (k == -1) /* i & -1 ==> i */ - return LEFTFOLD; - return NEXTFOLD; -} - -LJFOLD(BOR any KINT) -LJFOLD(BOR any KINT64) -LJFOLDF(simplify_bor_k) -{ - int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : - (int64_t)ir_k64(fright)->u64; - if (k == 0) /* i | 0 ==> i */ - return LEFTFOLD; - if (k == -1) /* i | -1 ==> -1 */ - return RIGHTFOLD; - return NEXTFOLD; -} - -LJFOLD(BXOR any KINT) -LJFOLD(BXOR any KINT64) -LJFOLDF(simplify_bxor_k) -{ - int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : - (int64_t)ir_k64(fright)->u64; - if (k == 0) /* i xor 0 ==> i */ - return LEFTFOLD; - if (k == -1) { /* i xor -1 ==> ~i */ - fins->o = IR_BNOT; - fins->op2 = 0; - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(BSHL any KINT) -LJFOLD(BSHR any KINT) -LJFOLD(BSAR any KINT) -LJFOLD(BROL any KINT) -LJFOLD(BROR any KINT) -LJFOLDF(simplify_shift_ik) -{ - int32_t mask = irt_is64(fins->t) ? 
63 : 31; - int32_t k = (fright->i & mask); - if (k == 0) /* i o 0 ==> i */ - return LEFTFOLD; - if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */ - fins->o = IR_ADD; - fins->op2 = fins->op1; - return RETRYFOLD; - } - if (k != fright->i) { /* i o k ==> i o (k & mask) */ - fins->op2 = (IRRef1)lj_ir_kint(J, k); - return RETRYFOLD; - } -#ifndef LJ_TARGET_UNIFYROT - if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */ - fins->o = IR_BROL; - fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask); - return RETRYFOLD; - } -#endif - return NEXTFOLD; -} - -LJFOLD(BSHL any BAND) -LJFOLD(BSHR any BAND) -LJFOLD(BSAR any BAND) -LJFOLD(BROL any BAND) -LJFOLD(BROR any BAND) -LJFOLDF(simplify_shift_andk) -{ - IRIns *irk = IR(fright->op2); - PHIBARRIER(fright); - if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && - irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */ - int32_t mask = irt_is64(fins->t) ? 63 : 31; - int32_t k = irk->i & mask; - if (k == mask) { - fins->op2 = fright->op1; - return RETRYFOLD; - } - } - return NEXTFOLD; -} - -LJFOLD(BSHL KINT any) -LJFOLD(BSHR KINT any) -LJFOLD(BSHL KINT64 any) -LJFOLD(BSHR KINT64 any) -LJFOLDF(simplify_shift1_ki) -{ - int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i : - (int64_t)ir_k64(fleft)->u64; - if (k == 0) /* 0 o i ==> 0 */ - return LEFTFOLD; - return NEXTFOLD; -} - -LJFOLD(BSAR KINT any) -LJFOLD(BROL KINT any) -LJFOLD(BROR KINT any) -LJFOLD(BSAR KINT64 any) -LJFOLD(BROL KINT64 any) -LJFOLD(BROR KINT64 any) -LJFOLDF(simplify_shift2_ki) -{ - int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i : - (int64_t)ir_k64(fleft)->u64; - if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */ - return LEFTFOLD; - return NEXTFOLD; -} - -LJFOLD(BSHL BAND KINT) -LJFOLD(BSHR BAND KINT) -LJFOLD(BROL BAND KINT) -LJFOLD(BROR BAND KINT) -LJFOLDF(simplify_shiftk_andk) -{ - IRIns *irk = IR(fleft->op2); - PHIBARRIER(fleft); - if (irk->o == IR_KINT) { /* (i & k1) o k2 ==> (i o k2) & (k1 o k2) */ - int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o); - fins->op1 = fleft->op1; - fins->op1 = (IRRef1)lj_opt_fold(J); - fins->op2 = (IRRef1)lj_ir_kint(J, k); - fins->ot = IRTI(IR_BAND); - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(BAND BSHL KINT) -LJFOLD(BAND BSHR KINT) -LJFOLDF(simplify_andk_shiftk) -{ - IRIns *irk = IR(fleft->op2); - if (irk->o == IR_KINT && - kfold_intop(-1, irk->i, (IROp)fleft->o) == fright->i) - return LEFTFOLD; /* (i o k1) & k2 ==> i, if (-1 o k1) == k2 */ - return NEXTFOLD; -} - -/* -- Reassociation ------------------------------------------------------- */ - -LJFOLD(ADD ADD KINT) -LJFOLD(MUL MUL KINT) -LJFOLD(BAND BAND KINT) -LJFOLD(BOR BOR KINT) -LJFOLD(BXOR BXOR KINT) -LJFOLDF(reassoc_intarith_k) -{ - IRIns *irk = IR(fleft->op2); - if (irk->o == IR_KINT) { - int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o); - if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. 
*/ - return LEFTFOLD; - PHIBARRIER(fleft); - fins->op1 = fleft->op1; - fins->op2 = (IRRef1)lj_ir_kint(J, k); - return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */ - } - return NEXTFOLD; -} - -LJFOLD(ADD ADD KINT64) -LJFOLD(MUL MUL KINT64) -LJFOLD(BAND BAND KINT64) -LJFOLD(BOR BOR KINT64) -LJFOLD(BXOR BXOR KINT64) -LJFOLDF(reassoc_intarith_k64) -{ -#if LJ_HASFFI - IRIns *irk = IR(fleft->op2); - if (irk->o == IR_KINT64) { - uint64_t k = kfold_int64arith(ir_k64(irk)->u64, - ir_k64(fright)->u64, (IROp)fins->o); - PHIBARRIER(fleft); - fins->op1 = fleft->op1; - fins->op2 = (IRRef1)lj_ir_kint64(J, k); - return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */ - } - return NEXTFOLD; -#else - UNUSED(J); lua_assert(0); return FAILFOLD; -#endif -} - -LJFOLD(MIN MIN any) -LJFOLD(MAX MAX any) -LJFOLD(BAND BAND any) -LJFOLD(BOR BOR any) -LJFOLDF(reassoc_dup) -{ - if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2) - return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */ - return NEXTFOLD; -} - -LJFOLD(BXOR BXOR any) -LJFOLDF(reassoc_bxor) -{ - PHIBARRIER(fleft); - if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */ - return fleft->op2; - if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */ - return fleft->op1; - return NEXTFOLD; -} - -LJFOLD(BSHL BSHL KINT) -LJFOLD(BSHR BSHR KINT) -LJFOLD(BSAR BSAR KINT) -LJFOLD(BROL BROL KINT) -LJFOLD(BROR BROR KINT) -LJFOLDF(reassoc_shift) -{ - IRIns *irk = IR(fleft->op2); - PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */ - if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */ - int32_t mask = irt_is64(fins->t) ? 63 : 31; - int32_t k = (irk->i & mask) + (fright->i & mask); - if (k > mask) { /* Combined shift too wide? */ - if (fins->o == IR_BSHL || fins->o == IR_BSHR) - return mask == 31 ? INTFOLD(0) : INT64FOLD(0); - else if (fins->o == IR_BSAR) - k = mask; - else - k &= mask; - } - fins->op1 = fleft->op1; - fins->op2 = (IRRef1)lj_ir_kint(J, k); - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(MIN MIN KNUM) -LJFOLD(MAX MAX KNUM) -LJFOLD(MIN MIN KINT) -LJFOLD(MAX MAX KINT) -LJFOLDF(reassoc_minmax_k) -{ - IRIns *irk = IR(fleft->op2); - if (irk->o == IR_KNUM) { - lua_Number a = ir_knum(irk)->n; - lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD); - if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ - return LEFTFOLD; - PHIBARRIER(fleft); - fins->op1 = fleft->op1; - fins->op2 = (IRRef1)lj_ir_knum(J, y); - return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */ - } else if (irk->o == IR_KINT) { - int32_t a = irk->i; - int32_t y = kfold_intop(a, fright->i, fins->o); - if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ - return LEFTFOLD; - PHIBARRIER(fleft); - fins->op1 = fleft->op1; - fins->op2 = (IRRef1)lj_ir_kint(J, y); - return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */ - } - return NEXTFOLD; -} - -LJFOLD(MIN MAX any) -LJFOLD(MAX MIN any) -LJFOLDF(reassoc_minmax_left) -{ - if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2) - return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */ - return NEXTFOLD; -} - -LJFOLD(MIN any MAX) -LJFOLD(MAX any MIN) -LJFOLDF(reassoc_minmax_right) -{ - if (fins->op1 == fright->op1 || fins->op1 == fright->op2) - return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */ - return NEXTFOLD; -} - -/* -- Array bounds check elimination -------------------------------------- */ - -/* Eliminate ABC across PHIs to handle t[i-1] forwarding case. 
-** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists. -** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but needs better disambig. -*/ -LJFOLD(ABC any ADD) -LJFOLDF(abc_fwd) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) { - if (irref_isk(fright->op2)) { - IRIns *add2 = IR(fright->op1); - if (add2->o == IR_ADD && irref_isk(add2->op2) && - IR(fright->op2)->i == -IR(add2->op2)->i) { - IRRef ref = J->chain[IR_ABC]; - IRRef lim = add2->op1; - if (fins->op1 > lim) lim = fins->op1; - while (ref > lim) { - IRIns *ir = IR(ref); - if (ir->op1 == fins->op1 && ir->op2 == add2->op1) - return DROPFOLD; - ref = ir->prev; - } - } - } - } - return NEXTFOLD; -} - -/* Eliminate ABC for constants. -** ABC(asize, k1), ABC(asize k2) ==> ABC(asize, max(k1, k2)) -** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2. -*/ -LJFOLD(ABC any KINT) -LJFOLDF(abc_k) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) { - IRRef ref = J->chain[IR_ABC]; - IRRef asize = fins->op1; - while (ref > asize) { - IRIns *ir = IR(ref); - if (ir->op1 == asize && irref_isk(ir->op2)) { - int32_t k = IR(ir->op2)->i; - if (fright->i > k) - ir->op2 = fins->op2; - return DROPFOLD; - } - ref = ir->prev; - } - return EMITFOLD; /* Already performed CSE. */ - } - return NEXTFOLD; -} - -/* Eliminate invariant ABC inside loop. */ -LJFOLD(ABC any any) -LJFOLDF(abc_invar) -{ - /* Invariant ABC marked as PTR. Drop if op1 is invariant, too. */ - if (!irt_isint(fins->t) && fins->op1 < J->chain[IR_LOOP] && - !irt_isphi(IR(fins->op1)->t)) - return DROPFOLD; - return NEXTFOLD; -} - -/* -- Commutativity ------------------------------------------------------- */ - -/* The refs of commutative ops are canonicalized. Lower refs go to the right. -** Rationale behind this: -** - It (also) moves constants to the right. -** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices). -** - It helps CSE to find more matches. -** - The assembler generates better code with constants at the right. -*/ - -LJFOLD(ADD any any) -LJFOLD(MUL any any) -LJFOLD(ADDOV any any) -LJFOLD(MULOV any any) -LJFOLDF(comm_swap) -{ - if (fins->op1 < fins->op2) { /* Move lower ref to the right. */ - IRRef1 tmp = fins->op1; - fins->op1 = fins->op2; - fins->op2 = tmp; - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(EQ any any) -LJFOLD(NE any any) -LJFOLDF(comm_equal) -{ - /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */ - if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) - return CONDFOLD(fins->o == IR_EQ); - return fold_comm_swap(J); -} - -LJFOLD(LT any any) -LJFOLD(GE any any) -LJFOLD(LE any any) -LJFOLD(GT any any) -LJFOLD(ULT any any) -LJFOLD(UGE any any) -LJFOLD(ULE any any) -LJFOLD(UGT any any) -LJFOLDF(comm_comp) -{ - /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail */ - if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) - return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1); - if (fins->op1 < fins->op2) { /* Move lower ref to the right. */ - IRRef1 tmp = fins->op1; - fins->op1 = fins->op2; - fins->op2 = tmp; - fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */ - return RETRYFOLD; - } - return NEXTFOLD; -} - -LJFOLD(BAND any any) -LJFOLD(BOR any any) -LJFOLD(MIN any any) -LJFOLD(MAX any any) -LJFOLDF(comm_dup) -{ - if (fins->op1 == fins->op2) /* x o x ==> x */ - return LEFTFOLD; - return fold_comm_swap(J); -} - -LJFOLD(BXOR any any) -LJFOLDF(comm_bxor) -{ - if (fins->op1 == fins->op2) /* i xor i ==> 0 */ - return irt_is64(fins->t) ? 
INT64FOLD(0) : INTFOLD(0); - return fold_comm_swap(J); -} - -/* -- Simplification of compound expressions ------------------------------ */ - -static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p) -{ - int32_t k; - switch (irt_type(ir->t)) { - case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p); - case IRT_I8: k = (int32_t)*(int8_t *)p; break; - case IRT_U8: k = (int32_t)*(uint8_t *)p; break; - case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break; - case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break; - case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break; - case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p); - default: return 0; - } - return lj_ir_kint(J, k); -} - -/* Turn: string.sub(str, a, b) == kstr -** into: string.byte(str, a) == string.byte(kstr, 1) etc. -** Note: this creates unaligned XLOADs on x86/x64. -*/ -LJFOLD(EQ SNEW KGC) -LJFOLD(NE SNEW KGC) -LJFOLDF(merge_eqne_snew_kgc) -{ - GCstr *kstr = ir_kstr(fright); - int32_t len = (int32_t)kstr->len; - lua_assert(irt_isstr(fins->t)); - -#if LJ_TARGET_UNALIGNED -#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */ -#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */ -#else -#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */ -#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */ -#endif - - PHIBARRIER(fleft); - if (len <= FOLD_SNEW_MAX_LEN) { - IROp op = (IROp)fins->o; - IRRef strref = fleft->op1; - if (IR(strref)->o != IR_STRREF) - return NEXTFOLD; - if (op == IR_EQ) { - emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len)); - /* Caveat: fins/fleft/fright is no longer valid after emitir. */ - } else { - /* NE is not expanded since this would need an OR of two conds. */ - if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */ - return NEXTFOLD; - if (IR(fleft->op2)->i != len) - return DROPFOLD; - } - if (len > 0) { - /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */ - uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) : - len == 2 ? IRT(IR_XLOAD, IRT_U16) : - IRTI(IR_XLOAD)); - TRef tmp = emitir(ot, strref, - IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0)); - TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr)); - if (len == 3) - tmp = emitir(IRTI(IR_BAND), tmp, - lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00))); - fins->op1 = (IRRef1)tmp; - fins->op2 = (IRRef1)val; - fins->ot = (IROpT)IRTGI(op); - return RETRYFOLD; - } else { - return DROPFOLD; - } - } - return NEXTFOLD; -} - -/* -- Loads --------------------------------------------------------------- */ - -/* Loads cannot be folded or passed on to CSE in general. -** Alias analysis is needed to check for forwarding opportunities. -** -** Caveat: *all* loads must be listed here or they end up at CSE! -*/ - -LJFOLD(ALOAD any) -LJFOLDX(lj_opt_fwd_aload) - -/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */ -LJFOLD(HLOAD KKPTR) -LJFOLDF(kfold_hload_kkptr) -{ - UNUSED(J); - lua_assert(ir_kptr(fleft) == niltvg(J2G(J))); - return TREF_NIL; -} - -LJFOLD(HLOAD any) -LJFOLDX(lj_opt_fwd_hload) - -LJFOLD(ULOAD any) -LJFOLDX(lj_opt_fwd_uload) - -LJFOLD(CALLL any IRCALL_lj_tab_len) -LJFOLDX(lj_opt_fwd_tab_len) - -/* Upvalue refs are really loads, but there are no corresponding stores. -** So CSE is ok for them, except for UREFO across a GC step (see below). -** If the referenced function is const, its upvalue addresses are const, too. 
-** This can be used to improve CSE by looking for the same address, -** even if the upvalues originate from a different function. -*/ -LJFOLD(UREFO KGC any) -LJFOLD(UREFC KGC any) -LJFOLDF(cse_uref) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { - IRRef ref = J->chain[fins->o]; - GCfunc *fn = ir_kfunc(fleft); - GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)])); - while (ref > 0) { - IRIns *ir = IR(ref); - if (irref_isk(ir->op1)) { - GCfunc *fn2 = ir_kfunc(IR(ir->op1)); - if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) { - if (fins->o == IR_UREFO && gcstep_barrier(J, ref)) - break; - return ref; - } - } - ref = ir->prev; - } - } - return EMITFOLD; -} - -LJFOLD(HREFK any any) -LJFOLDX(lj_opt_fwd_hrefk) - -LJFOLD(HREF TNEW any) -LJFOLDF(fwd_href_tnew) -{ - if (lj_opt_fwd_href_nokey(J)) - return lj_ir_kkptr(J, niltvg(J2G(J))); - return NEXTFOLD; -} - -LJFOLD(HREF TDUP KPRI) -LJFOLD(HREF TDUP KGC) -LJFOLD(HREF TDUP KNUM) -LJFOLDF(fwd_href_tdup) -{ - TValue keyv; - lj_ir_kvalue(J->L, &keyv, fright); - if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) && - lj_opt_fwd_href_nokey(J)) - return lj_ir_kkptr(J, niltvg(J2G(J))); - return NEXTFOLD; -} - -/* We can safely FOLD/CSE array/hash refs and field loads, since there -** are no corresponding stores. But we need to check for any NEWREF with -** an aliased table, as it may invalidate all of the pointers and fields. -** Only HREF needs the NEWREF check -- AREF and HREFK already depend on -** FLOADs. And NEWREF itself is treated like a store (see below). -** LREF is constant (per trace) since coroutine switches are not inlined. -*/ -LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE) -LJFOLDF(fload_tab_tnew_asize) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) - return INTFOLD(fleft->op1); - return NEXTFOLD; -} - -LJFOLD(FLOAD TNEW IRFL_TAB_HMASK) -LJFOLDF(fload_tab_tnew_hmask) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) - return INTFOLD((1 << fleft->op2)-1); - return NEXTFOLD; -} - -LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE) -LJFOLDF(fload_tab_tdup_asize) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) - return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize); - return NEXTFOLD; -} - -LJFOLD(FLOAD TDUP IRFL_TAB_HMASK) -LJFOLDF(fload_tab_tdup_hmask) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) - return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask); - return NEXTFOLD; -} - -LJFOLD(HREF any any) -LJFOLD(FLOAD any IRFL_TAB_ARRAY) -LJFOLD(FLOAD any IRFL_TAB_NODE) -LJFOLD(FLOAD any IRFL_TAB_ASIZE) -LJFOLD(FLOAD any IRFL_TAB_HMASK) -LJFOLDF(fload_tab_ah) -{ - TRef tr = lj_opt_cse(J); - return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD; -} - -/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */ -LJFOLD(FLOAD KGC IRFL_STR_LEN) -LJFOLDF(fload_str_len_kgc) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) - return INTFOLD((int32_t)ir_kstr(fleft)->len); - return NEXTFOLD; -} - -LJFOLD(FLOAD SNEW IRFL_STR_LEN) -LJFOLDF(fload_str_len_snew) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { - PHIBARRIER(fleft); - return fleft->op2; - } - return NEXTFOLD; -} - -LJFOLD(FLOAD TOSTR IRFL_STR_LEN) -LJFOLDF(fload_str_len_tostr) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fleft->op2 == IRTOSTR_CHAR) - return INTFOLD(1); - return NEXTFOLD; -} - -/* The C type ID of cdata objects is immutable. 
*/ -LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID) -LJFOLDF(fload_cdata_typeid_kgc) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) - return INTFOLD((int32_t)ir_kcdata(fleft)->ctypeid); - return NEXTFOLD; -} - -/* Get the contents of immutable cdata objects. */ -LJFOLD(FLOAD KGC IRFL_CDATA_PTR) -LJFOLD(FLOAD KGC IRFL_CDATA_INT) -LJFOLD(FLOAD KGC IRFL_CDATA_INT64) -LJFOLDF(fload_cdata_int64_kgc) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { - void *p = cdataptr(ir_kcdata(fleft)); - if (irt_is64(fins->t)) - return INT64FOLD(*(uint64_t *)p); - else - return INTFOLD(*(int32_t *)p); - } - return NEXTFOLD; -} - -LJFOLD(FLOAD CNEW IRFL_CDATA_CTYPEID) -LJFOLD(FLOAD CNEWI IRFL_CDATA_CTYPEID) -LJFOLDF(fload_cdata_typeid_cnew) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) - return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */ - return NEXTFOLD; -} - -/* Pointer, int and int64 cdata objects are immutable. */ -LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR) -LJFOLD(FLOAD CNEWI IRFL_CDATA_INT) -LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64) -LJFOLDF(fload_cdata_ptr_int64_cnew) -{ - if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) - return fleft->op2; /* Fold even across PHI to avoid allocations. */ - return NEXTFOLD; -} - -LJFOLD(FLOAD any IRFL_STR_LEN) -LJFOLD(FLOAD any IRFL_FUNC_ENV) -LJFOLD(FLOAD any IRFL_THREAD_ENV) -LJFOLD(FLOAD any IRFL_CDATA_CTYPEID) -LJFOLD(FLOAD any IRFL_CDATA_PTR) -LJFOLD(FLOAD any IRFL_CDATA_INT) -LJFOLD(FLOAD any IRFL_CDATA_INT64) -LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */ -LJFOLDX(lj_opt_cse) - -/* All other field loads need alias analysis. */ -LJFOLD(FLOAD any any) -LJFOLDX(lj_opt_fwd_fload) - -/* This is for LOOP only. Recording handles SLOADs internally. */ -LJFOLD(SLOAD any any) -LJFOLDF(fwd_sload) -{ - if ((fins->op2 & IRSLOAD_FRAME)) { - TRef tr = lj_opt_cse(J); - return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr; - } else { - lua_assert(J->slot[fins->op1] != 0); - return J->slot[fins->op1]; - } -} - -/* Only fold for KKPTR. The pointer _and_ the contents must be const. */ -LJFOLD(XLOAD KKPTR any) -LJFOLDF(xload_kptr) -{ - TRef tr = kfold_xload(J, fins, ir_kptr(fleft)); - return tr ? tr : NEXTFOLD; -} - -LJFOLD(XLOAD any any) -LJFOLDX(lj_opt_fwd_xload) - -/* -- Write barriers ------------------------------------------------------ */ - -/* Write barriers are amenable to CSE, but not across any incremental -** GC steps. -** -** The same logic applies to open upvalue references, because a stack -** may be resized during a GC step (not the current stack, but maybe that -** of a coroutine). -*/ -LJFOLD(TBAR any) -LJFOLD(OBAR any any) -LJFOLD(UREFO any any) -LJFOLDF(barrier_tab) -{ - TRef tr = lj_opt_cse(J); - if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */ - return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */ - return tr; -} - -LJFOLD(TBAR TNEW) -LJFOLD(TBAR TDUP) -LJFOLDF(barrier_tnew_tdup) -{ - /* New tables are always white and never need a barrier. */ - if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */ - return NEXTFOLD; - return DROPFOLD; -} - -/* -- Profiling ----------------------------------------------------------- */ - -LJFOLD(PROF any any) -LJFOLDF(prof) -{ - IRRef ref = J->chain[IR_PROF]; - if (ref+1 == J->cur.nins) /* Drop neighbouring IR_PROF. */ - return ref; - return EMITFOLD; -} - -/* -- Stores and allocations ---------------------------------------------- */ - -/* Stores and allocations cannot be folded or passed on to CSE in general. 
-** But some stores can be eliminated with dead-store elimination (DSE). -** -** Caveat: *all* stores and allocs must be listed here or they end up at CSE! -*/ - -LJFOLD(ASTORE any any) -LJFOLD(HSTORE any any) -LJFOLDX(lj_opt_dse_ahstore) - -LJFOLD(USTORE any any) -LJFOLDX(lj_opt_dse_ustore) - -LJFOLD(FSTORE any any) -LJFOLDX(lj_opt_dse_fstore) - -LJFOLD(XSTORE any any) -LJFOLDX(lj_opt_dse_xstore) - -LJFOLD(NEWREF any any) /* Treated like a store. */ -LJFOLD(CALLA any any) -LJFOLD(CALLL any any) /* Safeguard fallback. */ -LJFOLD(CALLS any any) -LJFOLD(CALLXS any any) -LJFOLD(XBAR) -LJFOLD(RETF any any) /* Modifies BASE. */ -LJFOLD(TNEW any any) -LJFOLD(TDUP any) -LJFOLD(CNEW any any) -LJFOLD(XSNEW any any) -LJFOLD(BUFHDR any any) -LJFOLDX(lj_ir_emit) - -/* ------------------------------------------------------------------------ */ - -/* Every entry in the generated hash table is a 32 bit pattern: -** -** xxxxxxxx iiiiiii lllllll rrrrrrrrrr -** -** xxxxxxxx = 8 bit index into fold function table -** iiiiiii = 7 bit folded instruction opcode -** lllllll = 7 bit left instruction opcode -** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field -*/ - -#include "lj_folddef.h" - -/* ------------------------------------------------------------------------ */ - -/* Fold IR instruction. */ -TRef LJ_FASTCALL lj_opt_fold(jit_State *J) -{ - uint32_t key, any; - IRRef ref; - - if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) { - lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) | - JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT); - /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */ - if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N) - return lj_opt_cse(J); - - /* No FOLD, forwarding or CSE? Emit raw IR for loads, except for SLOAD. */ - if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE)) != - (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE) && - irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD) - return lj_ir_emit(J); - - /* No FOLD or DSE? Emit raw IR for stores. */ - if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_DSE)) != - (JIT_F_OPT_FOLD|JIT_F_OPT_DSE) && - irm_kind(lj_ir_mode[fins->o]) == IRM_S) - return lj_ir_emit(J); - } - - /* Fold engine start/retry point. */ -retry: - /* Construct key from opcode and operand opcodes (unless literal/none). */ - key = ((uint32_t)fins->o << 17); - if (fins->op1 >= J->cur.nk) { - key += (uint32_t)IR(fins->op1)->o << 10; - *fleft = *IR(fins->op1); - } - if (fins->op2 >= J->cur.nk) { - key += (uint32_t)IR(fins->op2)->o; - *fright = *IR(fins->op2); - } else { - key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */ - } - - /* Check for a match in order from most specific to least specific. */ - any = 0; - for (;;) { - uint32_t k = key | (any & 0x1ffff); - uint32_t h = fold_hashkey(k); - uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */ - if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) { - ref = (IRRef)tref_ref(fold_func[fh >> 24](J)); - if (ref != NEXTFOLD) - break; - } - if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */ - return lj_opt_cse(J); - any = (any | (any >> 10)) ^ 0xffc00; - } - - /* Return value processing, ordered by frequency. 
*/ - if (LJ_LIKELY(ref >= MAX_FOLD)) - return TREF(ref, irt_t(IR(ref)->t)); - if (ref == RETRYFOLD) - goto retry; - if (ref == KINTFOLD) - return lj_ir_kint(J, fins->i); - if (ref == FAILFOLD) - lj_trace_err(J, LJ_TRERR_GFAIL); - lua_assert(ref == DROPFOLD); - return REF_DROP; -} - -/* -- Common-Subexpression Elimination ------------------------------------ */ - -/* CSE an IR instruction. This is very fast due to the skip-list chains. */ -TRef LJ_FASTCALL lj_opt_cse(jit_State *J) -{ - /* Avoid narrow to wide store-to-load forwarding stall */ - IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16); - IROp op = fins->o; - if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { - /* Limited search for same operands in per-opcode chain. */ - IRRef ref = J->chain[op]; - IRRef lim = fins->op1; - if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. */ - while (ref > lim) { - if (IR(ref)->op12 == op12) - return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */ - ref = IR(ref)->prev; - } - } - /* Otherwise emit IR (inlined for speed). */ - { - IRRef ref = lj_ir_nextins(J); - IRIns *ir = IR(ref); - ir->prev = J->chain[op]; - ir->op12 = op12; - J->chain[op] = (IRRef1)ref; - ir->o = fins->o; - J->guardemit.irt |= fins->t.irt; - return TREF(ref, irt_t((ir->t = fins->t))); - } -} - -/* CSE with explicit search limit. */ -TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim) -{ - IRRef ref = J->chain[fins->o]; - IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16); - while (ref > lim) { - if (IR(ref)->op12 == op12) - return ref; - ref = IR(ref)->prev; - } - return lj_ir_emit(J); -} - -/* ------------------------------------------------------------------------ */ - -#undef IR -#undef fins -#undef fleft -#undef fright -#undef knumleft -#undef knumright -#undef emitir - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_loop.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_loop.c deleted file mode 100644 index 4b4ab7dc3a3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_loop.c +++ /dev/null @@ -1,449 +0,0 @@ -/* -** LOOP: Loop Optimizations. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_loop_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_snap.h" -#include "lj_vm.h" - -/* Loop optimization: -** -** Traditional Loop-Invariant Code Motion (LICM) splits the instructions -** of a loop into invariant and variant instructions. The invariant -** instructions are hoisted out of the loop and only the variant -** instructions remain inside the loop body. -** -** Unfortunately LICM is mostly useless for compiling dynamic languages. -** The IR has many guards and most of the subsequent instructions are -** control-dependent on them. The first non-hoistable guard would -** effectively prevent hoisting of all subsequent instructions. -** -** That's why we use a special form of unrolling using copy-substitution, -** combined with redundancy elimination: -** -** The recorded instruction stream is re-emitted to the compiler pipeline -** with substituted operands. The substitution table is filled with the -** refs returned by re-emitting each instruction. This can be done -** on-the-fly, because the IR is in strict SSA form, where every ref is -** defined before its use. 
-**
-** This approach generates two code sections, separated by the LOOP
-** instruction:
-**
-** 1. The recorded instructions form a kind of pre-roll for the loop. It
-**    contains a mix of invariant and variant instructions and performs
-**    exactly one loop iteration (but not necessarily the 1st iteration).
-**
-** 2. The loop body contains only the variant instructions and performs
-**    all remaining loop iterations.
-**
-** On first sight that looks like a waste of space, because the variant
-** instructions are present twice. But the key insight is that the
-** pre-roll honors the control-dependencies for *both* the pre-roll itself
-** *and* the loop body!
-**
-** It also means one doesn't have to explicitly model control-dependencies
-** (which, BTW, wouldn't help LICM much). And it's much easier to
-** integrate sparse snapshotting with this approach.
-**
-** One of the nicest aspects of this approach is that all of the
-** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be
-** reused with only minor restrictions (e.g. one should not fold
-** instructions across loop-carried dependencies).
-**
-** But in general all optimizations can be applied which only need to look
-** backwards into the generated instruction stream. At any point in time
-** during the copy-substitution process this contains both a static loop
-** iteration (the pre-roll) and a dynamic one (from the to-be-copied
-** instruction up to the end of the partial loop body).
-**
-** Since control-dependencies are implicitly kept, CSE also applies to all
-** kinds of guards. The major advantage is that all invariant guards can
-** be hoisted, too.
-**
-** Load/store forwarding works across loop iterations, too. This is
-** important if loop-carried dependencies are kept in upvalues or tables.
-** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may
-** become a forwarded loop-recurrence after inlining.
-**
-** Since the IR is in SSA form, loop-carried dependencies have to be
-** modeled with PHI instructions. The potential candidates for PHIs are
-** collected on-the-fly during copy-substitution. After eliminating the
-** redundant ones, PHI instructions are emitted *below* the loop body.
-**
-** Note that this departure from traditional SSA form doesn't change the
-** semantics of the PHI instructions themselves. But it greatly simplifies
-** on-the-fly generation of the IR and the machine code.
-*/
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Pass IR on to next optimization in chain (FOLD). */
-#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
-
-/* Emit raw IR without passing through optimizations. */
-#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
-
-/* -- PHI elimination ----------------------------------------------------- */
-
-/* Emit or eliminate collected PHIs. */
-static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
-                          SnapNo onsnap)
-{
-  int passx = 0;
-  IRRef i, j, nslots;
-  IRRef invar = J->chain[IR_LOOP];
-  /* Pass #1: mark redundant and potentially redundant PHIs. */
-  for (i = 0, j = 0; i < nphi; i++) {
-    IRRef lref = phi[i];
-    IRRef rref = subst[lref];
-    if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */
-      irt_clearphi(IR(lref)->t);
-    } else {
-      phi[j++] = (IRRef1)lref;
-      if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
-        /* Quick check for simple recurrences failed, need pass2.
*/ - irt_setmark(IR(lref)->t); - passx = 1; - } - } - } - nphi = j; - /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */ - if (passx) { - SnapNo s; - for (i = J->cur.nins-1; i > invar; i--) { - IRIns *ir = IR(i); - if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t); - if (!irref_isk(ir->op1)) { - irt_clearmark(IR(ir->op1)->t); - if (ir->op1 < invar && - ir->o >= IR_CALLN && ir->o <= IR_CARG) { /* ORDER IR */ - ir = IR(ir->op1); - while (ir->o == IR_CARG) { - if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t); - if (irref_isk(ir->op1)) break; - ir = IR(ir->op1); - irt_clearmark(ir->t); - } - } - } - } - for (s = J->cur.nsnap-1; s >= onsnap; s--) { - SnapShot *snap = &J->cur.snap[s]; - SnapEntry *map = &J->cur.snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - for (n = 0; n < nent; n++) { - IRRef ref = snap_ref(map[n]); - if (!irref_isk(ref)) irt_clearmark(IR(ref)->t); - } - } - } - /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */ - nslots = J->baseslot+J->maxslot; - for (i = 1; i < nslots; i++) { - IRRef ref = tref_ref(J->slot[i]); - while (!irref_isk(ref) && ref != subst[ref]) { - IRIns *ir = IR(ref); - irt_clearmark(ir->t); /* Unmark potential uses, too. */ - if (irt_isphi(ir->t) || irt_ispri(ir->t)) - break; - irt_setphi(ir->t); - if (nphi >= LJ_MAX_PHI) - lj_trace_err(J, LJ_TRERR_PHIOV); - phi[nphi++] = (IRRef1)ref; - ref = subst[ref]; - if (ref > invar) - break; - } - } - /* Pass #4: propagate non-redundant PHIs. */ - while (passx) { - passx = 0; - for (i = 0; i < nphi; i++) { - IRRef lref = phi[i]; - IRIns *ir = IR(lref); - if (!irt_ismarked(ir->t)) { /* Propagate only from unmarked PHIs. */ - IRIns *irr = IR(subst[lref]); - if (irt_ismarked(irr->t)) { /* Right ref points to other PHI? */ - irt_clearmark(irr->t); /* Mark that PHI as non-redundant. */ - passx = 1; /* Retry. */ - } - } - } - } - /* Pass #5: emit PHI instructions or eliminate PHIs. */ - for (i = 0; i < nphi; i++) { - IRRef lref = phi[i]; - IRIns *ir = IR(lref); - if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */ - IRRef rref = subst[lref]; - if (rref > invar) - irt_setphi(IR(rref)->t); - emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref); - } else { /* Otherwise eliminate PHI. */ - irt_clearmark(ir->t); - irt_clearphi(ir->t); - } - } -} - -/* -- Loop unrolling using copy-substitution ------------------------------ */ - -/* Copy-substitute snapshot. */ -static void loop_subst_snap(jit_State *J, SnapShot *osnap, - SnapEntry *loopmap, IRRef1 *subst) -{ - SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs]; - SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)]; - MSize nmapofs; - MSize on, ln, nn, onent = osnap->nent; - BCReg nslots = osnap->nslots; - SnapShot *snap = &J->cur.snap[J->cur.nsnap]; - if (irt_isguard(J->guardemit)) { /* Guard inbetween? */ - nmapofs = J->cur.nsnapmap; - J->cur.nsnap++; /* Add new snapshot. */ - } else { /* Otherwise overwrite previous snapshot. */ - snap--; - nmapofs = snap->mapofs; - } - J->guardemit.irt = 0; - /* Setup new snapshot. */ - snap->mapofs = (uint16_t)nmapofs; - snap->ref = (IRRef1)J->cur.nins; - snap->nslots = nslots; - snap->topslot = osnap->topslot; - snap->count = 0; - nmap = &J->cur.snapmap[nmapofs]; - /* Substitute snapshot slots. */ - on = ln = nn = 0; - while (on < onent) { - SnapEntry osn = omap[on], lsn = loopmap[ln]; - if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */ - nmap[nn++] = lsn; - ln++; - } else { /* Copy substituted slot from snapshot map. 
*/ - if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */ - if (!irref_isk(snap_ref(osn))) - osn = snap_setref(osn, subst[snap_ref(osn)]); - nmap[nn++] = osn; - on++; - } - } - while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */ - nmap[nn++] = loopmap[ln++]; - snap->nent = (uint8_t)nn; - omap += onent; - nmap += nn; - while (omap < nextmap) /* Copy PC + frame links. */ - *nmap++ = *omap++; - J->cur.nsnapmap = (uint16_t)(nmap - J->cur.snapmap); -} - -typedef struct LoopState { - jit_State *J; - IRRef1 *subst; - MSize sizesubst; -} LoopState; - -/* Unroll loop. */ -static void loop_unroll(LoopState *lps) -{ - jit_State *J = lps->J; - IRRef1 phi[LJ_MAX_PHI]; - uint32_t nphi = 0; - IRRef1 *subst; - SnapNo onsnap; - SnapShot *osnap, *loopsnap; - SnapEntry *loopmap, *psentinel; - IRRef ins, invar; - - /* Allocate substitution table. - ** Only non-constant refs in [REF_BIAS,invar) are valid indexes. - */ - invar = J->cur.nins; - lps->sizesubst = invar - REF_BIAS; - lps->subst = lj_mem_newvec(J->L, lps->sizesubst, IRRef1); - subst = lps->subst - REF_BIAS; - subst[REF_BASE] = REF_BASE; - - /* LOOP separates the pre-roll from the loop body. */ - emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0); - - /* Grow snapshot buffer and map for copy-substituted snapshots. - ** Need up to twice the number of snapshots minus #0 and loop snapshot. - ** Need up to twice the number of entries plus fallback substitutions - ** from the loop snapshot entries for each new snapshot. - ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap! - */ - onsnap = J->cur.nsnap; - lj_snap_grow_buf(J, 2*onsnap-2); - lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent); - - /* The loop snapshot is used for fallback substitutions. */ - loopsnap = &J->cur.snap[onsnap-1]; - loopmap = &J->cur.snapmap[loopsnap->mapofs]; - /* The PC of snapshot #0 and the loop snapshot must match. */ - psentinel = &loopmap[loopsnap->nent]; - lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]); - *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */ - - /* Start substitution with snapshot #1 (#0 is empty for root traces). */ - osnap = &J->cur.snap[1]; - - /* Copy and substitute all recorded instructions and snapshots. */ - for (ins = REF_FIRST; ins < invar; ins++) { - IRIns *ir; - IRRef op1, op2; - - if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */ - loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */ - - /* Substitute instruction operands. */ - ir = IR(ins); - op1 = ir->op1; - if (!irref_isk(op1)) op1 = subst[op1]; - op2 = ir->op2; - if (!irref_isk(op2)) op2 = subst[op2]; - if (irm_kind(lj_ir_mode[ir->o]) == IRM_N && - op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */ - subst[ins] = (IRRef1)ins; /* Shortcut. */ - } else { - /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */ - IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */ - IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2)); - subst[ins] = (IRRef1)ref; - if (ref != ins) { - IRIns *irr = IR(ref); - if (ref < invar) { /* Loop-carried dependency? */ - /* Potential PHI? */ - if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) { - irt_setphi(irr->t); - if (nphi >= LJ_MAX_PHI) - lj_trace_err(J, LJ_TRERR_PHIOV); - phi[nphi++] = (IRRef1)ref; - } - /* Check all loop-carried dependencies for type instability. 
*/ - if (!irt_sametype(t, irr->t)) { - if (irt_isinteger(t) && irt_isinteger(irr->t)) - continue; - else if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */ - ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT)); - else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */ - ref = tref_ref(emitir(IRTGI(IR_CONV), ref, - IRCONV_INT_NUM|IRCONV_CHECK)); - else - lj_trace_err(J, LJ_TRERR_TYPEINS); - subst[ins] = (IRRef1)ref; - irr = IR(ref); - goto phiconv; - } - } else if (ref != REF_DROP && irr->o == IR_CONV && - ref > invar && irr->op1 < invar) { - /* May need an extra PHI for a CONV. */ - ref = irr->op1; - irr = IR(ref); - phiconv: - if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) { - irt_setphi(irr->t); - if (nphi >= LJ_MAX_PHI) - lj_trace_err(J, LJ_TRERR_PHIOV); - phi[nphi++] = (IRRef1)ref; - } - } - } - } - } - if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */ - J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs; - lua_assert(J->cur.nsnapmap <= J->sizesnapmap); - *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */ - - loop_emit_phi(J, subst, phi, nphi, onsnap); -} - -/* Undo any partial changes made by the loop optimization. */ -static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap) -{ - ptrdiff_t i; - SnapShot *snap = &J->cur.snap[nsnap-1]; - SnapEntry *map = J->cur.snapmap; - map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */ - J->cur.nsnapmap = (uint16_t)nsnapmap; - J->cur.nsnap = nsnap; - J->guardemit.irt = 0; - lj_ir_rollback(J, ins); - for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */ - BPropEntry *bp = &J->bpropcache[i]; - if (bp->val >= ins) - bp->key = 0; - } - for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */ - IRIns *ir = IR(ins); - irt_clearphi(ir->t); - irt_clearmark(ir->t); - } -} - -/* Protected callback for loop optimization. */ -static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud) -{ - UNUSED(L); UNUSED(dummy); - loop_unroll((LoopState *)ud); - return NULL; -} - -/* Loop optimization. */ -int lj_opt_loop(jit_State *J) -{ - IRRef nins = J->cur.nins; - SnapNo nsnap = J->cur.nsnap; - MSize nsnapmap = J->cur.nsnapmap; - LoopState lps; - int errcode; - lps.J = J; - lps.subst = NULL; - lps.sizesubst = 0; - errcode = lj_vm_cpcall(J->L, NULL, &lps, cploop_opt); - lj_mem_freevec(J2G(J), lps.subst, lps.sizesubst, IRRef1); - if (LJ_UNLIKELY(errcode)) { - lua_State *L = J->L; - if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */ - int32_t e = numberVint(L->top-1); - switch ((TraceError)e) { - case LJ_TRERR_TYPEINS: /* Type instability. */ - case LJ_TRERR_GFAIL: /* Guard would always fail. */ - /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */ - if (--J->instunroll < 0) /* But do not unroll forever. */ - break; - L->top--; /* Remove error object. */ - loop_undo(J, nins, nsnap, nsnapmap); - return 1; /* Loop optimization failed, continue recording. */ - default: - break; - } - } - lj_err_throw(L, errcode); /* Propagate all other errors. */ - } - return 0; /* Loop optimization is ok. */ -} - -#undef IR -#undef emitir -#undef emitir_raw - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_mem.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_mem.c deleted file mode 100644 index e04a6228f4b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_mem.c +++ /dev/null @@ -1,935 +0,0 @@ -/* -** Memory access optimizations. 
-** AA: Alias Analysis using high-level semantic disambiguation. -** FWD: Load Forwarding (L2L) + Store Forwarding (S2L). -** DSE: Dead-Store Elimination. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_mem_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_tab.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" -#include "lj_ircall.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) -#define fins (&J->fold.ins) -#define fleft (&J->fold.left) -#define fright (&J->fold.right) - -/* -** Caveat #1: return value is not always a TRef -- only use with tref_ref(). -** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold(). -*/ - -/* Return values from alias analysis. */ -typedef enum { - ALIAS_NO, /* The two refs CANNOT alias (exact). */ - ALIAS_MAY, /* The two refs MAY alias (inexact). */ - ALIAS_MUST /* The two refs MUST alias (exact). */ -} AliasRet; - -/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */ - -/* Simplified escape analysis: check for intervening stores. */ -static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop) -{ - IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */ - for (ir++; ir < stop; ir++) - if (ir->op2 == ref && - (ir->o == IR_ASTORE || ir->o == IR_HSTORE || - ir->o == IR_USTORE || ir->o == IR_FSTORE)) - return ALIAS_MAY; /* Reference was stored and might alias. */ - return ALIAS_NO; /* Reference was not stored. */ -} - -/* Alias analysis for two different table references. */ -static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb) -{ - IRIns *taba = IR(ta), *tabb = IR(tb); - int newa, newb; - lua_assert(ta != tb); - lua_assert(irt_istab(taba->t) && irt_istab(tabb->t)); - /* Disambiguate new allocations. */ - newa = (taba->o == IR_TNEW || taba->o == IR_TDUP); - newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP); - if (newa && newb) - return ALIAS_NO; /* Two different allocations never alias. */ - if (newb) { /* At least one allocation? */ - IRIns *tmp = taba; taba = tabb; tabb = tmp; - } else if (!newa) { - return ALIAS_MAY; /* Anything else: we just don't know. */ - } - return aa_escape(J, taba, tabb); -} - -/* Alias analysis for array and hash access using key-based disambiguation. */ -static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb) -{ - IRRef ka = refa->op2; - IRRef kb = refb->op2; - IRIns *keya, *keyb; - IRRef ta, tb; - if (refa == refb) - return ALIAS_MUST; /* Shortcut for same refs. */ - keya = IR(ka); - if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); } - keyb = IR(kb); - if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); } - ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1; - tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1; - if (ka == kb) { - /* Same key. Check for same table with different ref (NEWREF vs. HREF). */ - if (ta == tb) - return ALIAS_MUST; /* Same key, same table. */ - else - return aa_table(J, ta, tb); /* Same key, possibly different table. */ - } - if (irref_isk(ka) && irref_isk(kb)) - return ALIAS_NO; /* Different constant keys. */ - if (refa->o == IR_AREF) { - /* Disambiguate array references based on index arithmetic. */ - int32_t ofsa = 0, ofsb = 0; - IRRef basea = ka, baseb = kb; - lua_assert(refb->o == IR_AREF); - /* Gather base and offset from t[base] or t[base+-ofs]. 
*/ - if (keya->o == IR_ADD && irref_isk(keya->op2)) { - basea = keya->op1; - ofsa = IR(keya->op2)->i; - if (basea == kb && ofsa != 0) - return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */ - } - if (keyb->o == IR_ADD && irref_isk(keyb->op2)) { - baseb = keyb->op1; - ofsb = IR(keyb->op2)->i; - if (ka == baseb && ofsb != 0) - return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */ - } - if (basea == baseb && ofsa != ofsb) - return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */ - } else { - /* Disambiguate hash references based on the type of their keys. */ - lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) && - (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF)); - if (!irt_sametype(keya->t, keyb->t)) - return ALIAS_NO; /* Different key types. */ - } - if (ta == tb) - return ALIAS_MAY; /* Same table, cannot disambiguate keys. */ - else - return aa_table(J, ta, tb); /* Try to disambiguate tables. */ -} - -/* Array and hash load forwarding. */ -static TRef fwd_ahload(jit_State *J, IRRef xref) -{ - IRIns *xr = IR(xref); - IRRef lim = xref; /* Search limit. */ - IRRef ref; - - /* Search for conflicting stores. */ - ref = J->chain[fins->o+IRDELTA_L2S]; - while (ref > xref) { - IRIns *store = IR(ref); - switch (aa_ahref(J, xr, IR(store->op1))) { - case ALIAS_NO: break; /* Continue searching. */ - case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ - case ALIAS_MUST: return store->op2; /* Store forwarding. */ - } - ref = store->prev; - } - - /* No conflicting store (yet): const-fold loads from allocations. */ - { - IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr; - IRRef tab = ir->op1; - ir = IR(tab); - if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) { - /* A NEWREF with a number key may end up pointing to the array part. - ** But it's referenced from HSTORE and not found in the ASTORE chain. - ** For now simply consider this a conflict without forwarding anything. - */ - if (xr->o == IR_AREF) { - IRRef ref2 = J->chain[IR_NEWREF]; - while (ref2 > tab) { - IRIns *newref = IR(ref2); - if (irt_isnum(IR(newref->op2)->t)) - goto cselim; - ref2 = newref->prev; - } - } - /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF. - ** But the above search for conflicting stores was limited by xref. - ** So continue searching, limited by the TNEW/TDUP. Store forwarding - ** is ok, too. A conflict does NOT limit the search for a matching load. - */ - while (ref > tab) { - IRIns *store = IR(ref); - switch (aa_ahref(J, xr, IR(store->op1))) { - case ALIAS_NO: break; /* Continue searching. */ - case ALIAS_MAY: goto cselim; /* Conflicting store. */ - case ALIAS_MUST: return store->op2; /* Store forwarding. */ - } - ref = store->prev; - } - lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t)); - if (irt_ispri(fins->t)) { - return TREF_PRI(irt_type(fins->t)); - } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) || - irt_isstr(fins->t)) { - TValue keyv; - cTValue *tv; - IRIns *key = IR(xr->op2); - if (key->o == IR_KSLOT) key = IR(key->op1); - lj_ir_kvalue(J->L, &keyv, key); - tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv); - lua_assert(itype2irt(tv) == irt_type(fins->t)); - if (irt_isnum(fins->t)) - return lj_ir_knum_u64(J, tv->u64); - else if (LJ_DUALNUM && irt_isint(fins->t)) - return lj_ir_kint(J, intV(tv)); - else - return lj_ir_kstr(J, strV(tv)); - } - /* Othwerwise: don't intern as a constant. */ - } - } - -cselim: - /* Try to find a matching load. Below the conflicting store, if any. 
*/ - ref = J->chain[fins->o]; - while (ref > lim) { - IRIns *load = IR(ref); - if (load->op1 == xref) - return ref; /* Load forwarding. */ - ref = load->prev; - } - return 0; /* Conflict or no match. */ -} - -/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */ -static TRef fwd_aload_reassoc(jit_State *J) -{ - IRIns *irx = IR(fins->op1); - IRIns *key = IR(irx->op2); - if (key->o == IR_ADD && irref_isk(key->op2)) { - IRIns *add2 = IR(key->op1); - if (add2->o == IR_ADD && irref_isk(add2->op2) && - IR(key->op2)->i == -IR(add2->op2)->i) { - IRRef ref = J->chain[IR_AREF]; - IRRef lim = add2->op1; - if (irx->op1 > lim) lim = irx->op1; - while (ref > lim) { - IRIns *ir = IR(ref); - if (ir->op1 == irx->op1 && ir->op2 == add2->op1) - return fwd_ahload(J, ref); - ref = ir->prev; - } - } - } - return 0; -} - -/* ALOAD forwarding. */ -TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J) -{ - IRRef ref; - if ((ref = fwd_ahload(J, fins->op1)) || - (ref = fwd_aload_reassoc(J))) - return ref; - return EMITFOLD; -} - -/* HLOAD forwarding. */ -TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J) -{ - IRRef ref = fwd_ahload(J, fins->op1); - if (ref) - return ref; - return EMITFOLD; -} - -/* HREFK forwarding. */ -TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J) -{ - IRRef tab = fleft->op1; - IRRef ref = J->chain[IR_NEWREF]; - while (ref > tab) { - IRIns *newref = IR(ref); - if (tab == newref->op1) { - if (fright->op1 == newref->op2) - return ref; /* Forward from NEWREF. */ - else - goto docse; - } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) { - goto docse; - } - ref = newref->prev; - } - /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */ - if (IR(tab)->o == IR_TDUP) - fins->t.irt &= ~IRT_GUARD; /* Drop HREFK guard. */ -docse: - return CSEFOLD; -} - -/* Check whether HREF of TNEW/TDUP can be folded to niltv. */ -int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J) -{ - IRRef lim = fins->op1; /* Search limit. */ - IRRef ref; - - /* The key for an ASTORE may end up in the hash part after a NEWREF. */ - if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) { - ref = J->chain[IR_ASTORE]; - while (ref > lim) { - if (ref < J->chain[IR_NEWREF]) - return 0; /* Conflict. */ - ref = IR(ref)->prev; - } - } - - /* Search for conflicting stores. */ - ref = J->chain[IR_HSTORE]; - while (ref > lim) { - IRIns *store = IR(ref); - if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO) - return 0; /* Conflict. */ - ref = store->prev; - } - - return 1; /* No conflict. Can fold to niltv. */ -} - -/* Check whether there's no aliasing table.clear. */ -static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta) -{ - IRRef ref = J->chain[IR_CALLS]; - while (ref > lim) { - IRIns *calls = IR(ref); - if (calls->op2 == IRCALL_lj_tab_clear && - (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO)) - return 0; /* Conflict. */ - ref = calls->prev; - } - return 1; /* No conflict. Can safely FOLD/CSE. */ -} - -/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */ -int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim) -{ - IRRef ta = fins->op1; - IRRef ref = J->chain[IR_NEWREF]; - while (ref > lim) { - IRIns *newref = IR(ref); - if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO) - return 0; /* Conflict. */ - ref = newref->prev; - } - return fwd_aa_tab_clear(J, lim, ta); -} - -/* ASTORE/HSTORE elimination. */ -TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J) -{ - IRRef xref = fins->op1; /* xREF reference. 
*/ - IRRef val = fins->op2; /* Stored value reference. */ - IRIns *xr = IR(xref); - IRRef1 *refp = &J->chain[fins->o]; - IRRef ref = *refp; - while (ref > xref) { /* Search for redundant or conflicting stores. */ - IRIns *store = IR(ref); - switch (aa_ahref(J, xr, IR(store->op1))) { - case ALIAS_NO: - break; /* Continue searching. */ - case ALIAS_MAY: /* Store to MAYBE the same location. */ - if (store->op2 != val) /* Conflict if the value is different. */ - goto doemit; - break; /* Otherwise continue searching. */ - case ALIAS_MUST: /* Store to the same location. */ - if (store->op2 == val) /* Same value: drop the new store. */ - return DROPFOLD; - /* Different value: try to eliminate the redundant store. */ - if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ - IRIns *ir; - /* Check for any intervening guards (includes conflicting loads). */ - for (ir = IR(J->cur.nins-1); ir > store; ir--) - if (irt_isguard(ir->t) || ir->o == IR_CALLL) - goto doemit; /* No elimination possible. */ - /* Remove redundant store from chain and replace with NOP. */ - *refp = store->prev; - store->o = IR_NOP; - store->t.irt = IRT_NIL; - store->op1 = store->op2 = 0; - store->prev = 0; - /* Now emit the new store instead. */ - } - goto doemit; - } - ref = *(refp = &store->prev); - } -doemit: - return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ -} - -/* -- ULOAD forwarding ---------------------------------------------------- */ - -/* The current alias analysis for upvalues is very simplistic. It only -** disambiguates between the unique upvalues of the same function. -** This is good enough for now, since most upvalues are read-only. -** -** A more precise analysis would be feasible with the help of the parser: -** generate a unique key for every upvalue, even across all prototypes. -** Lacking a realistic use-case, it's unclear whether this is beneficial. -*/ -static AliasRet aa_uref(IRIns *refa, IRIns *refb) -{ - if (refa->o != refb->o) - return ALIAS_NO; /* Different UREFx type. */ - if (refa->op1 == refb->op1) { /* Same function. */ - if (refa->op2 == refb->op2) - return ALIAS_MUST; /* Same function, same upvalue idx. */ - else - return ALIAS_NO; /* Same function, different upvalue idx. */ - } else { /* Different functions, check disambiguation hash values. */ - if (((refa->op2 ^ refb->op2) & 0xff)) - return ALIAS_NO; /* Upvalues with different hash values cannot alias. */ - else - return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */ - } -} - -/* ULOAD forwarding. */ -TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J) -{ - IRRef uref = fins->op1; - IRRef lim = REF_BASE; /* Search limit. */ - IRIns *xr = IR(uref); - IRRef ref; - - /* Search for conflicting stores. */ - ref = J->chain[IR_USTORE]; - while (ref > lim) { - IRIns *store = IR(ref); - switch (aa_uref(xr, IR(store->op1))) { - case ALIAS_NO: break; /* Continue searching. */ - case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ - case ALIAS_MUST: return store->op2; /* Store forwarding. */ - } - ref = store->prev; - } - -cselim: - /* Try to find a matching load. Below the conflicting store, if any. */ - - ref = J->chain[IR_ULOAD]; - while (ref > lim) { - IRIns *ir = IR(ref); - if (ir->op1 == uref || - (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o)) - return ref; /* Match for identical or equal UREFx (non-CSEable UREFO). */ - ref = ir->prev; - } - return lj_ir_emit(J); -} - -/* USTORE elimination. 
*/ -TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J) -{ - IRRef xref = fins->op1; /* xREF reference. */ - IRRef val = fins->op2; /* Stored value reference. */ - IRIns *xr = IR(xref); - IRRef1 *refp = &J->chain[IR_USTORE]; - IRRef ref = *refp; - while (ref > xref) { /* Search for redundant or conflicting stores. */ - IRIns *store = IR(ref); - switch (aa_uref(xr, IR(store->op1))) { - case ALIAS_NO: - break; /* Continue searching. */ - case ALIAS_MAY: /* Store to MAYBE the same location. */ - if (store->op2 != val) /* Conflict if the value is different. */ - goto doemit; - break; /* Otherwise continue searching. */ - case ALIAS_MUST: /* Store to the same location. */ - if (store->op2 == val) /* Same value: drop the new store. */ - return DROPFOLD; - /* Different value: try to eliminate the redundant store. */ - if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ - IRIns *ir; - /* Check for any intervening guards (includes conflicting loads). */ - for (ir = IR(J->cur.nins-1); ir > store; ir--) - if (irt_isguard(ir->t)) - goto doemit; /* No elimination possible. */ - /* Remove redundant store from chain and replace with NOP. */ - *refp = store->prev; - store->o = IR_NOP; - store->t.irt = IRT_NIL; - store->op1 = store->op2 = 0; - store->prev = 0; - if (ref+1 < J->cur.nins && - store[1].o == IR_OBAR && store[1].op1 == xref) { - IRRef1 *bp = &J->chain[IR_OBAR]; - IRIns *obar; - for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp)) - bp = &obar->prev; - /* Remove OBAR, too. */ - *bp = obar->prev; - obar->o = IR_NOP; - obar->t.irt = IRT_NIL; - obar->op1 = obar->op2 = 0; - obar->prev = 0; - } - /* Now emit the new store instead. */ - } - goto doemit; - } - ref = *(refp = &store->prev); - } -doemit: - return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ -} - -/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */ - -/* Alias analysis for field access. -** Field loads are cheap and field stores are rare. -** Simple disambiguation based on field types is good enough. -*/ -static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb) -{ - if (refa->op2 != refb->op2) - return ALIAS_NO; /* Different fields. */ - if (refa->op1 == refb->op1) - return ALIAS_MUST; /* Same field, same object. */ - else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM) - return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */ - else - return ALIAS_MAY; /* Same field, possibly different object. */ -} - -/* Only the loads for mutable fields end up here (see FOLD). */ -TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J) -{ - IRRef oref = fins->op1; /* Object reference. */ - IRRef fid = fins->op2; /* Field ID. */ - IRRef lim = oref; /* Search limit. */ - IRRef ref; - - /* Search for conflicting stores. */ - ref = J->chain[IR_FSTORE]; - while (ref > oref) { - IRIns *store = IR(ref); - switch (aa_fref(J, fins, IR(store->op1))) { - case ALIAS_NO: break; /* Continue searching. */ - case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ - case ALIAS_MUST: return store->op2; /* Store forwarding. */ - } - ref = store->prev; - } - - /* No conflicting store: const-fold field loads from allocations. */ - if (fid == IRFL_TAB_META) { - IRIns *ir = IR(oref); - if (ir->o == IR_TNEW || ir->o == IR_TDUP) - return lj_ir_knull(J, IRT_TAB); - } - -cselim: - /* Try to find a matching load. Below the conflicting store, if any. */ - return lj_opt_cselim(J, lim); -} - -/* FSTORE elimination. 
*/ -TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J) -{ - IRRef fref = fins->op1; /* FREF reference. */ - IRRef val = fins->op2; /* Stored value reference. */ - IRIns *xr = IR(fref); - IRRef1 *refp = &J->chain[IR_FSTORE]; - IRRef ref = *refp; - while (ref > fref) { /* Search for redundant or conflicting stores. */ - IRIns *store = IR(ref); - switch (aa_fref(J, xr, IR(store->op1))) { - case ALIAS_NO: - break; /* Continue searching. */ - case ALIAS_MAY: - if (store->op2 != val) /* Conflict if the value is different. */ - goto doemit; - break; /* Otherwise continue searching. */ - case ALIAS_MUST: - if (store->op2 == val) /* Same value: drop the new store. */ - return DROPFOLD; - /* Different value: try to eliminate the redundant store. */ - if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ - IRIns *ir; - /* Check for any intervening guards or conflicting loads. */ - for (ir = IR(J->cur.nins-1); ir > store; ir--) - if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2)) - goto doemit; /* No elimination possible. */ - /* Remove redundant store from chain and replace with NOP. */ - *refp = store->prev; - store->o = IR_NOP; - store->t.irt = IRT_NIL; - store->op1 = store->op2 = 0; - store->prev = 0; - /* Now emit the new store instead. */ - } - goto doemit; - } - ref = *(refp = &store->prev); - } -doemit: - return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ -} - -/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */ - -/* Find cdata allocation for a reference (if any). */ -static IRIns *aa_findcnew(jit_State *J, IRIns *ir) -{ - while (ir->o == IR_ADD) { - if (!irref_isk(ir->op1)) { - IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */ - if (ir1) return ir1; - } - if (irref_isk(ir->op2)) return NULL; - ir = IR(ir->op2); /* Flatten right-recursion. */ - } - return ir->o == IR_CNEW ? ir : NULL; -} - -/* Alias analysis for two cdata allocations. */ -static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb) -{ - IRIns *cnewa = aa_findcnew(J, refa); - IRIns *cnewb = aa_findcnew(J, refb); - if (cnewa == cnewb) - return ALIAS_MAY; /* Same allocation or neither is an allocation. */ - if (cnewa && cnewb) - return ALIAS_NO; /* Two different allocations never alias. */ - if (cnewb) { cnewa = cnewb; refb = refa; } - return aa_escape(J, cnewa, refb); -} - -/* Alias analysis for XLOAD/XSTORE. */ -static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb) -{ - ptrdiff_t ofsa = 0, ofsb = 0; - IRIns *refb = IR(xb->op1); - IRIns *basea = refa, *baseb = refb; - if (refa == refb && irt_sametype(xa->t, xb->t)) - return ALIAS_MUST; /* Shortcut for same refs with identical type. */ - /* Offset-based disambiguation. */ - if (refa->o == IR_ADD && irref_isk(refa->op2)) { - IRIns *irk = IR(refa->op2); - basea = IR(refa->op1); - ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : - (ptrdiff_t)irk->i; - } - if (refb->o == IR_ADD && irref_isk(refb->op2)) { - IRIns *irk = IR(refb->op2); - baseb = IR(refb->op1); - ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : - (ptrdiff_t)irk->i; - } - /* Treat constified pointers like base vs. base+offset. */ - if (basea->o == IR_KPTR && baseb->o == IR_KPTR) { - ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea); - baseb = basea; - } - /* This implements (very) strict aliasing rules. - ** Different types do NOT alias, except for differences in signedness. - ** Type punning through unions is allowed (but forces a reload). 
- */ - if (basea == baseb) { - ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t); - if (ofsa == ofsb) { - if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t)) - return ALIAS_MUST; /* Same-sized, same-kind. May need to convert. */ - } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) { - return ALIAS_NO; /* Non-overlapping base+-o1 vs. base+-o2. */ - } - /* NYI: extract, extend or reinterpret bits (int <-> fp). */ - return ALIAS_MAY; /* Overlapping or type punning: force reload. */ - } - if (!irt_sametype(xa->t, xb->t) && - !(irt_typerange(xa->t, IRT_I8, IRT_U64) && - ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1)) - return ALIAS_NO; - /* NYI: structural disambiguation. */ - return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. */ -} - -/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */ -static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2) -{ - IRRef ref = J->chain[op]; - IRRef lim = op1; - if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; } - while (ref > lim) { - IRIns *ir = IR(ref); - if (ir->op1 == op1 && ir->op2 == op2) - return ref; - ref = ir->prev; - } - return 0; -} - -/* Reassociate index references. */ -static IRRef reassoc_xref(jit_State *J, IRIns *ir) -{ - ptrdiff_t ofs = 0; - if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */ - IRIns *irk = IR(ir->op2); - ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : - (ptrdiff_t)irk->i; - ir = IR(ir->op1); - } - if (ir->o == IR_ADD) { /* Add of base + index. */ - /* Index ref > base ref for loop-carried dependences. Only check op1. */ - IRIns *ir2, *ir1 = IR(ir->op1); - int32_t shift = 0; - IRRef idxref; - /* Determine index shifts. Don't bother with IR_MUL here. */ - if (ir1->o == IR_BSHL && irref_isk(ir1->op2)) - shift = IR(ir1->op2)->i; - else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2) - shift = 1; - else - ir1 = ir; - ir2 = IR(ir1->op1); - /* A non-reassociated add. Must be a loop-carried dependence. */ - if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2)) - ofs += (ptrdiff_t)IR(ir2->op2)->i << shift; - else - return 0; - idxref = ir2->op1; - /* Try to CSE the reassociated chain. Give up if not found. */ - if (ir1 != ir && - !(idxref = reassoc_trycse(J, ir1->o, idxref, - ir1->o == IR_BSHL ? ir1->op2 : idxref))) - return 0; - if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2))) - return 0; - if (ofs != 0) { - IRRef refk = tref_ref(lj_ir_kintp(J, ofs)); - if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk))) - return 0; - } - return idxref; /* Success, found a reassociated index reference. Phew. */ - } - return 0; /* Failure. */ -} - -/* XLOAD forwarding. */ -TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J) -{ - IRRef xref = fins->op1; - IRIns *xr = IR(xref); - IRRef lim = xref; /* Search limit. */ - IRRef ref; - - if ((fins->op2 & IRXLOAD_READONLY)) - goto cselim; - if ((fins->op2 & IRXLOAD_VOLATILE)) - goto doemit; - - /* Search for conflicting stores. */ - ref = J->chain[IR_XSTORE]; -retry: - if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS]; - if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR]; - while (ref > lim) { - IRIns *store = IR(ref); - switch (aa_xref(J, xr, fins, store)) { - case ALIAS_NO: break; /* Continue searching. */ - case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ - case ALIAS_MUST: - /* Emit conversion if the loaded type doesn't match the forwarded type. 
*/ - if (!irt_sametype(fins->t, IR(store->op2)->t)) { - IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t); - if (dt == IRT_I8 || dt == IRT_I16) { /* Trunc + sign-extend. */ - st = dt | IRCONV_SEXT; - dt = IRT_INT; - } else if (dt == IRT_U8 || dt == IRT_U16) { /* Trunc + zero-extend. */ - st = dt; - dt = IRT_INT; - } - fins->ot = IRT(IR_CONV, dt); - fins->op1 = store->op2; - fins->op2 = (dt<<5)|st; - return RETRYFOLD; - } - return store->op2; /* Store forwarding. */ - } - ref = store->prev; - } - -cselim: - /* Try to find a matching load. Below the conflicting store, if any. */ - ref = J->chain[IR_XLOAD]; - while (ref > lim) { - /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */ - if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t)) - return ref; - ref = IR(ref)->prev; - } - - /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */ - if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] && - xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) { - ref = J->chain[IR_XSTORE]; - while (ref > lim) /* Skip stores that have already been checked. */ - ref = IR(ref)->prev; - lim = xref; - xr = IR(xref); - goto retry; /* Retry with the reassociated reference. */ - } -doemit: - return EMITFOLD; -} - -/* XSTORE elimination. */ -TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J) -{ - IRRef xref = fins->op1; - IRIns *xr = IR(xref); - IRRef lim = xref; /* Search limit. */ - IRRef val = fins->op2; /* Stored value reference. */ - IRRef1 *refp = &J->chain[IR_XSTORE]; - IRRef ref = *refp; - if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS]; - if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR]; - if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW]; - while (ref > lim) { /* Search for redundant or conflicting stores. */ - IRIns *store = IR(ref); - switch (aa_xref(J, xr, fins, store)) { - case ALIAS_NO: - break; /* Continue searching. */ - case ALIAS_MAY: - if (store->op2 != val) /* Conflict if the value is different. */ - goto doemit; - break; /* Otherwise continue searching. */ - case ALIAS_MUST: - if (store->op2 == val) /* Same value: drop the new store. */ - return DROPFOLD; - /* Different value: try to eliminate the redundant store. */ - if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ - IRIns *ir; - /* Check for any intervening guards or any XLOADs (no AA performed). */ - for (ir = IR(J->cur.nins-1); ir > store; ir--) - if (irt_isguard(ir->t) || ir->o == IR_XLOAD) - goto doemit; /* No elimination possible. */ - /* Remove redundant store from chain and replace with NOP. */ - *refp = store->prev; - store->o = IR_NOP; - store->t.irt = IRT_NIL; - store->op1 = store->op2 = 0; - store->prev = 0; - /* Now emit the new store instead. */ - } - goto doemit; - } - ref = *(refp = &store->prev); - } -doemit: - return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ -} - -/* -- Forwarding of lj_tab_len -------------------------------------------- */ - -/* This is rather simplistic right now, but better than nothing. */ -TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J) -{ - IRRef tab = fins->op1; /* Table reference. */ - IRRef lim = tab; /* Search limit. */ - IRRef ref; - - /* Any ASTORE is a conflict and limits the search. */ - if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE]; - - /* Search for conflicting HSTORE with numeric key. 
*/ - ref = J->chain[IR_HSTORE]; - while (ref > lim) { - IRIns *store = IR(ref); - IRIns *href = IR(store->op1); - IRIns *key = IR(href->op2); - if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) { - lim = ref; /* Conflicting store found, limits search for TLEN. */ - break; - } - ref = store->prev; - } - - /* Search for aliasing table.clear. */ - if (!fwd_aa_tab_clear(J, lim, tab)) - return lj_ir_emit(J); - - /* Try to find a matching load. Below the conflicting store, if any. */ - return lj_opt_cselim(J, lim); -} - -/* -- ASTORE/HSTORE previous type analysis -------------------------------- */ - -/* Check whether the previous value for a table store is non-nil. -** This can be derived either from a previous store or from a previous -** load (because all loads from tables perform a type check). -** -** The result of the analysis can be used to avoid the metatable check -** and the guard against HREF returning niltv. Both of these are cheap, -** so let's not spend too much effort on the analysis. -** -** A result of 1 is exact: previous value CANNOT be nil. -** A result of 0 is inexact: previous value MAY be nil. -*/ -int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref) -{ - /* First check stores. */ - IRRef ref = J->chain[loadop+IRDELTA_L2S]; - while (ref > xref) { - IRIns *store = IR(ref); - if (store->op1 == xref) { /* Same xREF. */ - /* A nil store MAY alias, but a non-nil store MUST alias. */ - return !irt_isnil(store->t); - } else if (irt_isnil(store->t)) { /* Must check any nil store. */ - IRRef skref = IR(store->op1)->op2; - IRRef xkref = IR(xref)->op2; - /* Same key type MAY alias. Need ALOAD check due to multiple int types. */ - if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) { - if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref)) - return 0; /* A nil store with same const key or var key MAY alias. */ - /* Different const keys CANNOT alias. */ - } /* Different key types CANNOT alias. */ - } /* Other non-nil stores MAY alias. */ - ref = store->prev; - } - - /* Check loads since nothing could be derived from stores. */ - ref = J->chain[loadop]; - while (ref > xref) { - IRIns *load = IR(ref); - if (load->op1 == xref) { /* Same xREF. */ - /* A nil load MAY alias, but a non-nil load MUST alias. */ - return !irt_isnil(load->t); - } /* Other non-nil loads MAY alias. */ - ref = load->prev; - } - return 0; /* Nothing derived at all, previous value MAY be nil. */ -} - -/* ------------------------------------------------------------------------ */ - -#undef IR -#undef fins -#undef fleft -#undef fright - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_narrow.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_narrow.c deleted file mode 100644 index d1993452a6a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_narrow.c +++ /dev/null @@ -1,652 +0,0 @@ -/* -** NARROW: Narrowing of numbers to integers (double to int32_t). -** STRIPOV: Stripping of overflow checks. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_narrow_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_bc.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_vm.h" -#include "lj_strscan.h" - -/* Rationale for narrowing optimizations: -** -** Lua has only a single number type and this is a FP double by default. 
-** Narrowing doubles to integers does not pay off for the interpreter on a -** current-generation x86/x64 machine. Most FP operations need the same -** amount of execution resources as their integer counterparts, except -** with slightly longer latencies. Longer latencies are a non-issue for -** the interpreter, since they are usually hidden by other overhead. -** -** The total CPU execution bandwidth is the sum of the bandwidth of the FP -** and the integer units, because they execute in parallel. The FP units -** have an equal or higher bandwidth than the integer units. Not using -** them means losing execution bandwidth. Moving work away from them to -** the already quite busy integer units is a losing proposition. -** -** The situation for JIT-compiled code is a bit different: the higher code -** density makes the extra latencies much more visible. Tight loops expose -** the latencies for updating the induction variables. Array indexing -** requires narrowing conversions with high latencies and additional -** guards (to check that the index is really an integer). And many common -** optimizations only work on integers. -** -** One solution would be speculative, eager narrowing of all number loads. -** This causes many problems, like losing -0 or the need to resolve type -** mismatches between traces. It also effectively forces the integer type -** to have overflow-checking semantics. This impedes many basic -** optimizations and requires adding overflow checks to all integer -** arithmetic operations (whereas FP arithmetics can do without). -** -** Always replacing an FP op with an integer op plus an overflow check is -** counter-productive on a current-generation super-scalar CPU. Although -** the overflow check branches are highly predictable, they will clog the -** execution port for the branch unit and tie up reorder buffers. This is -** turning a pure data-flow dependency into a different data-flow -** dependency (with slightly lower latency) *plus* a control dependency. -** In general, you don't want to do this since latencies due to data-flow -** dependencies can be well hidden by out-of-order execution. -** -** A better solution is to keep all numbers as FP values and only narrow -** when it's beneficial to do so. LuaJIT uses predictive narrowing for -** induction variables and demand-driven narrowing for index expressions, -** integer arguments and bit operations. Additionally it can eliminate or -** hoist most of the resulting overflow checks. Regular arithmetic -** computations are never narrowed to integers. -** -** The integer type in the IR has convenient wrap-around semantics and -** ignores overflow. Extra operations have been added for -** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type. -** Apart from reducing overall complexity of the compiler, this also -** nicely solves the problem where you want to apply algebraic -** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can -** use lea instead of an add for integer ADD, but not for ADDOV (lea does -** not affect the flags, but it helps to avoid register moves). -** -** -** All of the above has to be reconsidered for architectures with slow FP -** operations or without a hardware FPU. The dual-number mode of LuaJIT -** addresses this issue. Arithmetic operations are performed on integers -** as far as possible and overflow checks are added as needed. -** -** This implies that narrowing for integer arguments and bit operations -** should also strip overflow checks, e.g. 
replace ADDOV with ADD. The -** original overflow guards are weak and can be eliminated by DCE, if -** there's no other use. -** -** A slight twist is that it's usually beneficial to use overflow-checked -** integer arithmetics if all inputs are already integers. This is the only -** change that affects the single-number mode, too. -*/ - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) -#define fins (&J->fold.ins) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) - -/* -- Elimination of narrowing type conversions --------------------------- */ - -/* Narrowing of index expressions and bit operations is demand-driven. The -** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT) -** in all of these cases (e.g. array indexing or string indexing). FOLD -** already takes care of eliminating simple redundant conversions like -** CONV.int.num(CONV.num.int(x)) ==> x. -** -** But the surrounding code is FP-heavy and arithmetic operations are -** performed on FP numbers (for the single-number mode). Consider a common -** example such as 'x=t[i+1]', with 'i' already an integer (due to induction -** variable narrowing). The index expression would be recorded as -** CONV.int.num(ADD(CONV.num.int(i), 1)) -** which is clearly suboptimal. -** -** One can do better by recursively backpropagating the narrowing type -** conversion across FP arithmetic operations. This turns FP ops into -** their corresponding integer counterparts. Depending on the semantics of -** the conversion they also need to check for overflow. Currently only ADD -** and SUB are supported. -** -** The above example can be rewritten as -** ADDOV(CONV.int.num(CONV.num.int(i)), 1) -** and then into ADDOV(i, 1) after folding of the conversions. The original -** FP ops remain in the IR and are eliminated by DCE since all references to -** them are gone. -** -** [In dual-number mode the trace recorder already emits ADDOV etc., but -** this can be further reduced. See below.] -** -** Special care has to be taken to avoid narrowing across an operation -** which is potentially operating on non-integral operands. One obvious -** case is when an expression contains a non-integral constant, but ends -** up as an integer index at runtime (like t[x+1.5] with x=0.5). -** -** Operations with two non-constant operands illustrate a similar problem -** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there, -** unless it can be proven that either operand is integral (e.g. by CSEing -** a previous conversion). As a not-so-obvious corollary this logic also -** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]). -** -** Correctness of the transformation is guaranteed by avoiding to expand -** the tree by adding more conversions than the one we would need to emit -** if not backpropagating. TOBIT employs a more optimistic rule, because -** the conversion has special semantics, designed to make the life of the -** compiler writer easier. ;-) -** -** Using on-the-fly backpropagation of an expression tree doesn't work -** because it's unknown whether the transform is correct until the end. -** This either requires IR rollback and cache invalidation for every -** subtree or a two-pass algorithm. The former didn't work out too well, -** so the code now combines a recursive collector with a stack-based -** emitter. 
-** -** [A recursive backpropagation algorithm with backtracking, employing -** skip-list lookup and round-robin caching, emitting stack operations -** on-the-fly for a stack-based interpreter -- and all of that in a meager -** kilobyte? Yep, compilers are a great treasure chest. Throw away your -** textbooks and read the codebase of a compiler today!] -** -** There's another optimization opportunity for array indexing: it's -** always accompanied by an array bounds-check. The outermost overflow -** check may be delegated to the ABC operation. This works because ABC is -** an unsigned comparison and wrap-around due to overflow creates negative -** numbers. -** -** But this optimization is only valid for constants that cannot overflow -** an int32_t into the range of valid array indexes [0..2^27+1). A check -** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30 -** wraps to -2^30-1. -** -** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are -** quite common. So the above example finally ends up as ADD(i, 1)! -** -** Later on, the assembler is able to fuse the whole array reference and -** the ADD into the memory operands of loads and other instructions. This -** is why LuaJIT is able to generate very pretty (and fast) machine code -** for array indexing. And that, my dear, concludes another story about -** one of the hidden secrets of LuaJIT ... -*/ - -/* Maximum backpropagation depth and maximum stack size. */ -#define NARROW_MAX_BACKPROP 100 -#define NARROW_MAX_STACK 256 - -/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1] -** The lower 16 bits hold a reference (or 0). The upper 16 bits hold -** the IR opcode + type or one of the following special opcodes: -*/ -enum { - NARROW_REF, /* Push ref. */ - NARROW_CONV, /* Push conversion of ref. */ - NARROW_SEXT, /* Push sign-extension of ref. */ - NARROW_INT /* Push KINT ref. The next code holds an int32_t. */ -}; - -typedef uint32_t NarrowIns; - -#define NARROWINS(op, ref) (((op) << 16) + (ref)) -#define narrow_op(ins) ((IROpT)((ins) >> 16)) -#define narrow_ref(ins) ((IRRef1)(ins)) - -/* Context used for narrowing of type conversions. */ -typedef struct NarrowConv { - jit_State *J; /* JIT compiler state. */ - NarrowIns *sp; /* Current stack pointer. */ - NarrowIns *maxsp; /* Maximum stack pointer minus redzone. */ - IRRef mode; /* Conversion mode (IRCONV_*). */ - IRType t; /* Destination type: IRT_INT or IRT_I64. */ - NarrowIns stack[NARROW_MAX_STACK]; /* Stack holding stack-machine code. */ -} NarrowConv; - -/* Lookup a reference in the backpropagation cache. */ -static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode) -{ - ptrdiff_t i; - for (i = 0; i < BPROP_SLOTS; i++) { - BPropEntry *bp = &J->bpropcache[i]; - /* Stronger checks are ok, too. */ - if (bp->key == key && bp->mode >= mode && - ((bp->mode ^ mode) & IRCONV_MODEMASK) == 0) - return bp; - } - return NULL; -} - -/* Add an entry to the backpropagation cache. */ -static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode) -{ - uint32_t slot = J->bpropslot; - BPropEntry *bp = &J->bpropcache[slot]; - J->bpropslot = (slot + 1) & (BPROP_SLOTS-1); - bp->key = key; - bp->val = val; - bp->mode = mode; -} - -/* Backpropagate overflow stripping. 
*/ -static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth) -{ - jit_State *J = nc->J; - IRIns *ir = IR(ref); - if (ir->o == IR_ADDOV || ir->o == IR_SUBOV || - (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) { - BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT); - if (bp) { - ref = bp->val; - } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) { - NarrowIns *savesp = nc->sp; - narrow_stripov_backprop(nc, ir->op1, depth); - if (nc->sp < nc->maxsp) { - narrow_stripov_backprop(nc, ir->op2, depth); - if (nc->sp < nc->maxsp) { - *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref); - return; - } - } - nc->sp = savesp; /* Path too deep, need to backtrack. */ - } - } - *nc->sp++ = NARROWINS(NARROW_REF, ref); -} - -/* Backpropagate narrowing conversion. Return number of needed conversions. */ -static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth) -{ - jit_State *J = nc->J; - IRIns *ir = IR(ref); - IRRef cref; - - if (nc->sp >= nc->maxsp) return 10; /* Path too deep. */ - - /* Check the easy cases first. */ - if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) { - if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY) - narrow_stripov_backprop(nc, ir->op1, depth+1); - else - *nc->sp++ = NARROWINS(NARROW_REF, ir->op1); /* Undo conversion. */ - if (nc->t == IRT_I64) - *nc->sp++ = NARROWINS(NARROW_SEXT, 0); /* Sign-extend integer. */ - return 0; - } else if (ir->o == IR_KNUM) { /* Narrow FP constant. */ - lua_Number n = ir_knum(ir)->n; - if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) { - /* Allows a wider range of constants. */ - int64_t k64 = (int64_t)n; - if (n == (lua_Number)k64) { /* Only if const doesn't lose precision. */ - *nc->sp++ = NARROWINS(NARROW_INT, 0); - *nc->sp++ = (NarrowIns)k64; /* But always truncate to 32 bits. */ - return 0; - } - } else { - int32_t k = lj_num2int(n); - /* Only if constant is a small integer. */ - if (checki16(k) && n == (lua_Number)k) { - *nc->sp++ = NARROWINS(NARROW_INT, 0); - *nc->sp++ = (NarrowIns)k; - return 0; - } - } - return 10; /* Never narrow other FP constants (this is rare). */ - } - - /* Try to CSE the conversion. Stronger checks are ok, too. */ - cref = J->chain[fins->o]; - while (cref > ref) { - IRIns *cr = IR(cref); - if (cr->op1 == ref && - (fins->o == IR_TOBIT || - ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) && - irt_isguard(cr->t) >= irt_isguard(fins->t)))) { - *nc->sp++ = NARROWINS(NARROW_REF, cref); - return 0; /* Already there, no additional conversion needed. */ - } - cref = cr->prev; - } - - /* Backpropagate across ADD/SUB. */ - if (ir->o == IR_ADD || ir->o == IR_SUB) { - /* Try cache lookup first. */ - IRRef mode = nc->mode; - BPropEntry *bp; - /* Inner conversions need a stronger check. */ - if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0) - mode += IRCONV_CHECK-IRCONV_INDEX; - bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode); - if (bp) { - *nc->sp++ = NARROWINS(NARROW_REF, bp->val); - return 0; - } else if (nc->t == IRT_I64) { - /* Try sign-extending from an existing (checked) conversion to int. 
*/ - mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX; - bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode); - if (bp) { - *nc->sp++ = NARROWINS(NARROW_REF, bp->val); - *nc->sp++ = NARROWINS(NARROW_SEXT, 0); - return 0; - } - } - if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) { - NarrowIns *savesp = nc->sp; - int count = narrow_conv_backprop(nc, ir->op1, depth); - count += narrow_conv_backprop(nc, ir->op2, depth); - if (count <= 1) { /* Limit total number of conversions. */ - *nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref); - return count; - } - nc->sp = savesp; /* Too many conversions, need to backtrack. */ - } - } - - /* Otherwise add a conversion. */ - *nc->sp++ = NARROWINS(NARROW_CONV, ref); - return 1; -} - -/* Emit the conversions collected during backpropagation. */ -static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc) -{ - /* The fins fields must be saved now -- emitir() overwrites them. */ - IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0; - IROpT convot = fins->ot; - IRRef1 convop2 = fins->op2; - NarrowIns *next = nc->stack; /* List of instructions from backpropagation. */ - NarrowIns *last = nc->sp; - NarrowIns *sp = nc->stack; /* Recycle the stack to store operands. */ - while (next < last) { /* Simple stack machine to process the ins. list. */ - NarrowIns ref = *next++; - IROpT op = narrow_op(ref); - if (op == NARROW_REF) { - *sp++ = ref; - } else if (op == NARROW_CONV) { - *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */ - } else if (op == NARROW_SEXT) { - lua_assert(sp >= nc->stack+1); - sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1], - (IRT_I64<<5)|IRT_INT|IRCONV_SEXT); - } else if (op == NARROW_INT) { - lua_assert(next < last); - *sp++ = nc->t == IRT_I64 ? - lj_ir_kint64(J, (int64_t)(int32_t)*next++) : - lj_ir_kint(J, *next++); - } else { /* Regular IROpT. Pops two operands and pushes one result. */ - IRRef mode = nc->mode; - lua_assert(sp >= nc->stack+2); - sp--; - /* Omit some overflow checks for array indexing. See comments above. */ - if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) { - if (next == last && irref_isk(narrow_ref(sp[0])) && - (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u) - guardot = 0; - else /* Otherwise cache a stronger check. */ - mode += IRCONV_CHECK-IRCONV_INDEX; - } - sp[-1] = emitir(op+guardot, sp[-1], sp[0]); - /* Add to cache. */ - if (narrow_ref(ref)) - narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode); - } - } - lua_assert(sp == nc->stack+1); - return nc->stack[0]; -} - -/* Narrow a type conversion of an arithmetic operation. */ -TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J) -{ - if ((J->flags & JIT_F_OPT_NARROW)) { - NarrowConv nc; - nc.J = J; - nc.sp = nc.stack; - nc.maxsp = &nc.stack[NARROW_MAX_STACK-4]; - nc.t = irt_type(fins->t); - if (fins->o == IR_TOBIT) { - nc.mode = IRCONV_TOBIT; /* Used only in the backpropagation cache. */ - } else { - nc.mode = fins->op2; - } - if (narrow_conv_backprop(&nc, fins->op1, 0) <= 1) - return narrow_conv_emit(J, &nc); - } - return NEXTFOLD; -} - -/* -- Narrowing of implicit conversions ----------------------------------- */ - -/* Recursively strip overflow checks. 
*/ -static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode) -{ - IRRef ref = tref_ref(tr); - IRIns *ir = IR(ref); - int op = ir->o; - if (op >= IR_ADDOV && op <= lastop) { - BPropEntry *bp = narrow_bpc_get(J, ref, mode); - if (bp) { - return TREF(bp->val, irt_t(IR(bp->val)->t)); - } else { - IRRef op1 = ir->op1, op2 = ir->op2; /* The IR may be reallocated. */ - op1 = narrow_stripov(J, op1, lastop, mode); - op2 = narrow_stripov(J, op2, lastop, mode); - tr = emitir(IRT(op - IR_ADDOV + IR_ADD, - ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2); - narrow_bpc_set(J, ref, tref_ref(tr), mode); - } - } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) { - tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode); - } - return tr; -} - -/* Narrow array index. */ -TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr) -{ - IRIns *ir; - lua_assert(tref_isnumber(tr)); - if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ - return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX); - /* Omit some overflow checks for array indexing. See comments above. */ - ir = IR(tref_ref(tr)); - if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) && - (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u) - return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2); - return tr; -} - -/* Narrow conversion to integer operand (overflow undefined). */ -TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr) -{ - if (tref_isstr(tr)) - tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); - if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ - return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY); - if (!tref_isinteger(tr)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - /* - ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. - ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same. - */ - return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT); -} - -/* Narrow conversion to bitop operand (overflow wrapped). */ -TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr) -{ - if (tref_isstr(tr)) - tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); - if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ - return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J)); - if (!tref_isinteger(tr)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - /* - ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV. - ** MULOV cannot be stripped due to precision widening. - */ - return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT); -} - -#if LJ_HASFFI -/* Narrow C array index (overflow undefined). */ -TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr) -{ - lua_assert(tref_isnumber(tr)); - if (tref_isnum(tr)) - return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY); - /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */ - return narrow_stripov(J, tr, IR_MULOV, - LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) : - ((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT)); -} -#endif - -/* -- Narrowing of arithmetic operators ----------------------------------- */ - -/* Check whether a number fits into an int32_t (-0 is ok, too). */ -static int numisint(lua_Number n) -{ - return (n == (lua_Number)lj_num2int(n)); -} - -/* Narrowing of arithmetic operations. 
*/ -TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc, - TValue *vb, TValue *vc, IROp op) -{ - if (tref_isstr(rb)) { - rb = emitir(IRTG(IR_STRTO, IRT_NUM), rb, 0); - lj_strscan_num(strV(vb), vb); - } - if (tref_isstr(rc)) { - rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); - lj_strscan_num(strV(vc), vc); - } - /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */ - if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) && - tref_isinteger(rb) && tref_isinteger(rc) && - numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc), - (int)op - (int)IR_ADD))) - return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc); - if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT); - if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); - return emitir(IRTN(op), rb, rc); -} - -/* Narrowing of unary minus operator. */ -TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc) -{ - if (tref_isstr(rc)) { - rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); - lj_strscan_num(strV(vc), vc); - } - if (tref_isinteger(rc)) { - if ((uint32_t)numberVint(vc) != 0x80000000u) - return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc); - rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); - } - return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J)); -} - -/* Narrowing of modulo operator. */ -TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc) -{ - TRef tmp; - if (tvisstr(vc) && !lj_strscan_num(strV(vc), vc)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) && - tref_isinteger(rb) && tref_isinteger(rc) && - (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) { - emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0)); - return emitir(IRTI(IR_MOD), rb, rc); - } - /* b % c ==> b - floor(b/c)*c */ - rb = lj_ir_tonum(J, rb); - rc = lj_ir_tonum(J, rc); - tmp = emitir(IRTN(IR_DIV), rb, rc); - tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR); - tmp = emitir(IRTN(IR_MUL), tmp, rc); - return emitir(IRTN(IR_SUB), rb, tmp); -} - -/* Narrowing of power operator or math.pow. */ -TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc) -{ - if (tvisstr(vc) && !lj_strscan_num(strV(vc), vc)) - lj_trace_err(J, LJ_TRERR_BADTYPE); - /* Narrowing must be unconditional to preserve (-x)^i semantics. */ - if (tvisint(vc) || numisint(numV(vc))) { - int checkrange = 0; - /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */ - if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) { - int32_t k = numberVint(vc); - if (!(k >= -65536 && k <= 65536)) goto split_pow; - checkrange = 1; - } - if (!tref_isinteger(rc)) { - if (tref_isstr(rc)) - rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); - /* Guarded conversion to integer! */ - rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK); - } - if (checkrange && !tref_isk(rc)) { /* Range guard: -65536 <= i <= 65536 */ - TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536)); - emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536)); - } - return emitir(IRTN(IR_POW), rb, rc); - } -split_pow: - /* FOLD covers most cases, but some are easier to do here. */ - if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb))))) - return rb; /* 1 ^ x ==> 1 */ - rc = lj_ir_tonum(J, rc); - if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5) - return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT); /* x ^ 0.5 ==> sqrt(x) */ - /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. 
*/ - rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2); - rc = emitir(IRTN(IR_MUL), rb, rc); - return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2); -} - -/* -- Predictive narrowing of induction variables ------------------------- */ - -/* Narrow a single runtime value. */ -static int narrow_forl(jit_State *J, cTValue *o) -{ - if (tvisint(o)) return 1; - if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o)); - return 0; -} - -/* Narrow the FORL index type by looking at the runtime values. */ -IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv) -{ - lua_assert(tvisnumber(&tv[FORL_IDX]) && - tvisnumber(&tv[FORL_STOP]) && - tvisnumber(&tv[FORL_STEP])); - /* Narrow only if the runtime values of start/stop/step are all integers. */ - if (narrow_forl(J, &tv[FORL_IDX]) && - narrow_forl(J, &tv[FORL_STOP]) && - narrow_forl(J, &tv[FORL_STEP])) { - /* And if the loop index can't possibly overflow. */ - lua_Number step = numberVnum(&tv[FORL_STEP]); - lua_Number sum = numberVnum(&tv[FORL_STOP]) + step; - if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0)) - return IRT_INT; - } - return IRT_NUM; -} - -#undef IR -#undef fins -#undef emitir -#undef emitir_raw - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_sink.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_sink.c deleted file mode 100644 index a98e9dfe3b4..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_sink.c +++ /dev/null @@ -1,245 +0,0 @@ -/* -** SINK: Allocation Sinking and Store Sinking. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_sink_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" -#include "lj_target.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Check whether the store ref points to an eligible allocation. */ -static IRIns *sink_checkalloc(jit_State *J, IRIns *irs) -{ - IRIns *ir = IR(irs->op1); - if (!irref_isk(ir->op2)) - return NULL; /* Non-constant key. */ - if (ir->o == IR_HREFK || ir->o == IR_AREF) - ir = IR(ir->op1); - else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF || - ir->o == IR_FREF || ir->o == IR_ADD)) - return NULL; /* Unhandled reference type (for XSTORE). */ - ir = IR(ir->op1); - if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW)) - return NULL; /* Not an allocation. */ - return ir; /* Return allocation. */ -} - -/* Recursively check whether a value depends on a PHI. */ -static int sink_phidep(jit_State *J, IRRef ref) -{ - IRIns *ir = IR(ref); - if (irt_isphi(ir->t)) return 1; - if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1)) return 1; - if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2)) return 1; - return 0; -} - -/* Check whether a value is a sinkable PHI or loop-invariant. */ -static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref) -{ - if (ref >= REF_FIRST) { - IRIns *ir = IR(ref); - if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && - irt_isphi(IR(ir->op1)->t))) { - ira->prev++; - return 1; /* Sinkable PHI. */ - } - /* Otherwise the value must be loop-invariant. */ - return ref < J->loopref && !sink_phidep(J, ref); - } - return 1; /* Constant (non-PHI). */ -} - -/* Mark non-sinkable allocations using single-pass backward propagation. -** -** Roots for the marking process are: -** - Some PHIs or snapshots (see below). -** - Non-PHI, non-constant values stored to PHI allocations. -** - All guards. 
-** - Any remaining loads not eliminated by store-to-load forwarding. -** - Stores with non-constant keys. -** - All stored values. -*/ -static void sink_mark_ins(jit_State *J) -{ - IRIns *ir, *irlast = IR(J->cur.nins-1); - for (ir = irlast ; ; ir--) { - switch (ir->o) { - case IR_BASE: - return; /* Finished. */ - case IR_CALLL: /* IRCALL_lj_tab_len */ - case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR: - irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */ - break; - case IR_FLOAD: - if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META) - irt_setmark(IR(ir->op1)->t); /* Mark table for remaining loads. */ - break; - case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: { - IRIns *ira = sink_checkalloc(J, ir); - if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2))) - irt_setmark(IR(ir->op1)->t); /* Mark ineligible ref. */ - irt_setmark(IR(ir->op2)->t); /* Mark stored value. */ - break; - } -#if LJ_HASFFI - case IR_CNEWI: - if (irt_isphi(ir->t) && - (!sink_checkphi(J, ir, ir->op2) || - (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP && - !sink_checkphi(J, ir, (ir+1)->op2)))) - irt_setmark(ir->t); /* Mark ineligible allocation. */ - /* fallthrough */ -#endif - case IR_USTORE: - irt_setmark(IR(ir->op2)->t); /* Mark stored value. */ - break; -#if LJ_HASFFI - case IR_CALLXS: -#endif - case IR_CALLS: - irt_setmark(IR(ir->op1)->t); /* Mark (potentially) stored values. */ - break; - case IR_PHI: { - IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); - irl->prev = irr->prev = 0; /* Clear PHI value counts. */ - if (irl->o == irr->o && - (irl->o == IR_TNEW || irl->o == IR_TDUP || - (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI)))) - break; - irt_setmark(irl->t); - irt_setmark(irr->t); - break; - } - default: - if (irt_ismarked(ir->t) || irt_isguard(ir->t)) { /* Propagate mark. */ - if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t); - if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t); - } - break; - } - } -} - -/* Mark all instructions referenced by a snapshot. */ -static void sink_mark_snap(jit_State *J, SnapShot *snap) -{ - SnapEntry *map = &J->cur.snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - for (n = 0; n < nent; n++) { - IRRef ref = snap_ref(map[n]); - if (!irref_isk(ref)) - irt_setmark(IR(ref)->t); - } -} - -/* Iteratively remark PHI refs with differing marks or PHI value counts. */ -static void sink_remark_phi(jit_State *J) -{ - IRIns *ir; - int remark; - do { - remark = 0; - for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) { - IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); - if (((irl->t.irt ^ irr->t.irt) & IRT_MARK)) - remark = 1; - else if (irl->prev == irr->prev) - continue; - irt_setmark(IR(ir->op1)->t); - irt_setmark(IR(ir->op2)->t); - } - } while (remark); -} - -/* Sweep instructions and tag sunken allocations and stores. */ -static void sink_sweep_ins(jit_State *J) -{ - IRIns *ir, *irfirst = IR(J->cur.nk); - for (ir = IR(J->cur.nins-1) ; ir >= irfirst; ir--) { - switch (ir->o) { - case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: { - IRIns *ira = sink_checkalloc(J, ir); - if (ira && !irt_ismarked(ira->t)) { - int delta = (int)(ir - ira); - ir->prev = REGSP(RID_SINK, delta > 255 ? 
255 : delta); - } else { - ir->prev = REGSP_INIT; - } - break; - } - case IR_NEWREF: - if (!irt_ismarked(IR(ir->op1)->t)) { - ir->prev = REGSP(RID_SINK, 0); - } else { - irt_clearmark(ir->t); - ir->prev = REGSP_INIT; - } - break; -#if LJ_HASFFI - case IR_CNEW: case IR_CNEWI: -#endif - case IR_TNEW: case IR_TDUP: - if (!irt_ismarked(ir->t)) { - ir->t.irt &= ~IRT_GUARD; - ir->prev = REGSP(RID_SINK, 0); - J->cur.sinktags = 1; /* Signal present SINK tags to assembler. */ - } else { - irt_clearmark(ir->t); - ir->prev = REGSP_INIT; - } - break; - case IR_PHI: { - IRIns *ira = IR(ir->op2); - if (!irt_ismarked(ira->t) && - (ira->o == IR_TNEW || ira->o == IR_TDUP || - (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) { - ir->prev = REGSP(RID_SINK, 0); - } else { - ir->prev = REGSP_INIT; - } - break; - } - default: - irt_clearmark(ir->t); - ir->prev = REGSP_INIT; - break; - } - } -} - -/* Allocation sinking and store sinking. -** -** 1. Mark all non-sinkable allocations. -** 2. Then sink all remaining allocations and the related stores. -*/ -void lj_opt_sink(jit_State *J) -{ - const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD| - JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD); - if ((J->flags & need) == need && - (J->chain[IR_TNEW] || J->chain[IR_TDUP] || - (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) { - if (!J->loopref) - sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]); - sink_mark_ins(J); - if (J->loopref) - sink_remark_phi(J); - sink_sweep_ins(J); - } -} - -#undef IR - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_split.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_split.c deleted file mode 100644 index 81ded6c0a01..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_opt_split.c +++ /dev/null @@ -1,856 +0,0 @@ -/* -** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_opt_split_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) - -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_vm.h" - -/* SPLIT pass: -** -** This pass splits up 64 bit IR instructions into multiple 32 bit IR -** instructions. It's only active for soft-float targets or for 32 bit CPUs -** which lack native 64 bit integer operations (the FFI is currently the -** only emitter for 64 bit integer instructions). -** -** Splitting the IR in a separate pass keeps each 32 bit IR assembler -** backend simple. Only a small amount of extra functionality needs to be -** implemented. This is much easier than adding support for allocating -** register pairs to each backend (believe me, I tried). A few simple, but -** important optimizations can be performed by the SPLIT pass, which would -** be tedious to do in the backend. -** -** The basic idea is to replace each 64 bit IR instruction with its 32 bit -** equivalent plus an extra HIOP instruction. The splitted IR is not passed -** through FOLD or any other optimizations, so each HIOP is guaranteed to -** immediately follow it's counterpart. The actual functionality of HIOP is -** inferred from the previous instruction. -** -** The operands of HIOP hold the hiword input references. The output of HIOP -** is the hiword output reference, which is also used to hold the hiword -** register or spill slot information. 
The register allocator treats this -** instruction independently of any other instruction, which improves code -** quality compared to using fixed register pairs. -** -** It's easier to split up some instructions into two regular 32 bit -** instructions. E.g. XLOAD is split up into two XLOADs with two different -** addresses. Obviously 64 bit constants need to be split up into two 32 bit -** constants, too. Some hiword instructions can be entirely omitted, e.g. -** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls -** are split up into two 32 bit arguments each. -** -** On soft-float targets, floating-point instructions are directly converted -** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX). -** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump). -** -** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with -** two int64_t fields: -** -** 0100 p32 ADD base +8 -** 0101 i64 XLOAD 0100 -** 0102 i64 ADD 0101 +1 -** 0103 p32 ADD base +16 -** 0104 i64 XSTORE 0103 0102 -** -** mov rax, [esi+0x8] -** add rax, +0x01 -** mov [esi+0x10], rax -** -** Here's the transformed IR and the x86 machine code after the SPLIT pass: -** -** 0100 p32 ADD base +8 -** 0101 int XLOAD 0100 -** 0102 p32 ADD base +12 -** 0103 int XLOAD 0102 -** 0104 int ADD 0101 +1 -** 0105 int HIOP 0103 +0 -** 0106 p32 ADD base +16 -** 0107 int XSTORE 0106 0104 -** 0108 int HIOP 0106 0105 -** -** mov eax, [esi+0x8] -** mov ecx, [esi+0xc] -** add eax, +0x01 -** adc ecx, +0x00 -** mov [esi+0x10], eax -** mov [esi+0x14], ecx -** -** You may notice the reassociated hiword address computation, which is -** later fused into the mov operands by the assembler. -*/ - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Directly emit the transformed IR without updating chains etc. */ -static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2) -{ - IRRef nref = lj_ir_nextins(J); - IRIns *ir = IR(nref); - ir->ot = ot; - ir->op1 = op1; - ir->op2 = op2; - return nref; -} - -#if LJ_SOFTFP -/* Emit a (checked) number to integer conversion. */ -static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check) -{ - IRRef tmp, res; -#if LJ_LE - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi); -#else - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo); -#endif - res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i); - if (check) { - tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d); - split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); - split_emit(J, IRTGI(IR_EQ), tmp, lo); - split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi); - } - return res; -} - -/* Emit a CALLN with one split 64 bit argument. */ -static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir, - IRIns *ir, IRCallID id) -{ - IRRef tmp, op1 = ir->op1; - J->cur.nins--; -#if LJ_LE - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); -#else - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); -#endif - ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); - return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); -} -#endif - -/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. 
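/* A stand-alone sketch of the lo/hi-plus-carry pattern shown in the
** machine-code example above, assuming plain uint32_t halves; the
** add64_sketch name is illustrative only (the pass emits IR, not C).
*/
static void add64_sketch(uint32_t blo, uint32_t bhi, uint32_t clo, uint32_t chi,
                         uint32_t *rlo, uint32_t *rhi)
{
  uint32_t lo = blo + clo;        /* Loword ADD. */
  uint32_t carry = (lo < blo);    /* Carry out of the loword. */
  *rlo = lo;
  *rhi = bhi + chi + carry;       /* Hiword HIOP: add-with-carry (adc). */
}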
*/ -static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir, - IRIns *ir, IRCallID id) -{ - IRRef tmp, op1 = ir->op1, op2 = ir->op2; - J->cur.nins--; -#if LJ_LE - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); -#else - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); -#endif - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); - ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); - return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); -} - -/* Emit a CALLN with two split 64 bit arguments. */ -static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir, - IRIns *ir, IRCallID id) -{ - IRRef tmp, op1 = ir->op1, op2 = ir->op2; - J->cur.nins--; -#if LJ_LE - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]); -#else - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]); - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); -#endif - ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); - return split_emit(J, - IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT), - tmp, tmp); -} - -/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */ -static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref) -{ - IRRef nref = oir[ref].prev; - IRIns *ir = IR(nref); - int32_t ofs = 4; - if (ir->o == IR_KPTR) - return lj_ir_kptr(J, (char *)ir_kptr(ir) + ofs); - if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) { - /* Reassociate address. */ - ofs += IR(ir->op2)->i; - nref = ir->op1; - if (ofs == 0) return nref; - } - return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs)); -} - -#if LJ_HASFFI -static IRRef split_bitshift(jit_State *J, IRRef1 *hisubst, - IRIns *oir, IRIns *nir, IRIns *ir) -{ - IROp op = ir->o; - IRRef kref = nir->op2; - if (irref_isk(kref)) { /* Optimize constant shifts. 
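/* Reference sketch for the constant-shift decomposition handled just below,
** assuming 0 < k < 32 and uint32_t halves (illustrative only). Note that
** (-k & 31) in the code below equals 32-k in this range.
*/
static void shl64_sketch(uint32_t lo, uint32_t hi, int k,
                         uint32_t *rlo, uint32_t *rhi)
{
  *rhi = (hi << k) | (lo >> (32 - k));  /* Hiword picks up bits shifted out of lo. */
  *rlo = lo << k;                       /* Loword is a plain 32 bit shift. */
}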
*/ - int32_t k = (IR(kref)->i & 63); - IRRef lo = nir->op1, hi = hisubst[ir->op1]; - if (op == IR_BROL || op == IR_BROR) { - if (op == IR_BROR) k = (-k & 63); - if (k >= 32) { IRRef t = lo; lo = hi; hi = t; k -= 32; } - if (k == 0) { - passthrough: - J->cur.nins--; - ir->prev = lo; - return hi; - } else { - TRef k1, k2; - IRRef t1, t2, t3, t4; - J->cur.nins--; - k1 = lj_ir_kint(J, k); - k2 = lj_ir_kint(J, (-k & 31)); - t1 = split_emit(J, IRTI(IR_BSHL), lo, k1); - t2 = split_emit(J, IRTI(IR_BSHL), hi, k1); - t3 = split_emit(J, IRTI(IR_BSHR), lo, k2); - t4 = split_emit(J, IRTI(IR_BSHR), hi, k2); - ir->prev = split_emit(J, IRTI(IR_BOR), t1, t4); - return split_emit(J, IRTI(IR_BOR), t2, t3); - } - } else if (k == 0) { - goto passthrough; - } else if (k < 32) { - if (op == IR_BSHL) { - IRRef t1 = split_emit(J, IRTI(IR_BSHL), hi, kref); - IRRef t2 = split_emit(J, IRTI(IR_BSHR), lo, lj_ir_kint(J, (-k&31))); - return split_emit(J, IRTI(IR_BOR), t1, t2); - } else { - IRRef t1 = ir->prev, t2; - lua_assert(op == IR_BSHR || op == IR_BSAR); - nir->o = IR_BSHR; - t2 = split_emit(J, IRTI(IR_BSHL), hi, lj_ir_kint(J, (-k&31))); - ir->prev = split_emit(J, IRTI(IR_BOR), t1, t2); - return split_emit(J, IRTI(op), hi, kref); - } - } else { - if (op == IR_BSHL) { - if (k == 32) - J->cur.nins--; - else - lo = ir->prev; - ir->prev = lj_ir_kint(J, 0); - return lo; - } else { - lua_assert(op == IR_BSHR || op == IR_BSAR); - if (k == 32) { - J->cur.nins--; - ir->prev = hi; - } else { - nir->op1 = hi; - } - if (op == IR_BSHR) - return lj_ir_kint(J, 0); - else - return split_emit(J, IRTI(IR_BSAR), hi, lj_ir_kint(J, 31)); - } - } - } - return split_call_li(J, hisubst, oir, ir, - op - IR_BSHL + IRCALL_lj_carith_shl64); -} - -static IRRef split_bitop(jit_State *J, IRRef1 *hisubst, - IRIns *nir, IRIns *ir) -{ - IROp op = ir->o; - IRRef hi, kref = nir->op2; - if (irref_isk(kref)) { /* Optimize bit operations with lo constant. */ - int32_t k = IR(kref)->i; - if (k == 0 || k == -1) { - if (op == IR_BAND) k = ~k; - if (k == 0) { - J->cur.nins--; - ir->prev = nir->op1; - } else if (op == IR_BXOR) { - nir->o = IR_BNOT; - nir->op2 = 0; - } else { - J->cur.nins--; - ir->prev = kref; - } - } - } - hi = hisubst[ir->op1]; - kref = hisubst[ir->op2]; - if (irref_isk(kref)) { /* Optimize bit operations with hi constant. */ - int32_t k = IR(kref)->i; - if (k == 0 || k == -1) { - if (op == IR_BAND) k = ~k; - if (k == 0) { - return hi; - } else if (op == IR_BXOR) { - return split_emit(J, IRTI(IR_BNOT), hi, 0); - } else { - return kref; - } - } - } - return split_emit(J, IRTI(op), hi, kref); -} -#endif - -/* Substitute references of a snapshot. */ -static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir) -{ - SnapEntry *map = &J->cur.snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - IRIns *ir = &oir[snap_ref(sn)]; - if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn)))) - map[n] = ((sn & 0xffff0000) | ir->prev); - } -} - -/* Transform the old IR to the new IR. */ -static void split_ir(jit_State *J) -{ - IRRef nins = J->cur.nins, nk = J->cur.nk; - MSize irlen = nins - nk; - MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1)); - IRIns *oir = (IRIns *)lj_buf_tmp(J->L, need); - IRRef1 *hisubst; - IRRef ref, snref; - SnapShot *snap; - - /* Copy old IR to buffer. */ - memcpy(oir, IR(nk), irlen*sizeof(IRIns)); - /* Bias hiword substitution table and old IR. Loword kept in field prev. 
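/* The statements just below bias both tables by nk so they can be indexed
** directly with an IRRef. A tiny stand-alone sketch of the same pointer
** trick (names illustrative only):
*/
static void biased_index_sketch(void)
{
  int storage[16];
  int base = 100;                  /* First valid reference. */
  int *biased = storage - base;    /* Same idea as 'oir -= nk' below. */
  biased[base] = 42;               /* Writes storage[0]. */
}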
*/ - hisubst = (IRRef1 *)&oir[irlen] - nk; - oir -= nk; - - /* Remove all IR instructions, but retain IR constants. */ - J->cur.nins = REF_FIRST; - J->loopref = 0; - - /* Process constants and fixed references. */ - for (ref = nk; ref <= REF_BASE; ref++) { - IRIns *ir = &oir[ref]; - if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) { - /* Split up 64 bit constant. */ - TValue tv = *ir_k64(ir); - ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo); - hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi); - } else { - ir->prev = ref; /* Identity substitution for loword. */ - hisubst[ref] = 0; - } - } - - /* Process old IR instructions. */ - snap = J->cur.snap; - snref = snap->ref; - for (ref = REF_FIRST; ref < nins; ref++) { - IRIns *ir = &oir[ref]; - IRRef nref = lj_ir_nextins(J); - IRIns *nir = IR(nref); - IRRef hi = 0; - - if (ref >= snref) { - snap->ref = nref; - split_subst_snap(J, snap++, oir); - snref = snap < &J->cur.snap[J->cur.nsnap] ? snap->ref : ~(IRRef)0; - } - - /* Copy-substitute old instruction to new instruction. */ - nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev; - nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev; - ir->prev = nref; /* Loword substitution. */ - nir->o = ir->o; - nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI); - hisubst[ref] = 0; - - /* Split 64 bit instructions. */ -#if LJ_SOFTFP - if (irt_isnum(ir->t)) { - nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */ - /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */ - switch (ir->o) { - case IR_ADD: - hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add); - break; - case IR_SUB: - hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub); - break; - case IR_MUL: - hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul); - break; - case IR_DIV: - hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div); - break; - case IR_POW: - hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi); - break; - case IR_FPMATH: - /* Try to rejoin pow from EXP2, MUL and LOG2. */ - if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) { - IRIns *irp = IR(nir->op1); - if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) { - IRIns *irm4 = IR(irp->op1); - IRIns *irm3 = IR(irm4->op1); - IRIns *irm12 = IR(irm3->op1); - IRIns *irl1 = IR(irm12->op1); - if (irm12->op1 > J->loopref && irl1->o == IR_CALLN && - irl1->op2 == IRCALL_lj_vm_log2) { - IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */ - IRRef arg3 = irm3->op2, arg4 = irm4->op2; - J->cur.nins--; - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3); - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4); - ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow); - hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); - break; - } - } - } - hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2); - break; - case IR_ATAN2: - hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2); - break; - case IR_LDEXP: - hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp); - break; - case IR_NEG: case IR_ABS: - nir->o = IR_CONV; /* Pass through loword. */ - nir->op2 = (IRT_INT << 5) | IRT_INT; - hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP), - hisubst[ir->op1], hisubst[ir->op2]); - break; - case IR_SLOAD: - if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. 
*/ - nir->op2 &= ~IRSLOAD_CONVERT; - ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref, - IRCALL_softfp_i2d); - hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); - break; - } - /* fallthrough */ - case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: - case IR_STRTO: - hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); - break; - case IR_XLOAD: { - IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */ - J->cur.nins--; - hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */ - nref = lj_ir_nextins(J); - nir = IR(nref); - *nir = inslo; /* Re-emit lo XLOAD immediately before hi XLOAD. */ - hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2); -#if LJ_LE - ir->prev = nref; -#else - ir->prev = hi; hi = nref; -#endif - break; - } - case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_XSTORE: - split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]); - break; - case IR_CONV: { /* Conversion to number. Others handled below. */ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); - UNUSED(st); -#if LJ_32 && LJ_HASFFI - if (st == IRT_I64 || st == IRT_U64) { - hi = split_call_l(J, hisubst, oir, ir, - st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d); - break; - } -#endif - lua_assert(st == IRT_INT || - (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT))); - nir->o = IR_CALLN; -#if LJ_32 && LJ_HASFFI - nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d : - st == IRT_FLOAT ? IRCALL_softfp_f2d : - IRCALL_softfp_ui2d; -#else - nir->op2 = IRCALL_softfp_i2d; -#endif - hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); - break; - } - case IR_CALLN: - case IR_CALLL: - case IR_CALLS: - case IR_CALLXS: - goto split_call; - case IR_PHI: - if (nir->op1 == nir->op2) - J->cur.nins--; /* Drop useless PHIs. */ - if (hisubst[ir->op1] != hisubst[ir->op2]) - split_emit(J, IRT(IR_PHI, IRT_SOFTFP), - hisubst[ir->op1], hisubst[ir->op2]); - break; - case IR_HIOP: - J->cur.nins--; /* Drop joining HIOP. */ - ir->prev = nir->op1; - hi = nir->op2; - break; - default: - lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX); - hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), - hisubst[ir->op1], hisubst[ir->op2]); - break; - } - } else -#endif -#if LJ_32 && LJ_HASFFI - if (irt_isint64(ir->t)) { - IRRef hiref = hisubst[ir->op1]; - nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */ - switch (ir->o) { - case IR_ADD: - case IR_SUB: - /* Use plain op for hiword if loword cannot produce a carry/borrow. */ - if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) { - ir->prev = nir->op1; /* Pass through loword. */ - nir->op1 = hiref; nir->op2 = hisubst[ir->op2]; - hi = nref; - break; - } - /* fallthrough */ - case IR_NEG: - hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]); - break; - case IR_MUL: - hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64); - break; - case IR_DIV: - hi = split_call_ll(J, hisubst, oir, ir, - irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : - IRCALL_lj_carith_divu64); - break; - case IR_MOD: - hi = split_call_ll(J, hisubst, oir, ir, - irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : - IRCALL_lj_carith_modu64); - break; - case IR_POW: - hi = split_call_ll(J, hisubst, oir, ir, - irt_isi64(ir->t) ? 
IRCALL_lj_carith_powi64 : - IRCALL_lj_carith_powu64); - break; - case IR_BNOT: - hi = split_emit(J, IRTI(IR_BNOT), hiref, 0); - break; - case IR_BSWAP: - ir->prev = split_emit(J, IRTI(IR_BSWAP), hiref, 0); - hi = nref; - break; - case IR_BAND: case IR_BOR: case IR_BXOR: - hi = split_bitop(J, hisubst, nir, ir); - break; - case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR: - hi = split_bitshift(J, hisubst, oir, nir, ir); - break; - case IR_FLOAD: - lua_assert(ir->op2 == IRFL_CDATA_INT64); - hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4); -#if LJ_BE - ir->prev = hi; hi = nref; -#endif - break; - case IR_XLOAD: - hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2); -#if LJ_BE - ir->prev = hi; hi = nref; -#endif - break; - case IR_XSTORE: - split_emit(J, IRTI(IR_HIOP), nir->op1, hisubst[ir->op2]); - break; - case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); -#if LJ_SOFTFP - if (st == IRT_NUM) { /* NUM to 64 bit int conv. */ - hi = split_call_l(J, hisubst, oir, ir, - irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul); - } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */ - nir->o = IR_CALLN; - nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul; - hi = split_emit(J, IRTI(IR_HIOP), nref, nref); - } -#else - if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */ - hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref); - } -#endif - else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */ - /* Drop cast, since assembler doesn't care. */ - goto fwdlo; - } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */ - IRRef k31 = lj_ir_kint(J, 31); - nir = IR(nref); /* May have been reallocated. */ - ir->prev = nir->op1; /* Pass through loword. */ - nir->o = IR_BSAR; /* hi = bsar(lo, 31). */ - nir->op2 = k31; - hi = nref; - } else { /* Zero-extend to 64 bit. */ - hi = lj_ir_kint(J, 0); - goto fwdlo; - } - break; - } - case IR_CALLXS: - goto split_call; - case IR_PHI: { - IRRef hiref2; - if ((irref_isk(nir->op1) && irref_isk(nir->op2)) || - nir->op1 == nir->op2) - J->cur.nins--; /* Drop useless PHIs. */ - hiref2 = hisubst[ir->op2]; - if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2)) - split_emit(J, IRTI(IR_PHI), hiref, hiref2); - break; - } - case IR_HIOP: - J->cur.nins--; /* Drop joining HIOP. */ - ir->prev = nir->op1; - hi = nir->op2; - break; - default: - lua_assert(ir->o <= IR_NE); /* Comparisons. */ - split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]); - break; - } - } else -#endif -#if LJ_SOFTFP - if (ir->o == IR_SLOAD) { - if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */ - nir->op2 &= ~IRSLOAD_CONVERT; - if (!(nir->op2 & IRSLOAD_TYPECHECK)) - nir->t.irt = IRT_INT; /* Drop guard. 
*/ - split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); - ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t)); - } - } else if (ir->o == IR_TOBIT) { - IRRef tmp, op1 = ir->op1; - J->cur.nins--; -#if LJ_LE - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); -#else - tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); -#endif - ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit); - } else if (ir->o == IR_TOSTR) { - if (hisubst[ir->op1]) { - if (irref_isk(ir->op1)) - nir->op1 = ir->op1; - else - split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref); - } - } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) { - if (irref_isk(ir->op2) && hisubst[ir->op2]) - nir->op2 = ir->op2; - } else -#endif - if (ir->o == IR_CONV) { /* See above, too. */ - IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); -#if LJ_32 && LJ_HASFFI - if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */ -#if LJ_SOFTFP - if (irt_isfloat(ir->t)) { - split_call_l(J, hisubst, oir, ir, - st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f); - J->cur.nins--; /* Drop unused HIOP. */ - } -#else - if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */ - ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)), - hisubst[ir->op1], nref); - } -#endif - else { /* Truncate to lower 32 bits. */ - fwdlo: - ir->prev = nir->op1; /* Forward loword. */ - /* Replace with NOP to avoid messing up the snapshot logic. */ - nir->ot = IRT(IR_NOP, IRT_NIL); - nir->op1 = nir->op2 = 0; - } - } -#endif -#if LJ_SOFTFP && LJ_32 && LJ_HASFFI - else if (irt_isfloat(ir->t)) { - if (st == IRT_NUM) { - split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f); - J->cur.nins--; /* Drop unused HIOP. */ - } else { - nir->o = IR_CALLN; - nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f; - } - } else if (st == IRT_FLOAT) { - nir->o = IR_CALLN; - nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui; - } else -#endif -#if LJ_SOFTFP - if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) { - if (irt_isguard(ir->t)) { - lua_assert(st == IRT_NUM && irt_isint(ir->t)); - J->cur.nins--; - ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1); - } else { - split_call_l(J, hisubst, oir, ir, -#if LJ_32 && LJ_HASFFI - st == IRT_NUM ? - (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) : - (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui) -#else - IRCALL_softfp_d2i -#endif - ); - J->cur.nins--; /* Drop unused HIOP. */ - } - } -#endif - } else if (ir->o == IR_CALLXS) { - IRRef hiref; - split_call: - hiref = hisubst[ir->op1]; - if (hiref) { - IROpT ot = nir->ot; - IRRef op2 = nir->op2; - nir->ot = IRT(IR_CARG, IRT_NIL); -#if LJ_LE - nir->op2 = hiref; -#else - nir->op2 = nir->op1; nir->op1 = hiref; -#endif - ir->prev = nref = split_emit(J, ot, nref, op2); - } - if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t)) - hi = split_emit(J, - IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT), - nref, nref); - } else if (ir->o == IR_CARG) { - IRRef hiref = hisubst[ir->op1]; - if (hiref) { - IRRef op2 = nir->op2; -#if LJ_LE - nir->op2 = hiref; -#else - nir->op2 = nir->op1; nir->op1 = hiref; -#endif - ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2); - nir = IR(nref); - } - hiref = hisubst[ir->op2]; - if (hiref) { -#if !LJ_TARGET_X86 - int carg = 0; - IRIns *cir; - for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1)) - carg++; - if ((carg & 1) == 0) { /* Align 64 bit arguments. 
*/ - IRRef op2 = nir->op2; - nir->op2 = REF_NIL; - nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2); - nir = IR(nref); - } -#endif -#if LJ_BE - { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; } -#endif - ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref); - } - } else if (ir->o == IR_CNEWI) { - if (hisubst[ir->op2]) - split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]); - } else if (ir->o == IR_LOOP) { - J->loopref = nref; /* Needed by assembler. */ - } - hisubst[ref] = hi; /* Store hiword substitution. */ - } - if (snref == nins) { /* Substitution for last snapshot. */ - snap->ref = J->cur.nins; - split_subst_snap(J, snap, oir); - } - - /* Add PHI marks. */ - for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) { - IRIns *ir = IR(ref); - if (ir->o != IR_PHI) break; - if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t); - if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t); - } -} - -/* Protected callback for split pass. */ -static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud) -{ - jit_State *J = (jit_State *)ud; - split_ir(J); - UNUSED(L); UNUSED(dummy); - return NULL; -} - -#if defined(LUA_USE_ASSERT) || LJ_SOFTFP -/* Slow, but sure way to check whether a SPLIT pass is needed. */ -static int split_needsplit(jit_State *J) -{ - IRIns *ir, *irend; - IRRef ref; - for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++) - if (LJ_SOFTFP ? irt_is64orfp(ir->t) : irt_isint64(ir->t)) - return 1; - if (LJ_SOFTFP) { - for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev) - if ((IR(ref)->op2 & IRSLOAD_CONVERT)) - return 1; - if (J->chain[IR_TOBIT]) - return 1; - } - for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) { - IRType st = (IR(ref)->op2 & IRCONV_SRCMASK); - if ((LJ_SOFTFP && (st == IRT_NUM || st == IRT_FLOAT)) || - st == IRT_I64 || st == IRT_U64) - return 1; - } - return 0; /* Nope. */ -} -#endif - -/* SPLIT pass. */ -void lj_opt_split(jit_State *J) -{ -#if LJ_SOFTFP - if (!J->needsplit) - J->needsplit = split_needsplit(J); -#else - lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */ -#endif - if (J->needsplit) { - int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit); - if (errcode) { - /* Completely reset the trace to avoid inconsistent dump on abort. */ - J->cur.nins = J->cur.nk = REF_BASE; - J->cur.nsnap = 0; - lj_err_throw(J->L, errcode); /* Propagate errors. */ - } - } -} - -#undef IR - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.c deleted file mode 100644 index 9891897ed6f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.c +++ /dev/null @@ -1,2725 +0,0 @@ -/* -** Lua parser (source code -> bytecode). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_parse_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_func.h" -#include "lj_state.h" -#include "lj_bc.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#endif -#include "lj_strfmt.h" -#include "lj_lex.h" -#include "lj_parse.h" -#include "lj_vm.h" -#include "lj_vmevent.h" - -/* -- Parser structures and definitions ----------------------------------- */ - -/* Expression kinds. 
*/ -typedef enum { - /* Constant expressions must be first and in this order: */ - VKNIL, - VKFALSE, - VKTRUE, - VKSTR, /* sval = string value */ - VKNUM, /* nval = number value */ - VKLAST = VKNUM, - VKCDATA, /* nval = cdata value, not treated as a constant expression */ - /* Non-constant expressions follow: */ - VLOCAL, /* info = local register, aux = vstack index */ - VUPVAL, /* info = upvalue index, aux = vstack index */ - VGLOBAL, /* sval = string value */ - VINDEXED, /* info = table register, aux = index reg/byte/string const */ - VJMP, /* info = instruction PC */ - VRELOCABLE, /* info = instruction PC */ - VNONRELOC, /* info = result register */ - VCALL, /* info = instruction PC, aux = base */ - VVOID -} ExpKind; - -/* Expression descriptor. */ -typedef struct ExpDesc { - union { - struct { - uint32_t info; /* Primary info. */ - uint32_t aux; /* Secondary info. */ - } s; - TValue nval; /* Number value. */ - GCstr *sval; /* String value. */ - } u; - ExpKind k; - BCPos t; /* True condition jump list. */ - BCPos f; /* False condition jump list. */ -} ExpDesc; - -/* Macros for expressions. */ -#define expr_hasjump(e) ((e)->t != (e)->f) - -#define expr_isk(e) ((e)->k <= VKLAST) -#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e)) -#define expr_isnumk(e) ((e)->k == VKNUM) -#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e)) -#define expr_isstrk(e) ((e)->k == VKSTR) - -#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval) -#define expr_numberV(e) numberVnum(expr_numtv((e))) - -/* Initialize expression. */ -static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info) -{ - e->k = k; - e->u.s.info = info; - e->f = e->t = NO_JMP; -} - -/* Check number constant for +-0. */ -static int expr_numiszero(ExpDesc *e) -{ - TValue *o = expr_numtv(e); - return tvisint(o) ? (intV(o) == 0) : tviszero(o); -} - -/* Per-function linked list of scope blocks. */ -typedef struct FuncScope { - struct FuncScope *prev; /* Link to outer scope. */ - MSize vstart; /* Start of block-local variables. */ - uint8_t nactvar; /* Number of active vars outside the scope. */ - uint8_t flags; /* Scope flags. */ -} FuncScope; - -#define FSCOPE_LOOP 0x01 /* Scope is a (breakable) loop. */ -#define FSCOPE_BREAK 0x02 /* Break used in scope. */ -#define FSCOPE_GOLA 0x04 /* Goto or label used in scope. */ -#define FSCOPE_UPVAL 0x08 /* Upvalue in scope. */ -#define FSCOPE_NOCLOSE 0x10 /* Do not close upvalues. */ - -#define NAME_BREAK ((GCstr *)(uintptr_t)1) - -/* Index into variable stack. */ -typedef uint16_t VarIndex; -#define LJ_MAX_VSTACK (65536 - LJ_MAX_UPVAL) - -/* Variable/goto/label info. */ -#define VSTACK_VAR_RW 0x01 /* R/W variable. */ -#define VSTACK_GOTO 0x02 /* Pending goto. */ -#define VSTACK_LABEL 0x04 /* Label. */ - -/* Per-function state. */ -typedef struct FuncState { - GCtab *kt; /* Hash table for constants. */ - LexState *ls; /* Lexer state. */ - lua_State *L; /* Lua state. */ - FuncScope *bl; /* Current scope. */ - struct FuncState *prev; /* Enclosing function. */ - BCPos pc; /* Next bytecode position. */ - BCPos lasttarget; /* Bytecode position of last jump target. */ - BCPos jpc; /* Pending jump list to next bytecode. */ - BCReg freereg; /* First free register. */ - BCReg nactvar; /* Number of active local variables. */ - BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants */ - BCLine linedefined; /* First line of the function definition. */ - BCInsLine *bcbase; /* Base of bytecode stack. */ - BCPos bclim; /* Limit of bytecode stack. 
*/ - MSize vbase; /* Base of variable stack for this function. */ - uint8_t flags; /* Prototype flags. */ - uint8_t numparams; /* Number of parameters. */ - uint8_t framesize; /* Fixed frame size. */ - uint8_t nuv; /* Number of upvalues */ - VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */ - VarIndex uvmap[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx. */ - VarIndex uvtmp[LJ_MAX_UPVAL]; /* Temporary upvalue map. */ -} FuncState; - -/* Binary and unary operators. ORDER OPR */ -typedef enum BinOpr { - OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */ - OPR_CONCAT, - OPR_NE, OPR_EQ, - OPR_LT, OPR_GE, OPR_LE, OPR_GT, - OPR_AND, OPR_OR, - OPR_NOBINOPR -} BinOpr; - -LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT); -LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT); -LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT); -LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD); -LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD); -LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); -LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); - -/* -- Error handling ------------------------------------------------------ */ - -LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) -{ - lj_lex_error(ls, ls->tok, em); -} - -LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken tok) -{ - lj_lex_error(ls, ls->tok, LJ_ERR_XTOKEN, lj_lex_token2str(ls, tok)); -} - -LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what) -{ - if (fs->linedefined == 0) - lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what); - else - lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what); -} - -#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m) -#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m) -#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); } - -/* -- Management of constants --------------------------------------------- */ - -/* Return bytecode encoding for primitive constant. */ -#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k) - -#define tvhaskslot(o) ((o)->u32.hi == 0) -#define tvkslot(o) ((o)->u32.lo) - -/* Add a number constant. */ -static BCReg const_num(FuncState *fs, ExpDesc *e) -{ - lua_State *L = fs->L; - TValue *o; - lua_assert(expr_isnumk(e)); - o = lj_tab_set(L, fs->kt, &e->u.nval); - if (tvhaskslot(o)) - return tvkslot(o); - o->u64 = fs->nkn; - return fs->nkn++; -} - -/* Add a GC object constant. */ -static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype) -{ - lua_State *L = fs->L; - TValue key, *o; - setgcV(L, &key, gc, itype); - /* NOBARRIER: the key is new or kept alive. */ - o = lj_tab_set(L, fs->kt, &key); - if (tvhaskslot(o)) - return tvkslot(o); - o->u64 = fs->nkgc; - return fs->nkgc++; -} - -/* Add a string constant. */ -static BCReg const_str(FuncState *fs, ExpDesc *e) -{ - lua_assert(expr_isstrk(e) || e->k == VGLOBAL); - return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR); -} - -/* Anchor string constant to avoid GC. */ -GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len) -{ - /* NOBARRIER: the key is new or kept alive. */ - lua_State *L = ls->L; - GCstr *s = lj_str_new(L, str, len); - TValue *tv = lj_tab_setstr(L, ls->fs->kt, s); - if (tvisnil(tv)) setboolV(tv, 1); - lj_gc_check(L); - return s; -} - -#if LJ_HASFFI -/* Anchor cdata to avoid GC. 
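/* Sketch of the constant-interning scheme used by const_num()/const_gc()
** above: each constant maps to a slot index, so a repeated constant reuses
** its old slot. Simplified stand-in with a linear pool instead of fs->kt
** (names illustrative only):
*/
static int intern_const_sketch(double *pool, int *nconsts, double v)
{
  int i;
  for (i = 0; i < *nconsts; i++)
    if (pool[i] == v) return i;    /* Already interned: reuse the slot. */
  pool[i] = v;                     /* New constant: append... */
  return (*nconsts)++;             /* ...and hand out the next slot. */
}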
*/ -void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd) -{ - /* NOBARRIER: the key is new or kept alive. */ - lua_State *L = ls->L; - setcdataV(L, tv, cd); - setboolV(lj_tab_set(L, ls->fs->kt, tv), 1); -} -#endif - -/* -- Jump list handling -------------------------------------------------- */ - -/* Get next element in jump list. */ -static BCPos jmp_next(FuncState *fs, BCPos pc) -{ - ptrdiff_t delta = bc_j(fs->bcbase[pc].ins); - if ((BCPos)delta == NO_JMP) - return NO_JMP; - else - return (BCPos)(((ptrdiff_t)pc+1)+delta); -} - -/* Check if any of the instructions on the jump list produce no value. */ -static int jmp_novalue(FuncState *fs, BCPos list) -{ - for (; list != NO_JMP; list = jmp_next(fs, list)) { - BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins; - if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG)) - return 1; - } - return 0; -} - -/* Patch register of test instructions. */ -static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg) -{ - BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc]; - BCOp op = bc_op(ilp->ins); - if (op == BC_ISTC || op == BC_ISFC) { - if (reg != NO_REG && reg != bc_d(ilp->ins)) { - setbc_a(&ilp->ins, reg); - } else { /* Nothing to store or already in the right register. */ - setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC)); - setbc_a(&ilp->ins, 0); - } - } else if (bc_a(ilp->ins) == NO_REG) { - if (reg == NO_REG) { - ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0); - } else { - setbc_a(&ilp->ins, reg); - if (reg >= bc_a(ilp[1].ins)) - setbc_a(&ilp[1].ins, reg+1); - } - } else { - return 0; /* Cannot patch other instructions. */ - } - return 1; -} - -/* Drop values for all instructions on jump list. */ -static void jmp_dropval(FuncState *fs, BCPos list) -{ - for (; list != NO_JMP; list = jmp_next(fs, list)) - jmp_patchtestreg(fs, list, NO_REG); -} - -/* Patch jump instruction to target. */ -static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest) -{ - BCIns *jmp = &fs->bcbase[pc].ins; - BCPos offset = dest-(pc+1)+BCBIAS_J; - lua_assert(dest != NO_JMP); - if (offset > BCMAX_D) - err_syntax(fs->ls, LJ_ERR_XJUMP); - setbc_d(jmp, offset); -} - -/* Append to jump list. */ -static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2) -{ - if (l2 == NO_JMP) { - return; - } else if (*l1 == NO_JMP) { - *l1 = l2; - } else { - BCPos list = *l1; - BCPos next; - while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */ - list = next; - jmp_patchins(fs, list, l2); - } -} - -/* Patch jump list and preserve produced values. */ -static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget, - BCReg reg, BCPos dtarget) -{ - while (list != NO_JMP) { - BCPos next = jmp_next(fs, list); - if (jmp_patchtestreg(fs, list, reg)) - jmp_patchins(fs, list, vtarget); /* Jump to target with value. */ - else - jmp_patchins(fs, list, dtarget); /* Jump to default target. */ - list = next; - } -} - -/* Jump to following instruction. Append to list of pending jumps. */ -static void jmp_tohere(FuncState *fs, BCPos list) -{ - fs->lasttarget = fs->pc; - jmp_append(fs, &fs->jpc, list); -} - -/* Patch jump list to target. */ -static void jmp_patch(FuncState *fs, BCPos list, BCPos target) -{ - if (target == fs->pc) { - jmp_tohere(fs, list); - } else { - lua_assert(target < fs->pc); - jmp_patchval(fs, list, target, NO_REG, target); - } -} - -/* -- Bytecode register allocator ----------------------------------------- */ - -/* Bump frame size. 
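/* Illustrative walker for the jump-list threading used by jmp_next() and
** jmp_append() above: pending jumps form a singly linked list threaded
** through their own branch offsets, terminated by NO_JMP. The helper name
** is illustrative only; it relies on the declarations above.
*/
static int jmp_list_length_sketch(FuncState *fs, BCPos list)
{
  int n = 0;
  while (list != NO_JMP) {
    ptrdiff_t delta = bc_j(fs->bcbase[list].ins);  /* Offset doubles as the link. */
    list = ((BCPos)delta == NO_JMP) ? NO_JMP
                                    : (BCPos)(((ptrdiff_t)list+1)+delta);
    n++;
  }
  return n;
}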
*/ -static void bcreg_bump(FuncState *fs, BCReg n) -{ - BCReg sz = fs->freereg + n; - if (sz > fs->framesize) { - if (sz >= LJ_MAX_SLOTS) - err_syntax(fs->ls, LJ_ERR_XSLOTS); - fs->framesize = (uint8_t)sz; - } -} - -/* Reserve registers. */ -static void bcreg_reserve(FuncState *fs, BCReg n) -{ - bcreg_bump(fs, n); - fs->freereg += n; -} - -/* Free register. */ -static void bcreg_free(FuncState *fs, BCReg reg) -{ - if (reg >= fs->nactvar) { - fs->freereg--; - lua_assert(reg == fs->freereg); - } -} - -/* Free register for expression. */ -static void expr_free(FuncState *fs, ExpDesc *e) -{ - if (e->k == VNONRELOC) - bcreg_free(fs, e->u.s.info); -} - -/* -- Bytecode emitter ---------------------------------------------------- */ - -/* Emit bytecode instruction. */ -static BCPos bcemit_INS(FuncState *fs, BCIns ins) -{ - BCPos pc = fs->pc; - LexState *ls = fs->ls; - jmp_patchval(fs, fs->jpc, pc, NO_REG, pc); - fs->jpc = NO_JMP; - if (LJ_UNLIKELY(pc >= fs->bclim)) { - ptrdiff_t base = fs->bcbase - ls->bcstack; - checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions"); - lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine); - fs->bclim = (BCPos)(ls->sizebcstack - base); - fs->bcbase = ls->bcstack + base; - } - fs->bcbase[pc].ins = ins; - fs->bcbase[pc].line = ls->lastline; - fs->pc = pc+1; - return pc; -} - -#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c)) -#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d)) -#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j)) - -#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins) - -/* -- Bytecode emitter for expressions ------------------------------------ */ - -/* Discharge non-constant expression to any register. */ -static void expr_discharge(FuncState *fs, ExpDesc *e) -{ - BCIns ins; - if (e->k == VUPVAL) { - ins = BCINS_AD(BC_UGET, 0, e->u.s.info); - } else if (e->k == VGLOBAL) { - ins = BCINS_AD(BC_GGET, 0, const_str(fs, e)); - } else if (e->k == VINDEXED) { - BCReg rc = e->u.s.aux; - if ((int32_t)rc < 0) { - ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc); - } else if (rc > BCMAX_C) { - ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1)); - } else { - bcreg_free(fs, rc); - ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc); - } - bcreg_free(fs, e->u.s.info); - } else if (e->k == VCALL) { - e->u.s.info = e->u.s.aux; - e->k = VNONRELOC; - return; - } else if (e->k == VLOCAL) { - e->k = VNONRELOC; - return; - } else { - return; - } - e->u.s.info = bcemit_INS(fs, ins); - e->k = VRELOCABLE; -} - -/* Emit bytecode to set a range of registers to nil. */ -static void bcemit_nil(FuncState *fs, BCReg from, BCReg n) -{ - if (fs->pc > fs->lasttarget) { /* No jumps to current position? */ - BCIns *ip = &fs->bcbase[fs->pc-1].ins; - BCReg pto, pfrom = bc_a(*ip); - switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */ - case BC_KPRI: - if (bc_d(*ip) != ~LJ_TNIL) break; - if (from == pfrom) { - if (n == 1) return; - } else if (from == pfrom+1) { - from = pfrom; - n++; - } else { - break; - } - *ip = BCINS_AD(BC_KNIL, from, from+n-1); /* Replace KPRI. */ - return; - case BC_KNIL: - pto = bc_d(*ip); - if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */ - if (from+n-1 > pto) - setbc_d(ip, from+n-1); /* Patch previous instruction range. */ - return; - } - break; - default: - break; - } - } - /* Emit new instruction or replace old instruction. */ - bcemit_INS(fs, n == 1 ? 
BCINS_AD(BC_KPRI, from, VKNIL) : - BCINS_AD(BC_KNIL, from, from+n-1)); -} - -/* Discharge an expression to a specific register. Ignore branches. */ -static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg) -{ - BCIns ins; - expr_discharge(fs, e); - if (e->k == VKSTR) { - ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e)); - } else if (e->k == VKNUM) { -#if LJ_DUALNUM - cTValue *tv = expr_numtv(e); - if (tvisint(tv) && checki16(intV(tv))) - ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv)); - else -#else - lua_Number n = expr_numberV(e); - int32_t k = lj_num2int(n); - if (checki16(k) && n == (lua_Number)k) - ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k); - else -#endif - ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e)); -#if LJ_HASFFI - } else if (e->k == VKCDATA) { - fs->flags |= PROTO_FFI; - ins = BCINS_AD(BC_KCDATA, reg, - const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA)); -#endif - } else if (e->k == VRELOCABLE) { - setbc_a(bcptr(fs, e), reg); - goto noins; - } else if (e->k == VNONRELOC) { - if (reg == e->u.s.info) - goto noins; - ins = BCINS_AD(BC_MOV, reg, e->u.s.info); - } else if (e->k == VKNIL) { - bcemit_nil(fs, reg, 1); - goto noins; - } else if (e->k <= VKTRUE) { - ins = BCINS_AD(BC_KPRI, reg, const_pri(e)); - } else { - lua_assert(e->k == VVOID || e->k == VJMP); - return; - } - bcemit_INS(fs, ins); -noins: - e->u.s.info = reg; - e->k = VNONRELOC; -} - -/* Forward declaration. */ -static BCPos bcemit_jmp(FuncState *fs); - -/* Discharge an expression to a specific register. */ -static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg) -{ - expr_toreg_nobranch(fs, e, reg); - if (e->k == VJMP) - jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */ - if (expr_hasjump(e)) { /* Discharge expression with branches. */ - BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP; - if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) { - BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs); - jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE); - bcemit_AJ(fs, BC_JMP, fs->freereg, 1); - jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE); - jmp_tohere(fs, jval); - } - jend = fs->pc; - fs->lasttarget = jend; - jmp_patchval(fs, e->f, jend, reg, jfalse); - jmp_patchval(fs, e->t, jend, reg, jtrue); - } - e->f = e->t = NO_JMP; - e->u.s.info = reg; - e->k = VNONRELOC; -} - -/* Discharge an expression to the next free register. */ -static void expr_tonextreg(FuncState *fs, ExpDesc *e) -{ - expr_discharge(fs, e); - expr_free(fs, e); - bcreg_reserve(fs, 1); - expr_toreg(fs, e, fs->freereg - 1); -} - -/* Discharge an expression to any register. */ -static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e) -{ - expr_discharge(fs, e); - if (e->k == VNONRELOC) { - if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */ - if (e->u.s.info >= fs->nactvar) { - expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */ - return e->u.s.info; - } - } - expr_tonextreg(fs, e); /* Discharge to next register. */ - return e->u.s.info; -} - -/* Partially discharge expression to a value. */ -static void expr_toval(FuncState *fs, ExpDesc *e) -{ - if (expr_hasjump(e)) - expr_toanyreg(fs, e); - else - expr_discharge(fs, e); -} - -/* Emit store for LHS expression. 
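/* Reference predicate for the KSHORT test in expr_toreg_nobranch() above:
** a number constant fits the 16 bit immediate form only if it survives the
** integer round-trip exactly. Minimal sketch (name illustrative only):
*/
static int fits_kshort_sketch(double n)
{
  int32_t k;
  if (!(n >= -32768.0 && n < 32768.0)) return 0;  /* Outside the KSHORT range. */
  k = (int32_t)n;
  return n == (double)k;                          /* Must be an exact integer. */
}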
*/ -static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e) -{ - BCIns ins; - if (var->k == VLOCAL) { - fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW; - expr_free(fs, e); - expr_toreg(fs, e, var->u.s.info); - return; - } else if (var->k == VUPVAL) { - fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW; - expr_toval(fs, e); - if (e->k <= VKTRUE) - ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e)); - else if (e->k == VKSTR) - ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e)); - else if (e->k == VKNUM) - ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e)); - else - ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e)); - } else if (var->k == VGLOBAL) { - BCReg ra = expr_toanyreg(fs, e); - ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); - } else { - BCReg ra, rc; - lua_assert(var->k == VINDEXED); - ra = expr_toanyreg(fs, e); - rc = var->u.s.aux; - if ((int32_t)rc < 0) { - ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc); - } else if (rc > BCMAX_C) { - ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); - } else { - /* Free late alloced key reg to avoid assert on free of value reg. */ - /* This can only happen when called from expr_table(). */ - lua_assert(e->k != VNONRELOC || ra < fs->nactvar || - rc < ra || (bcreg_free(fs, rc),1)); - ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); - } - } - bcemit_INS(fs, ins); - expr_free(fs, e); -} - -/* Emit method lookup expression. */ -static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key) -{ - BCReg idx, func, obj = expr_toanyreg(fs, e); - expr_free(fs, e); - func = fs->freereg; - bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. */ - lua_assert(expr_isstrk(key)); - idx = const_str(fs, key); - if (idx <= BCMAX_C) { - bcreg_reserve(fs, 2+LJ_FR2); - bcemit_ABC(fs, BC_TGETS, func, obj, idx); - } else { - bcreg_reserve(fs, 3+LJ_FR2); - bcemit_AD(fs, BC_KSTR, func+2+LJ_FR2, idx); - bcemit_ABC(fs, BC_TGETV, func, obj, func+2+LJ_FR2); - fs->freereg--; - } - e->u.s.info = func; - e->k = VNONRELOC; -} - -/* -- Bytecode emitter for branches --------------------------------------- */ - -/* Emit unconditional branch. */ -static BCPos bcemit_jmp(FuncState *fs) -{ - BCPos jpc = fs->jpc; - BCPos j = fs->pc - 1; - BCIns *ip = &fs->bcbase[j].ins; - fs->jpc = NO_JMP; - if ((int32_t)j >= (int32_t)fs->lasttarget && bc_op(*ip) == BC_UCLO) { - setbc_j(ip, NO_JMP); - fs->lasttarget = j+1; - } else { - j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP); - } - jmp_append(fs, &j, jpc); - return j; -} - -/* Invert branch condition of bytecode instruction. */ -static void invertcond(FuncState *fs, ExpDesc *e) -{ - BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins; - setbc_op(ip, bc_op(*ip)^1); -} - -/* Emit conditional branch. */ -static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond) -{ - BCPos pc; - if (e->k == VRELOCABLE) { - BCIns *ip = bcptr(fs, e); - if (bc_op(*ip) == BC_NOT) { - *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip)); - return bcemit_jmp(fs); - } - } - if (e->k != VNONRELOC) { - bcreg_reserve(fs, 1); - expr_toreg_nobranch(fs, e, fs->freereg-1); - } - bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info); - pc = bcemit_jmp(fs); - expr_free(fs, e); - return pc; -} - -/* Emit branch on true condition. */ -static void bcemit_branch_t(FuncState *fs, ExpDesc *e) -{ - BCPos pc; - expr_discharge(fs, e); - if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) - pc = NO_JMP; /* Never jump. 
*/ - else if (e->k == VJMP) - invertcond(fs, e), pc = e->u.s.info; - else if (e->k == VKFALSE || e->k == VKNIL) - expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); - else - pc = bcemit_branch(fs, e, 0); - jmp_append(fs, &e->f, pc); - jmp_tohere(fs, e->t); - e->t = NO_JMP; -} - -/* Emit branch on false condition. */ -static void bcemit_branch_f(FuncState *fs, ExpDesc *e) -{ - BCPos pc; - expr_discharge(fs, e); - if (e->k == VKNIL || e->k == VKFALSE) - pc = NO_JMP; /* Never jump. */ - else if (e->k == VJMP) - pc = e->u.s.info; - else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) - expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); - else - pc = bcemit_branch(fs, e, 1); - jmp_append(fs, &e->t, pc); - jmp_tohere(fs, e->f); - e->f = NO_JMP; -} - -/* -- Bytecode emitter for operators -------------------------------------- */ - -/* Try constant-folding of arithmetic operators. */ -static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2) -{ - TValue o; - lua_Number n; - if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0; - n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD); - setnumV(&o, n); - if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */ - if (LJ_DUALNUM) { - int32_t k = lj_num2int(n); - if ((lua_Number)k == n) { - setintV(&e1->u.nval, k); - return 1; - } - } - setnumV(&e1->u.nval, n); - return 1; -} - -/* Emit arithmetic operator. */ -static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) -{ - BCReg rb, rc, t; - uint32_t op; - if (foldarith(opr, e1, e2)) - return; - if (opr == OPR_POW) { - op = BC_POW; - rc = expr_toanyreg(fs, e2); - rb = expr_toanyreg(fs, e1); - } else { - op = opr-OPR_ADD+BC_ADDVV; - /* Must discharge 2nd operand first since VINDEXED might free regs. */ - expr_toval(fs, e2); - if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C) - op -= BC_ADDVV-BC_ADDVN; - else - rc = expr_toanyreg(fs, e2); - /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ - lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC); - expr_toval(fs, e1); - /* Avoid two consts to satisfy bytecode constraints. */ - if (expr_isnumk(e1) && !expr_isnumk(e2) && - (t = const_num(fs, e1)) <= BCMAX_B) { - rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV; - } else { - rb = expr_toanyreg(fs, e1); - } - } - /* Using expr_free might cause asserts if the order is wrong. */ - if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; - if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; - e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc); - e1->k = VRELOCABLE; -} - -/* Emit comparison operator. */ -static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) -{ - ExpDesc *eret = e1; - BCIns ins; - expr_toval(fs, e1); - if (opr == OPR_EQ || opr == OPR_NE) { - BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV; - BCReg ra; - if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */ - ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. 
*/ - expr_toval(fs, e2); - switch (e2->k) { - case VKNIL: case VKFALSE: case VKTRUE: - ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2)); - break; - case VKSTR: - ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2)); - break; - case VKNUM: - ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2)); - break; - default: - ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2)); - break; - } - } else { - uint32_t op = opr-OPR_LT+BC_ISLT; - BCReg ra, rd; - if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */ - e1 = e2; e2 = eret; /* Swap operands. */ - op = ((op-BC_ISLT)^3)+BC_ISLT; - expr_toval(fs, e1); - } - rd = expr_toanyreg(fs, e2); - ra = expr_toanyreg(fs, e1); - ins = BCINS_AD(op, ra, rd); - } - /* Using expr_free might cause asserts if the order is wrong. */ - if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; - if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; - bcemit_INS(fs, ins); - eret->u.s.info = bcemit_jmp(fs); - eret->k = VJMP; -} - -/* Fixup left side of binary operator. */ -static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e) -{ - if (op == OPR_AND) { - bcemit_branch_t(fs, e); - } else if (op == OPR_OR) { - bcemit_branch_f(fs, e); - } else if (op == OPR_CONCAT) { - expr_tonextreg(fs, e); - } else if (op == OPR_EQ || op == OPR_NE) { - if (!expr_isk_nojump(e)) expr_toanyreg(fs, e); - } else { - if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e); - } -} - -/* Emit binary operator. */ -static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2) -{ - if (op <= OPR_POW) { - bcemit_arith(fs, op, e1, e2); - } else if (op == OPR_AND) { - lua_assert(e1->t == NO_JMP); /* List must be closed. */ - expr_discharge(fs, e2); - jmp_append(fs, &e2->f, e1->f); - *e1 = *e2; - } else if (op == OPR_OR) { - lua_assert(e1->f == NO_JMP); /* List must be closed. */ - expr_discharge(fs, e2); - jmp_append(fs, &e2->t, e1->t); - *e1 = *e2; - } else if (op == OPR_CONCAT) { - expr_toval(fs, e2); - if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { - lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1); - expr_free(fs, e1); - setbc_b(bcptr(fs, e2), e1->u.s.info); - e1->u.s.info = e2->u.s.info; - } else { - expr_tonextreg(fs, e2); - expr_free(fs, e2); - expr_free(fs, e1); - e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info); - } - e1->k = VRELOCABLE; - } else { - lua_assert(op == OPR_NE || op == OPR_EQ || - op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT); - bcemit_comp(fs, op, e1, e2); - } -} - -/* Emit unary operator. */ -static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e) -{ - if (op == BC_NOT) { - /* Swap true and false lists. */ - { BCPos temp = e->f; e->f = e->t; e->t = temp; } - jmp_dropval(fs, e->f); - jmp_dropval(fs, e->t); - expr_discharge(fs, e); - if (e->k == VKNIL || e->k == VKFALSE) { - e->k = VKTRUE; - return; - } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) { - e->k = VKFALSE; - return; - } else if (e->k == VJMP) { - invertcond(fs, e); - return; - } else if (e->k == VRELOCABLE) { - bcreg_reserve(fs, 1); - setbc_a(bcptr(fs, e), fs->freereg-1); - e->u.s.info = fs->freereg-1; - e->k = VNONRELOC; - } else { - lua_assert(e->k == VNONRELOC); - } - } else { - lua_assert(op == BC_UNM || op == BC_LEN); - if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ -#if LJ_HASFFI - if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. 
*/ - GCcdata *cd = cdataV(&e->u.nval); - int64_t *p = (int64_t *)cdataptr(cd); - if (cd->ctypeid == CTID_COMPLEX_DOUBLE) - p[1] ^= (int64_t)U64x(80000000,00000000); - else - *p = -*p; - return; - } else -#endif - if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */ - TValue *o = expr_numtv(e); - if (tvisint(o)) { - int32_t k = intV(o); - if (k == -k) - setnumV(o, -(lua_Number)k); - else - setintV(o, -k); - return; - } else { - o->u64 ^= U64x(80000000,00000000); - return; - } - } - } - expr_toanyreg(fs, e); - } - expr_free(fs, e); - e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info); - e->k = VRELOCABLE; -} - -/* -- Lexer support ------------------------------------------------------- */ - -/* Check and consume optional token. */ -static int lex_opt(LexState *ls, LexToken tok) -{ - if (ls->tok == tok) { - lj_lex_next(ls); - return 1; - } - return 0; -} - -/* Check and consume token. */ -static void lex_check(LexState *ls, LexToken tok) -{ - if (ls->tok != tok) - err_token(ls, tok); - lj_lex_next(ls); -} - -/* Check for matching token. */ -static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line) -{ - if (!lex_opt(ls, what)) { - if (line == ls->linenumber) { - err_token(ls, what); - } else { - const char *swhat = lj_lex_token2str(ls, what); - const char *swho = lj_lex_token2str(ls, who); - lj_lex_error(ls, ls->tok, LJ_ERR_XMATCH, swhat, swho, line); - } - } -} - -/* Check for string token. */ -static GCstr *lex_str(LexState *ls) -{ - GCstr *s; - if (ls->tok != TK_name && (LJ_52 || ls->tok != TK_goto)) - err_token(ls, TK_name); - s = strV(&ls->tokval); - lj_lex_next(ls); - return s; -} - -/* -- Variable handling --------------------------------------------------- */ - -#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]]) - -/* Define a new local variable. */ -static void var_new(LexState *ls, BCReg n, GCstr *name) -{ - FuncState *fs = ls->fs; - MSize vtop = ls->vtop; - checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables"); - if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { - if (ls->sizevstack >= LJ_MAX_VSTACK) - lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); - lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); - } - lua_assert((uintptr_t)name < VARNAME__MAX || - lj_tab_getstr(fs->kt, name) != NULL); - /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ - setgcref(ls->vstack[vtop].name, obj2gco(name)); - fs->varmap[fs->nactvar+n] = (uint16_t)vtop; - ls->vtop = vtop+1; -} - -#define var_new_lit(ls, n, v) \ - var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1)) - -#define var_new_fixed(ls, n, vn) \ - var_new(ls, (n), (GCstr *)(uintptr_t)(vn)) - -/* Add local variables. */ -static void var_add(LexState *ls, BCReg nvars) -{ - FuncState *fs = ls->fs; - BCReg nactvar = fs->nactvar; - while (nvars--) { - VarInfo *v = &var_get(ls, fs, nactvar); - v->startpc = fs->pc; - v->slot = nactvar++; - v->info = 0; - } - fs->nactvar = nactvar; -} - -/* Remove local variables. */ -static void var_remove(LexState *ls, BCReg tolevel) -{ - FuncState *fs = ls->fs; - while (fs->nactvar > tolevel) - var_get(ls, fs, --fs->nactvar).endpc = fs->pc; -} - -/* Lookup local variable name. */ -static BCReg var_lookup_local(FuncState *fs, GCstr *n) -{ - int i; - for (i = fs->nactvar-1; i >= 0; i--) { - if (n == strref(var_get(fs->ls, fs, i).name)) - return (BCReg)i; - } - return (BCReg)-1; /* Not found. */ -} - -/* Lookup or add upvalue index. 
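/*
** Illustrative sketch, not part of the deleted LuaJIT source: var_new_fixed()
** above smuggles the names of hidden loop-control variables through the
** GCstr pointer as tiny integers, and later code (var_new, gola_resolve,
** fs_prep_var) tells them apart with (uintptr_t)name < VARNAME__MAX. Only
** the VARNAME_* identifiers appear in the deleted source; the enumerator
** values below are placeholders.
*/
#include <stdint.h>

enum {
  VARNAME_END,
  VARNAME_FOR_IDX, VARNAME_FOR_STOP, VARNAME_FOR_STEP,
  VARNAME_FOR_GEN, VARNAME_FOR_STATE, VARNAME_FOR_CTL,
  VARNAME__MAX
};

/* Real interned strings never live in the first few bytes of the address
** space, so a plain range check distinguishes reserved names from GCstr
** pointers without any extra tag bit.
*/
static int varname_is_reserved(const void *name)
{
  return (uintptr_t)name < VARNAME__MAX;
}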
*/ -static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e) -{ - MSize i, n = fs->nuv; - for (i = 0; i < n; i++) - if (fs->uvmap[i] == vidx) - return i; /* Already exists. */ - /* Otherwise create a new one. */ - checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); - lua_assert(e->k == VLOCAL || e->k == VUPVAL); - fs->uvmap[n] = (uint16_t)vidx; - fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info); - fs->nuv = n+1; - return n; -} - -/* Forward declaration. */ -static void fscope_uvmark(FuncState *fs, BCReg level); - -/* Recursively lookup variables in enclosing functions. */ -static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first) -{ - if (fs) { - BCReg reg = var_lookup_local(fs, name); - if ((int32_t)reg >= 0) { /* Local in this function? */ - expr_init(e, VLOCAL, reg); - if (!first) - fscope_uvmark(fs, reg); /* Scope now has an upvalue. */ - return (MSize)(e->u.s.aux = (uint32_t)fs->varmap[reg]); - } else { - MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */ - if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */ - e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e); - e->k = VUPVAL; - return vidx; - } - } - } else { /* Not found in any function, must be a global. */ - expr_init(e, VGLOBAL, 0); - e->u.sval = name; - } - return (MSize)-1; /* Global. */ -} - -/* Lookup variable name. */ -#define var_lookup(ls, e) \ - var_lookup_((ls)->fs, lex_str(ls), (e), 1) - -/* -- Goto an label handling ---------------------------------------------- */ - -/* Add a new goto or label. */ -static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc) -{ - FuncState *fs = ls->fs; - MSize vtop = ls->vtop; - if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { - if (ls->sizevstack >= LJ_MAX_VSTACK) - lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); - lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); - } - lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL); - /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ - setgcref(ls->vstack[vtop].name, obj2gco(name)); - ls->vstack[vtop].startpc = pc; - ls->vstack[vtop].slot = (uint8_t)fs->nactvar; - ls->vstack[vtop].info = info; - ls->vtop = vtop+1; - return vtop; -} - -#define gola_isgoto(v) ((v)->info & VSTACK_GOTO) -#define gola_islabel(v) ((v)->info & VSTACK_LABEL) -#define gola_isgotolabel(v) ((v)->info & (VSTACK_GOTO|VSTACK_LABEL)) - -/* Patch goto to jump to label. */ -static void gola_patch(LexState *ls, VarInfo *vg, VarInfo *vl) -{ - FuncState *fs = ls->fs; - BCPos pc = vg->startpc; - setgcrefnull(vg->name); /* Invalidate pending goto. */ - setbc_a(&fs->bcbase[pc].ins, vl->slot); - jmp_patch(fs, pc, vl->startpc); -} - -/* Patch goto to close upvalues. */ -static void gola_close(LexState *ls, VarInfo *vg) -{ - FuncState *fs = ls->fs; - BCPos pc = vg->startpc; - BCIns *ip = &fs->bcbase[pc].ins; - lua_assert(gola_isgoto(vg)); - lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO); - setbc_a(ip, vg->slot); - if (bc_op(*ip) == BC_JMP) { - BCPos next = jmp_next(fs, pc); - if (next != NO_JMP) jmp_patch(fs, next, pc); /* Jump to UCLO. */ - setbc_op(ip, BC_UCLO); /* Turn into UCLO. */ - setbc_j(ip, NO_JMP); - } -} - -/* Resolve pending forward gotos for label. 
*/ -static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx) -{ - VarInfo *vg = ls->vstack + bl->vstart; - VarInfo *vl = ls->vstack + idx; - for (; vg < vl; vg++) - if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) { - if (vg->slot < vl->slot) { - GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name); - lua_assert((uintptr_t)name >= VARNAME__MAX); - ls->linenumber = ls->fs->bcbase[vg->startpc].line; - lua_assert(strref(vg->name) != NAME_BREAK); - lj_lex_error(ls, 0, LJ_ERR_XGSCOPE, - strdata(strref(vg->name)), strdata(name)); - } - gola_patch(ls, vg, vl); - } -} - -/* Fixup remaining gotos and labels for scope. */ -static void gola_fixup(LexState *ls, FuncScope *bl) -{ - VarInfo *v = ls->vstack + bl->vstart; - VarInfo *ve = ls->vstack + ls->vtop; - for (; v < ve; v++) { - GCstr *name = strref(v->name); - if (name != NULL) { /* Only consider remaining valid gotos/labels. */ - if (gola_islabel(v)) { - VarInfo *vg; - setgcrefnull(v->name); /* Invalidate label that goes out of scope. */ - for (vg = v+1; vg < ve; vg++) /* Resolve pending backward gotos. */ - if (strref(vg->name) == name && gola_isgoto(vg)) { - if ((bl->flags&FSCOPE_UPVAL) && vg->slot > v->slot) - gola_close(ls, vg); - gola_patch(ls, vg, v); - } - } else if (gola_isgoto(v)) { - if (bl->prev) { /* Propagate goto or break to outer scope. */ - bl->prev->flags |= name == NAME_BREAK ? FSCOPE_BREAK : FSCOPE_GOLA; - v->slot = bl->nactvar; - if ((bl->flags & FSCOPE_UPVAL)) - gola_close(ls, v); - } else { /* No outer scope: undefined goto label or no loop. */ - ls->linenumber = ls->fs->bcbase[v->startpc].line; - if (name == NAME_BREAK) - lj_lex_error(ls, 0, LJ_ERR_XBREAK); - else - lj_lex_error(ls, 0, LJ_ERR_XLUNDEF, strdata(name)); - } - } - } - } -} - -/* Find existing label. */ -static VarInfo *gola_findlabel(LexState *ls, GCstr *name) -{ - VarInfo *v = ls->vstack + ls->fs->bl->vstart; - VarInfo *ve = ls->vstack + ls->vtop; - for (; v < ve; v++) - if (strref(v->name) == name && gola_islabel(v)) - return v; - return NULL; -} - -/* -- Scope handling ------------------------------------------------------ */ - -/* Begin a scope. */ -static void fscope_begin(FuncState *fs, FuncScope *bl, int flags) -{ - bl->nactvar = (uint8_t)fs->nactvar; - bl->flags = flags; - bl->vstart = fs->ls->vtop; - bl->prev = fs->bl; - fs->bl = bl; - lua_assert(fs->freereg == fs->nactvar); -} - -/* End a scope. */ -static void fscope_end(FuncState *fs) -{ - FuncScope *bl = fs->bl; - LexState *ls = fs->ls; - fs->bl = bl->prev; - var_remove(ls, bl->nactvar); - fs->freereg = fs->nactvar; - lua_assert(bl->nactvar == fs->nactvar); - if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL) - bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); - if ((bl->flags & FSCOPE_BREAK)) { - if ((bl->flags & FSCOPE_LOOP)) { - MSize idx = gola_new(ls, NAME_BREAK, VSTACK_LABEL, fs->pc); - ls->vtop = idx; /* Drop break label immediately. */ - gola_resolve(ls, bl, idx); - return; - } /* else: need the fixup step to propagate the breaks. */ - } else if (!(bl->flags & FSCOPE_GOLA)) { - return; - } - gola_fixup(ls, bl); -} - -/* Mark scope as having an upvalue. */ -static void fscope_uvmark(FuncState *fs, BCReg level) -{ - FuncScope *bl; - for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev) - ; - if (bl) - bl->flags |= FSCOPE_UPVAL; -} - -/* -- Function state management ------------------------------------------- */ - -/* Fixup bytecode for prototype. 
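/*
** Illustrative sketch, not part of the deleted LuaJIT source: FuncScope
** records are allocated on the C stack of the recursive-descent parser and
** chained through ->prev, so fscope_begin()/fscope_end() above are a push
** and a pop plus fixups. A stripped-down version keeping only the count of
** active variables; the Scope/Parser names are hypothetical.
*/
typedef struct Scope {
  struct Scope *prev;   /* Enclosing scope. */
  int nactvar;          /* # of active variables outside this scope. */
} Scope;

typedef struct Parser {
  Scope *bl;            /* Innermost open scope. */
  int nactvar;          /* Currently active variables. */
} Parser;

static void scope_begin(Parser *ps, Scope *bl)
{
  bl->nactvar = ps->nactvar;
  bl->prev = ps->bl;
  ps->bl = bl;
}

static void scope_end(Parser *ps)
{
  Scope *bl = ps->bl;
  ps->bl = bl->prev;
  ps->nactvar = bl->nactvar;  /* Locals declared in the scope disappear. */
}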
*/ -static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n) -{ - BCInsLine *base = fs->bcbase; - MSize i; - pt->sizebc = n; - bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF, - fs->framesize, 0); - for (i = 1; i < n; i++) - bc[i] = base[i].ins; -} - -/* Fixup upvalues for child prototype, step #2. */ -static void fs_fixup_uv2(FuncState *fs, GCproto *pt) -{ - VarInfo *vstack = fs->ls->vstack; - uint16_t *uv = proto_uv(pt); - MSize i, n = pt->sizeuv; - for (i = 0; i < n; i++) { - VarIndex vidx = uv[i]; - if (vidx >= LJ_MAX_VSTACK) - uv[i] = vidx - LJ_MAX_VSTACK; - else if ((vstack[vidx].info & VSTACK_VAR_RW)) - uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL; - else - uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL | PROTO_UV_IMMUTABLE; - } -} - -/* Fixup constants for prototype. */ -static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr) -{ - GCtab *kt; - TValue *array; - Node *node; - MSize i, hmask; - checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants"); - checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants"); - setmref(pt->k, kptr); - pt->sizekn = fs->nkn; - pt->sizekgc = fs->nkgc; - kt = fs->kt; - array = tvref(kt->array); - for (i = 0; i < kt->asize; i++) - if (tvhaskslot(&array[i])) { - TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])]; - if (LJ_DUALNUM) - setintV(tv, (int32_t)i); - else - setnumV(tv, (lua_Number)i); - } - node = noderef(kt->node); - hmask = kt->hmask; - for (i = 0; i <= hmask; i++) { - Node *n = &node[i]; - if (tvhaskslot(&n->val)) { - ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val); - lua_assert(!tvisint(&n->key)); - if (tvisnum(&n->key)) { - TValue *tv = &((TValue *)kptr)[kidx]; - if (LJ_DUALNUM) { - lua_Number nn = numV(&n->key); - int32_t k = lj_num2int(nn); - lua_assert(!tvismzero(&n->key)); - if ((lua_Number)k == nn) - setintV(tv, k); - else - *tv = n->key; - } else { - *tv = n->key; - } - } else { - GCobj *o = gcV(&n->key); - setgcref(((GCRef *)kptr)[~kidx], o); - lj_gc_objbarrier(fs->L, pt, o); - if (tvisproto(&n->key)) - fs_fixup_uv2(fs, gco2pt(o)); - } - } - } -} - -/* Fixup upvalues for prototype, step #1. */ -static void fs_fixup_uv1(FuncState *fs, GCproto *pt, uint16_t *uv) -{ - setmref(pt->uv, uv); - pt->sizeuv = fs->nuv; - memcpy(uv, fs->uvtmp, fs->nuv*sizeof(VarIndex)); -} - -#ifndef LUAJIT_DISABLE_DEBUGINFO -/* Prepare lineinfo for prototype. */ -static size_t fs_prep_line(FuncState *fs, BCLine numline) -{ - return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2); -} - -/* Fixup lineinfo for prototype. */ -static void fs_fixup_line(FuncState *fs, GCproto *pt, - void *lineinfo, BCLine numline) -{ - BCInsLine *base = fs->bcbase + 1; - BCLine first = fs->linedefined; - MSize i = 0, n = fs->pc-1; - pt->firstline = fs->linedefined; - pt->numline = numline; - setmref(pt->lineinfo, lineinfo); - if (LJ_LIKELY(numline < 256)) { - uint8_t *li = (uint8_t *)lineinfo; - do { - BCLine delta = base[i].line - first; - lua_assert(delta >= 0 && delta < 256); - li[i] = (uint8_t)delta; - } while (++i < n); - } else if (LJ_LIKELY(numline < 65536)) { - uint16_t *li = (uint16_t *)lineinfo; - do { - BCLine delta = base[i].line - first; - lua_assert(delta >= 0 && delta < 65536); - li[i] = (uint16_t)delta; - } while (++i < n); - } else { - uint32_t *li = (uint32_t *)lineinfo; - do { - BCLine delta = base[i].line - first; - lua_assert(delta >= 0); - li[i] = (uint32_t)delta; - } while (++i < n); - } -} - -/* Prepare variable info for prototype. 
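/*
** Illustrative sketch, not part of the deleted LuaJIT source: fs_prep_line()
** above sizes the line table by storing, per bytecode instruction after the
** header, the delta from the function's first line in 1, 2 or 4 bytes,
** depending on how many lines the function spans. The same computation
** without the shift trick; lineinfo_size() is a hypothetical name.
*/
#include <stddef.h>

static size_t lineinfo_size(size_t ninstr, unsigned long numline)
{
  size_t width = numline < 256 ? 1 : numline < 65536 ? 2 : 4;
  return (ninstr - 1) * width;  /* No entry for the FUNCF/FUNCV header. */
}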
*/ -static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar) -{ - VarInfo *vs =ls->vstack, *ve; - MSize i, n; - BCPos lastpc; - lj_buf_reset(&ls->sb); /* Copy to temp. string buffer. */ - /* Store upvalue names. */ - for (i = 0, n = fs->nuv; i < n; i++) { - GCstr *s = strref(vs[fs->uvmap[i]].name); - MSize len = s->len+1; - char *p = lj_buf_more(&ls->sb, len); - p = lj_buf_wmem(p, strdata(s), len); - setsbufP(&ls->sb, p); - } - *ofsvar = sbuflen(&ls->sb); - lastpc = 0; - /* Store local variable names and compressed ranges. */ - for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) { - if (!gola_isgotolabel(vs)) { - GCstr *s = strref(vs->name); - BCPos startpc; - char *p; - if ((uintptr_t)s < VARNAME__MAX) { - p = lj_buf_more(&ls->sb, 1 + 2*5); - *p++ = (char)(uintptr_t)s; - } else { - MSize len = s->len+1; - p = lj_buf_more(&ls->sb, len + 2*5); - p = lj_buf_wmem(p, strdata(s), len); - } - startpc = vs->startpc; - p = lj_strfmt_wuleb128(p, startpc-lastpc); - p = lj_strfmt_wuleb128(p, vs->endpc-startpc); - setsbufP(&ls->sb, p); - lastpc = startpc; - } - } - lj_buf_putb(&ls->sb, '\0'); /* Terminator for varinfo. */ - return sbuflen(&ls->sb); -} - -/* Fixup variable info for prototype. */ -static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar) -{ - setmref(pt->uvinfo, p); - setmref(pt->varinfo, (char *)p + ofsvar); - memcpy(p, sbufB(&ls->sb), sbuflen(&ls->sb)); /* Copy from temp. buffer. */ -} -#else - -/* Initialize with empty debug info, if disabled. */ -#define fs_prep_line(fs, numline) (UNUSED(numline), 0) -#define fs_fixup_line(fs, pt, li, numline) \ - pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL) -#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0) -#define fs_fixup_var(ls, pt, p, ofsvar) \ - setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL) - -#endif - -/* Check if bytecode op returns. */ -static int bcopisret(BCOp op) -{ - switch (op) { - case BC_CALLMT: case BC_CALLT: - case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1: - return 1; - default: - return 0; - } -} - -/* Fixup return instruction for prototype. */ -static void fs_fixup_ret(FuncState *fs) -{ - BCPos lastpc = fs->pc; - if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) { - if ((fs->bl->flags & FSCOPE_UPVAL)) - bcemit_AJ(fs, BC_UCLO, 0, 0); - bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */ - } - fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */ - fscope_end(fs); - lua_assert(fs->bl == NULL); - /* May need to fixup returns encoded before first function was created. */ - if (fs->flags & PROTO_FIXUP_RETURN) { - BCPos pc; - for (pc = 1; pc < lastpc; pc++) { - BCIns ins = fs->bcbase[pc].ins; - BCPos offset; - switch (bc_op(ins)) { - case BC_CALLMT: case BC_CALLT: - case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1: - offset = bcemit_INS(fs, ins); /* Copy original instruction. */ - fs->bcbase[offset].line = fs->bcbase[pc].line; - offset = offset-(pc+1)+BCBIAS_J; - if (offset > BCMAX_D) - err_syntax(fs->ls, LJ_ERR_XFIXUP); - /* Replace with UCLO plus branch. */ - fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset); - break; - case BC_UCLO: - return; /* We're done. */ - default: - break; - } - } - } -} - -/* Finish a FuncState and return the new prototype. */ -static GCproto *fs_finish(LexState *ls, BCLine line) -{ - lua_State *L = ls->L; - FuncState *fs = ls->fs; - BCLine numline = line - fs->linedefined; - size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar; - GCproto *pt; - - /* Apply final fixups. 
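/*
** Illustrative sketch, not part of the deleted LuaJIT source: the compressed
** startpc/endpc ranges above are written with lj_strfmt_wuleb128(), i.e.
** unsigned LEB128. A self-contained encoder with the behaviour that usage
** suggests (the original helper itself is not shown in this patch hunk).
*/
#include <stdint.h>

/* 7 payload bits per byte, high bit set on every byte except the last,
** so the small deltas of typical variable ranges take a single byte.
** Returns the advanced write pointer.
*/
static uint8_t *write_uleb128(uint8_t *p, uint32_t v)
{
  while (v >= 0x80) {
    *p++ = (uint8_t)((v & 0x7f) | 0x80);
    v >>= 7;
  }
  *p++ = (uint8_t)v;
  return p;
}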
*/ - fs_fixup_ret(fs); - - /* Calculate total size of prototype including all colocated arrays. */ - sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef); - sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1); - ofsk = sizept; sizept += fs->nkn*sizeof(TValue); - ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2; - ofsli = sizept; sizept += fs_prep_line(fs, numline); - ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar); - - /* Allocate prototype and initialize its fields. */ - pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept); - pt->gct = ~LJ_TPROTO; - pt->sizept = (MSize)sizept; - pt->trace = 0; - pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN)); - pt->numparams = fs->numparams; - pt->framesize = fs->framesize; - setgcref(pt->chunkname, obj2gco(ls->chunkname)); - - /* Close potentially uninitialized gap between bc and kgc. */ - *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0; - fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc); - fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk)); - fs_fixup_uv1(fs, pt, (uint16_t *)((char *)pt + ofsuv)); - fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline); - fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar); - - lj_vmevent_send(L, BC, - setprotoV(L, L->top++, pt); - ); - - L->top--; /* Pop table of constants. */ - ls->vtop = fs->vbase; /* Reset variable stack. */ - ls->fs = fs->prev; - lua_assert(ls->fs != NULL || ls->tok == TK_eof); - return pt; -} - -/* Initialize a new FuncState. */ -static void fs_init(LexState *ls, FuncState *fs) -{ - lua_State *L = ls->L; - fs->prev = ls->fs; ls->fs = fs; /* Append to list. */ - fs->ls = ls; - fs->vbase = ls->vtop; - fs->L = L; - fs->pc = 0; - fs->lasttarget = 0; - fs->jpc = NO_JMP; - fs->freereg = 0; - fs->nkgc = 0; - fs->nkn = 0; - fs->nactvar = 0; - fs->nuv = 0; - fs->bl = NULL; - fs->flags = 0; - fs->framesize = 1; /* Minimum frame size. */ - fs->kt = lj_tab_new(L, 0, 0); - /* Anchor table of constants in stack to avoid being collected. */ - settabV(L, L->top, fs->kt); - incr_top(L); -} - -/* -- Expressions --------------------------------------------------------- */ - -/* Forward declaration. */ -static void expr(LexState *ls, ExpDesc *v); - -/* Return string expression. */ -static void expr_str(LexState *ls, ExpDesc *e) -{ - expr_init(e, VKSTR, 0); - e->u.sval = lex_str(ls); -} - -/* Return index expression. */ -static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e) -{ - /* Already called: expr_toval(fs, e). */ - t->k = VINDEXED; - if (expr_isnumk(e)) { -#if LJ_DUALNUM - if (tvisint(expr_numtv(e))) { - int32_t k = intV(expr_numtv(e)); - if (checku8(k)) { - t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */ - return; - } - } -#else - lua_Number n = expr_numberV(e); - int32_t k = lj_num2int(n); - if (checku8(k) && n == (lua_Number)k) { - t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */ - return; - } -#endif - } else if (expr_isstrk(e)) { - BCReg idx = const_str(fs, e); - if (idx <= BCMAX_C) { - t->u.s.aux = ~idx; /* -256..-1: const string key */ - return; - } - } - t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */ -} - -/* Parse index expression with named field. */ -static void expr_field(LexState *ls, ExpDesc *v) -{ - FuncState *fs = ls->fs; - ExpDesc key; - expr_toanyreg(fs, v); - lj_lex_next(ls); /* Skip dot or colon. */ - expr_str(ls, &key); - expr_index(fs, v, &key); -} - -/* Parse index expression with brackets. 
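/*
** Illustrative sketch, not part of the deleted LuaJIT source: expr_index()
** above packs three kinds of table keys into the aux field, as its comments
** spell out -- 0..255 is a register, 256..511 a small constant integer key,
** and negative values (~idx) a constant string key. A hypothetical decoder,
** assuming BCMAX_C is 255 (the 8-bit C operand).
*/
#include <stdint.h>

#define BCMAX_C 255  /* Assumed maximum of the 8-bit C operand. */

enum IndexKeyKind { KEY_REGISTER, KEY_BYTE_CONST, KEY_STRING_CONST };

static enum IndexKeyKind decode_index_key(int32_t aux, uint32_t *payload)
{
  if (aux < 0) {                 /* -256..-1: constant string key. */
    *payload = (uint32_t)~aux;
    return KEY_STRING_CONST;
  } else if (aux > BCMAX_C) {    /* 256..511: constant byte key. */
    *payload = (uint32_t)(aux - (BCMAX_C + 1));
    return KEY_BYTE_CONST;
  } else {                       /* 0..255: register holding the key. */
    *payload = (uint32_t)aux;
    return KEY_REGISTER;
  }
}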
*/ -static void expr_bracket(LexState *ls, ExpDesc *v) -{ - lj_lex_next(ls); /* Skip '['. */ - expr(ls, v); - expr_toval(ls->fs, v); - lex_check(ls, ']'); -} - -/* Get value of constant expression. */ -static void expr_kvalue(TValue *v, ExpDesc *e) -{ - if (e->k <= VKTRUE) { - setpriV(v, ~(uint32_t)e->k); - } else if (e->k == VKSTR) { - setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR); - } else { - lua_assert(tvisnumber(expr_numtv(e))); - *v = *expr_numtv(e); - } -} - -/* Parse table constructor expression. */ -static void expr_table(LexState *ls, ExpDesc *e) -{ - FuncState *fs = ls->fs; - BCLine line = ls->linenumber; - GCtab *t = NULL; - int vcall = 0, needarr = 0, fixt = 0; - uint32_t narr = 1; /* First array index. */ - uint32_t nhash = 0; /* Number of hash entries. */ - BCReg freg = fs->freereg; - BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0); - expr_init(e, VNONRELOC, freg); - bcreg_reserve(fs, 1); - freg++; - lex_check(ls, '{'); - while (ls->tok != '}') { - ExpDesc key, val; - vcall = 0; - if (ls->tok == '[') { - expr_bracket(ls, &key); /* Already calls expr_toval. */ - if (!expr_isk(&key)) expr_index(fs, e, &key); - if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++; - lex_check(ls, '='); - } else if ((ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) && - lj_lex_lookahead(ls) == '=') { - expr_str(ls, &key); - lex_check(ls, '='); - nhash++; - } else { - expr_init(&key, VKNUM, 0); - setintV(&key.u.nval, (int)narr); - narr++; - needarr = vcall = 1; - } - expr(ls, &val); - if (expr_isk(&key) && key.k != VKNIL && - (key.k == VKSTR || expr_isk_nojump(&val))) { - TValue k, *v; - if (!t) { /* Create template table on demand. */ - BCReg kidx; - t = lj_tab_new(fs->L, needarr ? narr : 0, hsize2hbits(nhash)); - kidx = const_gc(fs, obj2gco(t), LJ_TTAB); - fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx); - } - vcall = 0; - expr_kvalue(&k, &key); - v = lj_tab_set(fs->L, t, &k); - lj_gc_anybarriert(fs->L, t); - if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */ - expr_kvalue(v, &val); - } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */ - settabV(fs->L, v, t); /* Preserve key with table itself as value. */ - fixt = 1; /* Fix this later, after all resizes. */ - goto nonconst; - } - } else { - nonconst: - if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; } - if (expr_isk(&key)) expr_index(fs, e, &key); - bcemit_store(fs, e, &val); - } - fs->freereg = freg; - if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break; - } - lex_match(ls, '}', '{', line); - if (vcall) { - BCInsLine *ilp = &fs->bcbase[fs->pc-1]; - ExpDesc en; - lua_assert(bc_a(ilp->ins) == freg && - bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB)); - expr_init(&en, VKNUM, 0); - en.u.nval.u32.lo = narr-1; - en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */ - if (narr > 256) { fs->pc--; ilp--; } - ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en)); - setbc_b(&ilp[-1].ins, 0); - } - if (pc == fs->pc-1) { /* Make expr relocable if possible. */ - e->u.s.info = pc; - fs->freereg--; - e->k = VRELOCABLE; - } else { - e->k = VNONRELOC; /* May have been changed by expr_index. */ - } - if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. 
*/ - BCIns *ip = &fs->bcbase[pc].ins; - if (!needarr) narr = 0; - else if (narr < 3) narr = 3; - else if (narr > 0x7ff) narr = 0x7ff; - setbc_d(ip, narr|(hsize2hbits(nhash)<<11)); - } else { - if (needarr && t->asize < narr) - lj_tab_reasize(fs->L, t, narr-1); - if (fixt) { /* Fix value for dummy keys in template table. */ - Node *node = noderef(t->node); - uint32_t i, hmask = t->hmask; - for (i = 0; i <= hmask; i++) { - Node *n = &node[i]; - if (tvistab(&n->val)) { - lua_assert(tabV(&n->val) == t); - setnilV(&n->val); /* Turn value into nil. */ - } - } - } - lj_gc_check(fs->L); - } -} - -/* Parse function parameters. */ -static BCReg parse_params(LexState *ls, int needself) -{ - FuncState *fs = ls->fs; - BCReg nparams = 0; - lex_check(ls, '('); - if (needself) - var_new_lit(ls, nparams++, "self"); - if (ls->tok != ')') { - do { - if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) { - var_new(ls, nparams++, lex_str(ls)); - } else if (ls->tok == TK_dots) { - lj_lex_next(ls); - fs->flags |= PROTO_VARARG; - break; - } else { - err_syntax(ls, LJ_ERR_XPARAM); - } - } while (lex_opt(ls, ',')); - } - var_add(ls, nparams); - lua_assert(fs->nactvar == nparams); - bcreg_reserve(fs, nparams); - lex_check(ls, ')'); - return nparams; -} - -/* Forward declaration. */ -static void parse_chunk(LexState *ls); - -/* Parse body of a function. */ -static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line) -{ - FuncState fs, *pfs = ls->fs; - FuncScope bl; - GCproto *pt; - ptrdiff_t oldbase = pfs->bcbase - ls->bcstack; - fs_init(ls, &fs); - fscope_begin(&fs, &bl, 0); - fs.linedefined = line; - fs.numparams = (uint8_t)parse_params(ls, needself); - fs.bcbase = pfs->bcbase + pfs->pc; - fs.bclim = pfs->bclim - pfs->pc; - bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */ - parse_chunk(ls); - if (ls->tok != TK_end) lex_match(ls, TK_end, TK_function, line); - pt = fs_finish(ls, (ls->lastline = ls->linenumber)); - pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */ - pfs->bclim = (BCPos)(ls->sizebcstack - oldbase); - /* Store new prototype in the constant array of the parent. */ - expr_init(e, VRELOCABLE, - bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO))); -#if LJ_HASFFI - pfs->flags |= (fs.flags & PROTO_FFI); -#endif - if (!(pfs->flags & PROTO_CHILD)) { - if (pfs->flags & PROTO_HAS_RETURN) - pfs->flags |= PROTO_FIXUP_RETURN; - pfs->flags |= PROTO_CHILD; - } - lj_lex_next(ls); -} - -/* Parse expression list. Last expression is left open. */ -static BCReg expr_list(LexState *ls, ExpDesc *v) -{ - BCReg n = 1; - expr(ls, v); - while (lex_opt(ls, ',')) { - expr_tonextreg(ls->fs, v); - expr(ls, v); - n++; - } - return n; -} - -/* Parse function argument list. */ -static void parse_args(LexState *ls, ExpDesc *e) -{ - FuncState *fs = ls->fs; - ExpDesc args; - BCIns ins; - BCReg base; - BCLine line = ls->linenumber; - if (ls->tok == '(') { -#if !LJ_52 - if (line != ls->lastline) - err_syntax(ls, LJ_ERR_XAMBIG); -#endif - lj_lex_next(ls); - if (ls->tok == ')') { /* f(). */ - args.k = VVOID; - } else { - expr_list(ls, &args); - if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */ - setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */ - } - lex_match(ls, ')', '(', line); - } else if (ls->tok == '{') { - expr_table(ls, &args); - } else if (ls->tok == TK_string) { - expr_init(&args, VKSTR, 0); - args.u.sval = strV(&ls->tokval); - lj_lex_next(ls); - } else { - err_syntax(ls, LJ_ERR_XFUNARG); - return; /* Silence compiler. 
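/*
** Illustrative sketch, not part of the deleted LuaJIT source: when no
** template table is needed, expr_table() above rewrites the TNEW D operand
** per the "hhhhhaaaaaaaaaaa" layout -- low 11 bits hold the array-size hint
** (saturated at 0x7ff), the upper 5 bits the log2 of the hash size.
** tnew_d() is a hypothetical name for that packing.
*/
#include <stdint.h>

static uint32_t tnew_d(uint32_t asize, uint32_t hbits)
{
  if (asize > 0x7ff) asize = 0x7ff;  /* Array hint saturates at 2047. */
  return asize | (hbits << 11);      /* D = hhhhh aaaaaaaaaaa */
}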
*/ - } - lua_assert(e->k == VNONRELOC); - base = e->u.s.info; /* Base register for call. */ - if (args.k == VCALL) { - ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2); - } else { - if (args.k != VVOID) - expr_tonextreg(fs, &args); - ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base - LJ_FR2); - } - expr_init(e, VCALL, bcemit_INS(fs, ins)); - e->u.s.aux = base; - fs->bcbase[fs->pc - 1].line = line; - fs->freereg = base+1; /* Leave one result by default. */ -} - -/* Parse primary expression. */ -static void expr_primary(LexState *ls, ExpDesc *v) -{ - FuncState *fs = ls->fs; - /* Parse prefix expression. */ - if (ls->tok == '(') { - BCLine line = ls->linenumber; - lj_lex_next(ls); - expr(ls, v); - lex_match(ls, ')', '(', line); - expr_discharge(ls->fs, v); - } else if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) { - var_lookup(ls, v); - } else { - err_syntax(ls, LJ_ERR_XSYMBOL); - } - for (;;) { /* Parse multiple expression suffixes. */ - if (ls->tok == '.') { - expr_field(ls, v); - } else if (ls->tok == '[') { - ExpDesc key; - expr_toanyreg(fs, v); - expr_bracket(ls, &key); - expr_index(fs, v, &key); - } else if (ls->tok == ':') { - ExpDesc key; - lj_lex_next(ls); - expr_str(ls, &key); - bcemit_method(fs, v, &key); - parse_args(ls, v); - } else if (ls->tok == '(' || ls->tok == TK_string || ls->tok == '{') { - expr_tonextreg(fs, v); - if (LJ_FR2) bcreg_reserve(fs, 1); - parse_args(ls, v); - } else { - break; - } - } -} - -/* Parse simple expression. */ -static void expr_simple(LexState *ls, ExpDesc *v) -{ - switch (ls->tok) { - case TK_number: - expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokval)) ? VKCDATA : VKNUM, 0); - copyTV(ls->L, &v->u.nval, &ls->tokval); - break; - case TK_string: - expr_init(v, VKSTR, 0); - v->u.sval = strV(&ls->tokval); - break; - case TK_nil: - expr_init(v, VKNIL, 0); - break; - case TK_true: - expr_init(v, VKTRUE, 0); - break; - case TK_false: - expr_init(v, VKFALSE, 0); - break; - case TK_dots: { /* Vararg. */ - FuncState *fs = ls->fs; - BCReg base; - checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS); - bcreg_reserve(fs, 1); - base = fs->freereg-1; - expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams)); - v->u.s.aux = base; - break; - } - case '{': /* Table constructor. */ - expr_table(ls, v); - return; - case TK_function: - lj_lex_next(ls); - parse_body(ls, v, 0, ls->linenumber); - return; - default: - expr_primary(ls, v); - return; - } - lj_lex_next(ls); -} - -/* Manage syntactic levels to avoid blowing up the stack. */ -static void synlevel_begin(LexState *ls) -{ - if (++ls->level >= LJ_MAX_XLEVEL) - lj_lex_error(ls, 0, LJ_ERR_XLEVELS); -} - -#define synlevel_end(ls) ((ls)->level--) - -/* Convert token to binary operator. */ -static BinOpr token2binop(LexToken tok) -{ - switch (tok) { - case '+': return OPR_ADD; - case '-': return OPR_SUB; - case '*': return OPR_MUL; - case '/': return OPR_DIV; - case '%': return OPR_MOD; - case '^': return OPR_POW; - case TK_concat: return OPR_CONCAT; - case TK_ne: return OPR_NE; - case TK_eq: return OPR_EQ; - case '<': return OPR_LT; - case TK_le: return OPR_LE; - case '>': return OPR_GT; - case TK_ge: return OPR_GE; - case TK_and: return OPR_AND; - case TK_or: return OPR_OR; - default: return OPR_NOBINOPR; - } -} - -/* Priorities for each binary operator. ORDER OPR. */ -static const struct { - uint8_t left; /* Left priority. */ - uint8_t right; /* Right priority. 
*/ -} priority[] = { - {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */ - {10,9}, {5,4}, /* POW CONCAT (right associative) */ - {3,3}, {3,3}, /* EQ NE */ - {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE GT LE */ - {2,2}, {1,1} /* AND OR */ -}; - -#define UNARY_PRIORITY 8 /* Priority for unary operators. */ - -/* Forward declaration. */ -static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit); - -/* Parse unary expression. */ -static void expr_unop(LexState *ls, ExpDesc *v) -{ - BCOp op; - if (ls->tok == TK_not) { - op = BC_NOT; - } else if (ls->tok == '-') { - op = BC_UNM; - } else if (ls->tok == '#') { - op = BC_LEN; - } else { - expr_simple(ls, v); - return; - } - lj_lex_next(ls); - expr_binop(ls, v, UNARY_PRIORITY); - bcemit_unop(ls->fs, op, v); -} - -/* Parse binary expressions with priority higher than the limit. */ -static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit) -{ - BinOpr op; - synlevel_begin(ls); - expr_unop(ls, v); - op = token2binop(ls->tok); - while (op != OPR_NOBINOPR && priority[op].left > limit) { - ExpDesc v2; - BinOpr nextop; - lj_lex_next(ls); - bcemit_binop_left(ls->fs, op, v); - /* Parse binary expression with higher priority. */ - nextop = expr_binop(ls, &v2, priority[op].right); - bcemit_binop(ls->fs, op, v, &v2); - op = nextop; - } - synlevel_end(ls); - return op; /* Return unconsumed binary operator (if any). */ -} - -/* Parse expression. */ -static void expr(LexState *ls, ExpDesc *v) -{ - expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */ -} - -/* Assign expression to the next register. */ -static void expr_next(LexState *ls) -{ - ExpDesc e; - expr(ls, &e); - expr_tonextreg(ls->fs, &e); -} - -/* Parse conditional expression. */ -static BCPos expr_cond(LexState *ls) -{ - ExpDesc v; - expr(ls, &v); - if (v.k == VKNIL) v.k = VKFALSE; - bcemit_branch_t(ls->fs, &v); - return v.f; -} - -/* -- Assignments --------------------------------------------------------- */ - -/* List of LHS variables. */ -typedef struct LHSVarList { - ExpDesc v; /* LHS variable. */ - struct LHSVarList *prev; /* Link to previous LHS variable. */ -} LHSVarList; - -/* Eliminate write-after-read hazards for local variable assignment. */ -static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v) -{ - FuncState *fs = ls->fs; - BCReg reg = v->u.s.info; /* Check against this variable. */ - BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */ - int hazard = 0; - for (; lh; lh = lh->prev) { - if (lh->v.k == VINDEXED) { - if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */ - hazard = 1; - lh->v.u.s.info = tmp; - } - if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */ - hazard = 1; - lh->v.u.s.aux = tmp; - } - } - } - if (hazard) { - bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */ - bcreg_reserve(fs, 1); - } -} - -/* Adjust LHS/RHS of an assignment. */ -static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e) -{ - FuncState *fs = ls->fs; - int32_t extra = (int32_t)nvars - (int32_t)nexps; - if (e->k == VCALL) { - extra++; /* Compensate for the VCALL itself. */ - if (extra < 0) extra = 0; - setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */ - if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1); - } else { - if (e->k != VVOID) - expr_tonextreg(fs, e); /* Close last expression. */ - if (extra > 0) { /* Leftover LHS are set to nil. */ - BCReg reg = fs->freereg; - bcreg_reserve(fs, (BCReg)extra); - bcemit_nil(fs, reg, (BCReg)extra); - } - } -} - -/* Recursively parse assignment statement. 
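/*
** Illustrative sketch, not part of the deleted LuaJIT source: expr_binop()
** above is classic precedence climbing. Each operator has a left and a right
** priority and the recursive call uses the *right* one as the new limit,
** which is how '^' and '..' become right-associative (their right priority
** is one below their left). A compact runnable version over a hypothetical
** string of digits, '+' and '^'; none of these helpers exist in LuaJIT.
*/
#include <math.h>
#include <stdio.h>

static const char *src;  /* Cursor into the toy expression. */

static int left_prio(char op)  { return op == '^' ? 10 : op == '+' ? 6 : -1; }
static int right_prio(char op) { return op == '^' ?  9 : op == '+' ? 6 : -1; }

static double parse_operand(void)
{
  double v = 0;
  while (*src >= '0' && *src <= '9') v = v*10 + (*src++ - '0');
  return v;
}

/* Parse binary expressions with priority higher than the limit. */
static double parse_binop(int limit)
{
  double v = parse_operand();
  while (left_prio(*src) > limit) {
    char op = *src++;
    double v2 = parse_binop(right_prio(op));  /* Recurse with right prio. */
    v = (op == '+') ? v + v2 : pow(v, v2);
  }
  return v;
}

int main(void)
{
  src = "2^3^2";
  printf("%g\n", parse_binop(0));  /* Prints 512, i.e. 2^(3^2). */
  return 0;
}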
*/ -static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars) -{ - ExpDesc e; - checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX); - if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */ - LHSVarList vl; - vl.prev = lh; - expr_primary(ls, &vl.v); - if (vl.v.k == VLOCAL) - assign_hazard(ls, lh, &vl.v); - checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names"); - parse_assignment(ls, &vl, nvars+1); - } else { /* Parse RHS. */ - BCReg nexps; - lex_check(ls, '='); - nexps = expr_list(ls, &e); - if (nexps == nvars) { - if (e.k == VCALL) { - if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */ - ls->fs->freereg--; - e.k = VRELOCABLE; - } else { /* Multiple call results. */ - e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */ - e.k = VNONRELOC; - } - } - bcemit_store(ls->fs, &lh->v, &e); - return; - } - assign_adjust(ls, nvars, nexps, &e); - if (nexps > nvars) - ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */ - } - /* Assign RHS to LHS and recurse downwards. */ - expr_init(&e, VNONRELOC, ls->fs->freereg-1); - bcemit_store(ls->fs, &lh->v, &e); -} - -/* Parse call statement or assignment. */ -static void parse_call_assign(LexState *ls) -{ - FuncState *fs = ls->fs; - LHSVarList vl; - expr_primary(ls, &vl.v); - if (vl.v.k == VCALL) { /* Function call statement. */ - setbc_b(bcptr(fs, &vl.v), 1); /* No results. */ - } else { /* Start of an assignment. */ - vl.prev = NULL; - parse_assignment(ls, &vl, 1); - } -} - -/* Parse 'local' statement. */ -static void parse_local(LexState *ls) -{ - if (lex_opt(ls, TK_function)) { /* Local function declaration. */ - ExpDesc v, b; - FuncState *fs = ls->fs; - var_new(ls, 0, lex_str(ls)); - expr_init(&v, VLOCAL, fs->freereg); - v.u.s.aux = fs->varmap[fs->freereg]; - bcreg_reserve(fs, 1); - var_add(ls, 1); - parse_body(ls, &b, 0, ls->linenumber); - /* bcemit_store(fs, &v, &b) without setting VSTACK_VAR_RW. */ - expr_free(fs, &b); - expr_toreg(fs, &b, v.u.s.info); - /* The upvalue is in scope, but the local is only valid after the store. */ - var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc; - } else { /* Local variable declaration. */ - ExpDesc e; - BCReg nexps, nvars = 0; - do { /* Collect LHS. */ - var_new(ls, nvars++, lex_str(ls)); - } while (lex_opt(ls, ',')); - if (lex_opt(ls, '=')) { /* Optional RHS. */ - nexps = expr_list(ls, &e); - } else { /* Or implicitly set to nil. */ - e.k = VVOID; - nexps = 0; - } - assign_adjust(ls, nvars, nexps, &e); - var_add(ls, nvars); - } -} - -/* Parse 'function' statement. */ -static void parse_func(LexState *ls, BCLine line) -{ - FuncState *fs; - ExpDesc v, b; - int needself = 0; - lj_lex_next(ls); /* Skip 'function'. */ - /* Parse function name. */ - var_lookup(ls, &v); - while (ls->tok == '.') /* Multiple dot-separated fields. */ - expr_field(ls, &v); - if (ls->tok == ':') { /* Optional colon to signify method call. */ - needself = 1; - expr_field(ls, &v); - } - parse_body(ls, &b, needself, line); - fs = ls->fs; - bcemit_store(fs, &v, &b); - fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */ -} - -/* -- Control transfer statements ----------------------------------------- */ - -/* Check for end of block. */ -static int parse_isend(LexToken tok) -{ - switch (tok) { - case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof: - return 1; - default: - return 0; - } -} - -/* Parse 'return' statement. 
*/ -static void parse_return(LexState *ls) -{ - BCIns ins; - FuncState *fs = ls->fs; - lj_lex_next(ls); /* Skip 'return'. */ - fs->flags |= PROTO_HAS_RETURN; - if (parse_isend(ls->tok) || ls->tok == ';') { /* Bare return. */ - ins = BCINS_AD(BC_RET0, 0, 1); - } else { /* Return with one or more values. */ - ExpDesc e; /* Receives the _last_ expression in the list. */ - BCReg nret = expr_list(ls, &e); - if (nret == 1) { /* Return one result. */ - if (e.k == VCALL) { /* Check for tail call. */ - BCIns *ip = bcptr(fs, &e); - /* It doesn't pay off to add BC_VARGT just for 'return ...'. */ - if (bc_op(*ip) == BC_VARG) goto notailcall; - fs->pc--; - ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip)); - } else { /* Can return the result from any register. */ - ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2); - } - } else { - if (e.k == VCALL) { /* Append all results from a call. */ - notailcall: - setbc_b(bcptr(fs, &e), 0); - ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar); - } else { - expr_tonextreg(fs, &e); /* Force contiguous registers. */ - ins = BCINS_AD(BC_RET, fs->nactvar, nret+1); - } - } - } - if (fs->flags & PROTO_CHILD) - bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */ - bcemit_INS(fs, ins); -} - -/* Parse 'break' statement. */ -static void parse_break(LexState *ls) -{ - ls->fs->bl->flags |= FSCOPE_BREAK; - gola_new(ls, NAME_BREAK, VSTACK_GOTO, bcemit_jmp(ls->fs)); -} - -/* Parse 'goto' statement. */ -static void parse_goto(LexState *ls) -{ - FuncState *fs = ls->fs; - GCstr *name = lex_str(ls); - VarInfo *vl = gola_findlabel(ls, name); - if (vl) /* Treat backwards goto within same scope like a loop. */ - bcemit_AJ(fs, BC_LOOP, vl->slot, -1); /* No BC range check. */ - fs->bl->flags |= FSCOPE_GOLA; - gola_new(ls, name, VSTACK_GOTO, bcemit_jmp(fs)); -} - -/* Parse label. */ -static void parse_label(LexState *ls) -{ - FuncState *fs = ls->fs; - GCstr *name; - MSize idx; - fs->lasttarget = fs->pc; - fs->bl->flags |= FSCOPE_GOLA; - lj_lex_next(ls); /* Skip '::'. */ - name = lex_str(ls); - if (gola_findlabel(ls, name)) - lj_lex_error(ls, 0, LJ_ERR_XLDUP, strdata(name)); - idx = gola_new(ls, name, VSTACK_LABEL, fs->pc); - lex_check(ls, TK_label); - /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */ - for (;;) { - if (ls->tok == TK_label) { - synlevel_begin(ls); - parse_label(ls); - synlevel_end(ls); - } else if (LJ_52 && ls->tok == ';') { - lj_lex_next(ls); - } else { - break; - } - } - /* Trailing label is considered to be outside of scope. */ - if (parse_isend(ls->tok) && ls->tok != TK_until) - ls->vstack[idx].slot = fs->bl->nactvar; - gola_resolve(ls, fs->bl, idx); -} - -/* -- Blocks, loops and conditional statements ---------------------------- */ - -/* Parse a block. */ -static void parse_block(LexState *ls) -{ - FuncState *fs = ls->fs; - FuncScope bl; - fscope_begin(fs, &bl, 0); - parse_chunk(ls); - fscope_end(fs); -} - -/* Parse 'while' statement. */ -static void parse_while(LexState *ls, BCLine line) -{ - FuncState *fs = ls->fs; - BCPos start, loop, condexit; - FuncScope bl; - lj_lex_next(ls); /* Skip 'while'. */ - start = fs->lasttarget = fs->pc; - condexit = expr_cond(ls); - fscope_begin(fs, &bl, FSCOPE_LOOP); - lex_check(ls, TK_do); - loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0); - parse_block(ls); - jmp_patch(fs, bcemit_jmp(fs), start); - lex_match(ls, TK_end, TK_while, line); - fscope_end(fs); - jmp_tohere(fs, condexit); - jmp_patchins(fs, loop, fs->pc); -} - -/* Parse 'repeat' statement. 
*/ -static void parse_repeat(LexState *ls, BCLine line) -{ - FuncState *fs = ls->fs; - BCPos loop = fs->lasttarget = fs->pc; - BCPos condexit; - FuncScope bl1, bl2; - fscope_begin(fs, &bl1, FSCOPE_LOOP); /* Breakable loop scope. */ - fscope_begin(fs, &bl2, 0); /* Inner scope. */ - lj_lex_next(ls); /* Skip 'repeat'. */ - bcemit_AD(fs, BC_LOOP, fs->nactvar, 0); - parse_chunk(ls); - lex_match(ls, TK_until, TK_repeat, line); - condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */ - if (!(bl2.flags & FSCOPE_UPVAL)) { /* No upvalues? Just end inner scope. */ - fscope_end(fs); - } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */ - parse_break(ls); /* Break from loop and close upvalues. */ - jmp_tohere(fs, condexit); - fscope_end(fs); /* End inner scope and close upvalues. */ - condexit = bcemit_jmp(fs); - } - jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */ - jmp_patchins(fs, loop, fs->pc); - fscope_end(fs); /* End loop scope. */ -} - -/* Parse numeric 'for'. */ -static void parse_for_num(LexState *ls, GCstr *varname, BCLine line) -{ - FuncState *fs = ls->fs; - BCReg base = fs->freereg; - FuncScope bl; - BCPos loop, loopend; - /* Hidden control variables. */ - var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX); - var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP); - var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP); - /* Visible copy of index variable. */ - var_new(ls, FORL_EXT, varname); - lex_check(ls, '='); - expr_next(ls); - lex_check(ls, ','); - expr_next(ls); - if (lex_opt(ls, ',')) { - expr_next(ls); - } else { - bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */ - bcreg_reserve(fs, 1); - } - var_add(ls, 3); /* Hidden control variables. */ - lex_check(ls, TK_do); - loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP); - fscope_begin(fs, &bl, 0); /* Scope for visible variables. */ - var_add(ls, 1); - bcreg_reserve(fs, 1); - parse_block(ls); - fscope_end(fs); - /* Perform loop inversion. Loop control instructions are at the end. */ - loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP); - fs->bcbase[loopend].line = line; /* Fix line for control ins. */ - jmp_patchins(fs, loopend, loop+1); - jmp_patchins(fs, loop, fs->pc); -} - -/* Try to predict whether the iterator is next() and specialize the bytecode. -** Detecting next() and pairs() by name is simplistic, but quite effective. -** The interpreter backs off if the check for the closure fails at runtime. -*/ -static int predict_next(LexState *ls, FuncState *fs, BCPos pc) -{ - BCIns ins = fs->bcbase[pc].ins; - GCstr *name; - cTValue *o; - switch (bc_op(ins)) { - case BC_MOV: - name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name)); - break; - case BC_UGET: - name = gco2str(gcref(ls->vstack[fs->uvmap[bc_d(ins)]].name)); - break; - case BC_GGET: - /* There's no inverse index (yet), so lookup the strings. */ - o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs")); - if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) - return 1; - o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next")); - if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) - return 1; - return 0; - default: - return 0; - } - return (name->len == 5 && !strcmp(strdata(name), "pairs")) || - (name->len == 4 && !strcmp(strdata(name), "next")); -} - -/* Parse 'for' iterator. 
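/*
** Illustrative sketch, not part of the deleted LuaJIT source: predict_next()
** above specializes 'for ... in' loops purely by the *name* of the iterator
** ("pairs" or "next"); as its comment notes, the interpreter re-checks the
** actual closure at runtime and backs off if the guess was wrong. The
** heuristic boils down to a comparison like this (hypothetical helper):
*/
#include <string.h>

static int looks_like_next_iterator(const char *name)
{
  return strcmp(name, "pairs") == 0 || strcmp(name, "next") == 0;
}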
*/ -static void parse_for_iter(LexState *ls, GCstr *indexname) -{ - FuncState *fs = ls->fs; - ExpDesc e; - BCReg nvars = 0; - BCLine line; - BCReg base = fs->freereg + 3; - BCPos loop, loopend, exprpc = fs->pc; - FuncScope bl; - int isnext; - /* Hidden control variables. */ - var_new_fixed(ls, nvars++, VARNAME_FOR_GEN); - var_new_fixed(ls, nvars++, VARNAME_FOR_STATE); - var_new_fixed(ls, nvars++, VARNAME_FOR_CTL); - /* Visible variables returned from iterator. */ - var_new(ls, nvars++, indexname); - while (lex_opt(ls, ',')) - var_new(ls, nvars++, lex_str(ls)); - lex_check(ls, TK_in); - line = ls->linenumber; - assign_adjust(ls, 3, expr_list(ls, &e), &e); - /* The iterator needs another 3 [4] slots (func [pc] | state ctl). */ - bcreg_bump(fs, 3+LJ_FR2); - isnext = (nvars <= 5 && predict_next(ls, fs, exprpc)); - var_add(ls, 3); /* Hidden control variables. */ - lex_check(ls, TK_do); - loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP); - fscope_begin(fs, &bl, 0); /* Scope for visible variables. */ - var_add(ls, nvars-3); - bcreg_reserve(fs, nvars-3); - parse_block(ls); - fscope_end(fs); - /* Perform loop inversion. Loop control instructions are at the end. */ - jmp_patchins(fs, loop, fs->pc); - bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1); - loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP); - fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */ - fs->bcbase[loopend].line = line; - jmp_patchins(fs, loopend, loop+1); -} - -/* Parse 'for' statement. */ -static void parse_for(LexState *ls, BCLine line) -{ - FuncState *fs = ls->fs; - GCstr *varname; - FuncScope bl; - fscope_begin(fs, &bl, FSCOPE_LOOP); - lj_lex_next(ls); /* Skip 'for'. */ - varname = lex_str(ls); /* Get first variable name. */ - if (ls->tok == '=') - parse_for_num(ls, varname, line); - else if (ls->tok == ',' || ls->tok == TK_in) - parse_for_iter(ls, varname); - else - err_syntax(ls, LJ_ERR_XFOR); - lex_match(ls, TK_end, TK_for, line); - fscope_end(fs); /* Resolve break list. */ -} - -/* Parse condition and 'then' block. */ -static BCPos parse_then(LexState *ls) -{ - BCPos condexit; - lj_lex_next(ls); /* Skip 'if' or 'elseif'. */ - condexit = expr_cond(ls); - lex_check(ls, TK_then); - parse_block(ls); - return condexit; -} - -/* Parse 'if' statement. */ -static void parse_if(LexState *ls, BCLine line) -{ - FuncState *fs = ls->fs; - BCPos flist; - BCPos escapelist = NO_JMP; - flist = parse_then(ls); - while (ls->tok == TK_elseif) { /* Parse multiple 'elseif' blocks. */ - jmp_append(fs, &escapelist, bcemit_jmp(fs)); - jmp_tohere(fs, flist); - flist = parse_then(ls); - } - if (ls->tok == TK_else) { /* Parse optional 'else' block. */ - jmp_append(fs, &escapelist, bcemit_jmp(fs)); - jmp_tohere(fs, flist); - lj_lex_next(ls); /* Skip 'else'. */ - parse_block(ls); - } else { - jmp_append(fs, &escapelist, flist); - } - jmp_tohere(fs, escapelist); - lex_match(ls, TK_end, TK_if, line); -} - -/* -- Parse statements ---------------------------------------------------- */ - -/* Parse a statement. Returns 1 if it must be the last one in a chunk. 
*/ -static int parse_stmt(LexState *ls) -{ - BCLine line = ls->linenumber; - switch (ls->tok) { - case TK_if: - parse_if(ls, line); - break; - case TK_while: - parse_while(ls, line); - break; - case TK_do: - lj_lex_next(ls); - parse_block(ls); - lex_match(ls, TK_end, TK_do, line); - break; - case TK_for: - parse_for(ls, line); - break; - case TK_repeat: - parse_repeat(ls, line); - break; - case TK_function: - parse_func(ls, line); - break; - case TK_local: - lj_lex_next(ls); - parse_local(ls); - break; - case TK_return: - parse_return(ls); - return 1; /* Must be last. */ - case TK_break: - lj_lex_next(ls); - parse_break(ls); - return !LJ_52; /* Must be last in Lua 5.1. */ -#if LJ_52 - case ';': - lj_lex_next(ls); - break; -#endif - case TK_label: - parse_label(ls); - break; - case TK_goto: - if (LJ_52 || lj_lex_lookahead(ls) == TK_name) { - lj_lex_next(ls); - parse_goto(ls); - break; - } /* else: fallthrough */ - default: - parse_call_assign(ls); - break; - } - return 0; -} - -/* A chunk is a list of statements optionally separated by semicolons. */ -static void parse_chunk(LexState *ls) -{ - int islast = 0; - synlevel_begin(ls); - while (!islast && !parse_isend(ls->tok)) { - islast = parse_stmt(ls); - lex_opt(ls, ';'); - lua_assert(ls->fs->framesize >= ls->fs->freereg && - ls->fs->freereg >= ls->fs->nactvar); - ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ - } - synlevel_end(ls); -} - -/* Entry point of bytecode parser. */ -GCproto *lj_parse(LexState *ls) -{ - FuncState fs; - FuncScope bl; - GCproto *pt; - lua_State *L = ls->L; -#ifdef LUAJIT_DISABLE_DEBUGINFO - ls->chunkname = lj_str_newlit(L, "="); -#else - ls->chunkname = lj_str_newz(L, ls->chunkarg); -#endif - setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */ - incr_top(L); - ls->level = 0; - fs_init(ls, &fs); - fs.linedefined = 0; - fs.numparams = 0; - fs.bcbase = NULL; - fs.bclim = 0; - fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */ - fscope_begin(&fs, &bl, 0); - bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */ - lj_lex_next(ls); /* Read-ahead first token. */ - parse_chunk(ls); - if (ls->tok != TK_eof) - err_token(ls, TK_eof); - pt = fs_finish(ls, ls->linenumber); - L->top--; /* Drop chunkname. */ - lua_assert(fs.prev == NULL); - lua_assert(ls->fs == NULL); - lua_assert(pt->sizeuv == 0); - return pt; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.h deleted file mode 100644 index dc4fd405d18..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_parse.h +++ /dev/null @@ -1,18 +0,0 @@ -/* -** Lua parser (source code -> bytecode). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_PARSE_H -#define _LJ_PARSE_H - -#include "lj_obj.h" -#include "lj_lex.h" - -LJ_FUNC GCproto *lj_parse(LexState *ls); -LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l); -#if LJ_HASFFI -LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.c deleted file mode 100644 index c7e53963b5b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.c +++ /dev/null @@ -1,368 +0,0 @@ -/* -** Low-overhead profiling. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#define lj_profile_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASPROFILE - -#include "lj_buf.h" -#include "lj_frame.h" -#include "lj_debug.h" -#include "lj_dispatch.h" -#if LJ_HASJIT -#include "lj_jit.h" -#include "lj_trace.h" -#endif -#include "lj_profile.h" - -#include "luajit.h" - -#if LJ_PROFILE_SIGPROF - -#include -#include -#define profile_lock(ps) UNUSED(ps) -#define profile_unlock(ps) UNUSED(ps) - -#elif LJ_PROFILE_PTHREAD - -#include -#include -#if LJ_TARGET_PS3 -#include -#endif -#define profile_lock(ps) pthread_mutex_lock(&ps->lock) -#define profile_unlock(ps) pthread_mutex_unlock(&ps->lock) - -#elif LJ_PROFILE_WTHREAD - -#define WIN32_LEAN_AND_MEAN -#if LJ_TARGET_XBOX360 -#include -#include -#else -#include -#endif -typedef unsigned int (WINAPI *WMM_TPFUNC)(unsigned int); -#define profile_lock(ps) EnterCriticalSection(&ps->lock) -#define profile_unlock(ps) LeaveCriticalSection(&ps->lock) - -#endif - -/* Profiler state. */ -typedef struct ProfileState { - global_State *g; /* VM state that started the profiler. */ - luaJIT_profile_callback cb; /* Profiler callback. */ - void *data; /* Profiler callback data. */ - SBuf sb; /* String buffer for stack dumps. */ - int interval; /* Sample interval in milliseconds. */ - int samples; /* Number of samples for next callback. */ - int vmstate; /* VM state when profile timer triggered. */ -#if LJ_PROFILE_SIGPROF - struct sigaction oldsa; /* Previous SIGPROF state. */ -#elif LJ_PROFILE_PTHREAD - pthread_mutex_t lock; /* g->hookmask update lock. */ - pthread_t thread; /* Timer thread. */ - int abort; /* Abort timer thread. */ -#elif LJ_PROFILE_WTHREAD -#if LJ_TARGET_WINDOWS - HINSTANCE wmm; /* WinMM library handle. */ - WMM_TPFUNC wmm_tbp; /* WinMM timeBeginPeriod function. */ - WMM_TPFUNC wmm_tep; /* WinMM timeEndPeriod function. */ -#endif - CRITICAL_SECTION lock; /* g->hookmask update lock. */ - HANDLE thread; /* Timer thread. */ - int abort; /* Abort timer thread. */ -#endif -} ProfileState; - -/* Sadly, we have to use a static profiler state. -** -** The SIGPROF variant needs a static pointer to the global state, anyway. -** And it would be hard to extend for multiple threads. You can still use -** multiple VMs in multiple threads, but only profile one at a time. -*/ -static ProfileState profile_state; - -/* Default sample interval in milliseconds. */ -#define LJ_PROFILE_INTERVAL_DEFAULT 10 - -/* -- Profiler/hook interaction ------------------------------------------- */ - -#if !LJ_PROFILE_SIGPROF -void LJ_FASTCALL lj_profile_hook_enter(global_State *g) -{ - ProfileState *ps = &profile_state; - if (ps->g) { - profile_lock(ps); - hook_enter(g); - profile_unlock(ps); - } else { - hook_enter(g); - } -} - -void LJ_FASTCALL lj_profile_hook_leave(global_State *g) -{ - ProfileState *ps = &profile_state; - if (ps->g) { - profile_lock(ps); - hook_leave(g); - profile_unlock(ps); - } else { - hook_leave(g); - } -} -#endif - -/* -- Profile callbacks --------------------------------------------------- */ - -/* Callback from profile hook (HOOK_PROFILE already cleared). */ -void LJ_FASTCALL lj_profile_interpreter(lua_State *L) -{ - ProfileState *ps = &profile_state; - global_State *g = G(L); - uint8_t mask; - profile_lock(ps); - mask = (g->hookmask & ~HOOK_PROFILE); - if (!(mask & HOOK_VMEVENT)) { - int samples = ps->samples; - ps->samples = 0; - g->hookmask = HOOK_VMEVENT; - lj_dispatch_update(g); - profile_unlock(ps); - ps->cb(ps->data, L, samples, ps->vmstate); /* Invoke user callback. 
*/ - profile_lock(ps); - mask |= (g->hookmask & HOOK_PROFILE); - } - g->hookmask = mask; - lj_dispatch_update(g); - profile_unlock(ps); -} - -/* Trigger profile hook. Asynchronous call from OS-specific profile timer. */ -static void profile_trigger(ProfileState *ps) -{ - global_State *g = ps->g; - uint8_t mask; - profile_lock(ps); - ps->samples++; /* Always increment number of samples. */ - mask = g->hookmask; - if (!(mask & (HOOK_PROFILE|HOOK_VMEVENT))) { /* Set profile hook. */ - int st = g->vmstate; - ps->vmstate = st >= 0 ? 'N' : - st == ~LJ_VMST_INTERP ? 'I' : - st == ~LJ_VMST_C ? 'C' : - st == ~LJ_VMST_GC ? 'G' : 'J'; - g->hookmask = (mask | HOOK_PROFILE); - lj_dispatch_update(g); - } - profile_unlock(ps); -} - -/* -- OS-specific profile timer handling ---------------------------------- */ - -#if LJ_PROFILE_SIGPROF - -/* SIGPROF handler. */ -static void profile_signal(int sig) -{ - UNUSED(sig); - profile_trigger(&profile_state); -} - -/* Start profiling timer. */ -static void profile_timer_start(ProfileState *ps) -{ - int interval = ps->interval; - struct itimerval tm; - struct sigaction sa; - tm.it_value.tv_sec = tm.it_interval.tv_sec = interval / 1000; - tm.it_value.tv_usec = tm.it_interval.tv_usec = (interval % 1000) * 1000; - setitimer(ITIMER_PROF, &tm, NULL); - sa.sa_flags = SA_RESTART; - sa.sa_handler = profile_signal; - sigemptyset(&sa.sa_mask); - sigaction(SIGPROF, &sa, &ps->oldsa); -} - -/* Stop profiling timer. */ -static void profile_timer_stop(ProfileState *ps) -{ - struct itimerval tm; - tm.it_value.tv_sec = tm.it_interval.tv_sec = 0; - tm.it_value.tv_usec = tm.it_interval.tv_usec = 0; - setitimer(ITIMER_PROF, &tm, NULL); - sigaction(SIGPROF, &ps->oldsa, NULL); -} - -#elif LJ_PROFILE_PTHREAD - -/* POSIX timer thread. */ -static void *profile_thread(ProfileState *ps) -{ - int interval = ps->interval; -#if !LJ_TARGET_PS3 - struct timespec ts; - ts.tv_sec = interval / 1000; - ts.tv_nsec = (interval % 1000) * 1000000; -#endif - while (1) { -#if LJ_TARGET_PS3 - sys_timer_usleep(interval * 1000); -#else - nanosleep(&ts, NULL); -#endif - if (ps->abort) break; - profile_trigger(ps); - } - return NULL; -} - -/* Start profiling timer thread. */ -static void profile_timer_start(ProfileState *ps) -{ - pthread_mutex_init(&ps->lock, 0); - ps->abort = 0; - pthread_create(&ps->thread, NULL, (void *(*)(void *))profile_thread, ps); -} - -/* Stop profiling timer thread. */ -static void profile_timer_stop(ProfileState *ps) -{ - ps->abort = 1; - pthread_join(ps->thread, NULL); - pthread_mutex_destroy(&ps->lock); -} - -#elif LJ_PROFILE_WTHREAD - -/* Windows timer thread. */ -static DWORD WINAPI profile_thread(void *psx) -{ - ProfileState *ps = (ProfileState *)psx; - int interval = ps->interval; -#if LJ_TARGET_WINDOWS - ps->wmm_tbp(interval); -#endif - while (1) { - Sleep(interval); - if (ps->abort) break; - profile_trigger(ps); - } -#if LJ_TARGET_WINDOWS - ps->wmm_tep(interval); -#endif - return 0; -} - -/* Start profiling timer thread. */ -static void profile_timer_start(ProfileState *ps) -{ -#if LJ_TARGET_WINDOWS - if (!ps->wmm) { /* Load WinMM library on-demand. 
*/ - ps->wmm = LoadLibraryExA("winmm.dll", NULL, 0); - if (ps->wmm) { - ps->wmm_tbp = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeBeginPeriod"); - ps->wmm_tep = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeEndPeriod"); - if (!ps->wmm_tbp || !ps->wmm_tep) { - ps->wmm = NULL; - return; - } - } - } -#endif - InitializeCriticalSection(&ps->lock); - ps->abort = 0; - ps->thread = CreateThread(NULL, 0, profile_thread, ps, 0, NULL); -} - -/* Stop profiling timer thread. */ -static void profile_timer_stop(ProfileState *ps) -{ - ps->abort = 1; - WaitForSingleObject(ps->thread, INFINITE); - DeleteCriticalSection(&ps->lock); -} - -#endif - -/* -- Public profiling API ------------------------------------------------ */ - -/* Start profiling. */ -LUA_API void luaJIT_profile_start(lua_State *L, const char *mode, - luaJIT_profile_callback cb, void *data) -{ - ProfileState *ps = &profile_state; - int interval = LJ_PROFILE_INTERVAL_DEFAULT; - while (*mode) { - int m = *mode++; - switch (m) { - case 'i': - interval = 0; - while (*mode >= '0' && *mode <= '9') - interval = interval * 10 + (*mode++ - '0'); - if (interval <= 0) interval = 1; - break; -#if LJ_HASJIT - case 'l': case 'f': - L2J(L)->prof_mode = m; - lj_trace_flushall(L); - break; -#endif - default: /* Ignore unknown mode chars. */ - break; - } - } - if (ps->g) { - luaJIT_profile_stop(L); - if (ps->g) return; /* Profiler in use by another VM. */ - } - ps->g = G(L); - ps->interval = interval; - ps->cb = cb; - ps->data = data; - ps->samples = 0; - lj_buf_init(L, &ps->sb); - profile_timer_start(ps); -} - -/* Stop profiling. */ -LUA_API void luaJIT_profile_stop(lua_State *L) -{ - ProfileState *ps = &profile_state; - global_State *g = ps->g; - if (G(L) == g) { /* Only stop profiler if started by this VM. */ - profile_timer_stop(ps); - g->hookmask &= ~HOOK_PROFILE; - lj_dispatch_update(g); -#if LJ_HASJIT - G2J(g)->prof_mode = 0; - lj_trace_flushall(L); -#endif - lj_buf_free(g, &ps->sb); - setmref(ps->sb.b, NULL); - setmref(ps->sb.e, NULL); - ps->g = NULL; - } -} - -/* Return a compact stack dump. */ -LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt, - int depth, size_t *len) -{ - ProfileState *ps = &profile_state; - SBuf *sb = &ps->sb; - setsbufL(sb, L); - lj_buf_reset(sb); - lj_debug_dumpstack(L, sb, fmt, depth); - *len = (size_t)sbuflen(sb); - return sbufB(sb); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.h deleted file mode 100644 index 26cb9db380c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_profile.h +++ /dev/null @@ -1,21 +0,0 @@ -/* -** Low-overhead profiling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_PROFILE_H -#define _LJ_PROFILE_H - -#include "lj_obj.h" - -#if LJ_HASPROFILE - -LJ_FUNC void LJ_FASTCALL lj_profile_interpreter(lua_State *L); -#if !LJ_PROFILE_SIGPROF -LJ_FUNC void LJ_FASTCALL lj_profile_hook_enter(global_State *g); -LJ_FUNC void LJ_FASTCALL lj_profile_hook_leave(global_State *g); -#endif - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.c deleted file mode 100644 index dc5f2d547ae..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.c +++ /dev/null @@ -1,2554 +0,0 @@ -/* -** Trace recorder (bytecode -> SSA IR). -** Copyright (C) 2005-2015 Mike Pall. 
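(For reference: the public profiling API removed above -- luaJIT_profile_start(), luaJIT_profile_stop() and luaJIT_profile_dumpstack() -- is driven from the embedding application roughly as in the sketch below. The declarations and the callback typedef come from luajit.h; the helper names, the per-vmstate sample aggregation and the dump format string are illustrative assumptions, not part of the deleted sources.)

/* Minimal sketch of an embedder using the luaJIT_profile_* API shown above. */
#include <stdio.h>
#include "luajit.h"   /* Also pulls in lua.h for lua_State. */

/* Samples per VM state byte ('N', 'I', 'C', 'G' or 'J'; see profile_trigger()
** above for how the byte is chosen).
*/
static int vmstate_count[256];

static void prof_cb(void *data, lua_State *L, int samples, int vmstate)
{
  size_t len;
  /* Dump up to 5 frames per sample; the format characters are those accepted
  ** by lj_debug_dumpstack(), which luaJIT_profile_dumpstack() wraps.
  */
  const char *s = luaJIT_profile_dumpstack(L, "F l ", 5, &len);
  vmstate_count[(unsigned char)vmstate] += samples;
  fwrite(s, 1, len, (FILE *)data);
  fputc('\n', (FILE *)data);
}

static void profile_region(lua_State *L)
{
  /* "f" selects per-function mode (JIT builds only), "i5" a 5 ms sample
  ** interval; see the mode string parser in luaJIT_profile_start() above.
  */
  luaJIT_profile_start(L, "fi5", prof_cb, stderr);
  /* ... run the workload, e.g. via lua_pcall() ... */
  luaJIT_profile_stop(L);
}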
See Copyright Notice in luajit.h -*/ - -#define lj_record_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_err.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_meta.h" -#include "lj_frame.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#endif -#include "lj_bc.h" -#include "lj_ff.h" -#if LJ_HASPROFILE -#include "lj_debug.h" -#endif -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_ircall.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_record.h" -#include "lj_ffrecord.h" -#include "lj_snap.h" -#include "lj_dispatch.h" -#include "lj_vm.h" - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -/* Emit raw IR without passing through optimizations. */ -#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) - -/* -- Sanity checks ------------------------------------------------------- */ - -#ifdef LUA_USE_ASSERT -/* Sanity check the whole IR -- sloooow. */ -static void rec_check_ir(jit_State *J) -{ - IRRef i, nins = J->cur.nins, nk = J->cur.nk; - lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536); - for (i = nins-1; i >= nk; i--) { - IRIns *ir = IR(i); - uint32_t mode = lj_ir_mode[ir->o]; - IRRef op1 = ir->op1; - IRRef op2 = ir->op2; - switch (irm_op1(mode)) { - case IRMnone: lua_assert(op1 == 0); break; - case IRMref: lua_assert(op1 >= nk); - lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break; - case IRMlit: break; - case IRMcst: lua_assert(i < REF_BIAS); continue; - } - switch (irm_op2(mode)) { - case IRMnone: lua_assert(op2 == 0); break; - case IRMref: lua_assert(op2 >= nk); - lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break; - case IRMlit: break; - case IRMcst: lua_assert(0); break; - } - if (ir->prev) { - lua_assert(ir->prev >= nk); - lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i); - lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o); - } - } -} - -/* Compare stack slots and frames of the recorder and the VM. */ -static void rec_check_slots(jit_State *J) -{ - BCReg s, nslots = J->baseslot + J->maxslot; - int32_t depth = 0; - cTValue *base = J->L->base - J->baseslot; - lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS); - lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME)); - lua_assert(nslots < LJ_MAX_JSLOTS); - for (s = 0; s < nslots; s++) { - TRef tr = J->slot[s]; - if (tr) { - cTValue *tv = &base[s]; - IRRef ref = tref_ref(tr); - IRIns *ir; - lua_assert(ref >= J->cur.nk && ref < J->cur.nins); - ir = IR(ref); - lua_assert(irt_t(ir->t) == tref_t(tr)); - if (s == 0) { - lua_assert(tref_isfunc(tr)); - } else if ((tr & TREF_FRAME)) { - GCfunc *fn = gco2func(frame_gc(tv)); - BCReg delta = (BCReg)(tv - frame_prev(tv)); - lua_assert(tref_isfunc(tr)); - if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir)); - lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta)); - depth++; - } else if ((tr & TREF_CONT)) { - lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void)); - lua_assert((J->slot[s+1] & TREF_FRAME)); - depth++; - } else { - if (tvisnumber(tv)) - lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */ - else - lua_assert(itype2irt(tv) == tref_type(tr)); - if (tref_isk(tr)) { /* Compare constants. 
*/ - TValue tvk; - lj_ir_kvalue(J->L, &tvk, ir); - if (!(tvisnum(&tvk) && tvisnan(&tvk))) - lua_assert(lj_obj_equal(tv, &tvk)); - else - lua_assert(tvisnum(tv) && tvisnan(tv)); - } - } - } - } - lua_assert(J->framedepth == depth); -} -#endif - -/* -- Type handling and specialization ------------------------------------ */ - -/* Note: these functions return tagged references (TRef). */ - -/* Specialize a slot to a specific type. Note: slot can be negative! */ -static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode) -{ - /* Caller may set IRT_GUARD in t. */ - TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode); - J->base[slot] = ref; - return ref; -} - -/* Specialize a slot to the runtime type. Note: slot can be negative! */ -static TRef sload(jit_State *J, int32_t slot) -{ - IRType t = itype2irt(&J->L->base[slot]); - TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot, - IRSLOAD_TYPECHECK); - if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */ - J->base[slot] = ref; - return ref; -} - -/* Get TRef from slot. Load slot and specialize if not done already. */ -#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s))) - -/* Get TRef for current function. */ -static TRef getcurrf(jit_State *J) -{ - if (J->base[-1]) - return J->base[-1]; - lua_assert(J->baseslot == 1); - return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY); -} - -/* Compare for raw object equality. -** Returns 0 if the objects are the same. -** Returns 1 if they are different, but the same type. -** Returns 2 for two different types. -** Comparisons between primitives always return 1 -- no caller cares about it. -*/ -int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv) -{ - int diff = !lj_obj_equal(av, bv); - if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */ - IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a); - IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b); - if (ta != tb) { - /* Widen mixed number/int comparisons to number/number comparison. */ - if (ta == IRT_INT && tb == IRT_NUM) { - a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT); - ta = IRT_NUM; - } else if (ta == IRT_NUM && tb == IRT_INT) { - b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT); - } else { - return 2; /* Two different types are never equal. */ - } - } - emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b); - } - return diff; -} - -/* Constify a value. Returns 0 for non-representable object types. */ -TRef lj_record_constify(jit_State *J, cTValue *o) -{ - if (tvisgcv(o)) - return lj_ir_kgc(J, gcV(o), itype2irt(o)); - else if (tvisint(o)) - return lj_ir_kint(J, intV(o)); - else if (tvisnum(o)) - return lj_ir_knumint(J, numV(o)); - else if (tvisbool(o)) - return TREF_PRI(itype2irt(o)); - else - return 0; /* Can't represent lightuserdata (pointless). */ -} - -/* -- Record loop ops ----------------------------------------------------- */ - -/* Loop event. */ -typedef enum { - LOOPEV_LEAVE, /* Loop is left or not entered. */ - LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */ - LOOPEV_ENTER /* Loop is entered. */ -} LoopEvent; - -/* Canonicalize slots: convert integers to numbers. 
*/ -static void canonicalize_slots(jit_State *J) -{ - BCReg s; - if (LJ_DUALNUM) return; - for (s = J->baseslot+J->maxslot-1; s >= 1; s--) { - TRef tr = J->slot[s]; - if (tref_isinteger(tr)) { - IRIns *ir = IR(tref_ref(tr)); - if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY))) - J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); - } - } -} - -/* Stop recording. */ -void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk) -{ -#ifdef LUAJIT_ENABLE_TABLE_BUMP - if (J->retryrec) - lj_trace_err(J, LJ_TRERR_RETRY); -#endif - lj_trace_end(J); - J->cur.linktype = (uint8_t)linktype; - J->cur.link = (uint16_t)lnk; - /* Looping back at the same stack level? */ - if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) { - if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */ - goto nocanon; /* Do not canonicalize or we lose the narrowing. */ - if (J->cur.root) /* Otherwise ensure we always link to the root trace. */ - J->cur.link = J->cur.root; - } - canonicalize_slots(J); -nocanon: - /* Note: all loop ops must set J->pc to the following instruction! */ - lj_snap_add(J); /* Add loop snapshot. */ - J->needsnap = 0; - J->mergesnap = 1; /* In case recording continues. */ -} - -/* Search bytecode backwards for a int/num constant slot initializer. */ -static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t) -{ - /* This algorithm is rather simplistic and assumes quite a bit about - ** how the bytecode is generated. It works fine for FORI initializers, - ** but it won't necessarily work in other cases (e.g. iterator arguments). - ** It doesn't do anything fancy, either (like backpropagating MOVs). - */ - const BCIns *pc, *startpc = proto_bc(J->pt); - for (pc = endpc-1; pc > startpc; pc--) { - BCIns ins = *pc; - BCOp op = bc_op(ins); - /* First try to find the last instruction that stores to this slot. */ - if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) { - return 0; /* Multiple results, e.g. from a CALL or KNIL. */ - } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) { - if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */ - /* Now try to verify there's no forward jump across it. */ - const BCIns *kpc = pc; - for (; pc > startpc; pc--) - if (bc_op(*pc) == BC_JMP) { - const BCIns *target = pc+bc_j(*pc)+1; - if (target > kpc && target <= endpc) - return 0; /* Conditional assignment. */ - } - if (op == BC_KSHORT) { - int32_t k = (int32_t)(int16_t)bc_d(ins); - return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k); - } else { - cTValue *tv = proto_knumtv(J->pt, bc_d(ins)); - if (t == IRT_INT) { - int32_t k = numberVint(tv); - if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */ - return lj_ir_kint(J, k); - return 0; /* Type mismatch. */ - } else { - return lj_ir_knum(J, numberVnum(tv)); - } - } - } - return 0; /* Non-constant initializer. */ - } - } - return 0; /* No assignment to this slot found? */ -} - -/* Load and optionally convert a FORI argument from a slot. */ -static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode) -{ - int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0; - return sloadt(J, (int32_t)slot, - t + (((mode & IRSLOAD_TYPECHECK) || - (conv && t == IRT_INT && !(mode >> 16))) ? - IRT_GUARD : 0), - mode + conv); -} - -/* Peek before FORI to find a const initializer. Otherwise load from slot. 
*/ -static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot, - IRType t, int mode) -{ - TRef tr = J->base[slot]; - if (!tr) { - tr = find_kinit(J, fori, slot, t); - if (!tr) - tr = fori_load(J, slot, t, mode); - } - return tr; -} - -/* Return the direction of the FOR loop iterator. -** It's important to exactly reproduce the semantics of the interpreter. -*/ -static int rec_for_direction(cTValue *o) -{ - return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0; -} - -/* Simulate the runtime behavior of the FOR loop iterator. */ -static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl) -{ - lua_Number stopv = numberVnum(&o[FORL_STOP]); - lua_Number idxv = numberVnum(&o[FORL_IDX]); - lua_Number stepv = numberVnum(&o[FORL_STEP]); - if (isforl) - idxv += stepv; - if (rec_for_direction(&o[FORL_STEP])) { - if (idxv <= stopv) { - *op = IR_LE; - return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER; - } - *op = IR_GT; return LOOPEV_LEAVE; - } else { - if (stopv <= idxv) { - *op = IR_GE; - return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER; - } - *op = IR_LT; return LOOPEV_LEAVE; - } -} - -/* Record checks for FOR loop overflow and step direction. */ -static void rec_for_check(jit_State *J, IRType t, int dir, - TRef stop, TRef step, int init) -{ - if (!tref_isk(step)) { - /* Non-constant step: need a guard for the direction. */ - TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J); - emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero); - /* Add hoistable overflow checks for a narrowed FORL index. */ - if (init && t == IRT_INT) { - if (tref_isk(stop)) { - /* Constant stop: optimize check away or to a range check for step. */ - int32_t k = IR(tref_ref(stop))->i; - if (dir) { - if (k > 0) - emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k)); - } else { - if (k < 0) - emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k)); - } - } else { - /* Stop+step variable: need full overflow check. */ - TRef tr = emitir(IRTGI(IR_ADDOV), step, stop); - emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */ - } - } - } else if (init && t == IRT_INT && !tref_isk(stop)) { - /* Constant step: optimize overflow check to a range check for stop. */ - int32_t k = IR(tref_ref(step))->i; - k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k; - emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k)); - } -} - -/* Record a FORL instruction. */ -static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev, - int init) -{ - BCReg ra = bc_a(*fori); - cTValue *tv = &J->L->base[ra]; - TRef idx = J->base[ra+FORL_IDX]; - IRType t = idx ? tref_type(idx) : - (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM; - int mode = IRSLOAD_INHERIT + - ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0); - TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode); - TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode); - int tc, dir = rec_for_direction(&tv[FORL_STEP]); - lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI); - scev->t.irt = t; - scev->dir = dir; - scev->stop = tref_ref(stop); - scev->step = tref_ref(step); - rec_for_check(J, t, dir, stop, step, init); - scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT)); - tc = (LJ_DUALNUM && - !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) && - tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ? 
- IRSLOAD_TYPECHECK : 0; - if (tc) { - J->base[ra+FORL_STOP] = stop; - J->base[ra+FORL_STEP] = step; - } - if (!idx) - idx = fori_load(J, ra+FORL_IDX, t, - IRSLOAD_INHERIT + tc + (J->scev.start << 16)); - if (!init) - J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step); - J->base[ra+FORL_EXT] = idx; - scev->idx = tref_ref(idx); - setmref(scev->pc, fori); - J->maxslot = ra+FORL_EXT+1; -} - -/* Record FORL/JFORL or FORI/JFORI. */ -static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl) -{ - BCReg ra = bc_a(*fori); - TValue *tv = &J->L->base[ra]; - TRef *tr = &J->base[ra]; - IROp op; - LoopEvent ev; - TRef stop; - IRType t; - if (isforl) { /* Handle FORL/JFORL opcodes. */ - TRef idx = tr[FORL_IDX]; - if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) { - t = J->scev.t.irt; - stop = J->scev.stop; - idx = emitir(IRT(IR_ADD, t), idx, J->scev.step); - tr[FORL_EXT] = tr[FORL_IDX] = idx; - } else { - ScEvEntry scev; - rec_for_loop(J, fori, &scev, 0); - t = scev.t.irt; - stop = scev.stop; - } - } else { /* Handle FORI/JFORI opcodes. */ - BCReg i; - lj_meta_for(J->L, tv); - t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) : - IRT_NUM; - for (i = FORL_IDX; i <= FORL_STEP; i++) { - if (!tr[i]) sload(J, ra+i); - lua_assert(tref_isnumber_str(tr[i])); - if (tref_isstr(tr[i])) - tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0); - if (t == IRT_INT) { - if (!tref_isinteger(tr[i])) - tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK); - } else { - if (!tref_isnum(tr[i])) - tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT); - } - } - tr[FORL_EXT] = tr[FORL_IDX]; - stop = tr[FORL_STOP]; - rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]), - stop, tr[FORL_STEP], 1); - } - - ev = rec_for_iter(&op, tv, isforl); - if (ev == LOOPEV_LEAVE) { - J->maxslot = ra+FORL_EXT+1; - J->pc = fori+1; - } else { - J->maxslot = ra; - J->pc = fori+bc_j(*fori)+1; - } - lj_snap_add(J); - - emitir(IRTG(op, t), tr[FORL_IDX], stop); - - if (ev == LOOPEV_LEAVE) { - J->maxslot = ra; - J->pc = fori+bc_j(*fori)+1; - } else { - J->maxslot = ra+FORL_EXT+1; - J->pc = fori+1; - } - J->needsnap = 1; - return ev; -} - -/* Record ITERL/JITERL. */ -static LoopEvent rec_iterl(jit_State *J, const BCIns iterins) -{ - BCReg ra = bc_a(iterins); - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - if (!tref_isnil(getslot(J, ra))) { /* Looping back? */ - J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */ - J->maxslot = ra-1+bc_b(J->pc[-1]); - J->pc += bc_j(iterins)+1; - return LOOPEV_ENTER; - } else { - J->maxslot = ra-3; - J->pc++; - return LOOPEV_LEAVE; - } -} - -/* Record LOOP/JLOOP. Now, that was easy. */ -static LoopEvent rec_loop(jit_State *J, BCReg ra) -{ - if (ra < J->maxslot) J->maxslot = ra; - J->pc++; - return LOOPEV_ENTER; -} - -/* Check if a loop repeatedly failed to trace because it didn't loop back. */ -static int innerloopleft(jit_State *J, const BCIns *pc) -{ - ptrdiff_t i; - for (i = 0; i < PENALTY_SLOTS; i++) - if (mref(J->penalty[i].pc, const BCIns) == pc) { - if ((J->penalty[i].reason == LJ_TRERR_LLEAVE || - J->penalty[i].reason == LJ_TRERR_LINNER) && - J->penalty[i].val >= 2*PENALTY_MIN) - return 1; - break; - } - return 0; -} - -/* Handle the case when an interpreted loop op is hit. */ -static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev) -{ - if (J->parent == 0 && J->exitno == 0) { - if (pc == J->startpc && J->framedepth + J->retdepth == 0) { - /* Same loop? 
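(Aside on rec_for_direction() above: for a non-integer step the TValue holds an IEEE-754 double, and the sign bit of a double is bit 31 of its high word, so reading o->u32.hi as a signed 32-bit integer and testing >= 0 yields the loop direction without converting the number. A standalone sketch of the same trick, assuming a little-endian layout where the high word sits at byte offset 4:)

#include <stdint.h>
#include <string.h>

/* A double is non-negative (sign bit clear) iff its high 32 bits, read as a
** signed integer, are >= 0 -- the same test rec_for_direction() applies.
*/
static int step_is_nonneg(double step)
{
  uint32_t hi;
  memcpy(&hi, (const unsigned char *)&step + 4, 4);  /* Little-endian only. */
  return (int32_t)hi >= 0;
}
/* step_is_nonneg(1.5) -> 1, step_is_nonneg(-2.0) -> 0, step_is_nonneg(0.0) -> 1 */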
*/ - if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */ - lj_trace_err(J, LJ_TRERR_LLEAVE); - lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping trace. */ - } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */ - /* It's usually better to abort here and wait until the inner loop - ** is traced. But if the inner loop repeatedly didn't loop back, - ** this indicates a low trip count. In this case try unrolling - ** an inner loop even in a root trace. But it's better to be a bit - ** more conservative here and only do it for very short loops. - */ - if (bc_j(*pc) != -1 && !innerloopleft(J, pc)) - lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */ - if ((ev != LOOPEV_ENTERLO && - J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0) - lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */ - J->loopref = J->cur.nins; - } - } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */ - J->loopref = J->cur.nins; - if (--J->loopunroll < 0) - lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */ - } /* Side trace continues across a loop that's left or not entered. */ -} - -/* Handle the case when an already compiled loop op is hit. */ -static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev) -{ - if (J->parent == 0 && J->exitno == 0) { /* Root trace hit an inner loop. */ - /* Better let the inner loop spawn a side trace back here. */ - lj_trace_err(J, LJ_TRERR_LINNER); - } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */ - J->instunroll = 0; /* Cannot continue across a compiled loop op. */ - if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) - lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form extra loop. */ - else - lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */ - } /* Side trace continues across a loop that's left or not entered. */ -} - -/* -- Record profiler hook checks ----------------------------------------- */ - -#if LJ_HASPROFILE - -/* Need to insert profiler hook check? */ -static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc) -{ - GCproto *ppt; - lua_assert(J->prof_mode == 'f' || J->prof_mode == 'l'); - if (!pt) - return 0; - ppt = J->prev_pt; - J->prev_pt = pt; - if (pt != ppt && ppt) { - J->prev_line = -1; - return 1; - } - if (J->prof_mode == 'l') { - BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc)); - BCLine pline = J->prev_line; - J->prev_line = line; - if (pline != line) - return 1; - } - return 0; -} - -static void rec_profile_ins(jit_State *J, const BCIns *pc) -{ - if (J->prof_mode && rec_profile_need(J, J->pt, pc)) { - emitir(IRTG(IR_PROF, IRT_NIL), 0, 0); - lj_snap_add(J); - } -} - -static void rec_profile_ret(jit_State *J) -{ - if (J->prof_mode == 'f') { - emitir(IRTG(IR_PROF, IRT_NIL), 0, 0); - J->prev_pt = NULL; - lj_snap_add(J); - } -} - -#endif - -/* -- Record calls and returns -------------------------------------------- */ - -/* Specialize to the runtime value of the called function or its prototype. */ -static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr) -{ - TRef kfunc; - if (isluafunc(fn)) { - GCproto *pt = funcproto(fn); - /* Too many closures created? Probably not a monomorphic function. */ - if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */ - TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC); - emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt))); - (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. 
*/ - return tr; - } - } else { - /* Don't specialize to non-monomorphic builtins. */ - switch (fn->c.ffid) { - case FF_coroutine_wrap_aux: - case FF_string_gmatch_aux: - /* NYI: io_file_iter doesn't have an ffid, yet. */ - { /* Specialize to the ffid. */ - TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID); - emitir(IRTG(IR_EQ, IRT_INT), trid, lj_ir_kint(J, fn->c.ffid)); - } - return tr; - default: - /* NYI: don't specialize to non-monomorphic C functions. */ - break; - } - } - /* Otherwise specialize to the function (closure) value itself. */ - kfunc = lj_ir_kfunc(J, fn); - emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc); - return kfunc; -} - -/* Record call setup. */ -static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs) -{ - RecordIndex ix; - TValue *functv = &J->L->base[func]; - TRef *fbase = &J->base[func]; - ptrdiff_t i; - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - for (i = 0; i <= nargs; i++) - (void)getslot(J, func+i); /* Ensure func and all args have a reference. */ - if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */ - ix.tab = fbase[0]; - copyTV(J->L, &ix.tabv, functv); - if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj)) - lj_trace_err(J, LJ_TRERR_NOMM); - for (i = ++nargs; i > 0; i--) /* Shift arguments up. */ - fbase[i] = fbase[i-1]; - fbase[0] = ix.mobj; /* Replace function. */ - functv = &ix.mobjv; - } - fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]); - J->maxslot = (BCReg)nargs; -} - -/* Record call. */ -void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs) -{ - rec_call_setup(J, func, nargs); - /* Bump frame. */ - J->framedepth++; - J->base += func+1; - J->baseslot += func+1; -} - -/* Record tail call. */ -void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs) -{ - rec_call_setup(J, func, nargs); - if (frame_isvarg(J->L->base - 1)) { - BCReg cbase = (BCReg)frame_delta(J->L->base - 1); - if (--J->framedepth < 0) - lj_trace_err(J, LJ_TRERR_NYIRETL); - J->baseslot -= (BCReg)cbase; - J->base -= cbase; - func += cbase; - } - /* Move func + args down. */ - memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1)); - /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */ - /* Tailcalls can form a loop, so count towards the loop unroll limit. */ - if (++J->tailcalled > J->loopunroll) - lj_trace_err(J, LJ_TRERR_LUNROLL); -} - -/* Check unroll limits for down-recursion. */ -static int check_downrec_unroll(jit_State *J, GCproto *pt) -{ - IRRef ptref; - for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev) - if (ir_kgc(IR(ptref)) == obj2gco(pt)) { - int count = 0; - IRRef ref; - for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev) - if (IR(ref)->op1 == ptref) - count++; - if (count) { - if (J->pc == J->startpc) { - if (count + J->tailcalled > J->param[JIT_P_recunroll]) - return 1; - } else { - lj_trace_err(J, LJ_TRERR_DOWNREC); - } - } - } - return 0; -} - -static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot); - -/* Record return. */ -void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) -{ - TValue *frame = J->L->base - 1; - ptrdiff_t i; - for (i = 0; i < gotresults; i++) - (void)getslot(J, rbase+i); /* Ensure all results have a reference. */ - while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. 
*/ - BCReg cbase = (BCReg)frame_delta(frame); - if (--J->framedepth < 0) - lj_trace_err(J, LJ_TRERR_NYIRETL); - lua_assert(J->baseslot > 1); - gotresults++; - rbase += cbase; - J->baseslot -= (BCReg)cbase; - J->base -= cbase; - J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */ - frame = frame_prevd(frame); - } - /* Return to lower frame via interpreter for unhandled cases. */ - if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) && - (!frame_islua(frame) || - (J->parent == 0 && J->exitno == 0 && - !bc_isret(bc_op(J->cur.startins))))) { - /* NYI: specialize to frame type and return directly, not via RET*. */ - for (i = 0; i < (ptrdiff_t)rbase; i++) - J->base[i] = 0; /* Purge dead slots. */ - J->maxslot = rbase + (BCReg)gotresults; - lj_record_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */ - return; - } - if (frame_isvarg(frame)) { - BCReg cbase = (BCReg)frame_delta(frame); - if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */ - lj_trace_err(J, LJ_TRERR_NYIRETL); - lua_assert(J->baseslot > 1); - rbase += cbase; - J->baseslot -= (BCReg)cbase; - J->base -= cbase; - frame = frame_prevd(frame); - } - if (frame_islua(frame)) { /* Return to Lua frame. */ - BCIns callins = *(frame_pc(frame)-1); - ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults; - BCReg cbase = bc_a(callins); - GCproto *pt = funcproto(frame_func(frame - (cbase+1-LJ_FR2))); - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame teardown. */ - if ((pt->flags & PROTO_NOJIT)) - lj_trace_err(J, LJ_TRERR_CJITOFF); - if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) { - if (check_downrec_unroll(J, pt)) { - J->maxslot = (BCReg)(rbase + gotresults); - lj_snap_purge(J); - lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-rec. */ - return; - } - lj_snap_add(J); - } - for (i = 0; i < nresults; i++) /* Adjust results. */ - J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL; - J->maxslot = cbase+(BCReg)nresults; - if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */ - J->framedepth--; - lua_assert(J->baseslot > cbase+1); - J->baseslot -= cbase+1; - J->base -= cbase+1; - } else if (J->parent == 0 && J->exitno == 0 && - !bc_isret(bc_op(J->cur.startins))) { - /* Return to lower frame would leave the loop in a root trace. */ - lj_trace_err(J, LJ_TRERR_LLEAVE); - } else if (J->needsnap) { /* Tailcalled to ff with side-effects. */ - lj_trace_err(J, LJ_TRERR_NYIRETL); /* No way to insert snapshot here. */ - } else { /* Return to lower frame. Guard for the target we return to. */ - TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); - TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame)); - emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc); - J->retdepth++; - J->needsnap = 1; - lua_assert(J->baseslot == 1); - /* Shift result slots up and clear the slots of the new frame below. */ - memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults); - memset(J->base-1, 0, sizeof(TRef)*(cbase+1)); - } - } else if (frame_iscont(frame)) { /* Return to continuation frame. */ - ASMFunction cont = frame_contf(frame); - BCReg cbase = (BCReg)frame_delta(frame); - if ((J->framedepth -= 2) < 0) - lj_trace_err(J, LJ_TRERR_NYIRETL); - J->baseslot -= (BCReg)cbase; - J->base -= cbase; - J->maxslot = cbase-2; - if (cont == lj_cont_ra) { - /* Copy result to destination slot. */ - BCReg dst = bc_a(*(frame_contpc(frame)-1)); - J->base[dst] = gotresults ? 
J->base[cbase+rbase] : TREF_NIL; - if (dst >= J->maxslot) J->maxslot = dst+1; - } else if (cont == lj_cont_nop) { - /* Nothing to do here. */ - } else if (cont == lj_cont_cat) { - BCReg bslot = bc_b(*(frame_contpc(frame)-1)); - TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL; - if (bslot != cbase-2) { /* Concatenate the remainder. */ - TValue *b = J->L->base, save; /* Simulate lower frame and result. */ - J->base[cbase-2] = tr; - copyTV(J->L, &save, b-2); - if (gotresults) copyTV(J->L, b-2, b+rbase); else setnilV(b-2); - J->L->base = b - cbase; - tr = rec_cat(J, bslot, cbase-2); - b = J->L->base + cbase; /* Undo. */ - J->L->base = b; - copyTV(J->L, b-2, &save); - } - if (tr) { /* Store final result. */ - BCReg dst = bc_a(*(frame_contpc(frame)-1)); - J->base[dst] = tr; - if (dst >= J->maxslot) J->maxslot = dst+1; - } /* Otherwise continue with another __concat call. */ - } else { - /* Result type already specialized. */ - lua_assert(cont == lj_cont_condf || cont == lj_cont_condt); - } - } else { - lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */ - } - lua_assert(J->baseslot >= 1); -} - -/* -- Metamethod handling ------------------------------------------------- */ - -/* Prepare to record call to metamethod. */ -static BCReg rec_mm_prep(jit_State *J, ASMFunction cont) -{ - BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize; -#if LJ_64 - TRef trcont = lj_ir_kptr(J, (void *)((int64_t)cont-(int64_t)lj_vm_asm_begin)); -#else - TRef trcont = lj_ir_kptr(J, (void *)cont); -#endif - J->base[top] = trcont | TREF_CONT; - J->framedepth++; - for (s = J->maxslot; s < top; s++) - J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */ - return top+1; -} - -/* Record metamethod lookup. */ -int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm) -{ - RecordIndex mix; - GCtab *mt; - if (tref_istab(ix->tab)) { - mt = tabref(tabV(&ix->tabv)->metatable); - mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META); - } else if (tref_isudata(ix->tab)) { - int udtype = udataV(&ix->tabv)->udtype; - mt = tabref(udataV(&ix->tabv)->metatable); - /* The metatables of special userdata objects are treated as immutable. */ - if (udtype != UDTYPE_USERDATA) { - cTValue *mo; - if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) { - /* Specialize to the C library namespace object. */ - emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv))); - } else { - /* Specialize to the type of userdata. */ - TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE); - emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype)); - } - immutable_mt: - mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm)); - if (!mo || tvisnil(mo)) - return 0; /* No metamethod. */ - /* Treat metamethod or index table as immutable, too. */ - if (!(tvisfunc(mo) || tvistab(mo))) - lj_trace_err(J, LJ_TRERR_BADTYPE); - copyTV(J->L, &ix->mobjv, mo); - ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB); - ix->mtv = mt; - ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */ - return 1; /* Got metamethod or index table. */ - } - mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META); - } else { - /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */ - mt = tabref(basemt_obj(J2G(J), &ix->tabv)); - if (mt == NULL) { - ix->mt = TREF_NIL; - return 0; /* No metamethod. */ - } - /* The cdata metatable is treated as immutable. 
*/ - if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt; - ix->mt = mix.tab = lj_ir_ktab(J, mt); - goto nocheck; - } - ix->mt = mt ? mix.tab : TREF_NIL; - emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB)); -nocheck: - if (mt) { - GCstr *mmstr = mmname_str(J2G(J), mm); - cTValue *mo = lj_tab_getstr(mt, mmstr); - if (mo && !tvisnil(mo)) - copyTV(J->L, &ix->mobjv, mo); - ix->mtv = mt; - settabV(J->L, &mix.tabv, mt); - setstrV(J->L, &mix.keyv, mmstr); - mix.key = lj_ir_kstr(J, mmstr); - mix.val = 0; - mix.idxchain = 0; - ix->mobj = lj_record_idx(J, &mix); - return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */ - } - return 0; /* No metamethod. */ -} - -/* Record call to arithmetic metamethod. */ -static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm) -{ - /* Set up metamethod call first to save ix->tab and ix->tabv. */ - BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra); - TRef *base = J->base + func; - TValue *basev = J->L->base + func; - base[1] = ix->tab; base[2] = ix->key; - copyTV(J->L, basev+1, &ix->tabv); - copyTV(J->L, basev+2, &ix->keyv); - if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ - if (mm != MM_unm) { - ix->tab = ix->key; - copyTV(J->L, &ix->tabv, &ix->keyv); - if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */ - goto ok; - } - lj_trace_err(J, LJ_TRERR_NOMM); - } -ok: - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - base[0] = ix->mobj; - copyTV(J->L, basev+0, &ix->mobjv); - lj_record_call(J, func, 2); - return 0; /* No result yet. */ -} - -/* Record call to __len metamethod. */ -static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv) -{ - RecordIndex ix; - ix.tab = tr; - copyTV(J->L, &ix.tabv, tv); - if (lj_record_mm_lookup(J, &ix, MM_len)) { - BCReg func = rec_mm_prep(J, lj_cont_ra); - TRef *base = J->base + func; - TValue *basev = J->L->base + func; - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv); - base[1] = tr; copyTV(J->L, basev+1, tv); -#if LJ_52 - base[2] = tr; copyTV(J->L, basev+2, tv); -#else - base[2] = TREF_NIL; setnilV(basev+2); -#endif - lj_record_call(J, func, 2); - } else { - if (LJ_52 && tref_istab(tr)) - return lj_ir_call(J, IRCALL_lj_tab_len, tr); - lj_trace_err(J, LJ_TRERR_NOMM); - } - return 0; /* No result yet. */ -} - -/* Call a comparison metamethod. */ -static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op) -{ - BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt); - TRef *base = J->base + func; - TValue *tv = J->L->base + func; - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key; - copyTV(J->L, tv+0, &ix->mobjv); - copyTV(J->L, tv+1, &ix->valv); - copyTV(J->L, tv+2, &ix->keyv); - lj_record_call(J, func, 2); -} - -/* Record call to equality comparison metamethod (for tab and udata only). */ -static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op) -{ - ix->tab = ix->val; - copyTV(J->L, &ix->tabv, &ix->valv); - if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */ - cTValue *bv; - TRef mo1 = ix->mobj; - TValue mo1v; - copyTV(J->L, &mo1v, &ix->mobjv); - /* Avoid the 2nd lookup and the objcmp if the metatables are equal. 
*/ - bv = &ix->keyv; - if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) { - TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META); - emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); - } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) { - TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META); - emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); - } else { /* Lookup metamethod on 2nd operand and compare both. */ - ix->tab = ix->key; - copyTV(J->L, &ix->tabv, bv); - if (!lj_record_mm_lookup(J, ix, MM_eq) || - lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv)) - return; - } - rec_mm_callcomp(J, ix, op); - } -} - -/* Record call to ordered comparison metamethods (for arbitrary objects). */ -static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op) -{ - ix->tab = ix->val; - copyTV(J->L, &ix->tabv, &ix->valv); - while (1) { - MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */ -#if LJ_52 - if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ - ix->tab = ix->key; - copyTV(J->L, &ix->tabv, &ix->keyv); - if (!lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */ - goto nomatch; - } - rec_mm_callcomp(J, ix, op); - return; -#else - if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ - cTValue *bv; - TRef mo1 = ix->mobj; - TValue mo1v; - copyTV(J->L, &mo1v, &ix->mobjv); - /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */ - bv = &ix->keyv; - if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) { - TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META); - emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); - } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) { - TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META); - emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); - } else { /* Lookup metamethod on 2nd operand and compare both. */ - ix->tab = ix->key; - copyTV(J->L, &ix->tabv, bv); - if (!lj_record_mm_lookup(J, ix, mm) || - lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv)) - goto nomatch; - } - rec_mm_callcomp(J, ix, op); - return; - } -#endif - nomatch: - /* Lookup failed. Retry with __lt and swapped operands. */ - if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */ - ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab; - copyTV(J->L, &ix->tabv, &ix->keyv); - copyTV(J->L, &ix->keyv, &ix->valv); - copyTV(J->L, &ix->valv, &ix->tabv); - op ^= 3; - } -} - -#if LJ_HASFFI -/* Setup call to cdata comparison metamethod. */ -static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm) -{ - lj_snap_add(J); - if (tref_iscdata(ix->val)) { - ix->tab = ix->val; - copyTV(J->L, &ix->tabv, &ix->valv); - } else { - lua_assert(tref_iscdata(ix->key)); - ix->tab = ix->key; - copyTV(J->L, &ix->tabv, &ix->keyv); - } - lj_record_mm_lookup(J, ix, mm); - rec_mm_callcomp(J, ix, op); -} -#endif - -/* -- Indexed access ------------------------------------------------------ */ - -#ifdef LUAJIT_ENABLE_TABLE_BUMP -/* Bump table allocations in bytecode when they grow during recording. */ -static void rec_idx_bump(jit_State *J, RecordIndex *ix) -{ - RBCHashEntry *rbc = &J->rbchash[(ix->tab & (RBCHASH_SLOTS-1))]; - if (tref_ref(ix->tab) == rbc->ref) { - const BCIns *pc = mref(rbc->pc, const BCIns); - GCtab *tb = tabV(&ix->tabv); - uint32_t nhbits; - IRIns *ir; - if (!tvisnil(&ix->keyv)) - (void)lj_tab_set(J->L, tb, &ix->keyv); /* Grow table right now. */ - nhbits = tb->hmask > 0 ? 
lj_fls(tb->hmask)+1 : 0; - ir = IR(tref_ref(ix->tab)); - if (ir->o == IR_TNEW) { - uint32_t ah = bc_d(*pc); - uint32_t asize = ah & 0x7ff, hbits = ah >> 11; - if (nhbits > hbits) hbits = nhbits; - if (tb->asize > asize) { - asize = tb->asize <= 0x7ff ? tb->asize : 0x7ff; - } - if ((asize | (hbits<<11)) != ah) { /* Has the size changed? */ - /* Patch bytecode, but continue recording (for more patching). */ - setbc_d(pc, (asize | (hbits<<11))); - /* Patching TNEW operands is only safe if the trace is aborted. */ - ir->op1 = asize; ir->op2 = hbits; - J->retryrec = 1; /* Abort the trace at the end of recording. */ - } - } else if (ir->o == IR_TDUP) { - GCtab *tpl = gco2tab(proto_kgc(&gcref(rbc->pt)->pt, ~(ptrdiff_t)bc_d(*pc))); - /* Grow template table, but preserve keys with nil values. */ - if ((tb->asize > tpl->asize && (1u << nhbits)-1 == tpl->hmask) || - (tb->asize == tpl->asize && (1u << nhbits)-1 > tpl->hmask)) { - Node *node = noderef(tpl->node); - uint32_t i, hmask = tpl->hmask, asize; - TValue *array; - for (i = 0; i <= hmask; i++) { - if (!tvisnil(&node[i].key) && tvisnil(&node[i].val)) - settabV(J->L, &node[i].val, tpl); - } - if (!tvisnil(&ix->keyv) && tref_isk(ix->key)) { - TValue *o = lj_tab_set(J->L, tpl, &ix->keyv); - if (tvisnil(o)) settabV(J->L, o, tpl); - } - lj_tab_resize(J->L, tpl, tb->asize, nhbits); - node = noderef(tpl->node); - hmask = tpl->hmask; - for (i = 0; i <= hmask; i++) { - /* This is safe, since template tables only hold immutable values. */ - if (tvistab(&node[i].val)) - setnilV(&node[i].val); - } - /* The shape of the table may have changed. Clean up array part, too. */ - asize = tpl->asize; - array = tvref(tpl->array); - for (i = 0; i < asize; i++) { - if (tvistab(&array[i])) - setnilV(&array[i]); - } - J->retryrec = 1; /* Abort the trace at the end of recording. */ - } - } - } -} -#endif - -/* Record bounds-check. */ -static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize) -{ - /* Try to emit invariant bounds checks. */ - if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) == - (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) { - IRRef ref = tref_ref(ikey); - IRIns *ir = IR(ref); - int32_t ofs = 0; - IRRef ofsref = 0; - /* Handle constant offsets. */ - if (ir->o == IR_ADD && irref_isk(ir->op2)) { - ofsref = ir->op2; - ofs = IR(ofsref)->i; - ref = ir->op1; - ir = IR(ref); - } - /* Got scalar evolution analysis results for this reference? */ - if (ref == J->scev.idx) { - int32_t stop; - lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD); - stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]); - /* Runtime value for stop of loop is within bounds? */ - if ((uint64_t)stop + ofs < (uint64_t)asize) { - /* Emit invariant bounds check for stop. */ - emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop : - emitir(IRTI(IR_ADD), J->scev.stop, ofsref)); - /* Emit invariant bounds check for start, if not const or negative. */ - if (!(J->scev.dir && J->scev.start && - (int64_t)IR(J->scev.start)->i + ofs >= 0)) - emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey); - return; - } - } - } - emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */ -} - -/* Record indexed key lookup. */ -static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref) -{ - TRef key; - GCtab *t = tabV(&ix->tabv); - ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */ - *rbref = 0; - - /* Integer keys are looked up in the array part first. 
*/ - key = ix->key; - if (tref_isnumber(key)) { - int32_t k = numberVint(&ix->keyv); - if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k) - k = LJ_MAX_ASIZE; - if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */ - TRef ikey = lj_opt_narrow_index(J, key); - TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE); - if ((MSize)k < t->asize) { /* Currently an array key? */ - TRef arrayref; - rec_idx_abc(J, asizeref, ikey, t->asize); - arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY); - return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey); - } else { /* Currently not in array (may be an array extension)? */ - emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */ - if (k == 0 && tref_isk(key)) - key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */ - /* And continue with the hash lookup. */ - } - } else if (!tref_isk(key)) { - /* We can rule out const numbers which failed the integerness test - ** above. But all other numbers are potential array keys. - */ - if (t->asize == 0) { /* True sparse tables have an empty array part. */ - /* Guard that the array part stays empty. */ - TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE); - emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0)); - } else { - lj_trace_err(J, LJ_TRERR_NYITMIX); - } - } - } - - /* Otherwise the key is located in the hash part. */ - if (t->hmask == 0) { /* Shortcut for empty hash part. */ - /* Guard that the hash part stays empty. */ - TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); - emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0)); - return lj_ir_kkptr(J, niltvg(J2G(J))); - } - if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */ - key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); - if (tref_isk(key)) { - /* Optimize lookup of constant hash keys. */ - MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val); - if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) && - hslot <= 65535*(MSize)sizeof(Node)) { - TRef node, kslot, hm; - *rbref = J->cur.nins; /* Mark possible rollback point. */ - hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); - emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask)); - node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE); - kslot = lj_ir_kslot(J, key, hslot / sizeof(Node)); - return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot); - } - } - /* Fall back to a regular hash lookup. */ - return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key); -} - -/* Determine whether a key is NOT one of the fast metamethod names. */ -static int nommstr(jit_State *J, TRef key) -{ - if (tref_isstr(key)) { - if (tref_isk(key)) { - GCstr *str = ir_kstr(IR(tref_ref(key))); - uint32_t mm; - for (mm = 0; mm <= MM_FAST; mm++) - if (mmname_str(J2G(J), mm) == str) - return 0; /* MUST be one the fast metamethod names. */ - } else { - return 0; /* Variable string key MAY be a metamethod name. */ - } - } - return 1; /* CANNOT be a metamethod name. */ -} - -/* Record indexed load/store. */ -TRef lj_record_idx(jit_State *J, RecordIndex *ix) -{ - TRef xref; - IROp xrefop, loadop; - IRRef rbref; - cTValue *oldv; - - while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */ - /* Never call raw lj_record_idx() on non-table. */ - lua_assert(ix->idxchain != 0); - if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index)) - lj_trace_err(J, LJ_TRERR_NOMM); - handlemm: - if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */ - BCReg func = rec_mm_prep(J, ix->val ? 
lj_cont_nop : lj_cont_ra); - TRef *base = J->base + func; - TValue *tv = J->L->base + func; - lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ - base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key; - setfuncV(J->L, tv+0, funcV(&ix->mobjv)); - copyTV(J->L, tv+1, &ix->tabv); - copyTV(J->L, tv+2, &ix->keyv); - if (ix->val) { - base[3] = ix->val; - copyTV(J->L, tv+3, &ix->valv); - lj_record_call(J, func, 3); /* mobj(tab, key, val) */ - return 0; - } else { - lj_record_call(J, func, 2); /* res = mobj(tab, key) */ - return 0; /* No result yet. */ - } - } - /* Otherwise retry lookup with metaobject. */ - ix->tab = ix->mobj; - copyTV(J->L, &ix->tabv, &ix->mobjv); - if (--ix->idxchain == 0) - lj_trace_err(J, LJ_TRERR_IDXLOOP); - } - - /* First catch nil and NaN keys for tables. */ - if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) { - if (ix->val) /* Better fail early. */ - lj_trace_err(J, LJ_TRERR_STORENN); - if (tref_isk(ix->key)) { - if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) - goto handlemm; - return TREF_NIL; - } - } - - /* Record the key lookup. */ - xref = rec_idx_key(J, ix, &rbref); - xrefop = IR(tref_ref(xref))->o; - loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD; - /* The lj_meta_tset() inconsistency is gone, but better play safe. */ - oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv; - - if (ix->val == 0) { /* Indexed load */ - IRType t = itype2irt(oldv); - TRef res; - if (oldv == niltvg(J2G(J))) { - emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); - res = TREF_NIL; - } else { - res = emitir(IRTG(loadop, t), xref, 0); - } - if (tref_ref(res) < rbref) /* HREFK + load forwarded? */ - lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */ - if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) - goto handlemm; - if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */ - return res; - } else { /* Indexed store. */ - GCtab *mt = tabref(tabV(&ix->tabv)->metatable); - int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val); - if (tref_ref(xref) < rbref) /* HREFK forwarded? */ - lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */ - if (tvisnil(oldv)) { /* Previous value was nil? */ - /* Need to duplicate the hasmm check for the early guards. */ - int hasmm = 0; - if (ix->idxchain && mt) { - cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex)); - hasmm = mo && !tvisnil(mo); - } - if (hasmm) - emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */ - else if (xrefop == IR_HREF) - emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32), - xref, lj_ir_kkptr(J, niltvg(J2G(J)))); - if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) { - lua_assert(hasmm); - goto handlemm; - } - lua_assert(!hasmm); - if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */ - TRef key = ix->key; - if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */ - key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); - xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key); - keybarrier = 0; /* NEWREF already takes care of the key barrier. */ -#ifdef LUAJIT_ENABLE_TABLE_BUMP - if ((J->flags & JIT_F_OPT_SINK)) /* Avoid a separate flag. */ - rec_idx_bump(J, ix); -#endif - } - } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) { - /* Cannot derive that the previous value was non-nil, must do checks. */ - if (xrefop == IR_HREF) /* Guard against store to niltv. 
*/ - emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); - if (ix->idxchain) { /* Metamethod lookup required? */ - /* A check for NULL metatable is cheaper (hoistable) than a load. */ - if (!mt) { - TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META); - emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB)); - } else { - IRType t = itype2irt(oldv); - emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */ - } - } - } else { - keybarrier = 0; /* Previous non-nil value kept the key alive. */ - } - /* Convert int to number before storing. */ - if (!LJ_DUALNUM && tref_isinteger(ix->val)) - ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT); - emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val); - if (keybarrier || tref_isgcv(ix->val)) - emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0); - /* Invalidate neg. metamethod cache for stores with certain string keys. */ - if (!nommstr(J, ix->key)) { - TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM); - emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0)); - } - J->needsnap = 1; - return 0; - } -} - -static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i) -{ - RecordIndex ix; - cTValue *basev = J->L->base; - GCtab *t = tabV(&basev[ra-1]); - settabV(J->L, &ix.tabv, t); - ix.tab = getslot(J, ra-1); - ix.idxchain = 0; -#ifdef LUAJIT_ENABLE_TABLE_BUMP - if ((J->flags & JIT_F_OPT_SINK)) { - if (t->asize < i+rn-ra) - lj_tab_reasize(J->L, t, i+rn-ra); - setnilV(&ix.keyv); - rec_idx_bump(J, &ix); - } -#endif - for (; ra < rn; i++, ra++) { - setintV(&ix.keyv, i); - ix.key = lj_ir_kint(J, i); - copyTV(J->L, &ix.valv, &basev[ra]); - ix.val = getslot(J, ra); - lj_record_idx(J, &ix); - } -} - -/* -- Upvalue access ------------------------------------------------------ */ - -/* Check whether upvalue is immutable and ok to constify. */ -static int rec_upvalue_constify(jit_State *J, GCupval *uvp) -{ - if (uvp->immutable) { - cTValue *o = uvval(uvp); - /* Don't constify objects that may retain large amounts of memory. */ -#if LJ_HASFFI - if (tviscdata(o)) { - GCcdata *cd = cdataV(o); - if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) { - CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid); - if (!ctype_hassize(ct->info) || ct->size <= 16) - return 1; - } - return 0; - } -#else - UNUSED(J); -#endif - if (!(tvistab(o) || tvisudata(o) || tvisthread(o))) - return 1; - } - return 0; -} - -/* Record upvalue load/store. */ -static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val) -{ - GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv; - TRef fn = getcurrf(J); - IRRef uref; - int needbarrier = 0; - if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */ - TRef tr, kfunc; - lua_assert(val == 0); - if (!tref_isk(fn)) { /* Late specialization of current function. */ - if (J->pt->flags >= PROTO_CLC_POLY) - goto noconstify; - kfunc = lj_ir_kfunc(J, J->fn); - emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc); - J->base[-1] = TREF_FRAME | kfunc; - fn = kfunc; - } - tr = lj_record_constify(J, uvval(uvp)); - if (tr) - return tr; - } -noconstify: - /* Note: this effectively limits LJ_MAX_UPVAL to 127. */ - uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff); - if (!uvp->closed) { - /* In current stack? */ - if (uvval(uvp) >= tvref(J->L->stack) && - uvval(uvp) < tvref(J->L->maxstack)) { - int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot)); - if (slot >= 0) { /* Aliases an SSA slot? 
*/ - slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */ - /* NYI: add IR to guard that it's still aliasing the same slot. */ - if (val == 0) { - return getslot(J, slot); - } else { - J->base[slot] = val; - if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1); - return 0; - } - } - } - uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv)); - } else { - needbarrier = 1; - uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv)); - } - if (val == 0) { /* Upvalue load */ - IRType t = itype2irt(uvval(uvp)); - TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0); - if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */ - return res; - } else { /* Upvalue store. */ - /* Convert int to number before storing. */ - if (!LJ_DUALNUM && tref_isinteger(val)) - val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); - emitir(IRT(IR_USTORE, tref_type(val)), uref, val); - if (needbarrier && tref_isgcv(val)) - emitir(IRT(IR_OBAR, IRT_NIL), uref, val); - J->needsnap = 1; - return 0; - } -} - -/* -- Record calls to Lua functions --------------------------------------- */ - -/* Check unroll limits for calls. */ -static void check_call_unroll(jit_State *J, TraceNo lnk) -{ - cTValue *frame = J->L->base - 1; - void *pc = mref(frame_func(frame)->l.pc, void); - int32_t depth = J->framedepth; - int32_t count = 0; - if ((J->pt->flags & PROTO_VARARG)) depth--; /* Vararg frame still missing. */ - for (; depth > 0; depth--) { /* Count frames with same prototype. */ - if (frame_iscont(frame)) depth--; - frame = frame_prev(frame); - if (mref(frame_func(frame)->l.pc, void) == pc) - count++; - } - if (J->pc == J->startpc) { - if (count + J->tailcalled > J->param[JIT_P_recunroll]) { - J->pc++; - if (J->framedepth + J->retdepth == 0) - lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-rec. */ - else - lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */ - } - } else { - if (count > J->param[JIT_P_callunroll]) { - if (lnk) { /* Possible tail- or up-recursion. */ - lj_trace_flush(J, lnk); /* Flush trace that only returns. */ - /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */ - hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4)); - } - lj_trace_err(J, LJ_TRERR_CUNROLL); - } - } -} - -/* Record Lua function setup. */ -static void rec_func_setup(jit_State *J) -{ - GCproto *pt = J->pt; - BCReg s, numparams = pt->numparams; - if ((pt->flags & PROTO_NOJIT)) - lj_trace_err(J, LJ_TRERR_CJITOFF); - if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS) - lj_trace_err(J, LJ_TRERR_STACKOV); - /* Fill up missing parameters with nil. */ - for (s = J->maxslot; s < numparams; s++) - J->base[s] = TREF_NIL; - /* The remaining slots should never be read before they are written. */ - J->maxslot = numparams; -} - -/* Record Lua vararg function setup. */ -static void rec_func_vararg(jit_State *J) -{ - GCproto *pt = J->pt; - BCReg s, fixargs, vframe = J->maxslot+1; - lua_assert((pt->flags & PROTO_VARARG)); - if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS) - lj_trace_err(J, LJ_TRERR_STACKOV); - J->base[vframe-1] = J->base[-1]; /* Copy function up. */ - /* Copy fixarg slots up and set their original slots to nil. */ - fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot; - for (s = 0; s < fixargs; s++) { - J->base[vframe+s] = J->base[s]; - J->base[s] = TREF_NIL; - } - J->maxslot = fixargs; - J->framedepth++; - J->base += vframe; - J->baseslot += vframe; -} - -/* Record entry to a Lua function. 
*/ -static void rec_func_lua(jit_State *J) -{ - rec_func_setup(J); - check_call_unroll(J, 0); -} - -/* Record entry to an already compiled function. */ -static void rec_func_jit(jit_State *J, TraceNo lnk) -{ - GCtrace *T; - rec_func_setup(J); - T = traceref(J, lnk); - if (T->linktype == LJ_TRLINK_RETURN) { /* Trace returns to interpreter? */ - check_call_unroll(J, lnk); - /* Temporarily unpatch JFUNC* to continue recording across function. */ - J->patchins = *J->pc; - J->patchpc = (BCIns *)J->pc; - *J->patchpc = T->startins; - return; - } - J->instunroll = 0; /* Cannot continue across a compiled function. */ - if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) - lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-rec. */ - else - lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */ -} - -/* -- Vararg handling ----------------------------------------------------- */ - -/* Detect y = select(x, ...) idiom. */ -static int select_detect(jit_State *J) -{ - BCIns ins = J->pc[1]; - if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) { - cTValue *func = &J->L->base[bc_a(ins)]; - if (tvisfunc(func) && funcV(func)->c.ffid == FF_select) - return 1; - } - return 0; -} - -/* Record vararg instruction. */ -static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults) -{ - int32_t numparams = J->pt->numparams; - ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1; - lua_assert(frame_isvarg(J->L->base-1)); - if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */ - ptrdiff_t i; - if (nvararg < 0) nvararg = 0; - if (nresults == -1) { - nresults = nvararg; - J->maxslot = dst + (BCReg)nvararg; - } else if (dst + nresults > J->maxslot) { - J->maxslot = dst + (BCReg)nresults; - } - for (i = 0; i < nresults; i++) - J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1) : TREF_NIL; - } else { /* Unknown number of varargs passed to trace. */ - TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME); - int32_t frofs = 8*(1+numparams)+FRAME_VARG; - if (nresults >= 0) { /* Known fixed number of results. */ - ptrdiff_t i; - if (nvararg > 0) { - ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg; - TRef vbase; - if (nvararg >= nresults) - emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults)); - else - emitir(IRTGI(IR_EQ), fr, - lj_ir_kint(J, (int32_t)frame_ftsz(J->L->base-1))); - vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); - vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); - for (i = 0; i < nload; i++) { - IRType t = itype2irt(&J->L->base[i-1-nvararg]); - TRef aref = emitir(IRT(IR_AREF, IRT_P32), - vbase, lj_ir_kint(J, (int32_t)i)); - TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0); - if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ - J->base[dst+i] = tr; - } - } else { - emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs)); - nvararg = 0; - } - for (i = nvararg; i < nresults; i++) - J->base[dst+i] = TREF_NIL; - if (dst + (BCReg)nresults > J->maxslot) - J->maxslot = dst + (BCReg)nresults; - } else if (select_detect(J)) { /* y = select(x, ...) */ - TRef tridx = J->base[dst-1]; - TRef tr = TREF_NIL; - ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]); - if (idx < 0) goto nyivarg; - if (idx != 0 && !tref_isinteger(tridx)) - tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX); - if (idx != 0 && tref_isk(tridx)) { - emitir(IRTGI(idx <= nvararg ? 
IR_GE : IR_LT), - fr, lj_ir_kint(J, frofs+8*(int32_t)idx)); - frofs -= 8; /* Bias for 1-based index. */ - } else if (idx <= nvararg) { /* Compute size. */ - TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs)); - if (numparams) - emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0)); - tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3)); - if (idx != 0) { - tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1)); - rec_idx_abc(J, tr, tridx, (uint32_t)nvararg); - } - } else { - TRef tmp = lj_ir_kint(J, frofs); - if (idx != 0) { - TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3)); - tmp = emitir(IRTI(IR_ADD), tmp2, tmp); - } else { - tr = lj_ir_kint(J, 0); - } - emitir(IRTGI(IR_LT), fr, tmp); - } - if (idx != 0 && idx <= nvararg) { - IRType t; - TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); - vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); - t = itype2irt(&J->L->base[idx-2-nvararg]); - aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx); - tr = emitir(IRTG(IR_VLOAD, t), aref, 0); - if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ - } - J->base[dst-2] = tr; - J->maxslot = dst-1; - J->bcskip = 2; /* Skip CALLM + select. */ - } else { - nyivarg: - setintV(&J->errinfo, BC_VARG); - lj_trace_err_info(J, LJ_TRERR_NYIBC); - } - } -} - -/* -- Record allocations -------------------------------------------------- */ - -static TRef rec_tnew(jit_State *J, uint32_t ah) -{ - uint32_t asize = ah & 0x7ff; - uint32_t hbits = ah >> 11; - TRef tr; - if (asize == 0x7ff) asize = 0x801; - tr = emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits); -#ifdef LUAJIT_ENABLE_TABLE_BUMP - J->rbchash[(tr & (RBCHASH_SLOTS-1))].ref = tref_ref(tr); - setmref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pc, J->pc); - setgcref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt)); -#endif - return tr; -} - -/* -- Concatenation ------------------------------------------------------- */ - -static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot) -{ - TRef *top = &J->base[topslot]; - TValue savetv[5]; - BCReg s; - RecordIndex ix; - lua_assert(baseslot < topslot); - for (s = baseslot; s <= topslot; s++) - (void)getslot(J, s); /* Ensure all arguments have a reference. */ - if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) { - TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot]; - /* First convert numbers to strings. */ - for (trp = top; trp >= base; trp--) { - if (tref_isnumber(*trp)) - *trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp, - tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT); - else if (!tref_isstr(*trp)) - break; - } - xbase = ++trp; - tr = hdr = emitir(IRT(IR_BUFHDR, IRT_P32), - lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET); - do { - tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, *trp++); - } while (trp <= top); - tr = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); - J->maxslot = (BCReg)(xbase - J->base); - if (xbase == base) return tr; /* Return simple concatenation result. */ - /* Pass partial result. */ - topslot = J->maxslot--; - *xbase = tr; - top = xbase; - setstrV(J->L, &ix.keyv, &J2G(J)->strempty); /* Simulate string result. */ - } else { - J->maxslot = topslot-1; - copyTV(J->L, &ix.keyv, &J->L->base[topslot]); - } - copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]); - ix.tab = top[-1]; - ix.key = top[0]; - memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv)); /* Save slots. */ - rec_mm_arith(J, &ix, MM_concat); /* Call __concat metamethod. */ - memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv)); /* Restore slots. */ - return 0; /* No result yet. 
*/ -} - -/* -- Record bytecode ops ------------------------------------------------- */ - -/* Prepare for comparison. */ -static void rec_comp_prep(jit_State *J) -{ - /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */ - if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins) - emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0); - lj_snap_add(J); -} - -/* Fixup comparison. */ -static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond) -{ - BCIns jmpins = pc[1]; - const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0); - SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; - /* Set PC to opposite target to avoid re-recording the comp. in side trace. */ - J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc); - J->needsnap = 1; - if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins); - lj_snap_shrink(J); /* Shrink last snapshot if possible. */ -} - -/* Record the next bytecode instruction (_before_ it's executed). */ -void lj_record_ins(jit_State *J) -{ - cTValue *lbase; - RecordIndex ix; - const BCIns *pc; - BCIns ins; - BCOp op; - TRef ra, rb, rc; - - /* Perform post-processing action before recording the next instruction. */ - if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) { - switch (J->postproc) { - case LJ_POST_FIXCOMP: /* Fixup comparison. */ - pc = (const BCIns *)(uintptr_t)J2G(J)->tmptv.u64; - rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1))); - /* fallthrough */ - case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */ - case LJ_POST_FIXGUARDSNAP: /* Fixup and emit pending guard and snapshot. */ - if (!tvistruecond(&J2G(J)->tmptv2)) { - J->fold.ins.o ^= 1; /* Flip guard to opposite. */ - if (J->postproc == LJ_POST_FIXGUARDSNAP) { - SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; - J->cur.snapmap[snap->mapofs+snap->nent-1]--; /* False -> true. */ - } - } - lj_opt_fold(J); /* Emit pending guard. */ - /* fallthrough */ - case LJ_POST_FIXBOOL: - if (!tvistruecond(&J2G(J)->tmptv2)) { - BCReg s; - TValue *tv = J->L->base; - for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */ - if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) { - J->base[s] = TREF_FALSE; - break; - } - } - break; - case LJ_POST_FIXCONST: - { - BCReg s; - TValue *tv = J->L->base; - for (s = 0; s < J->maxslot; s++) /* Constify stack slots (if any). */ - if (J->base[s] == TREF_NIL && !tvisnil(&tv[s])) - J->base[s] = lj_record_constify(J, &tv[s]); - } - break; - case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. */ - if (bc_op(*J->pc) >= BC__MAX) - return; - break; - default: lua_assert(0); break; - } - J->postproc = LJ_POST_NONE; - } - - /* Need snapshot before recording next bytecode (e.g. after a store). */ - if (J->needsnap) { - J->needsnap = 0; - lj_snap_purge(J); - lj_snap_add(J); - J->mergesnap = 1; - } - - /* Skip some bytecodes. */ - if (LJ_UNLIKELY(J->bcskip > 0)) { - J->bcskip--; - return; - } - - /* Record only closed loops for root traces. */ - pc = J->pc; - if (J->framedepth == 0 && - (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent) - lj_trace_err(J, LJ_TRERR_LLEAVE); - -#ifdef LUA_USE_ASSERT - rec_check_slots(J); - rec_check_ir(J); -#endif - -#if LJ_HASPROFILE - rec_profile_ins(J, pc); -#endif - - /* Keep a copy of the runtime values of var/num/str operands. 
*/ -#define rav (&ix.valv) -#define rbv (&ix.tabv) -#define rcv (&ix.keyv) - - lbase = J->L->base; - ins = *pc; - op = bc_op(ins); - ra = bc_a(ins); - ix.val = 0; - switch (bcmode_a(op)) { - case BCMvar: - copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break; - default: break; /* Handled later. */ - } - rb = bc_b(ins); - rc = bc_c(ins); - switch (bcmode_b(op)) { - case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */ - case BCMvar: - copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break; - default: break; /* Handled later. */ - } - switch (bcmode_c(op)) { - case BCMvar: - copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break; - case BCMpri: setpriV(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break; - case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc); - copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) : - lj_ir_knumint(J, numV(tv)); } break; - case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc)); - setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break; - default: break; /* Handled later. */ - } - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: -#if LJ_HASFFI - if (tref_iscdata(ra) || tref_iscdata(rc)) { - rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt); - break; - } -#endif - /* Emit nothing for two numeric or string consts. */ - if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) { - IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra); - IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc); - int irop; - if (ta != tc) { - /* Widen mixed number/int comparisons to number/number comparison. */ - if (ta == IRT_INT && tc == IRT_NUM) { - ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT); - ta = IRT_NUM; - } else if (ta == IRT_NUM && tc == IRT_INT) { - rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); - } else if (LJ_52) { - ta = IRT_NIL; /* Force metamethod for different types. */ - } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) && - (tc == IRT_FALSE || tc == IRT_TRUE))) { - break; /* Interpreter will throw for two different types. */ - } - } - rec_comp_prep(J); - irop = (int)op - (int)BC_ISLT + (int)IR_LT; - if (ta == IRT_NUM) { - if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. */ - if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop)) - irop ^= 5; - } else if (ta == IRT_INT) { - if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop)) - irop ^= 1; - } else if (ta == IRT_STR) { - if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1; - ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc); - rc = lj_ir_kint(J, 0); - ta = IRT_INT; - } else { - rec_mm_comp(J, &ix, (int)op); - break; - } - emitir(IRTG(irop, ta), ra, rc); - rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1); - } - break; - - case BC_ISEQV: case BC_ISNEV: - case BC_ISEQS: case BC_ISNES: - case BC_ISEQN: case BC_ISNEN: - case BC_ISEQP: case BC_ISNEP: -#if LJ_HASFFI - if (tref_iscdata(ra) || tref_iscdata(rc)) { - rec_mm_comp_cdata(J, &ix, op, MM_eq); - break; - } -#endif - /* Emit nothing for two non-table, non-udata consts. */ - if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) { - int diff; - rec_comp_prep(J); - diff = lj_record_objcmp(J, ra, rc, rav, rcv); - if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra))) - rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff); - else if (diff == 1) /* Only check __eq if different, but same type. 
*/ - rec_mm_equal(J, &ix, (int)op); - } - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: - if ((op & 1) == tref_istruecond(rc)) - rc = 0; /* Don't store if condition is not true. */ - /* fallthrough */ - case BC_IST: case BC_ISF: /* Type specialization suffices. */ - if (bc_a(pc[1]) < J->maxslot) - J->maxslot = bc_a(pc[1]); /* Shrink used slots. */ - break; - - case BC_ISTYPE: case BC_ISNUM: - /* These coercions need to correspond with lj_meta_istype(). */ - if (LJ_DUALNUM && rc == ~LJ_TNUMX+1) - ra = lj_opt_narrow_toint(J, ra); - else if (rc == ~LJ_TNUMX+2) - ra = lj_ir_tonum(J, ra); - else if (rc == ~LJ_TSTR+1) - ra = lj_ir_tostr(J, ra); - /* else: type specialization suffices. */ - J->base[bc_a(ins)] = ra; - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_NOT: - /* Type specialization already forces const result. */ - rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE; - break; - - case BC_LEN: - if (tref_isstr(rc)) - rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN); - else if (!LJ_52 && tref_istab(rc)) - rc = lj_ir_call(J, IRCALL_lj_tab_len, rc); - else - rc = rec_mm_len(J, rc, rcv); - break; - - /* -- Arithmetic ops ---------------------------------------------------- */ - - case BC_UNM: - if (tref_isnumber_str(rc)) { - rc = lj_opt_narrow_unm(J, rc, rcv); - } else { - ix.tab = rc; - copyTV(J->L, &ix.tabv, rcv); - rc = rec_mm_arith(J, &ix, MM_unm); - } - break; - - case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV: - /* Swap rb/rc and rbv/rcv. rav is temp. */ - ix.tab = rc; ix.key = rc = rb; rb = ix.tab; - copyTV(J->L, rav, rbv); - copyTV(J->L, rbv, rcv); - copyTV(J->L, rcv, rav); - if (op == BC_MODNV) - goto recmod; - /* fallthrough */ - case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN: - case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: { - MMS mm = bcmode_mm(op); - if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) - rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv, - (int)mm - (int)MM_add + (int)IR_ADD); - else - rc = rec_mm_arith(J, &ix, mm); - break; - } - - case BC_MODVN: case BC_MODVV: - recmod: - if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) - rc = lj_opt_narrow_mod(J, rb, rc, rcv); - else - rc = rec_mm_arith(J, &ix, MM_mod); - break; - - case BC_POW: - if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) - rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv); - else - rc = rec_mm_arith(J, &ix, MM_pow); - break; - - /* -- Miscellaneous ops ------------------------------------------------- */ - - case BC_CAT: - rc = rec_cat(J, rb, rc); - break; - - /* -- Constant and move ops --------------------------------------------- */ - - case BC_MOV: - /* Clear gap of method call to avoid resurrecting previous refs. 
*/ - if (ra > J->maxslot) J->base[ra-1] = 0; - break; - case BC_KSTR: case BC_KNUM: case BC_KPRI: - break; - case BC_KSHORT: - rc = lj_ir_kint(J, (int32_t)(int16_t)rc); - break; - case BC_KNIL: - while (ra <= rc) - J->base[ra++] = TREF_NIL; - if (rc >= J->maxslot) J->maxslot = rc+1; - break; -#if LJ_HASFFI - case BC_KCDATA: - rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA); - break; -#endif - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - rc = rec_upvalue(J, rc, 0); - break; - case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP: - rec_upvalue(J, ra, rc); - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_GGET: case BC_GSET: - settabV(J->L, &ix.tabv, tabref(J->fn->l.env)); - ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV); - ix.idxchain = LJ_MAX_IDXCHAIN; - rc = lj_record_idx(J, &ix); - break; - - case BC_TGETB: case BC_TSETB: - setintV(&ix.keyv, (int32_t)rc); - ix.key = lj_ir_kint(J, (int32_t)rc); - /* fallthrough */ - case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS: - ix.idxchain = LJ_MAX_IDXCHAIN; - rc = lj_record_idx(J, &ix); - break; - case BC_TGETR: case BC_TSETR: - ix.idxchain = 0; - rc = lj_record_idx(J, &ix); - break; - - case BC_TSETM: - rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo); - break; - - case BC_TNEW: - rc = rec_tnew(J, rc); - break; - case BC_TDUP: - rc = emitir(IRTG(IR_TDUP, IRT_TAB), - lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0); -#ifdef LUAJIT_ENABLE_TABLE_BUMP - J->rbchash[(rc & (RBCHASH_SLOTS-1))].ref = tref_ref(rc); - setmref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pc, pc); - setgcref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt)); -#endif - break; - - /* -- Calls and vararg handling ----------------------------------------- */ - - case BC_ITERC: - J->base[ra] = getslot(J, ra-3-LJ_FR2); - J->base[ra+1] = getslot(J, ra-2-LJ_FR2); - J->base[ra+2] = getslot(J, ra-1-LJ_FR2); - { /* Do the actual copy now because lj_record_call needs the values. */ - TValue *b = &J->L->base[ra]; - copyTV(J->L, b, b-3-LJ_FR2); - copyTV(J->L, b+1, b-2-LJ_FR2); - copyTV(J->L, b+2, b-1-LJ_FR2); - } - lj_record_call(J, ra, (ptrdiff_t)rc-1); - break; - - /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */ - case BC_CALLM: - rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2; - /* fallthrough */ - case BC_CALL: - lj_record_call(J, ra, (ptrdiff_t)rc-1); - break; - - case BC_CALLMT: - rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2; - /* fallthrough */ - case BC_CALLT: - lj_record_tailcall(J, ra, (ptrdiff_t)rc-1); - break; - - case BC_VARG: - rec_varg(J, ra, (ptrdiff_t)rb-1); - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */ - rc = (BCReg)(J->L->top - J->L->base) - ra + 1; - /* fallthrough */ - case BC_RET: case BC_RET0: case BC_RET1: -#if LJ_HASPROFILE - rec_profile_ret(J); -#endif - lj_record_ret(J, ra, (ptrdiff_t)rc-1); - break; - - /* -- Loops and branches ------------------------------------------------ */ - - case BC_FORI: - if (rec_for(J, pc, 0) != LOOPEV_LEAVE) - J->loopref = J->cur.nins; - break; - case BC_JFORI: - lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL); - if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. 
*/ - lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J])); - /* Continue tracing if the loop is not entered. */ - break; - - case BC_FORL: - rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1)); - break; - case BC_ITERL: - rec_loop_interp(J, pc, rec_iterl(J, *pc)); - break; - case BC_LOOP: - rec_loop_interp(J, pc, rec_loop(J, ra)); - break; - - case BC_JFORL: - rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1)); - break; - case BC_JITERL: - rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins)); - break; - case BC_JLOOP: - rec_loop_jit(J, rc, rec_loop(J, ra)); - break; - - case BC_IFORL: - case BC_IITERL: - case BC_ILOOP: - case BC_IFUNCF: - case BC_IFUNCV: - lj_trace_err(J, LJ_TRERR_BLACKL); - break; - - case BC_JMP: - if (ra < J->maxslot) - J->maxslot = ra; /* Shrink used slots. */ - break; - - /* -- Function headers -------------------------------------------------- */ - - case BC_FUNCF: - rec_func_lua(J); - break; - case BC_JFUNCF: - rec_func_jit(J, rc); - break; - - case BC_FUNCV: - rec_func_vararg(J); - rec_func_lua(J); - break; - case BC_JFUNCV: - lua_assert(0); /* Cannot happen. No hotcall counting for varag funcs. */ - break; - - case BC_FUNCC: - case BC_FUNCCW: - lj_ffrecord_func(J); - break; - - default: - if (op >= BC__MAX) { - lj_ffrecord_func(J); - break; - } - /* fallthrough */ - case BC_ITERN: - case BC_ISNEXT: - case BC_UCLO: - case BC_FNEW: - setintV(&J->errinfo, (int32_t)op); - lj_trace_err_info(J, LJ_TRERR_NYIBC); - break; - } - - /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */ - if (bcmode_a(op) == BCMdst && rc) { - J->base[ra] = rc; - if (ra >= J->maxslot) J->maxslot = ra+1; - } - -#undef rav -#undef rbv -#undef rcv - - /* Limit the number of recorded IR instructions. */ - if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord]) - lj_trace_err(J, LJ_TRERR_TRACEOV); -} - -/* -- Recording setup ----------------------------------------------------- */ - -/* Setup recording for a root trace started by a hot loop. */ -static const BCIns *rec_setup_root(jit_State *J) -{ - /* Determine the next PC and the bytecode range for the loop. */ - const BCIns *pcj, *pc = J->pc; - BCIns ins = *pc; - BCReg ra = bc_a(ins); - switch (bc_op(ins)) { - case BC_FORL: - J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); - pc += 1+bc_j(ins); - J->bc_min = pc; - break; - case BC_ITERL: - lua_assert(bc_op(pc[-1]) == BC_ITERC); - J->maxslot = ra + bc_b(pc[-1]) - 1; - J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); - pc += 1+bc_j(ins); - lua_assert(bc_op(pc[-1]) == BC_JMP); - J->bc_min = pc; - break; - case BC_LOOP: - /* Only check BC range for real loops, but not for "repeat until true". */ - pcj = pc + bc_j(ins); - ins = *pcj; - if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) { - J->bc_min = pcj+1 + bc_j(ins); - J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); - } - J->maxslot = ra; - pc++; - break; - case BC_RET: - case BC_RET0: - case BC_RET1: - /* No bytecode range check for down-recursive root traces. */ - J->maxslot = ra + bc_d(ins) - 1; - break; - case BC_FUNCF: - /* No bytecode range check for root traces started by a hot call. */ - J->maxslot = J->pt->numparams; - pc++; - break; - case BC_CALLM: - case BC_CALL: - case BC_ITERC: - /* No bytecode range check for stitched traces. */ - pc++; - break; - default: - lua_assert(0); - break; - } - return pc; -} - -/* Setup for recording a new trace. */ -void lj_record_setup(jit_State *J) -{ - uint32_t i; - - /* Initialize state related to current trace. 
*/ - memset(J->slot, 0, sizeof(J->slot)); - memset(J->chain, 0, sizeof(J->chain)); -#ifdef LUAJIT_ENABLE_TABLE_BUMP - memset(J->rbchash, 0, sizeof(J->rbchash)); -#endif - memset(J->bpropcache, 0, sizeof(J->bpropcache)); - J->scev.idx = REF_NIL; - setmref(J->scev.pc, NULL); - - J->baseslot = 1; /* Invoking function is at base[-1]. */ - J->base = J->slot + J->baseslot; - J->maxslot = 0; - J->framedepth = 0; - J->retdepth = 0; - - J->instunroll = J->param[JIT_P_instunroll]; - J->loopunroll = J->param[JIT_P_loopunroll]; - J->tailcalled = 0; - J->loopref = 0; - - J->bc_min = NULL; /* Means no limit. */ - J->bc_extent = ~(MSize)0; - - /* Emit instructions for fixed references. Also triggers initial IR alloc. */ - emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno); - for (i = 0; i <= 2; i++) { - IRIns *ir = IR(REF_NIL-i); - ir->i = 0; - ir->t.irt = (uint8_t)(IRT_NIL+i); - ir->o = IR_KPRI; - ir->prev = 0; - } - J->cur.nk = REF_TRUE; - - J->startpc = J->pc; - setmref(J->cur.startpc, J->pc); - if (J->parent) { /* Side trace. */ - GCtrace *T = traceref(J, J->parent); - TraceNo root = T->root ? T->root : J->parent; - J->cur.root = (uint16_t)root; - J->cur.startins = BCINS_AD(BC_JMP, 0, 0); - /* Check whether we could at least potentially form an extra loop. */ - if (J->exitno == 0 && T->snap[0].nent == 0) { - /* We can narrow a FORL for some side traces, too. */ - if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI && - bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) { - lj_snap_add(J); - rec_for_loop(J, J->pc-1, &J->scev, 1); - goto sidecheck; - } - } else { - J->startpc = NULL; /* Prevent forming an extra loop. */ - } - lj_snap_replay(J, T); - sidecheck: - if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] || - T->snap[J->exitno].count >= J->param[JIT_P_hotexit] + - J->param[JIT_P_tryside]) { - lj_record_stop(J, LJ_TRLINK_INTERP, 0); - } - } else { /* Root trace. */ - J->cur.root = 0; - J->cur.startins = *J->pc; - J->pc = rec_setup_root(J); - /* Note: the loop instruction itself is recorded at the end and not - ** at the start! So snapshot #0 needs to point to the *next* instruction. - */ - lj_snap_add(J); - if (bc_op(J->cur.startins) == BC_FORL) - rec_for_loop(J, J->pc-1, &J->scev, 1); - else if (bc_op(J->cur.startins) == BC_ITERC) - J->startpc = NULL; - if (1 + J->pt->framesize >= LJ_MAX_JSLOTS) - lj_trace_err(J, LJ_TRERR_STACKOV); - } -#if LJ_HASPROFILE - J->prev_pt = NULL; - J->prev_line = -1; -#endif -#ifdef LUAJIT_ENABLE_CHECKHOOK - /* Regularly check for instruction/line hooks from compiled code and - ** exit to the interpreter if the hooks are set. - ** - ** This is a compile-time option and disabled by default, since the - ** hook checks may be quite expensive in tight loops. - ** - ** Note this is only useful if hooks are *not* set most of the time. - ** Use this only if you want to *asynchronously* interrupt the execution. - ** - ** You can set the instruction hook via lua_sethook() with a count of 1 - ** from a signal handler or another native thread. Please have a look - ** at the first few functions in luajit.c for an example (Ctrl-C handler). 
- */ - { - TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), - lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE); - tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT))); - emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0)); - } -#endif -} - -#undef IR -#undef emitir_raw -#undef emitir - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.h deleted file mode 100644 index 732adb47ce2..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_record.h +++ /dev/null @@ -1,45 +0,0 @@ -/* -** Trace recorder (bytecode -> SSA IR). -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_RECORD_H -#define _LJ_RECORD_H - -#include "lj_obj.h" -#include "lj_jit.h" - -#if LJ_HASJIT -/* Context for recording an indexed load/store. */ -typedef struct RecordIndex { - TValue tabv; /* Runtime value of table (or indexed object). */ - TValue keyv; /* Runtime value of key. */ - TValue valv; /* Runtime value of stored value. */ - TValue mobjv; /* Runtime value of metamethod object. */ - GCtab *mtv; /* Runtime value of metatable object. */ - cTValue *oldv; /* Runtime value of previously stored value. */ - TRef tab; /* Table (or indexed object) reference. */ - TRef key; /* Key reference. */ - TRef val; /* Value reference for a store or 0 for a load. */ - TRef mt; /* Metatable reference. */ - TRef mobj; /* Metamethod object reference. */ - int idxchain; /* Index indirections left or 0 for raw lookup. */ -} RecordIndex; - -LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b, - cTValue *av, cTValue *bv); -LJ_FUNC void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk); -LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o); - -LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs); -LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs); -LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults); - -LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm); -LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix); - -LJ_FUNC void lj_record_ins(jit_State *J); -LJ_FUNC void lj_record_setup(jit_State *J); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.c deleted file mode 100644 index d8e7987c5c2..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.c +++ /dev/null @@ -1,870 +0,0 @@ -/* -** Snapshot handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_snap_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_gc.h" -#include "lj_tab.h" -#include "lj_state.h" -#include "lj_frame.h" -#include "lj_bc.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" -#include "lj_trace.h" -#include "lj_snap.h" -#include "lj_target.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#include "lj_cdata.h" -#endif - -/* Some local macros to save typing. Undef'd at the end. */ -#define IR(ref) (&J->cur.ir[(ref)]) - -/* Pass IR on to next optimization in chain (FOLD). */ -#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) - -/* Emit raw IR without passing through optimizations. */ -#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) - -/* -- Snapshot buffer allocation ------------------------------------------ */ - -/* Grow snapshot buffer. 
*/ -void lj_snap_grow_buf_(jit_State *J, MSize need) -{ - MSize maxsnap = (MSize)J->param[JIT_P_maxsnap]; - if (need > maxsnap) - lj_trace_err(J, LJ_TRERR_SNAPOV); - lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot); - J->cur.snap = J->snapbuf; -} - -/* Grow snapshot map buffer. */ -void lj_snap_grow_map_(jit_State *J, MSize need) -{ - if (need < 2*J->sizesnapmap) - need = 2*J->sizesnapmap; - else if (need < 64) - need = 64; - J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf, - J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry)); - J->cur.snapmap = J->snapmapbuf; - J->sizesnapmap = need; -} - -/* -- Snapshot generation ------------------------------------------------- */ - -/* Add all modified slots to the snapshot. */ -static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots) -{ - IRRef retf = J->chain[IR_RETF]; /* Limits SLOAD restore elimination. */ - BCReg s; - MSize n = 0; - for (s = 0; s < nslots; s++) { - TRef tr = J->slot[s]; - IRRef ref = tref_ref(tr); - if (ref) { - SnapEntry sn = SNAP_TR(s, tr); - IRIns *ir = IR(ref); - if (!(sn & (SNAP_CONT|SNAP_FRAME)) && - ir->o == IR_SLOAD && ir->op1 == s && ref > retf) { - /* No need to snapshot unmodified non-inherited slots. */ - if (!(ir->op2 & IRSLOAD_INHERIT)) - continue; - /* No need to restore readonly slots and unmodified non-parent slots. */ - if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) && - (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT) - sn |= SNAP_NORESTORE; - } - if (LJ_SOFTFP && irt_isnum(ir->t)) - sn |= SNAP_SOFTFPNUM; - map[n++] = sn; - } - } - return n; -} - -/* Add frame links at the end of the snapshot. */ -static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map) -{ - cTValue *frame = J->L->base - 1; - cTValue *lim = J->L->base - J->baseslot; - GCfunc *fn = frame_func(frame); - cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top; - MSize f = 0; - lua_assert(!LJ_FR2); /* TODO_FR2: store 64 bit PCs. */ - map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */ - while (frame > lim) { /* Backwards traversal of all frames above base. */ - if (frame_islua(frame)) { - map[f++] = SNAP_MKPC(frame_pc(frame)); - frame = frame_prevl(frame); - } else if (frame_iscont(frame)) { - map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); - map[f++] = SNAP_MKPC(frame_contpc(frame)); - frame = frame_prevd(frame); - } else { - lua_assert(!frame_isc(frame)); - map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); - frame = frame_prevd(frame); - continue; - } - if (frame + funcproto(frame_func(frame))->framesize > ftop) - ftop = frame + funcproto(frame_func(frame))->framesize; - } - lua_assert(f == (MSize)(1 + J->framedepth)); - return (BCReg)(ftop - lim); -} - -/* Take a snapshot of the current stack. */ -static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap) -{ - BCReg nslots = J->baseslot + J->maxslot; - MSize nent; - SnapEntry *p; - /* Conservative estimate. */ - lj_snap_grow_map(J, nsnapmap + nslots + (MSize)J->framedepth+1); - p = &J->cur.snapmap[nsnapmap]; - nent = snapshot_slots(J, p, nslots); - snap->topslot = (uint8_t)snapshot_framelinks(J, p + nent); - snap->mapofs = (uint16_t)nsnapmap; - snap->ref = (IRRef1)J->cur.nins; - snap->nent = (uint8_t)nent; - snap->nslots = (uint8_t)nslots; - snap->count = 0; - J->cur.nsnapmap = (uint16_t)(nsnapmap + nent + 1 + J->framedepth); -} - -/* Add or merge a snapshot. 
*/ -void lj_snap_add(jit_State *J) -{ - MSize nsnap = J->cur.nsnap; - MSize nsnapmap = J->cur.nsnapmap; - /* Merge if no ins. inbetween or if requested and no guard inbetween. */ - if (J->mergesnap ? !irt_isguard(J->guardemit) : - (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) { - if (nsnap == 1) { /* But preserve snap #0 PC. */ - emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0); - goto nomerge; - } - nsnapmap = J->cur.snap[--nsnap].mapofs; - } else { - nomerge: - lj_snap_grow_buf(J, nsnap+1); - J->cur.nsnap = (uint16_t)(nsnap+1); - } - J->mergesnap = 0; - J->guardemit.irt = 0; - snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap); -} - -/* -- Snapshot modification ----------------------------------------------- */ - -#define SNAP_USEDEF_SLOTS (LJ_MAX_JSLOTS+LJ_STACK_EXTRA) - -/* Find unused slots with reaching-definitions bytecode data-flow analysis. */ -static BCReg snap_usedef(jit_State *J, uint8_t *udf, - const BCIns *pc, BCReg maxslot) -{ - BCReg s; - GCobj *o; - - if (maxslot == 0) return 0; -#ifdef LUAJIT_USE_VALGRIND - /* Avoid errors for harmless reads beyond maxslot. */ - memset(udf, 1, SNAP_USEDEF_SLOTS); -#else - memset(udf, 1, maxslot); -#endif - - /* Treat open upvalues as used. */ - o = gcref(J->L->openupval); - while (o) { - if (uvval(gco2uv(o)) < J->L->base) break; - udf[uvval(gco2uv(o)) - J->L->base] = 0; - o = gcref(o->gch.nextgc); - } - -#define USE_SLOT(s) udf[(s)] &= ~1 -#define DEF_SLOT(s) udf[(s)] *= 3 - - /* Scan through following bytecode and check for uses/defs. */ - lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); - for (;;) { - BCIns ins = *pc++; - BCOp op = bc_op(ins); - switch (bcmode_b(op)) { - case BCMvar: USE_SLOT(bc_b(ins)); break; - default: break; - } - switch (bcmode_c(op)) { - case BCMvar: USE_SLOT(bc_c(ins)); break; - case BCMrbase: - lua_assert(op == BC_CAT); - for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s); - for (; s < maxslot; s++) DEF_SLOT(s); - break; - case BCMjump: - handle_jump: { - BCReg minslot = bc_a(ins); - if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT; - else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1; - else if (op == BC_UCLO) { pc += bc_j(ins); break; } - for (s = minslot; s < maxslot; s++) DEF_SLOT(s); - return minslot < maxslot ? minslot : maxslot; - } - case BCMlit: - if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { - goto handle_jump; - } else if (bc_isret(op)) { - BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1); - for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s); - for (; s < top; s++) USE_SLOT(s); - for (; s < maxslot; s++) DEF_SLOT(s); - return 0; - } - break; - case BCMfunc: return maxslot; /* NYI: will abort, anyway. */ - default: break; - } - switch (bcmode_a(op)) { - case BCMvar: USE_SLOT(bc_a(ins)); break; - case BCMdst: - if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins)); - break; - case BCMbase: - if (op >= BC_CALLM && op <= BC_VARG) { - BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ? - maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2); - if (LJ_FR2) DEF_SLOT(bc_a(ins)+1); - s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 
3 : 0); - for (; s < top; s++) USE_SLOT(s); - for (; s < maxslot; s++) DEF_SLOT(s); - if (op == BC_CALLT || op == BC_CALLMT) { - for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s); - return 0; - } - } else if (op == BC_KNIL) { - for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s); - } else if (op == BC_TSETM) { - for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s); - } - break; - default: break; - } - lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); - } - -#undef USE_SLOT -#undef DEF_SLOT - - return 0; /* unreachable */ -} - -/* Purge dead slots before the next snapshot. */ -void lj_snap_purge(jit_State *J) -{ - uint8_t udf[SNAP_USEDEF_SLOTS]; - BCReg maxslot = J->maxslot; - BCReg s = snap_usedef(J, udf, J->pc, maxslot); - for (; s < maxslot; s++) - if (udf[s] != 0) - J->base[s] = 0; /* Purge dead slots. */ -} - -/* Shrink last snapshot. */ -void lj_snap_shrink(jit_State *J) -{ - SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; - SnapEntry *map = &J->cur.snapmap[snap->mapofs]; - MSize n, m, nlim, nent = snap->nent; - uint8_t udf[SNAP_USEDEF_SLOTS]; - BCReg maxslot = J->maxslot; - BCReg minslot = snap_usedef(J, udf, snap_pc(map[nent]), maxslot); - BCReg baseslot = J->baseslot; - maxslot += baseslot; - minslot += baseslot; - snap->nslots = (uint8_t)maxslot; - for (n = m = 0; n < nent; n++) { /* Remove unused slots from snapshot. */ - BCReg s = snap_slot(map[n]); - if (s < minslot || (s < maxslot && udf[s-baseslot] == 0)) - map[m++] = map[n]; /* Only copy used slots. */ - } - snap->nent = (uint8_t)m; - nlim = J->cur.nsnapmap - snap->mapofs - 1; - while (n <= nlim) map[m++] = map[n++]; /* Move PC + frame links down. */ - J->cur.nsnapmap = (uint16_t)(snap->mapofs + m); /* Free up space in map. */ -} - -/* -- Snapshot access ----------------------------------------------------- */ - -/* Initialize a Bloom Filter with all renamed refs. -** There are very few renames (often none), so the filter has -** very few bits set. This makes it suitable for negative filtering. -*/ -static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim) -{ - BloomFilter rfilt = 0; - IRIns *ir; - for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--) - if (ir->op2 <= lim) - bloomset(rfilt, ir->op1); - return rfilt; -} - -/* Process matching renames to find the original RegSP. */ -static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs) -{ - IRIns *ir; - for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--) - if (ir->op1 == ref && ir->op2 <= lim) - rs = ir->prev; - return rs; -} - -/* Copy RegSP from parent snapshot to the parent links of the IR. */ -IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir) -{ - SnapShot *snap = &T->snap[snapno]; - SnapEntry *map = &T->snapmap[snap->mapofs]; - BloomFilter rfilt = snap_renamefilter(T, snapno); - MSize n = 0; - IRRef ref = 0; - for ( ; ; ir++) { - uint32_t rs; - if (ir->o == IR_SLOAD) { - if (!(ir->op2 & IRSLOAD_PARENT)) break; - for ( ; ; n++) { - lua_assert(n < snap->nent); - if (snap_slot(map[n]) == ir->op1) { - ref = snap_ref(map[n++]); - break; - } - } - } else if (LJ_SOFTFP && ir->o == IR_HIOP) { - ref++; - } else if (ir->o == IR_PVAL) { - ref = ir->op1 + REF_BIAS; - } else { - break; - } - rs = T->ir[ref].prev; - if (bloomtest(rfilt, ref)) - rs = snap_renameref(T, snapno, ref, rs); - ir->prev = (uint16_t)rs; - lua_assert(regsp_used(rs)); - } - return ir; -} - -/* -- Snapshot replay ----------------------------------------------------- */ - -/* Replay constant from parent trace. 
*/ -static TRef snap_replay_const(jit_State *J, IRIns *ir) -{ - /* Only have to deal with constants that can occur in stack slots. */ - switch ((IROp)ir->o) { - case IR_KPRI: return TREF_PRI(irt_type(ir->t)); - case IR_KINT: return lj_ir_kint(J, ir->i); - case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t)); - case IR_KNUM: return lj_ir_k64(J, IR_KNUM, ir_knum(ir)); - case IR_KINT64: return lj_ir_k64(J, IR_KINT64, ir_kint64(ir)); - case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */ - default: lua_assert(0); return TREF_NIL; break; - } -} - -/* De-duplicate parent reference. */ -static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref) -{ - MSize j; - for (j = 0; j < nmax; j++) - if (snap_ref(map[j]) == ref) - return J->slot[snap_slot(map[j])] & ~(SNAP_CONT|SNAP_FRAME); - return 0; -} - -/* Emit parent reference with de-duplication. */ -static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax, - BloomFilter seen, IRRef ref) -{ - IRIns *ir = &T->ir[ref]; - TRef tr; - if (irref_isk(ref)) - tr = snap_replay_const(J, ir); - else if (!regsp_used(ir->prev)) - tr = 0; - else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0) - tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0); - return tr; -} - -/* Check whether a sunk store corresponds to an allocation. Slow path. */ -static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs) -{ - if (irs->o == IR_ASTORE || irs->o == IR_HSTORE || - irs->o == IR_FSTORE || irs->o == IR_XSTORE) { - IRIns *irk = IR(irs->op1); - if (irk->o == IR_AREF || irk->o == IR_HREFK) - irk = IR(irk->op1); - return (IR(irk->op1) == ira); - } - return 0; -} - -/* Check whether a sunk store corresponds to an allocation. Fast path. */ -static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs) -{ - if (irs->s != 255) - return (ira + irs->s == irs); /* Fast check. */ - return snap_sunk_store2(J, ira, irs); -} - -/* Replay snapshot state to setup side trace. */ -void lj_snap_replay(jit_State *J, GCtrace *T) -{ - SnapShot *snap = &T->snap[J->exitno]; - SnapEntry *map = &T->snapmap[snap->mapofs]; - MSize n, nent = snap->nent; - BloomFilter seen = 0; - int pass23 = 0; - J->framedepth = 0; - /* Emit IR for slots inherited from parent snapshot. */ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - BCReg s = snap_slot(sn); - IRRef ref = snap_ref(sn); - IRIns *ir = &T->ir[ref]; - TRef tr; - /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */ - if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0) - goto setslot; - bloomset(seen, ref); - if (irref_isk(ref)) { - tr = snap_replay_const(J, ir); - } else if (!regsp_used(ir->prev)) { - pass23 = 1; - lua_assert(s != 0); - tr = s; - } else { - IRType t = irt_type(ir->t); - uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT; - if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM; - if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY); - tr = emitir_raw(IRT(IR_SLOAD, t), s, mode); - } - setslot: - J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME)); /* Same as TREF_* flags. */ - J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && s); - if ((sn & SNAP_FRAME)) - J->baseslot = s+1; - } - if (pass23) { - IRIns *irlast = &T->ir[snap->ref]; - pass23 = 0; - /* Emit dependent PVALs. 
*/ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - IRRef refp = snap_ref(sn); - IRIns *ir = &T->ir[refp]; - if (regsp_reg(ir->r) == RID_SUNK) { - if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue; - pass23 = 1; - lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || - ir->o == IR_CNEW || ir->o == IR_CNEWI); - if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1); - if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2); - if (LJ_HASFFI && ir->o == IR_CNEWI) { - if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) - snap_pref(J, T, map, nent, seen, (ir+1)->op2); - } else { - IRIns *irs; - for (irs = ir+1; irs < irlast; irs++) - if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { - if (snap_pref(J, T, map, nent, seen, irs->op2) == 0) - snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1); - else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && - irs+1 < irlast && (irs+1)->o == IR_HIOP) - snap_pref(J, T, map, nent, seen, (irs+1)->op2); - } - } - } else if (!irref_isk(refp) && !regsp_used(ir->prev)) { - lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); - J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1); - } - } - /* Replay sunk instructions. */ - for (n = 0; pass23 && n < nent; n++) { - SnapEntry sn = map[n]; - IRRef refp = snap_ref(sn); - IRIns *ir = &T->ir[refp]; - if (regsp_reg(ir->r) == RID_SUNK) { - TRef op1, op2; - if (J->slot[snap_slot(sn)] != snap_slot(sn)) { /* De-dup allocs. */ - J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]]; - continue; - } - op1 = ir->op1; - if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1); - op2 = ir->op2; - if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2); - if (LJ_HASFFI && ir->o == IR_CNEWI) { - if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) { - lj_needsplit(J); /* Emit joining HIOP. */ - op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2, - snap_pref(J, T, map, nent, seen, (ir+1)->op2)); - } - J->slot[snap_slot(sn)] = emitir(ir->ot, op1, op2); - } else { - IRIns *irs; - TRef tr = emitir(ir->ot, op1, op2); - J->slot[snap_slot(sn)] = tr; - for (irs = ir+1; irs < irlast; irs++) - if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { - IRIns *irr = &T->ir[irs->op1]; - TRef val, key = irr->op2, tmp = tr; - if (irr->o != IR_FREF) { - IRIns *irk = &T->ir[key]; - if (irr->o == IR_HREFK) - key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]), - irk->op2); - else - key = snap_replay_const(J, irk); - if (irr->o == IR_HREFK || irr->o == IR_AREF) { - IRIns *irf = &T->ir[irr->op1]; - tmp = emitir(irf->ot, tmp, irf->op2); - } - } - tmp = emitir(irr->ot, tmp, key); - val = snap_pref(J, T, map, nent, seen, irs->op2); - if (val == 0) { - IRIns *irc = &T->ir[irs->op2]; - lua_assert(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT); - val = snap_pref(J, T, map, nent, seen, irc->op1); - val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); - } else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && - irs+1 < irlast && (irs+1)->o == IR_HIOP) { - IRType t = IRT_I64; - if (LJ_SOFTFP && irt_type((irs+1)->t) == IRT_SOFTFP) - t = IRT_NUM; - lj_needsplit(J); - if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) { - uint64_t k = (uint32_t)T->ir[irs->op2].i + - ((uint64_t)T->ir[(irs+1)->op2].i << 32); - val = lj_ir_k64(J, t == IRT_I64 ? 
IR_KINT64 : IR_KNUM, - lj_ir_k64_find(J, k)); - } else { - val = emitir_raw(IRT(IR_HIOP, t), val, - snap_pref(J, T, map, nent, seen, (irs+1)->op2)); - } - tmp = emitir(IRT(irs->o, t), tmp, val); - continue; - } - tmp = emitir(irs->ot, tmp, val); - } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) { - emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); - } - } - } - } - } - J->base = J->slot + J->baseslot; - J->maxslot = snap->nslots - J->baseslot; - lj_snap_add(J); - if (pass23) /* Need explicit GC step _after_ initial snapshot. */ - emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0); -} - -/* -- Snapshot restore ---------------------------------------------------- */ - -static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, - SnapNo snapno, BloomFilter rfilt, - IRIns *ir, TValue *o); - -/* Restore a value from the trace exit state. */ -static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex, - SnapNo snapno, BloomFilter rfilt, - IRRef ref, TValue *o) -{ - IRIns *ir = &T->ir[ref]; - IRType1 t = ir->t; - RegSP rs = ir->prev; - if (irref_isk(ref)) { /* Restore constant slot. */ - lj_ir_kvalue(J->L, o, ir); - return; - } - if (LJ_UNLIKELY(bloomtest(rfilt, ref))) - rs = snap_renameref(T, snapno, ref, rs); - lua_assert(!LJ_GC64); /* TODO_GC64: handle 64 bit references. */ - if (ra_hasspill(regsp_spill(rs))) { /* Restore from spill slot. */ - int32_t *sps = &ex->spill[regsp_spill(rs)]; - if (irt_isinteger(t)) { - setintV(o, *sps); -#if !LJ_SOFTFP - } else if (irt_isnum(t)) { - o->u64 = *(uint64_t *)sps; -#endif - } else if (LJ_64 && irt_islightud(t)) { - /* 64 bit lightuserdata which may escape already has the tag bits. */ - o->u64 = *(uint64_t *)sps; - } else { - lua_assert(!irt_ispri(t)); /* PRI refs never have a spill slot. */ - setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t)); - } - } else { /* Restore from register. */ - Reg r = regsp_reg(rs); - if (ra_noreg(r)) { - lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); - snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o); - if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o)); - return; - } else if (irt_isinteger(t)) { - setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]); -#if !LJ_SOFTFP - } else if (irt_isnum(t)) { - setnumV(o, ex->fpr[r-RID_MIN_FPR]); -#endif - } else if (LJ_64 && irt_islightud(t)) { - /* 64 bit lightuserdata which may escape already has the tag bits. */ - o->u64 = ex->gpr[r-RID_MIN_GPR]; - } else if (irt_ispri(t)) { - setpriV(o, irt_toitype(t)); - } else { - setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t)); - } - } -} - -#if LJ_HASFFI -/* Restore raw data from the trace exit state. */ -static void snap_restoredata(GCtrace *T, ExitState *ex, - SnapNo snapno, BloomFilter rfilt, - IRRef ref, void *dst, CTSize sz) -{ - IRIns *ir = &T->ir[ref]; - RegSP rs = ir->prev; - int32_t *src; - uint64_t tmp; - if (irref_isk(ref)) { - if (ir->o == IR_KNUM || ir->o == IR_KINT64) { - src = mref(ir->ptr, int32_t); - } else if (sz == 8) { - tmp = (uint64_t)(uint32_t)ir->i; - src = (int32_t *)&tmp; - } else { - src = &ir->i; - } - } else { - if (LJ_UNLIKELY(bloomtest(rfilt, ref))) - rs = snap_renameref(T, snapno, ref, rs); - if (ra_hasspill(regsp_spill(rs))) { - src = &ex->spill[regsp_spill(rs)]; - if (sz == 8 && !irt_is64(ir->t)) { - tmp = (uint64_t)(uint32_t)*src; - src = (int32_t *)&tmp; - } - } else { - Reg r = regsp_reg(rs); - if (ra_noreg(r)) { - /* Note: this assumes CNEWI is never used for SOFTFP split numbers. 
*/ - lua_assert(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); - snap_restoredata(T, ex, snapno, rfilt, ir->op1, dst, 4); - *(lua_Number *)dst = (lua_Number)*(int32_t *)dst; - return; - } - src = (int32_t *)&ex->gpr[r-RID_MIN_GPR]; -#if !LJ_SOFTFP - if (r >= RID_MAX_GPR) { - src = (int32_t *)&ex->fpr[r-RID_MIN_FPR]; -#if LJ_TARGET_PPC - if (sz == 4) { /* PPC FPRs are always doubles. */ - *(float *)dst = (float)*(double *)src; - return; - } -#else - if (LJ_BE && sz == 4) src++; -#endif - } -#endif - } - } - lua_assert(sz == 1 || sz == 2 || sz == 4 || sz == 8); - if (sz == 4) *(int32_t *)dst = *src; - else if (sz == 8) *(int64_t *)dst = *(int64_t *)src; - else if (sz == 1) *(int8_t *)dst = (int8_t)*src; - else *(int16_t *)dst = (int16_t)*src; -} -#endif - -/* Unsink allocation from the trace exit state. Unsink sunk stores. */ -static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, - SnapNo snapno, BloomFilter rfilt, - IRIns *ir, TValue *o) -{ - lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || - ir->o == IR_CNEW || ir->o == IR_CNEWI); -#if LJ_HASFFI - if (ir->o == IR_CNEW || ir->o == IR_CNEWI) { - CTState *cts = ctype_cts(J->L); - CTypeID id = (CTypeID)T->ir[ir->op1].i; - CTSize sz = lj_ctype_size(cts, id); - GCcdata *cd = lj_cdata_new(cts, id, sz); - setcdataV(J->L, o, cd); - if (ir->o == IR_CNEWI) { - uint8_t *p = (uint8_t *)cdataptr(cd); - lua_assert(sz == 4 || sz == 8); - if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) { - snap_restoredata(T, ex, snapno, rfilt, (ir+1)->op2, LJ_LE?p+4:p, 4); - if (LJ_BE) p += 4; - sz = 4; - } - snap_restoredata(T, ex, snapno, rfilt, ir->op2, p, sz); - } else { - IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref]; - for (irs = ir+1; irs < irlast; irs++) - if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { - IRIns *iro = &T->ir[T->ir[irs->op1].op2]; - uint8_t *p = (uint8_t *)cd; - CTSize szs; - lua_assert(irs->o == IR_XSTORE && T->ir[irs->op1].o == IR_ADD); - lua_assert(iro->o == IR_KINT || iro->o == IR_KINT64); - if (irt_is64(irs->t)) szs = 8; - else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1; - else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2; - else szs = 4; - if (LJ_64 && iro->o == IR_KINT64) - p += (int64_t)ir_k64(iro)->u64; - else - p += iro->i; - lua_assert(p >= (uint8_t *)cdataptr(cd) && - p + szs <= (uint8_t *)cdataptr(cd) + sz); - if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { - lua_assert(szs == 4); - snap_restoredata(T, ex, snapno, rfilt, (irs+1)->op2, LJ_LE?p+4:p,4); - if (LJ_BE) p += 4; - } - snap_restoredata(T, ex, snapno, rfilt, irs->op2, p, szs); - } - } - } else -#endif - { - IRIns *irs, *irlast; - GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) : - lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1])); - settabV(J->L, o, t); - irlast = &T->ir[T->snap[snapno].ref]; - for (irs = ir+1; irs < irlast; irs++) - if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { - IRIns *irk = &T->ir[irs->op1]; - TValue tmp, *val; - lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || - irs->o == IR_FSTORE); - if (irk->o == IR_FREF) { - lua_assert(irk->op2 == IRFL_TAB_META); - snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp); - /* NOBARRIER: The table is new (marked white). */ - setgcref(t->metatable, obj2gco(tabV(&tmp))); - } else { - irk = &T->ir[irk->op2]; - if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1]; - lj_ir_kvalue(J->L, &tmp, irk); - val = lj_tab_set(J->L, t, &tmp); - /* NOBARRIER: The table is new (marked white). 
*/ - snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val); - if (LJ_SOFTFP && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { - snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp); - val->u32.hi = tmp.u32.lo; - } - } - } - } -} - -/* Restore interpreter state from exit state with the help of a snapshot. */ -const BCIns *lj_snap_restore(jit_State *J, void *exptr) -{ - ExitState *ex = (ExitState *)exptr; - SnapNo snapno = J->exitno; /* For now, snapno == exitno. */ - GCtrace *T = traceref(J, J->parent); - SnapShot *snap = &T->snap[snapno]; - MSize n, nent = snap->nent; - SnapEntry *map = &T->snapmap[snap->mapofs]; - SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1]; - ptrdiff_t ftsz0; - TValue *frame; - BloomFilter rfilt = snap_renamefilter(T, snapno); - const BCIns *pc = snap_pc(map[nent]); - lua_State *L = J->L; - - /* Set interpreter PC to the next PC to get correct error messages. */ - setcframe_pc(cframe_raw(L->cframe), pc+1); - - /* Make sure the stack is big enough for the slots from the snapshot. */ - if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) { - L->top = curr_topL(L); - lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize); - } - - /* Fill stack slots with data from the registers and spill slots. */ - frame = L->base-1; - ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */ - for (n = 0; n < nent; n++) { - SnapEntry sn = map[n]; - if (!(sn & SNAP_NORESTORE)) { - TValue *o = &frame[snap_slot(sn)]; - IRRef ref = snap_ref(sn); - IRIns *ir = &T->ir[ref]; - if (ir->r == RID_SUNK) { - MSize j; - for (j = 0; j < n; j++) - if (snap_ref(map[j]) == ref) { /* De-duplicate sunk allocations. */ - copyTV(L, o, &frame[snap_slot(map[j])]); - goto dupslot; - } - snap_unsink(J, T, ex, snapno, rfilt, ir, o); - dupslot: - continue; - } - snap_restoreval(J, T, ex, snapno, rfilt, ref, o); - if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && tvisint(o)) { - TValue tmp; - snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp); - o->u32.hi = tmp.u32.lo; - } else if ((sn & (SNAP_CONT|SNAP_FRAME))) { - lua_assert(!LJ_FR2); /* TODO_FR2: store 64 bit PCs. */ - /* Overwrite tag with frame link. */ - setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0); - L->base = o+1; - } - } - } - lua_assert(map + nent == flinks); - - /* Compute current stack top. */ - switch (bc_op(*pc)) { - default: - if (bc_op(*pc) < BC_FUNCF) { - L->top = curr_topL(L); - break; - } - /* fallthrough */ - case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM: - L->top = frame + snap->nslots; - break; - } - return pc; -} - -#undef IR -#undef emitir_raw -#undef emitir - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.h deleted file mode 100644 index 9a125be7fb9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_snap.h +++ /dev/null @@ -1,34 +0,0 @@ -/* -** Snapshot handling. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_SNAP_H -#define _LJ_SNAP_H - -#include "lj_obj.h" -#include "lj_jit.h" - -#if LJ_HASJIT -LJ_FUNC void lj_snap_add(jit_State *J); -LJ_FUNC void lj_snap_purge(jit_State *J); -LJ_FUNC void lj_snap_shrink(jit_State *J); -LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir); -LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T); -LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr); -LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need); -LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need); - -static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need) -{ - if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need); -} - -static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need) -{ - if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need); -} - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.c deleted file mode 100644 index 84b4d113a2b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.c +++ /dev/null @@ -1,300 +0,0 @@ -/* -** State and stack handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_state_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_func.h" -#include "lj_meta.h" -#include "lj_state.h" -#include "lj_frame.h" -#if LJ_HASFFI -#include "lj_ctype.h" -#endif -#include "lj_trace.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_lex.h" -#include "lj_alloc.h" -#include "luajit.h" - -/* -- Stack handling ------------------------------------------------------ */ - -/* Stack sizes. */ -#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */ -#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */ -#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */ -#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA) - -/* Explanation of LJ_STACK_EXTRA: -** -** Calls to metamethods store their arguments beyond the current top -** without checking for the stack limit. This avoids stack resizes which -** would invalidate passed TValue pointers. The stack check is performed -** later by the function header. This can safely resize the stack or raise -** an error. Thus we need some extra slots beyond the current stack limit. -** -** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus -** one extra slot if mobj is not a function. Only lj_meta_tset needs 5 -** slots above top, but then mobj is always a function. So we can get by -** with 5 extra slots. -** LJ_FR2: We need 2 more slots for the frame PC and the continuation PC. -*/ - -/* Resize stack slots and adjust pointers in state. 
*/ -static void resizestack(lua_State *L, MSize n) -{ - TValue *st, *oldst = tvref(L->stack); - ptrdiff_t delta; - MSize oldsize = L->stacksize; - MSize realsize = n + 1 + LJ_STACK_EXTRA; - GCobj *up; - lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1); - st = (TValue *)lj_mem_realloc(L, tvref(L->stack), - (MSize)(oldsize*sizeof(TValue)), - (MSize)(realsize*sizeof(TValue))); - setmref(L->stack, st); - delta = (char *)st - (char *)oldst; - setmref(L->maxstack, st + n); - while (oldsize < realsize) /* Clear new slots. */ - setnilV(st + oldsize++); - L->stacksize = realsize; - if ((size_t)(mref(G(L)->jit_base, char) - (char *)oldst) < oldsize) - setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta); - L->base = (TValue *)((char *)L->base + delta); - L->top = (TValue *)((char *)L->top + delta); - for (up = gcref(L->openupval); up != NULL; up = gcnext(up)) - setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta)); -} - -/* Relimit stack after error, in case the limit was overdrawn. */ -void lj_state_relimitstack(lua_State *L) -{ - if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1) - resizestack(L, LJ_STACK_MAX); -} - -/* Try to shrink the stack (called from GC). */ -void lj_state_shrinkstack(lua_State *L, MSize used) -{ - if (L->stacksize > LJ_STACK_MAXEX) - return; /* Avoid stack shrinking while handling stack overflow. */ - if (4*used < L->stacksize && - 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize && - /* Don't shrink stack of live trace. */ - (tvref(G(L)->jit_base) == NULL || obj2gco(L) != gcref(G(L)->cur_L))) - resizestack(L, L->stacksize >> 1); -} - -/* Try to grow stack. */ -void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need) -{ - MSize n; - if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */ - lj_err_throw(L, LUA_ERRERR); - n = L->stacksize + need; - if (n > LJ_STACK_MAX) { - n += 2*LUA_MINSTACK; - } else if (n < 2*L->stacksize) { - n = 2*L->stacksize; - if (n >= LJ_STACK_MAX) - n = LJ_STACK_MAX; - } - resizestack(L, n); - if (L->stacksize > LJ_STACK_MAXEX) - lj_err_msg(L, LJ_ERR_STKOV); -} - -void LJ_FASTCALL lj_state_growstack1(lua_State *L) -{ - lj_state_growstack(L, 1); -} - -/* Allocate basic stack for new state. */ -static void stack_init(lua_State *L1, lua_State *L) -{ - TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue); - setmref(L1->stack, st); - L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA; - stend = st + L1->stacksize; - setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1); - setthreadV(L1, st++, L1); /* Needed for curr_funcisL() on empty stack. */ - if (LJ_FR2) setnilV(st++); - L1->base = L1->top = st; - while (st < stend) /* Clear new slots. */ - setnilV(st++); -} - -/* -- State handling ------------------------------------------------------ */ - -/* Open parts that may cause memory-allocation errors. */ -static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud) -{ - global_State *g = G(L); - UNUSED(dummy); - UNUSED(ud); - stack_init(L, L); - /* NOBARRIER: State initialization, all objects are white. */ - setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL))); - settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY)); - lj_str_resize(L, LJ_MIN_STRTAB-1); - lj_meta_init(L); - lj_lex_init(L); - fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. 
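[Editorial note, not part of the patch] resizestack() above reallocates the stack vector and then rebases every pointer that referred into the old block (L->base, L->top, open upvalue slots, jit_base) by the byte delta between the new and old addresses. A minimal sketch of the same rebasing idea for a generic vector; the Vec type and names are illustrative, not LuaJIT's.

  #include <stddef.h>
  #include <stdlib.h>

  typedef struct Vec { int *data; int *cursor; size_t size; } Vec;

  /* Grow the vector and keep 'cursor' (a pointer into it) valid. */
  static int vec_grow(Vec *v, size_t newsize)
  {
    int *old = v->data;
    int *p = realloc(v->data, newsize * sizeof(int));
    if (!p) return 0;
    /* Rebase the interior pointer by the byte delta, as resizestack() does.
    ** (Strictly, using the old pointer value after realloc is outside the
    ** C standard; the original relies on the same liberty.) */
    v->cursor = (int *)((char *)v->cursor + ((char *)p - (char *)old));
    v->data = p;
    v->size = newsize;
    return 1;
  }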
*/ - g->gc.threshold = 4*g->gc.total; - lj_trace_initstate(g); - return NULL; -} - -static void close_state(lua_State *L) -{ - global_State *g = G(L); - lj_func_closeuv(L, tvref(L->stack)); - lj_gc_freeall(g); - lua_assert(gcref(g->gc.root) == obj2gco(L)); - lua_assert(g->strnum == 0); - lj_trace_freestate(g); -#if LJ_HASFFI - lj_ctype_freestate(g); -#endif - lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); - lj_buf_free(g, &g->tmpbuf); - lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); - lua_assert(g->gc.total == sizeof(GG_State)); -#ifndef LUAJIT_USE_SYSMALLOC - if (g->allocf == lj_alloc_f) - lj_alloc_destroy(g->allocd); - else -#endif - g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0); -} - -#if LJ_64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC)) -lua_State *lj_state_newstate(lua_Alloc f, void *ud) -#else -LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) -#endif -{ - GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State)); - lua_State *L = &GG->L; - global_State *g = &GG->g; - if (GG == NULL || !checkptrGC(GG)) return NULL; - memset(GG, 0, sizeof(GG_State)); - L->gct = ~LJ_TTHREAD; - L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */ - L->dummy_ffid = FF_C; - setmref(L->glref, g); - g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED; - g->strempty.marked = LJ_GC_WHITE0; - g->strempty.gct = ~LJ_TSTR; - g->allocf = f; - g->allocd = ud; - setgcref(g->mainthref, obj2gco(L)); - setgcref(g->uvhead.prev, obj2gco(&g->uvhead)); - setgcref(g->uvhead.next, obj2gco(&g->uvhead)); - g->strmask = ~(MSize)0; - setnilV(registry(L)); - setnilV(&g->nilnode.val); - setnilV(&g->nilnode.key); -#if !LJ_GC64 - setmref(g->nilnode.freetop, &g->nilnode); -#endif - lj_buf_init(NULL, &g->tmpbuf); - g->gc.state = GCSpause; - setgcref(g->gc.root, obj2gco(L)); - setmref(g->gc.sweep, &g->gc.root); - g->gc.total = sizeof(GG_State); - g->gc.pause = LUAI_GCPAUSE; - g->gc.stepmul = LUAI_GCMUL; - lj_dispatch_init((GG_State *)L); - L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */ - if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) { - /* Memory allocation error: free partial state. */ - close_state(L); - return NULL; - } - L->status = 0; - return L; -} - -static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud) -{ - UNUSED(dummy); - UNUSED(ud); - lj_gc_finalize_cdata(L); - lj_gc_finalize_udata(L); - /* Frame pop omitted. */ - return NULL; -} - -LUA_API void lua_close(lua_State *L) -{ - global_State *g = G(L); - int i; - L = mainthread(g); /* Only the main thread can be closed. */ -#if LJ_HASPROFILE - luaJIT_profile_stop(L); -#endif - setgcrefnull(g->cur_L); - lj_func_closeuv(L, tvref(L->stack)); - lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */ -#if LJ_HASJIT - G2J(g)->flags &= ~JIT_F_ON; - G2J(g)->state = LJ_TRACE_IDLE; - lj_dispatch_update(g); -#endif - for (i = 0;;) { - hook_enter(g); - L->status = 0; - L->base = L->top = tvref(L->stack) + 1 + LJ_FR2; - L->cframe = NULL; - if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) { - if (++i >= 10) break; - lj_gc_separateudata(g, 1); /* Separate udata again. */ - if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */ - break; - } - } - close_state(L); -} - -lua_State *lj_state_new(lua_State *L) -{ - lua_State *L1 = lj_mem_newobj(L, lua_State); - L1->gct = ~LJ_TTHREAD; - L1->dummy_ffid = FF_C; - L1->status = 0; - L1->stacksize = 0; - setmref(L1->stack, NULL); - L1->cframe = NULL; - /* NOBARRIER: The lua_State is new (marked white). 
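[Editorial note, not part of the patch] lua_newstate() above allocates the GG_State, then runs all allocation-prone setup (cpluaopen) under a protected call so that an out-of-memory error unwinds into close_state() instead of leaking a half-built state. A hedged sketch of that create / try-init / tear-down-on-failure shape in plain C, with invented names standing in for the protected-call machinery:

  #include <stdlib.h>

  typedef struct State { char *buf; /* ... more resources ... */ } State;

  static int state_init(State *st)       /* the allocation-prone part */
  {
    st->buf = malloc(4096);
    return st->buf != NULL;              /* 0 = failure */
  }

  static void state_close(State *st)     /* must be safe on a partial state */
  {
    free(st->buf);
    free(st);
  }

  State *state_new(void)
  {
    State *st = calloc(1, sizeof(State));
    if (!st) return NULL;
    if (!state_init(st)) {               /* mirrors cpluaopen() failing */
      state_close(st);                   /* free the partial state */
      return NULL;
    }
    return st;
  }

The key design point carried over from the original: close_state() only touches resources that were zero-initialised up front, so it can be called at any stage of a failed construction.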
*/ - setgcrefnull(L1->openupval); - setmrefr(L1->glref, L->glref); - setgcrefr(L1->env, L->env); - stack_init(L1, L); /* init stack */ - lua_assert(iswhite(obj2gco(L1))); - return L1; -} - -void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L) -{ - lua_assert(L != mainthread(g)); - if (obj2gco(L) == gcref(g->cur_L)) - setgcrefnull(g->cur_L); - lj_func_closeuv(L, tvref(L->stack)); - lua_assert(gcref(L->openupval) == NULL); - lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); - lj_mem_freet(g, L); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.h deleted file mode 100644 index 687889a2613..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_state.h +++ /dev/null @@ -1,35 +0,0 @@ -/* -** State and stack handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_STATE_H -#define _LJ_STATE_H - -#include "lj_obj.h" - -#define incr_top(L) \ - (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0)) - -#define savestack(L, p) ((char *)(p) - mref(L->stack, char)) -#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n))) - -LJ_FUNC void lj_state_relimitstack(lua_State *L); -LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used); -LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need); -LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L); - -static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need) -{ - if ((mref(L->maxstack, char) - (char *)L->top) <= - (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue)) - lj_state_growstack(L, need); -} - -LJ_FUNC lua_State *lj_state_new(lua_State *L); -LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L); -#if LJ_64 -LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.c deleted file mode 100644 index 3bc8079f37c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.c +++ /dev/null @@ -1,224 +0,0 @@ -/* -** String handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_str_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_str.h" -#include "lj_char.h" - -/* -- String helpers ------------------------------------------------------ */ - -/* Ordered compare of strings. Assumes string data is 4-byte aligned. */ -int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b) -{ - MSize i, n = a->len > b->len ? b->len : a->len; - for (i = 0; i < n; i += 4) { - /* Note: innocuous access up to end of string + 3. */ - uint32_t va = *(const uint32_t *)(strdata(a)+i); - uint32_t vb = *(const uint32_t *)(strdata(b)+i); - if (va != vb) { -#if LJ_LE - va = lj_bswap(va); vb = lj_bswap(vb); -#endif - i -= n; - if ((int32_t)i >= -3) { - va >>= 32+(i<<3); vb >>= 32+(i<<3); - if (va == vb) break; - } - return va < vb ? -1 : 1; - } - } - return (int32_t)(a->len - b->len); -} - -/* Fast string data comparison. Caveat: unaligned access to 1st string! */ -static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len) -{ - MSize i = 0; - lua_assert(len > 0); - lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4); - do { /* Note: innocuous access up to end of string + 3. */ - uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i); - if (v) { - i -= len; -#if LJ_LE - return (int32_t)i >= -3 ? 
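[Editorial note, not part of the patch] lj_str_cmp() and str_fastcmp() above compare strings four bytes at a time and, on little-endian targets, byte-swap the differing words so that the numeric comparison matches lexicographic byte order; the odd tail is handled by shifting away the over-read bytes. The sketch below shows the same idea in an alignment-safe, simplified form (explicit big-endian loads instead of the page-guarded over-read, and a plain byte loop for the tail); it is not the LuaJIT routine itself.

  #include <stdint.h>
  #include <string.h>

  static uint32_t load_be32(const char *p)   /* read 4 bytes as big-endian */
  {
    const unsigned char *u = (const unsigned char *)p;
    return ((uint32_t)u[0]<<24) | ((uint32_t)u[1]<<16) | ((uint32_t)u[2]<<8) | u[3];
  }

  /* Ordered compare of the first len bytes of a and b. */
  static int cmp4(const char *a, const char *b, size_t len)
  {
    size_t i = 0;
    for (; i + 4 <= len; i += 4) {
      uint32_t va = load_be32(a+i), vb = load_be32(b+i);
      if (va != vb) return va < vb ? -1 : 1; /* big-endian word order == byte order */
    }
    for (; i < len; i++)                     /* simple byte loop for the tail */
      if (a[i] != b[i])
        return (unsigned char)a[i] < (unsigned char)b[i] ? -1 : 1;
    return 0;
  }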
(v << (32+(i<<3))) : 1; -#else - return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1; -#endif - } - i += 4; - } while (i < len); - return 0; -} - -/* Find fixed string p inside string s. */ -const char *lj_str_find(const char *s, const char *p, MSize slen, MSize plen) -{ - if (plen <= slen) { - if (plen == 0) { - return s; - } else { - int c = *(const uint8_t *)p++; - plen--; slen -= plen; - while (slen) { - const char *q = (const char *)memchr(s, c, slen); - if (!q) break; - if (memcmp(q+1, p, plen) == 0) return q; - q++; slen -= (MSize)(q-s); s = q; - } - } - } - return NULL; -} - -/* Check whether a string has a pattern matching character. */ -int lj_str_haspattern(GCstr *s) -{ - const char *p = strdata(s), *q = p + s->len; - while (p < q) { - int c = *(const uint8_t *)p++; - if (lj_char_ispunct(c) && strchr("^$*+?.([%-", c)) - return 1; /* Found a pattern matching char. */ - } - return 0; /* No pattern matching chars found. */ -} - -/* -- String interning ---------------------------------------------------- */ - -/* Resize the string hash table (grow and shrink). */ -void lj_str_resize(lua_State *L, MSize newmask) -{ - global_State *g = G(L); - GCRef *newhash; - MSize i; - if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1) - return; /* No resizing during GC traversal or if already too big. */ - newhash = lj_mem_newvec(L, newmask+1, GCRef); - memset(newhash, 0, (newmask+1)*sizeof(GCRef)); - for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */ - GCobj *p = gcref(g->strhash[i]); - while (p) { /* Follow each hash chain and reinsert all strings. */ - MSize h = gco2str(p)->hash & newmask; - GCobj *next = gcnext(p); - /* NOBARRIER: The string table is a GC root. */ - setgcrefr(p->gch.nextgc, newhash[h]); - setgcref(newhash[h], p); - p = next; - } - } - lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); - g->strmask = newmask; - g->strhash = newhash; -} - -/* -** Lua will use at most ~(2^LUAI_HASHLIMIT) bytes from a string to -** compute its hash -*/ -#if !defined(LUAI_HASHLIMIT) -#define LUAI_HASHLIMIT 5 -#endif - -#define cast(t, exp) ((t)(exp)) -int luajittex_choose_hash_function = 0 ; -/* Intern a string and return string object. */ -GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx) -{ - global_State *g; - GCstr *s; - GCobj *o; - MSize len = (MSize)lenx; - MSize a, b, h = len; - size_t step ; - size_t l1 ; - if (lenx >= LJ_MAX_STR) - lj_err_msg(L, LJ_ERR_STROV); - g = G(L); - - if (len==0) - return &g->strempty; - if (luajittex_choose_hash_function==0) { - /* Lua 5.1.5 hash function */ - /* for 5.2 max methods we also need to patch the vm eq */ - step = (len>>LUAI_HASHLIMIT)+1; /* if string is too long, don't hash all its chars */ - for (l1=len; l1>=step; l1-=step) /* compute hash */ - h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1])); - } else { - /* LuaJIT 2.0.2 hash function */ - /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */ - if (len >= 4) { /* Caveat: unaligned access! */ - a = lj_getu32(str); - h ^= lj_getu32(str+len-4); - b = lj_getu32(str+(len>>1)-2); - h ^= b; h -= lj_rol(b, 14); - b += lj_getu32(str+(len>>2)-1); - } else if (len > 0) { - a = *(const uint8_t *)str; - h ^= *(const uint8_t *)(str+len-1); - b = *(const uint8_t *)(str+(len>>1)); - h ^= b; h -= lj_rol(b, 14); - } else { - /* Already done, kept for reference */ - return &g->strempty; - } - a ^= h; a -= lj_rol(h, 11); - b ^= a; b -= lj_rol(a, 25); - h ^= b; h -= lj_rol(b, 16); - } - - - /* Check if the string has already been interned. 
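[Editorial note, not part of the patch] With luajittex_choose_hash_function == 0, lj_str_new() above uses the Lua 5.1.5 hash: start from the length and mix in only every step-th byte, where step = (len >> LUAI_HASHLIMIT) + 1, so very long strings are hashed from a bounded sample of their bytes. The same formula as a standalone function, assuming the stock LUAI_HASHLIMIT of 5 shown above:

  #include <stddef.h>
  #include <stdint.h>

  #define HASHLIMIT 5   /* same default as LUAI_HASHLIMIT above */

  static uint32_t lua51_hash(const char *str, size_t len)
  {
    uint32_t h = (uint32_t)len;
    size_t step = (len >> HASHLIMIT) + 1;  /* don't hash every byte of long strings */
    size_t l1;
    for (l1 = len; l1 >= step; l1 -= step)
      h = h ^ ((h << 5) + (h >> 2) + (unsigned char)str[l1-1]);
    return h;
  }

The trade-off, as the surrounding patch comments note, is compatibility with stock Lua 5.1 hashing (needed by LuaJITTeX) versus the stronger lookup3-derived hash that LuaJIT uses by default.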
*/ - o = gcref(g->strhash[h & g->strmask]); - if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) { - while (o != NULL) { - GCstr *sx = gco2str(o); - if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) { - /* Resurrect if dead. Can only happen with fixstring() (keywords). */ - if (isdead(g, o)) flipwhite(o); - return sx; /* Return existing string. */ - } - o = gcnext(o); - } - } else { /* Slow path: end of string is too close to a page boundary. */ - while (o != NULL) { - GCstr *sx = gco2str(o); - if (sx->len == len && memcmp(str, strdata(sx), len) == 0) { - /* Resurrect if dead. Can only happen with fixstring() (keywords). */ - if (isdead(g, o)) flipwhite(o); - return sx; /* Return existing string. */ - } - o = gcnext(o); - } - } - /* Nope, create a new string. */ - s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr); - newwhite(g, s); - s->gct = ~LJ_TSTR; - s->len = len; - s->hash = h; - s->reserved = 0; - memcpy(strdatawr(s), str, len); - strdatawr(s)[len] = '\0'; /* Zero-terminate string. */ - /* Add it to string hash table. */ - h &= g->strmask; - s->nextgc = g->strhash[h]; - /* NOBARRIER: The string table is a GC root. */ - setgcref(g->strhash[h], obj2gco(s)); - if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */ - lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */ - return s; /* Return newly interned string. */ -} - -void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s) -{ - g->strnum--; - lj_mem_free(g, s, sizestring(s)); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.h deleted file mode 100644 index d8465de663b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_str.h +++ /dev/null @@ -1,27 +0,0 @@ -/* -** String handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_STR_H -#define _LJ_STR_H - -#include - -#include "lj_obj.h" - -/* String helpers. */ -LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b); -LJ_FUNC const char *lj_str_find(const char *s, const char *f, - MSize slen, MSize flen); -LJ_FUNC int lj_str_haspattern(GCstr *s); - -/* String interning. */ -LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask); -LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len); -LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s); - -#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s))) -#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1)) - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.c deleted file mode 100644 index d54e796ae2a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.c +++ /dev/null @@ -1,554 +0,0 @@ -/* -** String formatting. -** Copyright (C) 2005-2015 Mike Pall. 
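[Editorial note, not part of the patch] The interning code above keeps one chained hash table for all strings: a new string is linked into bucket h & strmask, and once the string count exceeds the mask (a 100% load factor) the table is grown to (strmask << 1) + 1 and every chain is rehashed. A small sketch of that insert-and-double policy with an invented Entry type; GC colouring, fixstring() resurrection and the page-boundary fast/slow compare paths are omitted.

  #include <stdint.h>
  #include <stdlib.h>

  typedef struct Entry { struct Entry *next; uint32_t hash; /* payload... */ } Entry;
  typedef struct Tab { Entry **bucket; uint32_t mask, count; } Tab;  /* mask = 2^k-1 */

  static void tab_resize(Tab *t, uint32_t newmask)
  {
    Entry **nb = calloc((size_t)newmask + 1, sizeof(Entry *));
    uint32_t i;
    if (!nb) return;                       /* keep the old table on OOM */
    for (i = 0; i <= t->mask; i++) {       /* rehash every chain */
      Entry *e = t->bucket[i];
      while (e) {
        Entry *next = e->next;
        uint32_t h = e->hash & newmask;
        e->next = nb[h]; nb[h] = e;
        e = next;
      }
    }
    free(t->bucket);
    t->bucket = nb; t->mask = newmask;
  }

  static void tab_insert(Tab *t, Entry *e)  /* assumes t is already initialised */
  {
    uint32_t h = e->hash & t->mask;
    e->next = t->bucket[h]; t->bucket[h] = e;
    if (t->count++ > t->mask)              /* allow a 100% load factor, */
      tab_resize(t, (t->mask << 1) + 1);   /* then double the table */
  }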
See Copyright Notice in luajit.h -*/ - -#include - -#define lj_strfmt_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_buf.h" -#include "lj_str.h" -#include "lj_state.h" -#include "lj_char.h" -#include "lj_strfmt.h" - -/* -- Format parser ------------------------------------------------------- */ - -static const uint8_t strfmt_map[('x'-'A')+1] = { - STRFMT_A,0,0,0,STRFMT_E,0,STRFMT_G,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,STRFMT_X,0,0, - 0,0,0,0,0,0, - STRFMT_A,0,STRFMT_C,STRFMT_D,STRFMT_E,STRFMT_F,STRFMT_G,0,STRFMT_I,0,0,0,0, - 0,STRFMT_O,STRFMT_P,STRFMT_Q,0,STRFMT_S,0,STRFMT_U,0,0,STRFMT_X -}; - -SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs) -{ - const uint8_t *p = fs->p, *e = fs->e; - fs->str = (const char *)p; - for (; p < e; p++) { - if (*p == '%') { /* Escape char? */ - if (p[1] == '%') { /* '%%'? */ - fs->p = ++p+1; - goto retlit; - } else { - SFormat sf = 0; - uint32_t c; - if (p != (const uint8_t *)fs->str) - break; - for (p++; (uint32_t)*p - ' ' <= (uint32_t)('0' - ' '); p++) { - /* Parse flags. */ - if (*p == '-') sf |= STRFMT_F_LEFT; - else if (*p == '+') sf |= STRFMT_F_PLUS; - else if (*p == '0') sf |= STRFMT_F_ZERO; - else if (*p == ' ') sf |= STRFMT_F_SPACE; - else if (*p == '#') sf |= STRFMT_F_ALT; - else break; - } - if ((uint32_t)*p - '0' < 10) { /* Parse width. */ - uint32_t width = (uint32_t)*p++ - '0'; - if ((uint32_t)*p - '0' < 10) - width = (uint32_t)*p++ - '0' + width*10; - sf |= (width << STRFMT_SH_WIDTH); - } - if (*p == '.') { /* Parse precision. */ - uint32_t prec = 0; - p++; - if ((uint32_t)*p - '0' < 10) { - prec = (uint32_t)*p++ - '0'; - if ((uint32_t)*p - '0' < 10) - prec = (uint32_t)*p++ - '0' + prec*10; - } - sf |= ((prec+1) << STRFMT_SH_PREC); - } - /* Parse conversion. */ - c = (uint32_t)*p - 'A'; - if (LJ_LIKELY(c <= (uint32_t)('x' - 'A'))) { - uint32_t sx = strfmt_map[c]; - if (sx) { - fs->p = p+1; - return (sf | sx | ((c & 0x20) ? 0 : STRFMT_F_UPPER)); - } - } - /* Return error location. */ - if (*p >= 32) p++; - fs->len = (MSize)(p - (const uint8_t *)fs->str); - fs->p = fs->e; - return STRFMT_ERR; - } - } - } - fs->p = p; -retlit: - fs->len = (MSize)(p - (const uint8_t *)fs->str); - return fs->len ? STRFMT_LIT : STRFMT_EOF; -} - -/* -- Raw conversions ----------------------------------------------------- */ - -/* Write number to bufer. */ -char * LJ_FASTCALL lj_strfmt_wnum(char *p, cTValue *o) -{ - if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */ -#if __BIONIC__ - if (tvismzero(o)) { *p++ = '-'; *p++ = '0'; return p; } -#endif - return p + lua_number2str(p, o->n); - } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) { - *p++ = 'n'; *p++ = 'a'; *p++ = 'n'; - } else if ((o->u32.hi & 0x80000000) == 0) { - *p++ = 'i'; *p++ = 'n'; *p++ = 'f'; - } else { - *p++ = '-'; *p++ = 'i'; *p++ = 'n'; *p++ = 'f'; - } - return p; -} - -#define WINT_R(x, sh, sc) \ - { uint32_t d = (x*(((1<>sh; x -= d*sc; *p++ = (char)('0'+d); } - -/* Write integer to buffer. 
*/ -char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k) -{ - uint32_t u = (uint32_t)k; - if (k < 0) { u = (uint32_t)-k; *p++ = '-'; } - if (u < 10000) { - if (u < 10) goto dig1; if (u < 100) goto dig2; if (u < 1000) goto dig3; - } else { - uint32_t v = u / 10000; u -= v * 10000; - if (v < 10000) { - if (v < 10) goto dig5; if (v < 100) goto dig6; if (v < 1000) goto dig7; - } else { - uint32_t w = v / 10000; v -= w * 10000; - if (w >= 10) WINT_R(w, 10, 10) - *p++ = (char)('0'+w); - } - WINT_R(v, 23, 1000) - dig7: WINT_R(v, 12, 100) - dig6: WINT_R(v, 10, 10) - dig5: *p++ = (char)('0'+v); - } - WINT_R(u, 23, 1000) - dig3: WINT_R(u, 12, 100) - dig2: WINT_R(u, 10, 10) - dig1: *p++ = (char)('0'+u); - return p; -} -#undef WINT_R - -/* Write pointer to buffer. */ -char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v) -{ - ptrdiff_t x = (ptrdiff_t)v; - MSize i, n = STRFMT_MAXBUF_PTR; - if (x == 0) { - *p++ = 'N'; *p++ = 'U'; *p++ = 'L'; *p++ = 'L'; - return p; - } -#if LJ_64 - /* Shorten output for 64 bit pointers. */ - n = 2+2*4+((x >> 32) ? 2+2*(lj_fls((uint32_t)(x >> 32))>>3) : 0); -#endif - p[0] = '0'; - p[1] = 'x'; - for (i = n-1; i >= 2; i--, x >>= 4) - p[i] = "0123456789abcdef"[(x & 15)]; - return p+n; -} - -/* Write ULEB128 to buffer. */ -char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v) -{ - for (; v >= 0x80; v >>= 7) - *p++ = (char)((v & 0x7f) | 0x80); - *p++ = (char)v; - return p; -} - -/* Return string or write number to buffer and return pointer to start. */ -const char *lj_strfmt_wstrnum(char *buf, cTValue *o, MSize *lenp) -{ - if (tvisstr(o)) { - *lenp = strV(o)->len; - return strVdata(o); - } else if (tvisint(o)) { - *lenp = (MSize)(lj_strfmt_wint(buf, intV(o)) - buf); - return buf; - } else if (tvisnum(o)) { - *lenp = (MSize)(lj_strfmt_wnum(buf, o) - buf); - return buf; - } else { - return NULL; - } -} - -/* -- Unformatted conversions to buffer ----------------------------------- */ - -/* Add integer to buffer. */ -SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k) -{ - setsbufP(sb, lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT), k)); - return sb; -} - -#if LJ_HASJIT -/* Add number to buffer. */ -SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o) -{ - setsbufP(sb, lj_strfmt_wnum(lj_buf_more(sb, STRFMT_MAXBUF_NUM), o)); - return sb; -} -#endif - -SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v) -{ - setsbufP(sb, lj_strfmt_wptr(lj_buf_more(sb, STRFMT_MAXBUF_PTR), v)); - return sb; -} - -/* Add quoted string to buffer. */ -SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str) -{ - const char *s = strdata(str); - MSize len = str->len; - lj_buf_putb(sb, '"'); - while (len--) { - uint32_t c = (uint32_t)(uint8_t)*s++; - char *p = lj_buf_more(sb, 4); - if (c == '"' || c == '\\' || c == '\n') { - *p++ = '\\'; - } else if (lj_char_iscntrl(c)) { /* This can only be 0-31 or 127. */ - uint32_t d; - *p++ = '\\'; - if (c >= 100 || lj_char_isdigit((uint8_t)*s)) { - *p++ = (char)('0'+(c >= 100)); if (c >= 100) c -= 100; - goto tens; - } else if (c >= 10) { - tens: - d = (c * 205) >> 11; c -= d * 10; *p++ = (char)('0'+d); - } - c += '0'; - } - *p++ = (char)c; - setsbufP(sb, p); - } - lj_buf_putb(sb, '"'); - return sb; -} - -/* -- Formatted conversions to buffer ------------------------------------- */ - -/* Add formatted char to buffer. */ -SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat sf, int32_t c) -{ - MSize width = STRFMT_WIDTH(sf); - char *p = lj_buf_more(sb, width > 1 ? 
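[Editorial note, not part of the patch] lj_strfmt_wuleb128() above emits a 32-bit value as ULEB128: seven payload bits per byte, with the high bit set on every byte except the last. For reference, a matching standalone encoder and decoder (the decoder is not part of the deleted file and is added here only to illustrate the format):

  #include <stdint.h>

  static char *write_uleb128(char *p, uint32_t v)
  {
    for (; v >= 0x80; v >>= 7)
      *p++ = (char)((v & 0x7f) | 0x80);   /* continuation bit set */
    *p++ = (char)v;                       /* final byte, high bit clear */
    return p;
  }

  static const char *read_uleb128(const char *p, uint32_t *out)
  {
    uint32_t v = 0; int shift = 0;
    for (;;) {
      uint32_t b = (uint8_t)*p++;
      v |= (b & 0x7f) << shift;
      if (!(b & 0x80)) break;             /* last byte reached */
      shift += 7;
    }
    *out = v;
    return p;
  }

Small values (under 128) therefore cost a single byte, which is why the format is used for bytecode line/debug info.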
width : 1); - if ((sf & STRFMT_F_LEFT)) *p++ = (char)c; - while (width-- > 1) *p++ = ' '; - if (!(sf & STRFMT_F_LEFT)) *p++ = (char)c; - setsbufP(sb, p); - return sb; -} - -/* Add formatted string to buffer. */ -SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat sf, GCstr *str) -{ - MSize len = str->len <= STRFMT_PREC(sf) ? str->len : STRFMT_PREC(sf); - MSize width = STRFMT_WIDTH(sf); - char *p = lj_buf_more(sb, width > len ? width : len); - if ((sf & STRFMT_F_LEFT)) p = lj_buf_wmem(p, strdata(str), len); - while (width-- > len) *p++ = ' '; - if (!(sf & STRFMT_F_LEFT)) p = lj_buf_wmem(p, strdata(str), len); - setsbufP(sb, p); - return sb; -} - -/* Add formatted signed/unsigned integer to buffer. */ -SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k) -{ - char buf[STRFMT_MAXBUF_XINT], *q = buf + sizeof(buf), *p; -#ifdef LUA_USE_ASSERT - char *ps; -#endif - MSize prefix = 0, len, prec, pprec, width, need; - - /* Figure out signed prefixes. */ - if (STRFMT_TYPE(sf) == STRFMT_INT) { - if ((int64_t)k < 0) { - k = (uint64_t)-(int64_t)k; - prefix = 256 + '-'; - } else if ((sf & STRFMT_F_PLUS)) { - prefix = 256 + '+'; - } else if ((sf & STRFMT_F_SPACE)) { - prefix = 256 + ' '; - } - } - - /* Convert number and store to fixed-size buffer in reverse order. */ - prec = STRFMT_PREC(sf); - if ((int32_t)prec >= 0) sf &= ~STRFMT_F_ZERO; - if (k == 0) { /* Special-case zero argument. */ - if (prec != 0 || - (sf & (STRFMT_T_OCT|STRFMT_F_ALT)) == (STRFMT_T_OCT|STRFMT_F_ALT)) - *--q = '0'; - } else if (!(sf & (STRFMT_T_HEX|STRFMT_T_OCT))) { /* Decimal. */ - uint32_t k2; - while ((k >> 32)) { *--q = (char)('0' + k % 10); k /= 10; } - k2 = (uint32_t)k; - do { *--q = (char)('0' + k2 % 10); k2 /= 10; } while (k2); - } else if ((sf & STRFMT_T_HEX)) { /* Hex. */ - const char *hexdig = (sf & STRFMT_F_UPPER) ? "0123456789ABCDEF" : - "0123456789abcdef"; - do { *--q = hexdig[(k & 15)]; k >>= 4; } while (k); - if ((sf & STRFMT_F_ALT)) prefix = 512 + ((sf & STRFMT_F_UPPER) ? 'X' : 'x'); - } else { /* Octal. */ - do { *--q = (char)('0' + (uint32_t)(k & 7)); k >>= 3; } while (k); - if ((sf & STRFMT_F_ALT)) *--q = '0'; - } - - /* Calculate sizes. */ - len = (MSize)(buf + sizeof(buf) - q); - if ((int32_t)len >= (int32_t)prec) prec = len; - width = STRFMT_WIDTH(sf); - pprec = prec + (prefix >> 8); - need = width > pprec ? width : pprec; - p = lj_buf_more(sb, need); -#ifdef LUA_USE_ASSERT - ps = p; -#endif - - /* Format number with leading/trailing whitespace and zeros. */ - if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == 0) - while (width-- > pprec) *p++ = ' '; - if (prefix) { - if ((char)prefix >= 'X') *p++ = '0'; - *p++ = (char)prefix; - } - if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == STRFMT_F_ZERO) - while (width-- > pprec) *p++ = '0'; - while (prec-- > len) *p++ = '0'; - while (q < buf + sizeof(buf)) *p++ = *q++; /* Add number itself. */ - if ((sf & STRFMT_F_LEFT)) - while (width-- > pprec) *p++ = ' '; - - lua_assert(need == (MSize)(p - ps)); - setsbufP(sb, p); - return sb; -} - -/* Add number formatted as signed integer to buffer. */ -SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n) -{ - int64_t k = (int64_t)n; - if (checki32(k) && sf == STRFMT_INT) - return lj_strfmt_putint(sb, (int32_t)k); /* Shortcut for plain %d. */ - else - return lj_strfmt_putfxint(sb, sf, (uint64_t)k); -} - -/* Add number formatted as unsigned integer to buffer. 
*/ -SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n) -{ - int64_t k; - if (n >= 9223372036854775808.0) - k = (int64_t)(n - 18446744073709551616.0); - else - k = (int64_t)n; - return lj_strfmt_putfxint(sb, sf, (uint64_t)k); -} - -/* Max. sprintf buffer size needed. At least #string.format("%.99f", -1e308). */ -#define STRFMT_FMTNUMBUF 512 - -/* Add formatted floating-point number to buffer. */ -SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat sf, lua_Number n) -{ - TValue tv; - tv.n = n; - if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) { - /* Canonicalize output of non-finite values. */ - MSize width = STRFMT_WIDTH(sf), len = 3; - int prefix = 0, ch = (sf & STRFMT_F_UPPER) ? 0x202020 : 0; - char *p; - if (((tv.u32.hi & 0x000fffff) | tv.u32.lo) != 0) { - ch ^= ('n' << 16) | ('a' << 8) | 'n'; - if ((sf & STRFMT_F_SPACE)) prefix = ' '; - } else { - ch ^= ('i' << 16) | ('n' << 8) | 'f'; - if ((tv.u32.hi & 0x80000000)) prefix = '-'; - else if ((sf & STRFMT_F_PLUS)) prefix = '+'; - else if ((sf & STRFMT_F_SPACE)) prefix = ' '; - } - if (prefix) len = 4; - p = lj_buf_more(sb, width > len ? width : len); - if (!(sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' '; - if (prefix) *p++ = prefix; - *p++ = (char)(ch >> 16); *p++ = (char)(ch >> 8); *p++ = (char)ch; - if ((sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' '; - setsbufP(sb, p); - } else { /* Delegate to sprintf() for now. */ - uint8_t width = (uint8_t)STRFMT_WIDTH(sf), prec = (uint8_t)STRFMT_PREC(sf); - char fmt[1+5+2+3+1+1], *p = fmt; - *p++ = '%'; - if ((sf & STRFMT_F_LEFT)) *p++ = '-'; - if ((sf & STRFMT_F_PLUS)) *p++ = '+'; - if ((sf & STRFMT_F_ZERO)) *p++ = '0'; - if ((sf & STRFMT_F_SPACE)) *p++ = ' '; - if ((sf & STRFMT_F_ALT)) *p++ = '#'; - if (width) { - uint8_t x = width / 10, y = width % 10; - if (x) *p++ = '0' + x; - *p++ = '0' + y; - } - if (prec != 255) { - uint8_t x = prec / 10, y = prec % 10; - *p++ = '.'; - if (x) *p++ = '0' + x; - *p++ = '0' + y; - } - *p++ = (0x67666561 >> (STRFMT_FP(sf)<<3)) ^ ((sf & STRFMT_F_UPPER)?0x20:0); - *p = '\0'; - p = lj_buf_more(sb, STRFMT_FMTNUMBUF); - setsbufP(sb, p + sprintf(p, fmt, n)); - } - return sb; -} - -/* -- Conversions to strings ---------------------------------------------- */ - -/* Convert integer to string. */ -GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k) -{ - char buf[STRFMT_MAXBUF_INT]; - MSize len = (MSize)(lj_strfmt_wint(buf, k) - buf); - return lj_str_new(L, buf, len); -} - -/* Convert number to string. */ -GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o) -{ - char buf[STRFMT_MAXBUF_NUM]; - MSize len = (MSize)(lj_strfmt_wnum(buf, o) - buf); - return lj_str_new(L, buf, len); -} - -/* Convert integer or number to string. */ -GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o) -{ - return tvisint(o) ? lj_strfmt_int(L, intV(o)) : lj_strfmt_num(L, o); -} - -#if LJ_HASJIT -/* Convert char value to string. */ -GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c) -{ - char buf[1]; - buf[0] = c; - return lj_str_new(L, buf, 1); -} -#endif - -/* Raw conversion of object to string. 
*/ -GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o) -{ - if (tvisstr(o)) { - return strV(o); - } else if (tvisnumber(o)) { - return lj_strfmt_number(L, o); - } else if (tvisnil(o)) { - return lj_str_newlit(L, "nil"); - } else if (tvisfalse(o)) { - return lj_str_newlit(L, "false"); - } else if (tvistrue(o)) { - return lj_str_newlit(L, "true"); - } else { - char buf[8+2+2+16], *p = buf; - p = lj_buf_wmem(p, lj_typename(o), (MSize)strlen(lj_typename(o))); - *p++ = ':'; *p++ = ' '; - if (tvisfunc(o) && isffunc(funcV(o))) { - p = lj_buf_wmem(p, "builtin#", 8); - p = lj_strfmt_wint(p, funcV(o)->c.ffid); - } else { - p = lj_strfmt_wptr(p, lj_obj_ptr(o)); - } - return lj_str_new(L, buf, (size_t)(p - buf)); - } -} - -/* -- Internal string formatting ------------------------------------------ */ - -/* -** These functions are only used for lua_pushfstring(), lua_pushvfstring() -** and for internal string formatting (e.g. error messages). Caveat: unlike -** string.format(), only a limited subset of formats and flags are supported! -** -** LuaJIT has support for a couple more formats than Lua 5.1/5.2: -** - %d %u %o %x with full formatting, 32 bit integers only. -** - %f and other FP formats are really %.14g. -** - %s %c %p without formatting. -*/ - -/* Push formatted message as a string object to Lua stack. va_list variant. */ -const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, va_list argp) -{ - SBuf *sb = lj_buf_tmp_(L); - FormatState fs; - SFormat sf; - GCstr *str; - lj_strfmt_init(&fs, fmt, (MSize)strlen(fmt)); - while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { - switch (STRFMT_TYPE(sf)) { - case STRFMT_LIT: - lj_buf_putmem(sb, fs.str, fs.len); - break; - case STRFMT_INT: - lj_strfmt_putfxint(sb, sf, va_arg(argp, int32_t)); - break; - case STRFMT_UINT: - lj_strfmt_putfxint(sb, sf, va_arg(argp, uint32_t)); - break; - case STRFMT_NUM: { - TValue tv; - tv.n = va_arg(argp, lua_Number); - setsbufP(sb, lj_strfmt_wnum(lj_buf_more(sb, STRFMT_MAXBUF_NUM), &tv)); - break; - } - case STRFMT_STR: { - const char *s = va_arg(argp, char *); - if (s == NULL) s = "(null)"; - lj_buf_putmem(sb, s, (MSize)strlen(s)); - break; - } - case STRFMT_CHAR: - lj_buf_putb(sb, va_arg(argp, int)); - break; - case STRFMT_PTR: - lj_strfmt_putptr(sb, va_arg(argp, void *)); - break; - case STRFMT_ERR: - default: - lj_buf_putb(sb, '?'); - lua_assert(0); - break; - } - } - str = lj_buf_str(L, sb); - setstrV(L, L->top, str); - incr_top(L); - return strdata(str); -} - -/* Push formatted message as a string object to Lua stack. Vararg variant. */ -const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...) -{ - const char *msg; - va_list argp; - va_start(argp, fmt); - msg = lj_strfmt_pushvf(L, fmt, argp); - va_end(argp); - return msg; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.h deleted file mode 100644 index dcfaf2e393b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strfmt.h +++ /dev/null @@ -1,125 +0,0 @@ -/* -** String formatting. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_STRFMT_H -#define _LJ_STRFMT_H - -#include "lj_obj.h" - -typedef uint32_t SFormat; /* Format indicator. */ - -/* Format parser state. */ -typedef struct FormatState { - const uint8_t *p; /* Current format string pointer. */ - const uint8_t *e; /* End of format string. */ - const char *str; /* Returned literal string. */ - MSize len; /* Size of literal string. 
*/ -} FormatState; - -/* Format types (max. 16). */ -typedef enum FormatType { - STRFMT_EOF, STRFMT_ERR, STRFMT_LIT, - STRFMT_INT, STRFMT_UINT, STRFMT_NUM, STRFMT_STR, STRFMT_CHAR, STRFMT_PTR -} FormatType; - -/* Format subtypes (bits are reused). */ -#define STRFMT_T_HEX 0x0010 /* STRFMT_UINT */ -#define STRFMT_T_OCT 0x0020 /* STRFMT_UINT */ -#define STRFMT_T_FP_A 0x0000 /* STRFMT_NUM */ -#define STRFMT_T_FP_E 0x0010 /* STRFMT_NUM */ -#define STRFMT_T_FP_F 0x0020 /* STRFMT_NUM */ -#define STRFMT_T_FP_G 0x0030 /* STRFMT_NUM */ -#define STRFMT_T_QUOTED 0x0010 /* STRFMT_STR */ - -/* Format flags. */ -#define STRFMT_F_LEFT 0x0100 -#define STRFMT_F_PLUS 0x0200 -#define STRFMT_F_ZERO 0x0400 -#define STRFMT_F_SPACE 0x0800 -#define STRFMT_F_ALT 0x1000 -#define STRFMT_F_UPPER 0x2000 - -/* Format indicator fields. */ -#define STRFMT_SH_WIDTH 16 -#define STRFMT_SH_PREC 24 - -#define STRFMT_TYPE(sf) ((FormatType)((sf) & 15)) -#define STRFMT_WIDTH(sf) (((sf) >> STRFMT_SH_WIDTH) & 255u) -#define STRFMT_PREC(sf) ((((sf) >> STRFMT_SH_PREC) & 255u) - 1u) -#define STRFMT_FP(sf) (((sf) >> 4) & 3) - -/* Formats for conversion characters. */ -#define STRFMT_A (STRFMT_NUM|STRFMT_T_FP_A) -#define STRFMT_C (STRFMT_CHAR) -#define STRFMT_D (STRFMT_INT) -#define STRFMT_E (STRFMT_NUM|STRFMT_T_FP_E) -#define STRFMT_F (STRFMT_NUM|STRFMT_T_FP_F) -#define STRFMT_G (STRFMT_NUM|STRFMT_T_FP_G) -#define STRFMT_I STRFMT_D -#define STRFMT_O (STRFMT_UINT|STRFMT_T_OCT) -#define STRFMT_P (STRFMT_PTR) -#define STRFMT_Q (STRFMT_STR|STRFMT_T_QUOTED) -#define STRFMT_S (STRFMT_STR) -#define STRFMT_U (STRFMT_UINT) -#define STRFMT_X (STRFMT_UINT|STRFMT_T_HEX) - -/* Maximum buffer sizes for conversions. */ -#define STRFMT_MAXBUF_XINT (1+22) /* '0' prefix + uint64_t in octal. */ -#define STRFMT_MAXBUF_INT (1+10) /* Sign + int32_t in decimal. */ -#define STRFMT_MAXBUF_NUM LUAI_MAXNUMBER2STR -#define STRFMT_MAXBUF_PTR (2+2*sizeof(ptrdiff_t)) /* "0x" + hex ptr. */ - -/* Format parser. */ -LJ_FUNC SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs); - -static LJ_AINLINE void lj_strfmt_init(FormatState *fs, const char *p, MSize len) -{ - fs->p = (const uint8_t *)p; - fs->e = (const uint8_t *)p + len; - lua_assert(*fs->e == 0); /* Must be NUL-terminated (may have NULs inside). */ -} - -/* Raw conversions. */ -LJ_FUNC char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k); -LJ_FUNC char * LJ_FASTCALL lj_strfmt_wnum(char *p, cTValue *o); -LJ_FUNC char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v); -LJ_FUNC char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v); -LJ_FUNC const char *lj_strfmt_wstrnum(char *buf, cTValue *o, MSize *lenp); - -/* Unformatted conversions to buffer. */ -LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k); -#if LJ_HASJIT -LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o); -#endif -LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v); -LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str); - -/* Formatted conversions to buffer. */ -LJ_FUNC SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k); -LJ_FUNC SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n); -LJ_FUNC SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n); -LJ_FUNC SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat, lua_Number n); -LJ_FUNC SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat, int32_t c); -LJ_FUNC SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat, GCstr *str); - -/* Conversions to strings. 
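[Editorial note, not part of the patch] An SFormat word, as laid out in the header above, packs one conversion completely: the type in the low 4 bits, subtype and flag bits below bit 16, the field width in bits 16-23 and the precision stored as precision+1 in bits 24-31 (so an all-zero field means "no precision given"). A tiny round-trip sketch using the same shifts and masks, with STRFMT_NUM = 5 taken from the enum above:

  #include <assert.h>
  #include <stdint.h>

  #define SH_WIDTH 16                     /* same as STRFMT_SH_WIDTH */
  #define SH_PREC  24                     /* same as STRFMT_SH_PREC  */

  static uint32_t pack_fmt(uint32_t type, uint32_t width, uint32_t prec_plus1)
  {
    return type | (width << SH_WIDTH) | (prec_plus1 << SH_PREC);
  }

  int main(void)
  {
    /* A "%12.3f"-style spec: width 12, precision 3 stored as prec+1 = 4. */
    uint32_t sf = pack_fmt(5 /* STRFMT_NUM per the enum above */, 12, 3 + 1);
    assert(((sf >> SH_WIDTH) & 255u) == 12);        /* STRFMT_WIDTH(sf) */
    assert((((sf >> SH_PREC) & 255u) - 1u) == 3);   /* STRFMT_PREC(sf)  */
    return 0;
  }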
*/ -LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k); -LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o); -LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o); -#if LJ_HASJIT -LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c); -#endif -LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o); - -/* Internal string formatting. */ -LJ_FUNC const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, - va_list argp); -LJ_FUNC const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...) -#ifdef __GNUC__ - __attribute__ ((format (printf, 2, 3))) -#endif - ; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.c deleted file mode 100644 index d3c5ba9124f..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.c +++ /dev/null @@ -1,547 +0,0 @@ -/* -** String scanning. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include - -#define lj_strscan_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_char.h" -#include "lj_strscan.h" - -/* -- Scanning numbers ---------------------------------------------------- */ - -/* -** Rationale for the builtin string to number conversion library: -** -** It removes a dependency on libc's strtod(), which is a true portability -** nightmare. Mainly due to the plethora of supported OS and toolchain -** combinations. Sadly, the various implementations -** a) are often buggy, incomplete (no hex floats) and/or imprecise, -** b) sometimes crash or hang on certain inputs, -** c) return non-standard NaNs that need to be filtered out, and -** d) fail if the locale-specific decimal separator is not a dot, -** which can only be fixed with atrocious workarounds. -** -** Also, most of the strtod() implementations are hopelessly bloated, -** which is not just an I-cache hog, but a problem for static linkage -** on embedded systems, too. -** -** OTOH the builtin conversion function is very compact. Even though it -** does a lot more, like parsing long longs, octal or imaginary numbers -** and returning the result in different formats: -** a) It needs less than 3 KB (!) of machine code (on x64 with -Os), -** b) it doesn't perform any dynamic allocation and, -** c) it needs only around 600 bytes of stack space. -** -** The builtin function is faster than strtod() for typical inputs, e.g. -** "123", "1.5" or "1e6". Arguably, it's slower for very large exponents, -** which are not very common (this could be fixed, if needed). -** -** And most importantly, the builtin function is equally precise on all -** platforms. It correctly converts and rounds any input to a double. -** If this is not the case, please send a bug report -- but PLEASE verify -** that the implementation you're comparing to is not the culprit! -** -** The implementation quickly pre-scans the entire string first and -** handles simple integers on-the-fly. Otherwise, it dispatches to the -** base-specific parser. Hex and octal is straightforward. -** -** Decimal to binary conversion uses a fixed-length circular buffer in -** base 100. Some simple cases are handled directly. For other cases, the -** number in the buffer is up-scaled or down-scaled until the integer part -** is in the proper range. Then the integer part is rounded and converted -** to a double which is finally rescaled to the result. Denormals need -** special treatment to prevent incorrect 'double rounding'. 
-*/ - -/* Definitions for circular decimal digit buffer (base 100 = 2 digits/byte). */ -#define STRSCAN_DIG 1024 -#define STRSCAN_MAXDIG 800 /* 772 + extra are sufficient. */ -#define STRSCAN_DDIG (STRSCAN_DIG/2) -#define STRSCAN_DMASK (STRSCAN_DDIG-1) - -/* Helpers for circular buffer. */ -#define DNEXT(a) (((a)+1) & STRSCAN_DMASK) -#define DPREV(a) (((a)-1) & STRSCAN_DMASK) -#define DLEN(lo, hi) ((int32_t)(((lo)-(hi)) & STRSCAN_DMASK)) - -#define casecmp(c, k) (((c) | 0x20) == k) - -/* Final conversion to double. */ -static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg) -{ - double n; - - /* Avoid double rounding for denormals. */ - if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) { - /* NYI: all of this generates way too much code on 32 bit CPUs. */ -#if defined(__GNUC__) && LJ_64 - int32_t b = (int32_t)(__builtin_clzll(x)^63); -#else - int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) : - (int32_t)lj_fls((uint32_t)x); -#endif - if ((int32_t)b + ex2 <= -1023 && (int32_t)b + ex2 >= -1075) { - uint64_t rb = (uint64_t)1 << (-1075-ex2); - if ((x & rb) && ((x & (rb+rb+rb-1)))) x += rb+rb; - x = (x & ~(rb+rb-1)); - } - } - - /* Convert to double using a signed int64_t conversion, then rescale. */ - lua_assert((int64_t)x >= 0); - n = (double)(int64_t)x; - if (neg) n = -n; - if (ex2) n = ldexp(n, ex2); - o->n = n; -} - -/* Parse hexadecimal number. */ -static StrScanFmt strscan_hex(const uint8_t *p, TValue *o, - StrScanFmt fmt, uint32_t opt, - int32_t ex2, int32_t neg, uint32_t dig) -{ - uint64_t x = 0; - uint32_t i; - - /* Scan hex digits. */ - for (i = dig > 16 ? 16 : dig ; i; i--, p++) { - uint32_t d = (*p != '.' ? *p : *++p); if (d > '9') d += 9; - x = (x << 4) + (d & 15); - } - - /* Summarize rounding-effect of excess digits. */ - for (i = 16; i < dig; i++, p++) - x |= ((*p != '.' ? *p : *++p) != '0'), ex2 += 4; - - /* Format-specific handling. */ - switch (fmt) { - case STRSCAN_INT: - if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) { - o->i = neg ? -(int32_t)x : (int32_t)x; - return STRSCAN_INT; /* Fast path for 32 bit integers. */ - } - if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; } - /* fallthrough */ - case STRSCAN_U32: - if (dig > 8) return STRSCAN_ERROR; - o->i = neg ? -(int32_t)x : (int32_t)x; - return STRSCAN_U32; - case STRSCAN_I64: - case STRSCAN_U64: - if (dig > 16) return STRSCAN_ERROR; - o->u64 = neg ? (uint64_t)-(int64_t)x : x; - return fmt; - default: - break; - } - - /* Reduce range, then convert to double. */ - if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; } - strscan_double(x, o, ex2, neg); - return fmt; -} - -/* Parse octal number. */ -static StrScanFmt strscan_oct(const uint8_t *p, TValue *o, - StrScanFmt fmt, int32_t neg, uint32_t dig) -{ - uint64_t x = 0; - - /* Scan octal digits. */ - if (dig > 22 || (dig == 22 && *p > '1')) return STRSCAN_ERROR; - while (dig-- > 0) { - if (!(*p >= '0' && *p <= '7')) return STRSCAN_ERROR; - x = (x << 3) + (*p++ & 7); - } - - /* Format-specific handling. */ - switch (fmt) { - case STRSCAN_INT: - if (x >= 0x80000000u+neg) fmt = STRSCAN_U32; - /* fallthrough */ - case STRSCAN_U32: - if ((x >> 32)) return STRSCAN_ERROR; - o->i = neg ? -(int32_t)x : (int32_t)x; - break; - default: - case STRSCAN_I64: - case STRSCAN_U64: - o->u64 = neg ? (uint64_t)-(int64_t)x : x; - break; - } - return fmt; -} - -/* Parse decimal number. 
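[Editorial note, not part of the patch] strscan_hex() and strscan_double() above follow one pattern for binary bases: accumulate the digits into a uint64_t mantissa, fold excess digits into a binary exponent ex2 (keeping a sticky bit), then convert through a signed 64-bit cast and rescale with ldexp(). A compact sketch of that finalisation step, with the denormal double-rounding fix-up from strscan_double() deliberately omitted:

  #include <math.h>
  #include <stdint.h>

  /* Convert mantissa x scaled by 2^ex2, with an optional sign, to a double. */
  static double scale_to_double(uint64_t x, int ex2, int neg)
  {
    double n;
    /* Reduce range so the signed int64 conversion below stays non-negative;
    ** the two dropped bits are kept as a sticky bit, as in the original. */
    if (x & 0xc000000000000000ULL) { x = (x >> 2) | (x & 3); ex2 += 2; }
    n = (double)(int64_t)x;
    if (neg) n = -n;
    return ex2 ? ldexp(n, ex2) : n;      /* e.g. scale_to_double(0x1b, 4, 0) == 432.0 */
  }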
*/ -static StrScanFmt strscan_dec(const uint8_t *p, TValue *o, - StrScanFmt fmt, uint32_t opt, - int32_t ex10, int32_t neg, uint32_t dig) -{ - uint8_t xi[STRSCAN_DDIG], *xip = xi; - - if (dig) { - uint32_t i = dig; - if (i > STRSCAN_MAXDIG) { - ex10 += (int32_t)(i - STRSCAN_MAXDIG); - i = STRSCAN_MAXDIG; - } - /* Scan unaligned leading digit. */ - if (((ex10^i) & 1)) - *xip++ = ((*p != '.' ? *p : *++p) & 15), i--, p++; - /* Scan aligned double-digits. */ - for ( ; i > 1; i -= 2) { - uint32_t d = 10 * ((*p != '.' ? *p : *++p) & 15); p++; - *xip++ = d + ((*p != '.' ? *p : *++p) & 15); p++; - } - /* Scan and realign trailing digit. */ - if (i) *xip++ = 10 * ((*p != '.' ? *p : *++p) & 15), ex10--, dig++, p++; - - /* Summarize rounding-effect of excess digits. */ - if (dig > STRSCAN_MAXDIG) { - do { - if ((*p != '.' ? *p : *++p) != '0') { xip[-1] |= 1; break; } - p++; - } while (--dig > STRSCAN_MAXDIG); - dig = STRSCAN_MAXDIG; - } else { /* Simplify exponent. */ - while (ex10 > 0 && dig <= 18) *xip++ = 0, ex10 -= 2, dig += 2; - } - } else { /* Only got zeros. */ - ex10 = 0; - xi[0] = 0; - } - - /* Fast path for numbers in integer format (but handles e.g. 1e6, too). */ - if (dig <= 20 && ex10 == 0) { - uint8_t *xis; - uint64_t x = xi[0]; - double n; - for (xis = xi+1; xis < xip; xis++) x = x * 100 + *xis; - if (!(dig == 20 && (xi[0] > 18 || (int64_t)x >= 0))) { /* No overflow? */ - /* Format-specific handling. */ - switch (fmt) { - case STRSCAN_INT: - if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) { - o->i = neg ? -(int32_t)x : (int32_t)x; - return STRSCAN_INT; /* Fast path for 32 bit integers. */ - } - if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; goto plainnumber; } - /* fallthrough */ - case STRSCAN_U32: - if ((x >> 32) != 0) return STRSCAN_ERROR; - o->i = neg ? -(int32_t)x : (int32_t)x; - return STRSCAN_U32; - case STRSCAN_I64: - case STRSCAN_U64: - o->u64 = neg ? (uint64_t)-(int64_t)x : x; - return fmt; - default: - plainnumber: /* Fast path for plain numbers < 2^63. */ - if ((int64_t)x < 0) break; - n = (double)(int64_t)x; - if (neg) n = -n; - o->n = n; - return fmt; - } - } - } - - /* Slow non-integer path. */ - if (fmt == STRSCAN_INT) { - if ((opt & STRSCAN_OPT_C)) return STRSCAN_ERROR; - fmt = STRSCAN_NUM; - } else if (fmt > STRSCAN_INT) { - return STRSCAN_ERROR; - } - { - uint32_t hi = 0, lo = (uint32_t)(xip-xi); - int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1); - - lua_assert(lo > 0 && (ex10 & 1) == 0); - - /* Handle simple overflow/underflow. */ - if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; } - else if (idig < -326/2) { o->n = neg ? -0.0 : 0.0; return fmt; } - - /* Scale up until we have at least 17 or 18 integer part digits. */ - while (idig < 9 && idig < DLEN(lo, hi)) { - uint32_t i, cy = 0; - ex2 -= 6; - for (i = DPREV(lo); ; i = DPREV(i)) { - uint32_t d = (xi[i] << 6) + cy; - cy = (((d >> 2) * 5243) >> 17); d = d - cy * 100; /* Div/mod 100. */ - xi[i] = (uint8_t)d; - if (i == hi) break; - if (d == 0 && i == DPREV(lo)) lo = i; - } - if (cy) { - hi = DPREV(hi); - if (xi[DPREV(lo)] == 0) lo = DPREV(lo); - else if (hi == lo) { lo = DPREV(lo); xi[DPREV(lo)] |= xi[lo]; } - xi[hi] = (uint8_t)cy; idig++; - } - } - - /* Scale down until no more than 17 or 18 integer part digits remain. 
*/ - while (idig > 9) { - uint32_t i = hi, cy = 0; - ex2 += 6; - do { - cy += xi[i]; - xi[i] = (cy >> 6); - cy = 100 * (cy & 0x3f); - if (xi[i] == 0 && i == hi) hi = DNEXT(hi), idig--; - i = DNEXT(i); - } while (i != lo); - while (cy) { - if (hi == lo) { xi[DPREV(lo)] |= 1; break; } - xi[lo] = (cy >> 6); lo = DNEXT(lo); - cy = 100 * (cy & 0x3f); - } - } - - /* Collect integer part digits and convert to rescaled double. */ - { - uint64_t x = xi[hi]; - uint32_t i; - for (i = DNEXT(hi); --idig > 0 && i != lo; i = DNEXT(i)) - x = x * 100 + xi[i]; - if (i == lo) { - while (--idig >= 0) x = x * 100; - } else { /* Gather round bit from remaining digits. */ - x <<= 1; ex2--; - do { - if (xi[i]) { x |= 1; break; } - i = DNEXT(i); - } while (i != lo); - } - strscan_double(x, o, ex2, neg); - } - } - return fmt; -} - -/* Parse binary number. */ -static StrScanFmt strscan_bin(const uint8_t *p, TValue *o, - StrScanFmt fmt, uint32_t opt, - int32_t ex2, int32_t neg, uint32_t dig) -{ - uint64_t x = 0; - uint32_t i; - - if (ex2 || dig > 64) return STRSCAN_ERROR; - - /* Scan binary digits. */ - for (i = dig; i; i--, p++) { - if ((*p & ~1) != '0') return STRSCAN_ERROR; - x = (x << 1) | (*p & 1); - } - - /* Format-specific handling. */ - switch (fmt) { - case STRSCAN_INT: - if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) { - o->i = neg ? -(int32_t)x : (int32_t)x; - return STRSCAN_INT; /* Fast path for 32 bit integers. */ - } - if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; } - /* fallthrough */ - case STRSCAN_U32: - if (dig > 32) return STRSCAN_ERROR; - o->i = neg ? -(int32_t)x : (int32_t)x; - return STRSCAN_U32; - case STRSCAN_I64: - case STRSCAN_U64: - o->u64 = neg ? (uint64_t)-(int64_t)x : x; - return fmt; - default: - break; - } - - /* Reduce range, then convert to double. */ - if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; } - strscan_double(x, o, ex2, neg); - return fmt; -} - -/* Scan string containing a number. Returns format. Returns value in o. */ -StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt) -{ - int32_t neg = 0; - - /* Remove leading space, parse sign and non-numbers. */ - if (LJ_UNLIKELY(!lj_char_isdigit(*p))) { - while (lj_char_isspace(*p)) p++; - if (*p == '+' || *p == '-') neg = (*p++ == '-'); - if (LJ_UNLIKELY(*p >= 'A')) { /* Parse "inf", "infinity" or "nan". */ - TValue tmp; - setnanV(&tmp); - if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'f')) { - if (neg) setminfV(&tmp); else setpinfV(&tmp); - p += 3; - if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'i') && - casecmp(p[3],'t') && casecmp(p[4],'y')) p += 5; - } else if (casecmp(p[0],'n') && casecmp(p[1],'a') && casecmp(p[2],'n')) { - p += 3; - } - while (lj_char_isspace(*p)) p++; - if (*p) return STRSCAN_ERROR; - o->u64 = tmp.u64; - return STRSCAN_NUM; - } - } - - /* Parse regular number. */ - { - StrScanFmt fmt = STRSCAN_INT; - int cmask = LJ_CHAR_DIGIT; - int base = (opt & STRSCAN_OPT_C) && *p == '0' ? 0 : 10; - const uint8_t *sp, *dp = NULL; - uint32_t dig = 0, hasdig = 0, x = 0; - int32_t ex = 0; - - /* Determine base and skip leading zeros. */ - if (LJ_UNLIKELY(*p <= '0')) { - if (*p == '0') { - if (casecmp(p[1], 'x')) - base = 16, cmask = LJ_CHAR_XDIGIT, p += 2; - else if (casecmp(p[1], 'b')) - base = 2, cmask = LJ_CHAR_DIGIT, p += 2; - } - for ( ; ; p++) { - if (*p == '0') { - hasdig = 1; - } else if (*p == '.') { - if (dp) return STRSCAN_ERROR; - dp = p; - } else { - break; - } - } - } - - /* Preliminary digit and decimal point scan. 
*/ - for (sp = p; ; p++) { - if (LJ_LIKELY(lj_char_isa(*p, cmask))) { - x = x * 10 + (*p & 15); /* For fast path below. */ - dig++; - } else if (*p == '.') { - if (dp) return STRSCAN_ERROR; - dp = p; - } else { - break; - } - } - if (!(hasdig | dig)) return STRSCAN_ERROR; - - /* Handle decimal point. */ - if (dp) { - fmt = STRSCAN_NUM; - if (dig) { - ex = (int32_t)(dp-(p-1)); dp = p-1; - while (ex < 0 && *dp-- == '0') ex++, dig--; /* Skip trailing zeros. */ - if (base == 16) ex *= 4; - } - } - - /* Parse exponent. */ - if (base >= 10 && casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) { - uint32_t xx; - int negx = 0; - fmt = STRSCAN_NUM; p++; - if (*p == '+' || *p == '-') negx = (*p++ == '-'); - if (!lj_char_isdigit(*p)) return STRSCAN_ERROR; - xx = (*p++ & 15); - while (lj_char_isdigit(*p)) { - if (xx < 65536) xx = xx * 10 + (*p & 15); - p++; - } - ex += negx ? -(int32_t)xx : (int32_t)xx; - } - - /* Parse suffix. */ - if (*p) { - /* I (IMAG), U (U32), LL (I64), ULL/LLU (U64), L (long), UL/LU (ulong). */ - /* NYI: f (float). Not needed until cp_number() handles non-integers. */ - if (casecmp(*p, 'i')) { - if (!(opt & STRSCAN_OPT_IMAG)) return STRSCAN_ERROR; - p++; fmt = STRSCAN_IMAG; - } else if (fmt == STRSCAN_INT) { - if (casecmp(*p, 'u')) p++, fmt = STRSCAN_U32; - if (casecmp(*p, 'l')) { - p++; - if (casecmp(*p, 'l')) p++, fmt += STRSCAN_I64 - STRSCAN_INT; - else if (!(opt & STRSCAN_OPT_C)) return STRSCAN_ERROR; - else if (sizeof(long) == 8) fmt += STRSCAN_I64 - STRSCAN_INT; - } - if (casecmp(*p, 'u') && (fmt == STRSCAN_INT || fmt == STRSCAN_I64)) - p++, fmt += STRSCAN_U32 - STRSCAN_INT; - if ((fmt == STRSCAN_U32 && !(opt & STRSCAN_OPT_C)) || - (fmt >= STRSCAN_I64 && !(opt & STRSCAN_OPT_LL))) - return STRSCAN_ERROR; - } - while (lj_char_isspace(*p)) p++; - if (*p) return STRSCAN_ERROR; - } - - /* Fast path for decimal 32 bit integers. */ - if (fmt == STRSCAN_INT && base == 10 && - (dig < 10 || (dig == 10 && *sp <= '2' && x < 0x80000000u+neg))) { - int32_t y = neg ? -(int32_t)x : (int32_t)x; - if ((opt & STRSCAN_OPT_TONUM)) { - o->n = (double)y; - return STRSCAN_NUM; - } else { - o->i = y; - return STRSCAN_INT; - } - } - - /* Dispatch to base-specific parser. */ - if (base == 0 && !(fmt == STRSCAN_NUM || fmt == STRSCAN_IMAG)) - return strscan_oct(sp, o, fmt, neg, dig); - if (base == 16) - fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig); - else if (base == 2) - fmt = strscan_bin(sp, o, fmt, opt, ex, neg, dig); - else - fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig); - - /* Try to convert number to integer, if requested. 
*/ - if (fmt == STRSCAN_NUM && (opt & STRSCAN_OPT_TOINT)) { - double n = o->n; - int32_t i = lj_num2int(n); - if (n == (lua_Number)i) { o->i = i; return STRSCAN_INT; } - } - return fmt; - } -} - -int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o) -{ - StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o, - STRSCAN_OPT_TONUM); - lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM); - return (fmt != STRSCAN_ERROR); -} - -#if LJ_DUALNUM -int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o) -{ - StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o, - STRSCAN_OPT_TOINT); - lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT); - if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM); - return (fmt != STRSCAN_ERROR); -} -#endif - -#undef DNEXT -#undef DPREV -#undef DLEN - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.h deleted file mode 100644 index 77606893640..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_strscan.h +++ /dev/null @@ -1,39 +0,0 @@ -/* -** String scanning. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_STRSCAN_H -#define _LJ_STRSCAN_H - -#include "lj_obj.h" - -/* Options for accepted/returned formats. */ -#define STRSCAN_OPT_TOINT 0x01 /* Convert to int32_t, if possible. */ -#define STRSCAN_OPT_TONUM 0x02 /* Always convert to double. */ -#define STRSCAN_OPT_IMAG 0x04 -#define STRSCAN_OPT_LL 0x08 -#define STRSCAN_OPT_C 0x10 - -/* Returned format. */ -typedef enum { - STRSCAN_ERROR, - STRSCAN_NUM, STRSCAN_IMAG, - STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64, -} StrScanFmt; - -LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt); -LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o); -#if LJ_DUALNUM -LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o); -#else -#define lj_strscan_number(s, o) lj_strscan_num((s), (o)) -#endif - -/* Check for number or convert string to number/int in-place (!). */ -static LJ_AINLINE int lj_strscan_numberobj(TValue *o) -{ - return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o)); -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.c deleted file mode 100644 index 88bf1089b5b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.c +++ /dev/null @@ -1,666 +0,0 @@ -/* -** Table handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#define lj_tab_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_tab.h" - -/* -- Object hashing ------------------------------------------------------ */ - -/* Hash values are masked with the table hash mask and used as an index. */ -static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash) -{ - Node *n = noderef(t->node); - return &n[hash & t->hmask]; -} - -/* String hashes are precomputed when they are interned. 
*/ -#define hashstr(t, s) hashmask(t, (s)->hash) - -#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi))) -#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1)) -#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS) -#if LJ_GC64 -#define hashgcref(t, r) \ - hashlohi((t), (uint32_t)gcrefu(r), (uint32_t)(gcrefu(r) >> 32)) -#else -#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS) -#endif - -/* Hash an arbitrary key and return its anchor position in the hash table. */ -static Node *hashkey(const GCtab *t, cTValue *key) -{ - lua_assert(!tvisint(key)); - if (tvisstr(key)) - return hashstr(t, strV(key)); - else if (tvisnum(key)) - return hashnum(t, key); - else if (tvisbool(key)) - return hashmask(t, boolV(key)); - else - return hashgcref(t, key->gcr); - /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */ -} - -/* -- Table creation and destruction -------------------------------------- */ - -/* Create new hash part for table. */ -static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits) -{ - uint32_t hsize; - Node *node; - lua_assert(hbits != 0); - if (hbits > LJ_MAX_HBITS) - lj_err_msg(L, LJ_ERR_TABOV); - hsize = 1u << hbits; - node = lj_mem_newvec(L, hsize, Node); - setmref(t->node, node); - setfreetop(t, node, &node[hsize]); - t->hmask = hsize-1; -} - -/* -** Q: Why all of these copies of t->hmask, t->node etc. to local variables? -** A: Because alias analysis for C is _really_ tough. -** Even state-of-the-art C compilers won't produce good code without this. -*/ - -/* Clear hash part of table. */ -static LJ_AINLINE void clearhpart(GCtab *t) -{ - uint32_t i, hmask = t->hmask; - Node *node = noderef(t->node); - lua_assert(t->hmask != 0); - for (i = 0; i <= hmask; i++) { - Node *n = &node[i]; - setmref(n->next, NULL); - setnilV(&n->key); - setnilV(&n->val); - } -} - -/* Clear array part of table. */ -static LJ_AINLINE void clearapart(GCtab *t) -{ - uint32_t i, asize = t->asize; - TValue *array = tvref(t->array); - for (i = 0; i < asize; i++) - setnilV(&array[i]); -} - -/* Create a new table. Note: the slots are not initialized (yet). */ -static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits) -{ - GCtab *t; - /* First try to colocate the array part. */ - if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) { - Node *nilnode; - lua_assert((sizeof(GCtab) & 7) == 0); - t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize)); - t->gct = ~LJ_TTAB; - t->nomm = (uint8_t)~0; - t->colo = (int8_t)asize; - setmref(t->array, (TValue *)((char *)t + sizeof(GCtab))); - setgcrefnull(t->metatable); - t->asize = asize; - t->hmask = 0; - nilnode = &G(L)->nilnode; - setmref(t->node, nilnode); -#if LJ_GC64 - setmref(t->freetop, nilnode); -#endif - } else { /* Otherwise separately allocate the array part. */ - Node *nilnode; - t = lj_mem_newobj(L, GCtab); - t->gct = ~LJ_TTAB; - t->nomm = (uint8_t)~0; - t->colo = 0; - setmref(t->array, NULL); - setgcrefnull(t->metatable); - t->asize = 0; /* In case the array allocation fails. */ - t->hmask = 0; - nilnode = &G(L)->nilnode; - setmref(t->node, nilnode); -#if LJ_GC64 - setmref(t->freetop, nilnode); -#endif - if (asize > 0) { - if (asize > LJ_MAX_ASIZE) - lj_err_msg(L, LJ_ERR_TABOV); - setmref(t->array, lj_mem_newvec(L, asize, TValue)); - t->asize = asize; - } - } - if (hbits) - newhpart(L, t, hbits); - return t; -} - -/* Create a new table. -** -** IMPORTANT NOTE: The API differs from lua_createtable()! -** -** The array size is non-inclusive. E.g. 
asize=128 creates array slots -** for 0..127, but not for 128. If you need slots 1..128, pass asize=129 -** (slot 0 is wasted in this case). -** -** The hash size is given in hash bits. hbits=0 means no hash part. -** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on. -*/ -GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits) -{ - GCtab *t = newtab(L, asize, hbits); - clearapart(t); - if (t->hmask > 0) clearhpart(t); - return t; -} - -/* The API of this function conforms to lua_createtable(). */ -GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h) -{ - return lj_tab_new(L, (uint32_t)(a > 0 ? a+1 : 0), hsize2hbits(h)); -} - -#if LJ_HASJIT -GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize) -{ - GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24); - clearapart(t); - if (t->hmask > 0) clearhpart(t); - return t; -} -#endif - -/* Duplicate a table. */ -GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt) -{ - GCtab *t; - uint32_t asize, hmask; - t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0); - lua_assert(kt->asize == t->asize && kt->hmask == t->hmask); - t->nomm = 0; /* Keys with metamethod names may be present. */ - asize = kt->asize; - if (asize > 0) { - TValue *array = tvref(t->array); - TValue *karray = tvref(kt->array); - if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. */ - uint32_t i; - for (i = 0; i < asize; i++) - copyTV(L, &array[i], &karray[i]); - } else { - memcpy(array, karray, asize*sizeof(TValue)); - } - } - hmask = kt->hmask; - if (hmask > 0) { - uint32_t i; - Node *node = noderef(t->node); - Node *knode = noderef(kt->node); - ptrdiff_t d = (char *)node - (char *)knode; - setfreetop(t, node, (Node *)((char *)getfreetop(kt, knode) + d)); - for (i = 0; i <= hmask; i++) { - Node *kn = &knode[i]; - Node *n = &node[i]; - Node *next = nextnode(kn); - /* Don't use copyTV here, since it asserts on a copy of a dead key. */ - n->val = kn->val; n->key = kn->key; - setmref(n->next, next == NULL? next : (Node *)((char *)next + d)); - } - } - return t; -} - -/* Clear a table. */ -void LJ_FASTCALL lj_tab_clear(GCtab *t) -{ - clearapart(t); - if (t->hmask > 0) { - Node *node = noderef(t->node); - setfreetop(t, node, &node[t->hmask+1]); - clearhpart(t); - } -} - -/* Free a table. */ -void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t) -{ - if (t->hmask > 0) - lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node); - if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0) - lj_mem_freevec(g, tvref(t->array), t->asize, TValue); - if (LJ_MAX_COLOSIZE != 0 && t->colo) - lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f)); - else - lj_mem_freet(g, t); -} - -/* -- Table resizing ------------------------------------------------------ */ - -/* Resize a table to fit the new array/hash part sizes. */ -void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits) -{ - Node *oldnode = noderef(t->node); - uint32_t oldasize = t->asize; - uint32_t oldhmask = t->hmask; - if (asize > oldasize) { /* Array part grows? */ - TValue *array; - uint32_t i; - if (asize > LJ_MAX_ASIZE) - lj_err_msg(L, LJ_ERR_TABOV); - if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) { - /* A colocated array must be separated and copied. */ - TValue *oarray = tvref(t->array); - array = lj_mem_newvec(L, asize, TValue); - t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). 
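[Editor's aside: illustrative sketch only, not part of the deleted file. It restates the size conventions of lj_tab_new() and lj_tab_new_ah() described above, assuming a caller that wants array slots 1..128 plus room for roughly 100 hash keys.]

    GCtab *t1 = lj_tab_new(L, 129, 7);       /* Array slots 0..128, 2^7 hash slots. */
    GCtab *t2 = lj_tab_new_ah(L, 128, 100);  /* lua_createtable() style: same array part;
                                             ** the hash part is sized via hsize2hbits(100),
                                             ** i.e. rounded up to 7 bits = 128 slots. */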
*/ - for (i = 0; i < oldasize; i++) - copyTV(L, &array[i], &oarray[i]); - } else { - array = (TValue *)lj_mem_realloc(L, tvref(t->array), - oldasize*sizeof(TValue), asize*sizeof(TValue)); - } - setmref(t->array, array); - t->asize = asize; - for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */ - setnilV(&array[i]); - } - /* Create new (empty) hash part. */ - if (hbits) { - newhpart(L, t, hbits); - clearhpart(t); - } else { - global_State *g = G(L); - setmref(t->node, &g->nilnode); -#if LJ_GC64 - setmref(t->freetop, &g->nilnode); -#endif - t->hmask = 0; - } - if (asize < oldasize) { /* Array part shrinks? */ - TValue *array = tvref(t->array); - uint32_t i; - t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */ - for (i = asize; i < oldasize; i++) /* Reinsert old array values. */ - if (!tvisnil(&array[i])) - copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]); - /* Physically shrink only separated arrays. */ - if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0) - setmref(t->array, lj_mem_realloc(L, array, - oldasize*sizeof(TValue), asize*sizeof(TValue))); - } - if (oldhmask > 0) { /* Reinsert pairs from old hash part. */ - global_State *g; - uint32_t i; - for (i = 0; i <= oldhmask; i++) { - Node *n = &oldnode[i]; - if (!tvisnil(&n->val)) - copyTV(L, lj_tab_set(L, t, &n->key), &n->val); - } - g = G(L); - lj_mem_freevec(g, oldnode, oldhmask+1, Node); - } -} - -static uint32_t countint(cTValue *key, uint32_t *bins) -{ - lua_assert(!tvisint(key)); - if (tvisnum(key)) { - lua_Number nk = numV(key); - int32_t k = lj_num2int(nk); - if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) { - bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++; - return 1; - } - } - return 0; -} - -static uint32_t countarray(const GCtab *t, uint32_t *bins) -{ - uint32_t na, b, i; - if (t->asize == 0) return 0; - for (na = i = b = 0; b < LJ_MAX_ABITS; b++) { - uint32_t n, top = 2u << b; - TValue *array; - if (top >= t->asize) { - top = t->asize-1; - if (i > top) - break; - } - array = tvref(t->array); - for (n = 0; i <= top; i++) - if (!tvisnil(&array[i])) - n++; - bins[b] += n; - na += n; - } - return na; -} - -static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray) -{ - uint32_t total, na, i, hmask = t->hmask; - Node *node = noderef(t->node); - for (total = na = 0, i = 0; i <= hmask; i++) { - Node *n = &node[i]; - if (!tvisnil(&n->val)) { - na += countint(&n->key, bins); - total++; - } - } - *narray += na; - return total; -} - -static uint32_t bestasize(uint32_t bins[], uint32_t *narray) -{ - uint32_t b, sum, na = 0, sz = 0, nn = *narray; - for (b = 0, sum = 0; 2*nn > (1u< 0 && 2*(sum += bins[b]) > (1u<hmask > 0 ? 
lj_fls(t->hmask)+1 : 0); -} - -/* -- Table getters ------------------------------------------------------- */ - -cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key) -{ - TValue k; - Node *n; - k.n = (lua_Number)key; - n = hashnum(t, &k); - do { - if (tvisnum(&n->key) && n->key.n == k.n) - return &n->val; - } while ((n = nextnode(n))); - return NULL; -} - -cTValue *lj_tab_getstr(GCtab *t, GCstr *key) -{ - Node *n = hashstr(t, key); - do { - if (tvisstr(&n->key) && strV(&n->key) == key) - return &n->val; - } while ((n = nextnode(n))); - return NULL; -} - -cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key) -{ - if (tvisstr(key)) { - cTValue *tv = lj_tab_getstr(t, strV(key)); - if (tv) - return tv; - } else if (tvisint(key)) { - cTValue *tv = lj_tab_getint(t, intV(key)); - if (tv) - return tv; - } else if (tvisnum(key)) { - lua_Number nk = numV(key); - int32_t k = lj_num2int(nk); - if (nk == (lua_Number)k) { - cTValue *tv = lj_tab_getint(t, k); - if (tv) - return tv; - } else { - goto genlookup; /* Else use the generic lookup. */ - } - } else if (!tvisnil(key)) { - Node *n; - genlookup: - n = hashkey(t, key); - do { - if (lj_obj_equal(&n->key, key)) - return &n->val; - } while ((n = nextnode(n))); - } - return niltv(L); -} - -/* -- Table setters ------------------------------------------------------- */ - -/* Insert new key. Use Brent's variation to optimize the chain length. */ -TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key) -{ - Node *n = hashkey(t, key); - if (!tvisnil(&n->val) || t->hmask == 0) { - Node *nodebase = noderef(t->node); - Node *collide, *freenode = getfreetop(t, nodebase); - lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1); - do { - if (freenode == nodebase) { /* No free node found? */ - rehashtab(L, t, key); /* Rehash table. */ - return lj_tab_set(L, t, key); /* Retry key insertion. */ - } - } while (!tvisnil(&(--freenode)->key)); - setfreetop(t, nodebase, freenode); - lua_assert(freenode != &G(L)->nilnode); - collide = hashkey(t, &n->key); - if (collide != n) { /* Colliding node not the main node? */ - while (noderef(collide->next) != n) /* Find predecessor. */ - collide = nextnode(collide); - setmref(collide->next, freenode); /* Relink chain. */ - /* Copy colliding node into free node and free main node. */ - freenode->val = n->val; - freenode->key = n->key; - freenode->next = n->next; - setmref(n->next, NULL); - setnilV(&n->val); - /* Rechain pseudo-resurrected string keys with colliding hashes. */ - while (nextnode(freenode)) { - Node *nn = nextnode(freenode); - if (tvisstr(&nn->key) && !tvisnil(&nn->val) && - hashstr(t, strV(&nn->key)) == n) { - freenode->next = nn->next; - nn->next = n->next; - setmref(n->next, nn); - } else { - freenode = nn; - } - } - } else { /* Otherwise use free node. */ - setmrefr(freenode->next, n->next); /* Insert into chain. 
*/ - setmref(n->next, freenode); - n = freenode; - } - } - n->key.u64 = key->u64; - if (LJ_UNLIKELY(tvismzero(&n->key))) - n->key.u64 = 0; - lj_gc_anybarriert(L, t); - lua_assert(tvisnil(&n->val)); - return &n->val; -} - -TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key) -{ - TValue k; - Node *n; - k.n = (lua_Number)key; - n = hashnum(t, &k); - do { - if (tvisnum(&n->key) && n->key.n == k.n) - return &n->val; - } while ((n = nextnode(n))); - return lj_tab_newkey(L, t, &k); -} - -TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key) -{ - TValue k; - Node *n = hashstr(t, key); - do { - if (tvisstr(&n->key) && strV(&n->key) == key) - return &n->val; - } while ((n = nextnode(n))); - setstrV(L, &k, key); - return lj_tab_newkey(L, t, &k); -} - -TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key) -{ - Node *n; - t->nomm = 0; /* Invalidate negative metamethod cache. */ - if (tvisstr(key)) { - return lj_tab_setstr(L, t, strV(key)); - } else if (tvisint(key)) { - return lj_tab_setint(L, t, intV(key)); - } else if (tvisnum(key)) { - lua_Number nk = numV(key); - int32_t k = lj_num2int(nk); - if (nk == (lua_Number)k) - return lj_tab_setint(L, t, k); - if (tvisnan(key)) - lj_err_msg(L, LJ_ERR_NANIDX); - /* Else use the generic lookup. */ - } else if (tvisnil(key)) { - lj_err_msg(L, LJ_ERR_NILIDX); - } - n = hashkey(t, key); - do { - if (lj_obj_equal(&n->key, key)) - return &n->val; - } while ((n = nextnode(n))); - return lj_tab_newkey(L, t, key); -} - -/* -- Table traversal ----------------------------------------------------- */ - -/* Get the traversal index of a key. */ -static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key) -{ - TValue tmp; - if (tvisint(key)) { - int32_t k = intV(key); - if ((uint32_t)k < t->asize) - return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */ - setnumV(&tmp, (lua_Number)k); - key = &tmp; - } else if (tvisnum(key)) { - lua_Number nk = numV(key); - int32_t k = lj_num2int(nk); - if ((uint32_t)k < t->asize && nk == (lua_Number)k) - return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */ - } - if (!tvisnil(key)) { - Node *n = hashkey(t, key); - do { - if (lj_obj_equal(&n->key, key)) - return t->asize + (uint32_t)(n - noderef(t->node)); - /* Hash key indexes: [t->asize..t->asize+t->nmask] */ - } while ((n = nextnode(n))); - if (key->u32.hi == 0xfffe7fff) /* ITERN was despecialized while running. */ - return key->u32.lo - 1; - lj_err_msg(L, LJ_ERR_NEXTIDX); - return 0; /* unreachable */ - } - return ~0u; /* A nil key starts the traversal. */ -} - -/* Advance to the next step in a table traversal. */ -int lj_tab_next(lua_State *L, GCtab *t, TValue *key) -{ - uint32_t i = keyindex(L, t, key); /* Find predecessor key index. */ - for (i++; i < t->asize; i++) /* First traverse the array keys. */ - if (!tvisnil(arrayslot(t, i))) { - setintV(key, i); - copyTV(L, key+1, arrayslot(t, i)); - return 1; - } - for (i -= t->asize; i <= t->hmask; i++) { /* Then traverse the hash keys. */ - Node *n = &noderef(t->node)[i]; - if (!tvisnil(&n->val)) { - copyTV(L, key, &n->key); - copyTV(L, key+1, &n->val); - return 1; - } - } - return 0; /* End of traversal. */ -} - -/* -- Table length calculation -------------------------------------------- */ - -static MSize unbound_search(GCtab *t, MSize j) -{ - cTValue *tv; - MSize i = j; /* i is zero or a present index */ - j++; - /* find `i' and `j' such that i is present and j is not */ - while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) { - i = j; - j *= 2; - if (j > (MSize)(INT_MAX-2)) { /* overflow? 
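[Editor's aside: illustrative sketch only, not part of the deleted file. A pairs()-style walk over a table using lj_tab_next() above. In the VM the key/value pair lives in Lua stack slots; keeping it in a plain C array, as here, is an assumption that is only safe while no GC step or error can intervene.]

    TValue kv[2];
    setnilV(&kv[0]);                  /* A nil key starts the traversal. */
    while (lj_tab_next(L, t, kv)) {
      /* kv[0] now holds the current key, kv[1] the corresponding value. */
    }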
*/ - /* table was built with bad purposes: resort to linear search */ - i = 1; - while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++; - return i - 1; - } - } - /* now do a binary search between them */ - while (j - i > 1) { - MSize m = (i+j)/2; - cTValue *tvb = lj_tab_getint(t, (int32_t)m); - if (tvb && !tvisnil(tvb)) i = m; else j = m; - } - return i; -} - -/* -** Try to find a boundary in table `t'. A `boundary' is an integer index -** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil). -*/ -MSize LJ_FASTCALL lj_tab_len(GCtab *t) -{ - MSize j = (MSize)t->asize; - if (j > 1 && tvisnil(arrayslot(t, j-1))) { - MSize i = 1; - while (j - i > 1) { - MSize m = (i+j)/2; - if (tvisnil(arrayslot(t, m-1))) j = m; else i = m; - } - return i-1; - } - if (j) j--; - if (t->hmask <= 0) - return j; - return unbound_search(t, j); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.h deleted file mode 100644 index 7cf031be9af..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_tab.h +++ /dev/null @@ -1,73 +0,0 @@ -/* -** Table handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TAB_H -#define _LJ_TAB_H - -#include "lj_obj.h" - -/* Hash constants. Tuned using a brute force search. */ -#define HASH_BIAS (-0x04c11db7) -#define HASH_ROT1 14 -#define HASH_ROT2 5 -#define HASH_ROT3 13 - -/* Scramble the bits of numbers and pointers. */ -static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi) -{ -#if LJ_TARGET_X86ORX64 - /* Prefer variant that compiles well for a 2-operand CPU. */ - lo ^= hi; hi = lj_rol(hi, HASH_ROT1); - lo -= hi; hi = lj_rol(hi, HASH_ROT2); - hi ^= lo; hi -= lj_rol(lo, HASH_ROT3); -#else - lo ^= hi; - lo = lo - lj_rol(hi, HASH_ROT1); - hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2); - hi = hi - lj_rol(lo, HASH_ROT3); -#endif - return hi; -} - -#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0) - -LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits); -LJ_FUNC GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h); -#if LJ_HASJIT -LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize); -#endif -LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt); -LJ_FUNC void LJ_FASTCALL lj_tab_clear(GCtab *t); -LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t); -#if LJ_HASFFI -LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t); -#endif -LJ_FUNC void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits); -LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize); - -/* Caveat: all getters except lj_tab_get() can return NULL! */ - -LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key); -LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key); -LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key); - -/* Caveat: all setters require a write barrier for the stored value. */ - -LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key); -LJ_FUNCA TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key); -LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key); -LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key); - -#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize) -#define arrayslot(t, i) (&tvref((t)->array)[(i)]) -#define lj_tab_getint(t, key) \ - (inarray((t), (key)) ? 
arrayslot((t), (key)) : lj_tab_getinth((t), (key))) -#define lj_tab_setint(L, t, key) \ - (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key))) - -LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key); -LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target.h deleted file mode 100644 index 0daecb11c86..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target.h +++ /dev/null @@ -1,164 +0,0 @@ -/* -** Definitions for target CPU. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TARGET_H -#define _LJ_TARGET_H - -#include "lj_def.h" -#include "lj_arch.h" - -/* -- Registers and spill slots ------------------------------------------- */ - -/* Register type (uint8_t in ir->r). */ -typedef uint32_t Reg; - -/* The hi-bit is NOT set for an allocated register. This means the value -** can be directly used without masking. The hi-bit is set for a register -** allocation hint or for RID_INIT, RID_SINK or RID_SUNK. -*/ -#define RID_NONE 0x80 -#define RID_MASK 0x7f -#define RID_INIT (RID_NONE|RID_MASK) -#define RID_SINK (RID_INIT-1) -#define RID_SUNK (RID_INIT-2) - -#define ra_noreg(r) ((r) & RID_NONE) -#define ra_hasreg(r) (!((r) & RID_NONE)) - -/* The ra_hashint() macro assumes a previous test for ra_noreg(). */ -#define ra_hashint(r) ((r) < RID_SUNK) -#define ra_gethint(r) ((Reg)((r) & RID_MASK)) -#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE) -#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0) - -/* Spill slot 0 means no spill slot has been allocated. */ -#define SPS_NONE 0 - -#define ra_hasspill(s) ((s) != SPS_NONE) - -/* Combined register and spill slot (uint16_t in ir->prev). */ -typedef uint32_t RegSP; - -#define REGSP(r, s) ((r) + ((s) << 8)) -#define REGSP_HINT(r) ((r)|RID_NONE) -#define REGSP_INIT REGSP(RID_INIT, 0) - -#define regsp_reg(rs) ((rs) & 255) -#define regsp_spill(rs) ((rs) >> 8) -#define regsp_used(rs) \ - (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0)) - -/* -- Register sets ------------------------------------------------------- */ - -/* Bitset for registers. 32 registers suffice for most architectures. -** Note that one set holds bits for both GPRs and FPRs. -*/ -#if LJ_TARGET_PPC || LJ_TARGET_MIPS -typedef uint64_t RegSet; -#else -typedef uint32_t RegSet; -#endif - -#define RID2RSET(r) (((RegSet)1) << (r)) -#define RSET_EMPTY ((RegSet)0) -#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo)) - -#define rset_test(rs, r) ((int)((rs) >> (r)) & 1) -#define rset_set(rs, r) (rs |= RID2RSET(r)) -#define rset_clear(rs, r) (rs &= ~RID2RSET(r)) -#define rset_exclude(rs, r) (rs & ~RID2RSET(r)) -#if LJ_TARGET_PPC || LJ_TARGET_MIPS -#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63)) -#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs)) -#else -#define rset_picktop(rs) ((Reg)lj_fls(rs)) -#define rset_pickbot(rs) ((Reg)lj_ffs(rs)) -#endif - -/* -- Register allocation cost -------------------------------------------- */ - -/* The register allocation heuristic keeps track of the cost for allocating -** a specific register: -** -** A free register (obviously) has a cost of 0 and a 1-bit in the free mask. -** -** An already allocated register has the (non-zero) IR reference in the lowest -** bits and the result of a blended cost-model in the higher bits. -** -** The allocator first checks the free mask for a hit. 
Otherwise an (unrolled) -** linear search for the minimum cost is used. The search doesn't need to -** keep track of the position of the minimum, which makes it very fast. -** The lowest bits of the minimum cost show the desired IR reference whose -** register is the one to evict. -** -** Without the cost-model this degenerates to the standard heuristics for -** (reverse) linear-scan register allocation. Since code generation is done -** in reverse, a live interval extends from the last use to the first def. -** For an SSA IR the IR reference is the first (and only) def and thus -** trivially marks the end of the interval. The LSRA heuristics says to pick -** the register whose live interval has the furthest extent, i.e. the lowest -** IR reference in our case. -** -** A cost-model should take into account other factors, like spill-cost and -** restore- or rematerialization-cost, which depend on the kind of instruction. -** E.g. constants have zero spill costs, variant instructions have higher -** costs than invariants and PHIs should preferably never be spilled. -** -** Here's a first cut at simple, but effective blended cost-model for R-LSRA: -** - Due to careful design of the IR, constants already have lower IR -** references than invariants and invariants have lower IR references -** than variants. -** - The cost in the upper 16 bits is the sum of the IR reference and a -** weighted score. The score currently only takes into account whether -** the IRT_ISPHI bit is set in the instruction type. -** - The PHI weight is the minimum distance (in IR instructions) a PHI -** reference has to be further apart from a non-PHI reference to be spilled. -** - It should be a power of two (for speed) and must be between 2 and 32768. -** Good values for the PHI weight seem to be between 40 and 150. -** - Further study is required. -*/ -#define REGCOST_PHI_WEIGHT 64 - -/* Cost for allocating a specific register. */ -typedef uint32_t RegCost; - -/* Note: assumes 16 bit IRRef1. */ -#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16)) -#define regcost_ref(rc) ((IRRef1)(rc)) - -#define REGCOST_T(t) \ - ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI)) -#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t))) - -/* -- Target-specific definitions ----------------------------------------- */ - -#if LJ_TARGET_X86ORX64 -#include "lj_target_x86.h" -#elif LJ_TARGET_ARM -#include "lj_target_arm.h" -#elif LJ_TARGET_ARM64 -#include "lj_target_arm64.h" -#elif LJ_TARGET_PPC -#include "lj_target_ppc.h" -#elif LJ_TARGET_MIPS -#include "lj_target_mips.h" -#else -#error "Missing include for target CPU" -#endif - -#ifdef EXITSTUBS_PER_GROUP -/* Return the address of an exit stub. */ -static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno) -{ - lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL); - return (char *)group[exitno / EXITSTUBS_PER_GROUP] + - EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP); -} -/* Avoid dependence on lj_jit.h if only including lj_target.h. */ -#define exitstub_addr(J, exitno) \ - ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno))) -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm.h deleted file mode 100644 index 0a243b3740b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm.h +++ /dev/null @@ -1,270 +0,0 @@ -/* -** Definitions for ARM CPUs. 
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TARGET_ARM_H -#define _LJ_TARGET_ARM_H - -/* -- Registers IDs ------------------------------------------------------- */ - -#define GPRDEF(_) \ - _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \ - _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC) -#if LJ_SOFTFP -#define FPRDEF(_) -#else -#define FPRDEF(_) \ - _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \ - _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15) -#endif -#define VRIDDEF(_) - -#define RIDENUM(name) RID_##name, - -enum { - GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ - FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ - RID_MAX, - RID_TMP = RID_LR, - - /* Calling conventions. */ - RID_RET = RID_R0, - RID_RETLO = RID_R0, - RID_RETHI = RID_R1, -#if LJ_SOFTFP - RID_FPRET = RID_R0, -#else - RID_FPRET = RID_D0, -#endif - - /* These definitions must match with the *.dasc file(s): */ - RID_BASE = RID_R9, /* Interpreter BASE. */ - RID_LPC = RID_R6, /* Interpreter PC. */ - RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */ - RID_LREG = RID_R8, /* Interpreter L. */ - - /* Register ranges [min, max) and number of registers. */ - RID_MIN_GPR = RID_R0, - RID_MAX_GPR = RID_PC+1, - RID_MIN_FPR = RID_MAX_GPR, -#if LJ_SOFTFP - RID_MAX_FPR = RID_MIN_FPR, -#else - RID_MAX_FPR = RID_D15+1, -#endif - RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, - RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR -}; - -#define RID_NUM_KREF RID_NUM_GPR -#define RID_MIN_KREF RID_R0 - -/* -- Register sets ------------------------------------------------------- */ - -/* Make use of all registers, except sp, lr and pc. */ -#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1)) -#define RSET_GPREVEN \ - (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \ - RID2RSET(RID_R8)|RID2RSET(RID_R10)) -#define RSET_GPRODD \ - (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \ - RID2RSET(RID_R9)|RID2RSET(RID_R11)) -#if LJ_SOFTFP -#define RSET_FPR 0 -#else -#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)) -#endif -#define RSET_ALL (RSET_GPR|RSET_FPR) -#define RSET_INIT RSET_ALL - -/* ABI-specific register sets. lr is an implicit scratch register. */ -#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12)) -#ifdef __APPLE__ -#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9)) -#else -#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_ -#endif -#if LJ_SOFTFP -#define RSET_SCRATCH_FPR 0 -#else -#define RSET_SCRATCH_FPR (RSET_RANGE(RID_D0, RID_D7+1)) -#endif -#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) -#define REGARG_FIRSTGPR RID_R0 -#define REGARG_LASTGPR RID_R3 -#define REGARG_NUMGPR 4 -#if LJ_ABI_SOFTFP -#define REGARG_FIRSTFPR 0 -#define REGARG_LASTFPR 0 -#define REGARG_NUMFPR 0 -#else -#define REGARG_FIRSTFPR RID_D0 -#define REGARG_LASTFPR RID_D7 -#define REGARG_NUMFPR 8 -#endif - -/* -- Spill slots --------------------------------------------------------- */ - -/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. -** -** SPS_FIXED: Available fixed spill slots in interpreter frame. -** This definition must match with the *.dasc file(s). -** -** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots. 
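[Editor's aside: illustrative sketch only, not part of the deleted file. It combines the RegSet helpers from lj_target.h with the ARM register IDs defined above; the particular registers chosen are arbitrary.]

    RegSet work = RSET_RANGE(RID_R0, RID_R3+1);  /* {r0,r1,r2,r3} = 0x0000000f */
    work = rset_exclude(work, RID_R2);           /* {r0,r1,r3}    = 0x0000000b */
    if (rset_test(work, RID_R1)) {
      /* r1 is still a member of the set. */
    }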
-*/ -#define SPS_FIXED 2 -#define SPS_FIRST 2 - -#define SPOFS_TMP 0 - -#define sps_scale(slot) (4 * (int32_t)(slot)) -#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1) - -/* -- Exit state ---------------------------------------------------------- */ - -/* This definition must match with the *.dasc file(s). */ -typedef struct { -#if !LJ_SOFTFP - lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ -#endif - int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ - int32_t spill[256]; /* Spill slots. */ -} ExitState; - -/* PC after instruction that caused an exit. Used to find the trace number. */ -#define EXITSTATE_PCREG RID_PC -/* Highest exit + 1 indicates stack check. */ -#define EXITSTATE_CHECKEXIT 1 - -#define EXITSTUB_SPACING 4 -#define EXITSTUBS_PER_GROUP 32 - -/* -- Instructions -------------------------------------------------------- */ - -/* Instruction fields. */ -#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28)) -#define ARMF_N(r) ((r) << 16) -#define ARMF_D(r) ((r) << 12) -#define ARMF_S(r) ((r) << 8) -#define ARMF_M(r) (r) -#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7)) -#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r)) - -typedef enum ARMIns { - ARMI_CCAL = 0xe0000000, - ARMI_S = 0x000100000, - ARMI_K12 = 0x02000000, - ARMI_KNEG = 0x00200000, - ARMI_LS_W = 0x00200000, - ARMI_LS_U = 0x00800000, - ARMI_LS_P = 0x01000000, - ARMI_LS_R = 0x02000000, - ARMI_LSX_I = 0x00400000, - - ARMI_AND = 0xe0000000, - ARMI_EOR = 0xe0200000, - ARMI_SUB = 0xe0400000, - ARMI_RSB = 0xe0600000, - ARMI_ADD = 0xe0800000, - ARMI_ADC = 0xe0a00000, - ARMI_SBC = 0xe0c00000, - ARMI_RSC = 0xe0e00000, - ARMI_TST = 0xe1100000, - ARMI_TEQ = 0xe1300000, - ARMI_CMP = 0xe1500000, - ARMI_CMN = 0xe1700000, - ARMI_ORR = 0xe1800000, - ARMI_MOV = 0xe1a00000, - ARMI_BIC = 0xe1c00000, - ARMI_MVN = 0xe1e00000, - - ARMI_NOP = 0xe1a00000, - - ARMI_MUL = 0xe0000090, - ARMI_SMULL = 0xe0c00090, - - ARMI_LDR = 0xe4100000, - ARMI_LDRB = 0xe4500000, - ARMI_LDRH = 0xe01000b0, - ARMI_LDRSB = 0xe01000d0, - ARMI_LDRSH = 0xe01000f0, - ARMI_LDRD = 0xe00000d0, - ARMI_STR = 0xe4000000, - ARMI_STRB = 0xe4400000, - ARMI_STRH = 0xe00000b0, - ARMI_STRD = 0xe00000f0, - ARMI_PUSH = 0xe92d0000, - - ARMI_B = 0xea000000, - ARMI_BL = 0xeb000000, - ARMI_BLX = 0xfa000000, - ARMI_BLXr = 0xe12fff30, - - /* ARMv6 */ - ARMI_REV = 0xe6bf0f30, - ARMI_SXTB = 0xe6af0070, - ARMI_SXTH = 0xe6bf0070, - ARMI_UXTB = 0xe6ef0070, - ARMI_UXTH = 0xe6ff0070, - - /* ARMv6T2 */ - ARMI_MOVW = 0xe3000000, - ARMI_MOVT = 0xe3400000, - - /* VFP */ - ARMI_VMOV_D = 0xeeb00b40, - ARMI_VMOV_S = 0xeeb00a40, - ARMI_VMOVI_D = 0xeeb00b00, - - ARMI_VMOV_R_S = 0xee100a10, - ARMI_VMOV_S_R = 0xee000a10, - ARMI_VMOV_RR_D = 0xec500b10, - ARMI_VMOV_D_RR = 0xec400b10, - - ARMI_VADD_D = 0xee300b00, - ARMI_VSUB_D = 0xee300b40, - ARMI_VMUL_D = 0xee200b00, - ARMI_VMLA_D = 0xee000b00, - ARMI_VMLS_D = 0xee000b40, - ARMI_VNMLS_D = 0xee100b00, - ARMI_VDIV_D = 0xee800b00, - - ARMI_VABS_D = 0xeeb00bc0, - ARMI_VNEG_D = 0xeeb10b40, - ARMI_VSQRT_D = 0xeeb10bc0, - - ARMI_VCMP_D = 0xeeb40b40, - ARMI_VCMPZ_D = 0xeeb50b40, - - ARMI_VMRS = 0xeef1fa10, - - ARMI_VCVT_S32_F32 = 0xeebd0ac0, - ARMI_VCVT_S32_F64 = 0xeebd0bc0, - ARMI_VCVT_U32_F32 = 0xeebc0ac0, - ARMI_VCVT_U32_F64 = 0xeebc0bc0, - ARMI_VCVT_F32_S32 = 0xeeb80ac0, - ARMI_VCVT_F64_S32 = 0xeeb80bc0, - ARMI_VCVT_F32_U32 = 0xeeb80a40, - ARMI_VCVT_F64_U32 = 0xeeb80b40, - ARMI_VCVT_F32_F64 = 0xeeb70bc0, - ARMI_VCVT_F64_F32 = 0xeeb70ac0, - - ARMI_VLDR_S = 0xed100a00, - ARMI_VLDR_D = 0xed100b00, - ARMI_VSTR_S = 
0xed000a00, - ARMI_VSTR_D = 0xed000b00, -} ARMIns; - -typedef enum ARMShift { - ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR -} ARMShift; - -/* ARM condition codes. */ -typedef enum ARMCC { - CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC, - CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL, - CC_HS = CC_CS, CC_LO = CC_CC -} ARMCC; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm64.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm64.h deleted file mode 100644 index 99e0adc9d66..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_arm64.h +++ /dev/null @@ -1,97 +0,0 @@ -/* -** Definitions for ARM64 CPUs. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TARGET_ARM64_H -#define _LJ_TARGET_ARM64_H - -/* -- Registers IDs ------------------------------------------------------- */ - -#define GPRDEF(_) \ - _(X0) _(X1) _(X2) _(X3) _(X4) _(X5) _(X6) _(X7) \ - _(X8) _(X9) _(X10) _(X11) _(X12) _(X13) _(X14) _(X15) \ - _(X16) _(X17) _(X18) _(X19) _(X20) _(X21) _(X22) _(X23) \ - _(X24) _(X25) _(X26) _(X27) _(X28) _(FP) _(LR) _(SP) -#define FPRDEF(_) \ - _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \ - _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15) \ - _(D16) _(D17) _(D18) _(D19) _(D20) _(D21) _(D22) _(D23) \ - _(D24) _(D25) _(D26) _(D27) _(D28) _(D29) _(D30) _(D31) -#define VRIDDEF(_) - -#define RIDENUM(name) RID_##name, - -enum { - GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ - FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ - RID_MAX, - RID_TMP = RID_LR, - RID_ZERO = RID_SP, - - /* Calling conventions. */ - RID_RET = RID_X0, - RID_FPRET = RID_D0, - - /* These definitions must match with the *.dasc file(s): */ - RID_BASE = RID_X19, /* Interpreter BASE. */ - RID_LPC = RID_X21, /* Interpreter PC. */ - RID_GL = RID_X22, /* Interpreter GL. */ - RID_LREG = RID_X23, /* Interpreter L. */ - - /* Register ranges [min, max) and number of registers. */ - RID_MIN_GPR = RID_X0, - RID_MAX_GPR = RID_SP+1, - RID_MIN_FPR = RID_MAX_GPR, - RID_MAX_FPR = RID_D31+1, - RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, - RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR -}; - -#define RID_NUM_KREF RID_NUM_GPR -#define RID_MIN_KREF RID_X0 - -/* -- Register sets ------------------------------------------------------- */ - -/* Make use of all registers, except for x18, fp, lr and sp. */ -#define RSET_FIXED \ - (RID2RSET(RID_X18)|RID2RSET(RID_FP)|RID2RSET(RID_LR)|RID2RSET(RID_SP)) -#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) -#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR) -#define RSET_ALL (RSET_GPR|RSET_FPR) -#define RSET_INIT RSET_ALL - -/* lr is an implicit scratch register. */ -#define RSET_SCRATCH_GPR (RSET_RANGE(RID_X0, RID_X17+1)) -#define RSET_SCRATCH_FPR \ - (RSET_RANGE(RID_D0, RID_D7+1)|RSET_RANGE(RID_D16, RID_D31+1)) -#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) -#define REGARG_FIRSTGPR RID_X0 -#define REGARG_LASTGPR RID_X7 -#define REGARG_NUMGPR 8 -#define REGARG_FIRSTFPR RID_D0 -#define REGARG_LASTFPR RID_D7 -#define REGARG_NUMFPR 8 - -/* -- Instructions -------------------------------------------------------- */ - -/* Instruction fields. 
*/ -#define A64F_D(r) (r) -#define A64F_N(r) ((r) << 5) -#define A64F_A(r) ((r) << 10) -#define A64F_M(r) ((r) << 16) -#define A64F_U16(x) ((x) << 5) -#define A64F_S26(x) (x) -#define A64F_S19(x) ((x) << 5) - -typedef enum A64Ins { - A64I_MOVZw = 0x52800000, - A64I_MOVZx = 0xd2800000, - A64I_LDRLw = 0x18000000, - A64I_LDRLx = 0x58000000, - A64I_NOP = 0xd503201f, - A64I_B = 0x14000000, - A64I_BR = 0xd61f0000, -} A64Ins; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_mips.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_mips.h deleted file mode 100644 index 76645bcac5e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_mips.h +++ /dev/null @@ -1,260 +0,0 @@ -/* -** Definitions for MIPS CPUs. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TARGET_MIPS_H -#define _LJ_TARGET_MIPS_H - -/* -- Registers IDs ------------------------------------------------------- */ - -#define GPRDEF(_) \ - _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \ - _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \ - _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \ - _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA) -#define FPRDEF(_) \ - _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \ - _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \ - _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \ - _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31) -#define VRIDDEF(_) - -#define RIDENUM(name) RID_##name, - -enum { - GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ - FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ - RID_MAX, - RID_ZERO = RID_R0, - RID_TMP = RID_RA, - - /* Calling conventions. */ - RID_RET = RID_R2, -#if LJ_LE - RID_RETHI = RID_R3, - RID_RETLO = RID_R2, -#else - RID_RETHI = RID_R2, - RID_RETLO = RID_R3, -#endif - RID_FPRET = RID_F0, - RID_CFUNCADDR = RID_R25, - - /* These definitions must match with the *.dasc file(s): */ - RID_BASE = RID_R16, /* Interpreter BASE. */ - RID_LPC = RID_R18, /* Interpreter PC. */ - RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */ - RID_LREG = RID_R20, /* Interpreter L. */ - RID_JGL = RID_R30, /* On-trace: global_State + 32768. */ - - /* Register ranges [min, max) and number of registers. */ - RID_MIN_GPR = RID_R0, - RID_MAX_GPR = RID_RA+1, - RID_MIN_FPR = RID_F0, - RID_MAX_FPR = RID_F31+1, - RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, - RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */ -}; - -#define RID_NUM_KREF RID_NUM_GPR -#define RID_MIN_KREF RID_R0 - -/* -- Register sets ------------------------------------------------------- */ - -/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2 and JGL. 
*/ -#define RSET_FIXED \ - (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\ - RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)) -#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) -#define RSET_FPR \ - (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\ - RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\ - RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\ - RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30)) -#define RSET_ALL (RSET_GPR|RSET_FPR) -#define RSET_INIT RSET_ALL - -#define RSET_SCRATCH_GPR \ - (RSET_RANGE(RID_R1, RID_R15+1)|\ - RID2RSET(RID_R24)|RID2RSET(RID_R25)|RID2RSET(RID_R28)) -#define RSET_SCRATCH_FPR \ - (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\ - RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\ - RID2RSET(RID_F16)|RID2RSET(RID_F18)) -#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) -#define REGARG_FIRSTGPR RID_R4 -#define REGARG_LASTGPR RID_R7 -#define REGARG_NUMGPR 4 -#define REGARG_FIRSTFPR RID_F12 -#define REGARG_LASTFPR RID_F14 -#define REGARG_NUMFPR 2 - -/* -- Spill slots --------------------------------------------------------- */ - -/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. -** -** SPS_FIXED: Available fixed spill slots in interpreter frame. -** This definition must match with the *.dasc file(s). -** -** SPS_FIRST: First spill slot for general use. -*/ -#define SPS_FIXED 5 -#define SPS_FIRST 4 - -#define SPOFS_TMP 0 - -#define sps_scale(slot) (4 * (int32_t)(slot)) -#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1) - -/* -- Exit state ---------------------------------------------------------- */ - -/* This definition must match with the *.dasc file(s). */ -typedef struct { - lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ - int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ - int32_t spill[256]; /* Spill slots. */ -} ExitState; - -/* Highest exit + 1 indicates stack check. */ -#define EXITSTATE_CHECKEXIT 1 - -/* Return the address of a per-trace exit stub. */ -static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p) -{ - while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */ - return p; -} -/* Avoid dependence on lj_jit.h if only including lj_target.h. */ -#define exitstub_trace_addr(T, exitno) \ - exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode)) - -/* -- Instructions -------------------------------------------------------- */ - -/* Instruction fields. */ -#define MIPSF_S(r) ((r) << 21) -#define MIPSF_T(r) ((r) << 16) -#define MIPSF_D(r) ((r) << 11) -#define MIPSF_R(r) ((r) << 21) -#define MIPSF_H(r) ((r) << 16) -#define MIPSF_G(r) ((r) << 11) -#define MIPSF_F(r) ((r) << 6) -#define MIPSF_A(n) ((n) << 6) -#define MIPSF_M(n) ((n) << 11) - -typedef enum MIPSIns { - /* Integer instructions. 
*/ - MIPSI_MOVE = 0x00000021, - MIPSI_NOP = 0x00000000, - - MIPSI_LI = 0x24000000, - MIPSI_LU = 0x34000000, - MIPSI_LUI = 0x3c000000, - - MIPSI_ADDIU = 0x24000000, - MIPSI_ANDI = 0x30000000, - MIPSI_ORI = 0x34000000, - MIPSI_XORI = 0x38000000, - MIPSI_SLTI = 0x28000000, - MIPSI_SLTIU = 0x2c000000, - - MIPSI_ADDU = 0x00000021, - MIPSI_SUBU = 0x00000023, - MIPSI_MUL = 0x70000002, - MIPSI_AND = 0x00000024, - MIPSI_OR = 0x00000025, - MIPSI_XOR = 0x00000026, - MIPSI_NOR = 0x00000027, - MIPSI_SLT = 0x0000002a, - MIPSI_SLTU = 0x0000002b, - MIPSI_MOVZ = 0x0000000a, - MIPSI_MOVN = 0x0000000b, - MIPSI_MFHI = 0x00000010, - MIPSI_MFLO = 0x00000012, - MIPSI_MULT = 0x00000018, - - MIPSI_SLL = 0x00000000, - MIPSI_SRL = 0x00000002, - MIPSI_SRA = 0x00000003, - MIPSI_ROTR = 0x00200002, /* MIPS32R2 */ - MIPSI_SLLV = 0x00000004, - MIPSI_SRLV = 0x00000006, - MIPSI_SRAV = 0x00000007, - MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */ - - MIPSI_SEB = 0x7c000420, /* MIPS32R2 */ - MIPSI_SEH = 0x7c000620, /* MIPS32R2 */ - MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */ - - MIPSI_B = 0x10000000, - MIPSI_J = 0x08000000, - MIPSI_JAL = 0x0c000000, - MIPSI_JR = 0x00000008, - MIPSI_JALR = 0x0000f809, - - MIPSI_BEQ = 0x10000000, - MIPSI_BNE = 0x14000000, - MIPSI_BLEZ = 0x18000000, - MIPSI_BGTZ = 0x1c000000, - MIPSI_BLTZ = 0x04000000, - MIPSI_BGEZ = 0x04010000, - - /* Load/store instructions. */ - MIPSI_LW = 0x8c000000, - MIPSI_SW = 0xac000000, - MIPSI_LB = 0x80000000, - MIPSI_SB = 0xa0000000, - MIPSI_LH = 0x84000000, - MIPSI_SH = 0xa4000000, - MIPSI_LBU = 0x90000000, - MIPSI_LHU = 0x94000000, - MIPSI_LWC1 = 0xc4000000, - MIPSI_SWC1 = 0xe4000000, - MIPSI_LDC1 = 0xd4000000, - MIPSI_SDC1 = 0xf4000000, - - /* FP instructions. */ - MIPSI_MOV_S = 0x46000006, - MIPSI_MOV_D = 0x46200006, - MIPSI_MOVT_D = 0x46210011, - MIPSI_MOVF_D = 0x46200011, - - MIPSI_ABS_D = 0x46200005, - MIPSI_NEG_D = 0x46200007, - - MIPSI_ADD_D = 0x46200000, - MIPSI_SUB_D = 0x46200001, - MIPSI_MUL_D = 0x46200002, - MIPSI_DIV_D = 0x46200003, - MIPSI_SQRT_D = 0x46200004, - - MIPSI_ADD_S = 0x46000000, - MIPSI_SUB_S = 0x46000001, - - MIPSI_CVT_D_S = 0x46000021, - MIPSI_CVT_W_S = 0x46000024, - MIPSI_CVT_S_D = 0x46200020, - MIPSI_CVT_W_D = 0x46200024, - MIPSI_CVT_S_W = 0x46800020, - MIPSI_CVT_D_W = 0x46800021, - - MIPSI_TRUNC_W_S = 0x4600000d, - MIPSI_TRUNC_W_D = 0x4620000d, - MIPSI_FLOOR_W_S = 0x4600000f, - MIPSI_FLOOR_W_D = 0x4620000f, - - MIPSI_MFC1 = 0x44000000, - MIPSI_MTC1 = 0x44800000, - - MIPSI_BC1F = 0x45000000, - MIPSI_BC1T = 0x45010000, - - MIPSI_C_EQ_D = 0x46200032, - MIPSI_C_OLT_D = 0x46200034, - MIPSI_C_ULT_D = 0x46200035, - MIPSI_C_OLE_D = 0x46200036, - MIPSI_C_ULE_D = 0x46200037, - -} MIPSIns; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_ppc.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_ppc.h deleted file mode 100644 index 99867688b7d..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_ppc.h +++ /dev/null @@ -1,280 +0,0 @@ -/* -** Definitions for PPC CPUs. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TARGET_PPC_H -#define _LJ_TARGET_PPC_H - -/* -- Registers IDs ------------------------------------------------------- */ - -#define GPRDEF(_) \ - _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \ - _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \ - _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \ - _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31) -#define FPRDEF(_) \ - _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \ - _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \ - _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \ - _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31) -#define VRIDDEF(_) - -#define RIDENUM(name) RID_##name, - -enum { - GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ - FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ - RID_MAX, - RID_TMP = RID_R0, - - /* Calling conventions. */ - RID_RET = RID_R3, - RID_RETHI = RID_R3, - RID_RETLO = RID_R4, - RID_FPRET = RID_F1, - - /* These definitions must match with the *.dasc file(s): */ - RID_BASE = RID_R14, /* Interpreter BASE. */ - RID_LPC = RID_R16, /* Interpreter PC. */ - RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */ - RID_LREG = RID_R18, /* Interpreter L. */ - RID_JGL = RID_R31, /* On-trace: global_State + 32768. */ - - /* Register ranges [min, max) and number of registers. */ - RID_MIN_GPR = RID_R0, - RID_MAX_GPR = RID_R31+1, - RID_MIN_FPR = RID_F0, - RID_MAX_FPR = RID_F31+1, - RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, - RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR -}; - -#define RID_NUM_KREF RID_NUM_GPR -#define RID_MIN_KREF RID_R0 - -/* -- Register sets ------------------------------------------------------- */ - -/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */ -#define RSET_FIXED \ - (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\ - RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)) -#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) -#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR) -#define RSET_ALL (RSET_GPR|RSET_FPR) -#define RSET_INIT RSET_ALL - -#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1)) -#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1)) -#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) -#define REGARG_FIRSTGPR RID_R3 -#define REGARG_LASTGPR RID_R10 -#define REGARG_NUMGPR 8 -#define REGARG_FIRSTFPR RID_F1 -#define REGARG_LASTFPR RID_F8 -#define REGARG_NUMFPR 8 - -/* -- Spill slots --------------------------------------------------------- */ - -/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. -** -** SPS_FIXED: Available fixed spill slots in interpreter frame. -** This definition must match with the *.dasc file(s). -** -** SPS_FIRST: First spill slot for general use. -** [sp+12] tmplo word \ -** [sp+ 8] tmphi word / tmp dword, parameter area for callee -** [sp+ 4] tmpw, LR of callee -** [sp+ 0] stack chain -*/ -#define SPS_FIXED 7 -#define SPS_FIRST 4 - -/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */ -#define SPOFS_TMPW 4 -#define SPOFS_TMP 8 -#define SPOFS_TMPHI 8 -#define SPOFS_TMPLO 12 - -#define sps_scale(slot) (4 * (int32_t)(slot)) -#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3) - -/* -- Exit state ---------------------------------------------------------- */ - -/* This definition must match with the *.dasc file(s). */ -typedef struct { - lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ - intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. 
*/ - int32_t spill[256]; /* Spill slots. */ -} ExitState; - -/* Highest exit + 1 indicates stack check. */ -#define EXITSTATE_CHECKEXIT 1 - -/* Return the address of a per-trace exit stub. */ -static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno) -{ - while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */ - return p + 3 + exitno; -} -/* Avoid dependence on lj_jit.h if only including lj_target.h. */ -#define exitstub_trace_addr(T, exitno) \ - exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno)) - -/* -- Instructions -------------------------------------------------------- */ - -/* Instruction fields. */ -#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22)) -#define PPCF_T(r) ((r) << 21) -#define PPCF_A(r) ((r) << 16) -#define PPCF_B(r) ((r) << 11) -#define PPCF_C(r) ((r) << 6) -#define PPCF_MB(n) ((n) << 6) -#define PPCF_ME(n) ((n) << 1) -#define PPCF_Y 0x00200000 -#define PPCF_DOT 0x00000001 - -typedef enum PPCIns { - /* Integer instructions. */ - PPCI_MR = 0x7c000378, - PPCI_NOP = 0x60000000, - - PPCI_LI = 0x38000000, - PPCI_LIS = 0x3c000000, - - PPCI_ADD = 0x7c000214, - PPCI_ADDC = 0x7c000014, - PPCI_ADDO = 0x7c000614, - PPCI_ADDE = 0x7c000114, - PPCI_ADDZE = 0x7c000194, - PPCI_ADDME = 0x7c0001d4, - PPCI_ADDI = 0x38000000, - PPCI_ADDIS = 0x3c000000, - PPCI_ADDIC = 0x30000000, - PPCI_ADDICDOT = 0x34000000, - - PPCI_SUBF = 0x7c000050, - PPCI_SUBFC = 0x7c000010, - PPCI_SUBFO = 0x7c000450, - PPCI_SUBFE = 0x7c000110, - PPCI_SUBFZE = 0x7c000190, - PPCI_SUBFME = 0x7c0001d0, - PPCI_SUBFIC = 0x20000000, - - PPCI_NEG = 0x7c0000d0, - - PPCI_AND = 0x7c000038, - PPCI_ANDC = 0x7c000078, - PPCI_NAND = 0x7c0003b8, - PPCI_ANDIDOT = 0x70000000, - PPCI_ANDISDOT = 0x74000000, - - PPCI_OR = 0x7c000378, - PPCI_NOR = 0x7c0000f8, - PPCI_ORI = 0x60000000, - PPCI_ORIS = 0x64000000, - - PPCI_XOR = 0x7c000278, - PPCI_EQV = 0x7c000238, - PPCI_XORI = 0x68000000, - PPCI_XORIS = 0x6c000000, - - PPCI_CMPW = 0x7c000000, - PPCI_CMPLW = 0x7c000040, - PPCI_CMPWI = 0x2c000000, - PPCI_CMPLWI = 0x28000000, - - PPCI_MULLW = 0x7c0001d6, - PPCI_MULLI = 0x1c000000, - PPCI_MULLWO = 0x7c0005d6, - - PPCI_EXTSB = 0x7c000774, - PPCI_EXTSH = 0x7c000734, - - PPCI_SLW = 0x7c000030, - PPCI_SRW = 0x7c000430, - PPCI_SRAW = 0x7c000630, - PPCI_SRAWI = 0x7c000670, - - PPCI_RLWNM = 0x5c000000, - PPCI_RLWINM = 0x54000000, - PPCI_RLWIMI = 0x50000000, - - PPCI_B = 0x48000000, - PPCI_BL = 0x48000001, - PPCI_BC = 0x40800000, - PPCI_BCL = 0x40800001, - PPCI_BCTR = 0x4e800420, - PPCI_BCTRL = 0x4e800421, - - PPCI_CRANDC = 0x4c000102, - PPCI_CRXOR = 0x4c000182, - PPCI_CRAND = 0x4c000202, - PPCI_CREQV = 0x4c000242, - PPCI_CRORC = 0x4c000342, - PPCI_CROR = 0x4c000382, - - PPCI_MFLR = 0x7c0802a6, - PPCI_MTCTR = 0x7c0903a6, - - PPCI_MCRXR = 0x7c000400, - - /* Load/store instructions. */ - PPCI_LWZ = 0x80000000, - PPCI_LBZ = 0x88000000, - PPCI_STW = 0x90000000, - PPCI_STB = 0x98000000, - PPCI_LHZ = 0xa0000000, - PPCI_LHA = 0xa8000000, - PPCI_STH = 0xb0000000, - - PPCI_STWU = 0x94000000, - - PPCI_LFS = 0xc0000000, - PPCI_LFD = 0xc8000000, - PPCI_STFS = 0xd0000000, - PPCI_STFD = 0xd8000000, - - PPCI_LWZX = 0x7c00002e, - PPCI_LBZX = 0x7c0000ae, - PPCI_STWX = 0x7c00012e, - PPCI_STBX = 0x7c0001ae, - PPCI_LHZX = 0x7c00022e, - PPCI_LHAX = 0x7c0002ae, - PPCI_STHX = 0x7c00032e, - - PPCI_LWBRX = 0x7c00042c, - PPCI_STWBRX = 0x7c00052c, - - PPCI_LFSX = 0x7c00042e, - PPCI_LFDX = 0x7c0004ae, - PPCI_STFSX = 0x7c00052e, - PPCI_STFDX = 0x7c0005ae, - - /* FP instructions. 
*/ - PPCI_FMR = 0xfc000090, - PPCI_FNEG = 0xfc000050, - PPCI_FABS = 0xfc000210, - - PPCI_FRSP = 0xfc000018, - PPCI_FCTIWZ = 0xfc00001e, - - PPCI_FADD = 0xfc00002a, - PPCI_FSUB = 0xfc000028, - PPCI_FMUL = 0xfc000032, - PPCI_FDIV = 0xfc000024, - PPCI_FSQRT = 0xfc00002c, - - PPCI_FMADD = 0xfc00003a, - PPCI_FMSUB = 0xfc000038, - PPCI_FNMSUB = 0xfc00003c, - - PPCI_FCMPU = 0xfc000000, - PPCI_FSEL = 0xfc00002e, -} PPCIns; - -typedef enum PPCCC { - CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO -} PPCCC; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_x86.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_x86.h deleted file mode 100644 index fc9d37024b0..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_target_x86.h +++ /dev/null @@ -1,345 +0,0 @@ -/* -** Definitions for x86 and x64 CPUs. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TARGET_X86_H -#define _LJ_TARGET_X86_H - -/* -- Registers IDs ------------------------------------------------------- */ - -#if LJ_64 -#define GPRDEF(_) \ - _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \ - _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D) -#define FPRDEF(_) \ - _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \ - _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15) -#else -#define GPRDEF(_) \ - _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) -#define FPRDEF(_) \ - _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) -#endif -#define VRIDDEF(_) \ - _(MRM) - -#define RIDENUM(name) RID_##name, - -enum { - GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ - FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ - RID_MAX, - RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */ - - /* Calling conventions. */ - RID_SP = RID_ESP, - RID_RET = RID_EAX, -#if LJ_64 - RID_FPRET = RID_XMM0, -#else - RID_RETLO = RID_EAX, - RID_RETHI = RID_EDX, -#endif - - /* These definitions must match with the *.dasc file(s): */ - RID_BASE = RID_EDX, /* Interpreter BASE. */ -#if LJ_64 && !LJ_ABI_WIN - RID_LPC = RID_EBX, /* Interpreter PC. */ - RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */ -#else - RID_LPC = RID_ESI, /* Interpreter PC. */ - RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */ -#endif - - /* Register ranges [min, max) and number of registers. */ - RID_MIN_GPR = RID_EAX, - RID_MIN_FPR = RID_XMM0, - RID_MAX_GPR = RID_MIN_FPR, - RID_MAX_FPR = RID_MAX, - RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, - RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR, -}; - -/* -- Register sets ------------------------------------------------------- */ - -/* Make use of all registers, except the stack pointer. */ -#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP)) -#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)) -#define RSET_ALL (RSET_GPR|RSET_FPR) -#define RSET_INIT RSET_ALL - -#if LJ_64 -/* Note: this requires the use of FORCE_REX! */ -#define RSET_GPR8 RSET_GPR -#else -#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1)) -#endif - -/* ABI-specific register sets. */ -#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX)) -#if LJ_64 -#if LJ_ABI_WIN -/* Windows x64 ABI. 
*/ -#define RSET_SCRATCH \ - (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1)) -#define REGARG_GPRS \ - (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5)) -#define REGARG_NUMGPR 4 -#define REGARG_NUMFPR 4 -#define REGARG_FIRSTFPR RID_XMM0 -#define REGARG_LASTFPR RID_XMM3 -#define STACKARG_OFS (4*8) -#else -/* The rest of the civilized x64 world has a common ABI. */ -#define RSET_SCRATCH \ - (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR) -#define REGARG_GPRS \ - (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \ - <<5))<<5))<<5))<<5))<<5)) -#define REGARG_NUMGPR 6 -#define REGARG_NUMFPR 8 -#define REGARG_FIRSTFPR RID_XMM0 -#define REGARG_LASTFPR RID_XMM7 -#define STACKARG_OFS 0 -#endif -#else -/* Common x86 ABI. */ -#define RSET_SCRATCH (RSET_ACD|RSET_FPR) -#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */ -#define REGARG_NUMGPR 2 /* Fastcall only. */ -#define REGARG_NUMFPR 0 -#define STACKARG_OFS 0 -#endif - -#if LJ_64 -/* Prefer the low 8 regs of each type to reduce REX prefixes. */ -#undef rset_picktop -#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18) -#endif - -/* -- Spill slots --------------------------------------------------------- */ - -/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. -** -** SPS_FIXED: Available fixed spill slots in interpreter frame. -** This definition must match with the *.dasc file(s). -** -** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots. -*/ -#if LJ_64 -#if LJ_ABI_WIN -#define SPS_FIXED (4*2) -#define SPS_FIRST (4*2) /* Don't use callee register save area. */ -#else -#if LJ_GC64 -#define SPS_FIXED 2 -#else -#define SPS_FIXED 4 -#endif -#define SPS_FIRST 2 -#endif -#else -#define SPS_FIXED 6 -#define SPS_FIRST 2 -#endif - -#define SPOFS_TMP 0 - -#define sps_scale(slot) (4 * (int32_t)(slot)) -#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3) - -/* -- Exit state ---------------------------------------------------------- */ - -/* This definition must match with the *.dasc file(s). */ -typedef struct { - lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ - intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ - int32_t spill[256]; /* Spill slots. */ -} ExitState; - -/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */ -#define EXITSTUB_SPACING (2+2) -#define EXITSTUBS_PER_GROUP 32 - -/* -- x86 ModRM operand encoding ------------------------------------------ */ - -typedef enum { - XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0, - XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0, - XM_MASK = 0xc0 -} x86Mode; - -/* Structure to hold variable ModRM operand. */ -typedef struct { - int32_t ofs; /* Offset. */ - uint8_t base; /* Base register or RID_NONE. */ - uint8_t idx; /* Index register or RID_NONE. */ - uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */ -} x86ModRM; - -/* -- Opcodes ------------------------------------------------------------- */ - -/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. 
*/ -#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24))) -#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24))) -#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24))) -#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24))) -#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24))) -#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24))) -#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24))) - -/* This list of x86 opcodes is not intended to be complete. Opcodes are only -** included when needed. Take a look at DynASM or jit.dis_x86 to see the -** whole mess. -*/ -typedef enum { - /* Fixed length opcodes. XI_* prefix. */ - XI_NOP = 0x90, - XI_XCHGa = 0x90, - XI_CALL = 0xe8, - XI_JMP = 0xe9, - XI_JMPs = 0xeb, - XI_PUSH = 0x50, /* Really 50+r. */ - XI_JCCs = 0x70, /* Really 7x. */ - XI_JCCn = 0x80, /* Really 0f8x. */ - XI_LEA = 0x8d, - XI_MOVrib = 0xb0, /* Really b0+r. */ - XI_MOVri = 0xb8, /* Really b8+r. */ - XI_ARITHib = 0x80, - XI_ARITHi = 0x81, - XI_ARITHi8 = 0x83, - XI_PUSHi8 = 0x6a, - XI_TESTb = 0x84, - XI_TEST = 0x85, - XI_MOVmi = 0xc7, - XI_GROUP5 = 0xff, - - /* Note: little-endian byte-order! */ - XI_FLDZ = 0xeed9, - XI_FLD1 = 0xe8d9, - XI_FLDLG2 = 0xecd9, - XI_FLDLN2 = 0xedd9, - XI_FDUP = 0xc0d9, /* Really fld st0. */ - XI_FPOP = 0xd8dd, /* Really fstp st0. */ - XI_FPOP1 = 0xd9dd, /* Really fstp st1. */ - XI_FRNDINT = 0xfcd9, - XI_FSIN = 0xfed9, - XI_FCOS = 0xffd9, - XI_FPTAN = 0xf2d9, - XI_FPATAN = 0xf3d9, - XI_FSCALE = 0xfdd9, - XI_FYL2X = 0xf1d9, - - /* Variable-length opcodes. XO_* prefix. */ - XO_MOV = XO_(8b), - XO_MOVto = XO_(89), - XO_MOVtow = XO_66(89), - XO_MOVtob = XO_(88), - XO_MOVmi = XO_(c7), - XO_MOVmib = XO_(c6), - XO_LEA = XO_(8d), - XO_ARITHib = XO_(80), - XO_ARITHi = XO_(81), - XO_ARITHi8 = XO_(83), - XO_ARITHiw8 = XO_66(83), - XO_SHIFTi = XO_(c1), - XO_SHIFT1 = XO_(d1), - XO_SHIFTcl = XO_(d3), - XO_IMUL = XO_0f(af), - XO_IMULi = XO_(69), - XO_IMULi8 = XO_(6b), - XO_CMP = XO_(3b), - XO_TESTb = XO_(84), - XO_TEST = XO_(85), - XO_GROUP3b = XO_(f6), - XO_GROUP3 = XO_(f7), - XO_GROUP5b = XO_(fe), - XO_GROUP5 = XO_(ff), - XO_MOVZXb = XO_0f(b6), - XO_MOVZXw = XO_0f(b7), - XO_MOVSXb = XO_0f(be), - XO_MOVSXw = XO_0f(bf), - XO_MOVSXd = XO_(63), - XO_BSWAP = XO_0f(c8), - XO_CMOV = XO_0f(40), - - XO_MOVSD = XO_f20f(10), - XO_MOVSDto = XO_f20f(11), - XO_MOVSS = XO_f30f(10), - XO_MOVSSto = XO_f30f(11), - XO_MOVLPD = XO_660f(12), - XO_MOVAPS = XO_0f(28), - XO_XORPS = XO_0f(57), - XO_ANDPS = XO_0f(54), - XO_ADDSD = XO_f20f(58), - XO_SUBSD = XO_f20f(5c), - XO_MULSD = XO_f20f(59), - XO_DIVSD = XO_f20f(5e), - XO_SQRTSD = XO_f20f(51), - XO_MINSD = XO_f20f(5d), - XO_MAXSD = XO_f20f(5f), - XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */ - XO_UCOMISD = XO_660f(2e), - XO_CVTSI2SD = XO_f20f(2a), - XO_CVTTSD2SI= XO_f20f(2c), - XO_CVTSI2SS = XO_f30f(2a), - XO_CVTTSS2SI= XO_f30f(2c), - XO_CVTSS2SD = XO_f30f(5a), - XO_CVTSD2SS = XO_f20f(5a), - XO_ADDSS = XO_f30f(58), - XO_MOVD = XO_660f(6e), - XO_MOVDto = XO_660f(7e), - - XO_FLDd = XO_(d9), XOg_FLDd = 0, - XO_FLDq = XO_(dd), XOg_FLDq = 0, - XO_FILDd = XO_(db), XOg_FILDd = 0, - XO_FILDq = XO_(df), XOg_FILDq = 5, - XO_FSTPd = XO_(d9), XOg_FSTPd = 3, - XO_FSTPq = XO_(dd), XOg_FSTPq = 3, - XO_FISTPq = XO_(df), XOg_FISTPq = 7, - XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1, - XO_FADDq = XO_(dc), XOg_FADDq = 0, - XO_FLDCW = XO_(d9), XOg_FLDCW = 5, - XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7 -} x86Op; - -/* x86 opcode groups. 
*/ -typedef uint32_t x86Group; - -#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g))) -#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g) -#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000))) -#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000))) - -#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27))) -#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27))) - -typedef enum { - XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP, - XOg_X_IMUL -} x86Arith; - -typedef enum { - XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR -} x86Shift; - -typedef enum { - XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV -} x86Group3; - -typedef enum { - XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH -} x86Group5; - -/* x86 condition codes. */ -typedef enum { - CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE, - CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE, - CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB, - CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE, - CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL, - CC_NG = CC_LE, CC_G = CC_NLE -} x86CC; - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.c deleted file mode 100644 index 42f4321d5d2..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.c +++ /dev/null @@ -1,859 +0,0 @@ -/* -** Trace management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_trace_c -#define LUA_CORE - -#include "lj_obj.h" - -#if LJ_HASJIT - -#include "lj_gc.h" -#include "lj_err.h" -#include "lj_debug.h" -#include "lj_str.h" -#include "lj_frame.h" -#include "lj_state.h" -#include "lj_bc.h" -#include "lj_ir.h" -#include "lj_jit.h" -#include "lj_iropt.h" -#include "lj_mcode.h" -#include "lj_trace.h" -#include "lj_snap.h" -#include "lj_gdbjit.h" -#include "lj_record.h" -#include "lj_asm.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_vmevent.h" -#include "lj_target.h" - -/* -- Error handling ------------------------------------------------------ */ - -/* Synchronous abort with error message. */ -void lj_trace_err(jit_State *J, TraceError e) -{ - setnilV(&J->errinfo); /* No error info. */ - setintV(J->L->top++, (int32_t)e); - lj_err_throw(J->L, LUA_ERRRUN); -} - -/* Synchronous abort with error message and error info. */ -void lj_trace_err_info(jit_State *J, TraceError e) -{ - setintV(J->L->top++, (int32_t)e); - lj_err_throw(J->L, LUA_ERRRUN); -} - -/* -- Trace management ---------------------------------------------------- */ - -/* The current trace is first assembled in J->cur. The variable length -** arrays point to shared, growable buffers (J->irbuf etc.). When trace -** recording ends successfully, the current trace and its data structures -** are copied to a new (compact) GCtrace object. -*/ - -/* Find a free trace number. */ -static TraceNo trace_findfree(jit_State *J) -{ - MSize osz, lim; - if (J->freetrace == 0) - J->freetrace = 1; - for (; J->freetrace < J->sizetrace; J->freetrace++) - if (traceref(J, J->freetrace) == NULL) - return J->freetrace++; - /* Need to grow trace array. */ - lim = (MSize)J->param[JIT_P_maxtrace] + 1; - if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535; - osz = J->sizetrace; - if (osz >= lim) - return 0; /* Too many traces. 
*/ - lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef); - for (; osz < J->sizetrace; osz++) - setgcrefnull(J->trace[osz]); - return J->freetrace; -} - -#define TRACE_APPENDVEC(field, szfield, tp) \ - T->field = (tp *)p; \ - memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \ - p += J->cur.szfield*sizeof(tp); - -#ifdef LUAJIT_USE_PERFTOOLS -/* -** Create symbol table of JIT-compiled code. For use with Linux perf tools. -** Example usage: -** perf record -f -e cycles luajit test.lua -** perf report -s symbol -** rm perf.data /tmp/perf-*.map -*/ -#include -#include - -static void perftools_addtrace(GCtrace *T) -{ - static FILE *fp; - GCproto *pt = &gcref(T->startpt)->pt; - const BCIns *startpc = mref(T->startpc, const BCIns); - const char *name = proto_chunknamestr(pt); - BCLine lineno; - if (name[0] == '@' || name[0] == '=') - name++; - else - name = "(string)"; - lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); - lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); - if (!fp) { - char fname[40]; - sprintf(fname, "/tmp/perf-%d.map", getpid()); - if (!(fp = fopen(fname, "w"))) return; - setlinebuf(fp); - } - fprintf(fp, "%lx %x TRACE_%d::%s:%u\n", - (long)T->mcode, T->szmcode, T->traceno, name, lineno); -} -#endif - -/* Allocate space for copy of trace. */ -static GCtrace *trace_save_alloc(jit_State *J) -{ - size_t sztr = ((sizeof(GCtrace)+7)&~7); - size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns); - size_t sz = sztr + szins + - J->cur.nsnap*sizeof(SnapShot) + - J->cur.nsnapmap*sizeof(SnapEntry); - return lj_mem_newt(J->L, (MSize)sz, GCtrace); -} - -/* Save current trace by copying and compacting it. */ -static void trace_save(jit_State *J, GCtrace *T) -{ - size_t sztr = ((sizeof(GCtrace)+7)&~7); - size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns); - char *p = (char *)T + sztr; - memcpy(T, &J->cur, sizeof(GCtrace)); - setgcrefr(T->nextgc, J2G(J)->gc.root); - setgcrefp(J2G(J)->gc.root, T); - newwhite(J2G(J), T); - T->gct = ~LJ_TTRACE; - T->ir = (IRIns *)p - J->cur.nk; - memcpy(p, J->cur.ir+J->cur.nk, szins); - p += szins; - TRACE_APPENDVEC(snap, nsnap, SnapShot) - TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry) - J->cur.traceno = 0; - setgcrefp(J->trace[T->traceno], T); - lj_gc_barriertrace(J2G(J), T->traceno); - lj_gdbjit_addtrace(J, T); -#ifdef LUAJIT_USE_PERFTOOLS - perftools_addtrace(T); -#endif -} - -void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T) -{ - jit_State *J = G2J(g); - if (T->traceno) { - lj_gdbjit_deltrace(J, T); - if (T->traceno < J->freetrace) - J->freetrace = T->traceno; - setgcrefnull(J->trace[T->traceno]); - } - lj_mem_free(g, T, - ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + - T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry)); -} - -/* Re-enable compiling a prototype by unpatching any modified bytecode. */ -void lj_trace_reenableproto(GCproto *pt) -{ - if ((pt->flags & PROTO_ILOOP)) { - BCIns *bc = proto_bc(pt); - BCPos i, sizebc = pt->sizebc;; - pt->flags &= ~PROTO_ILOOP; - if (bc_op(bc[0]) == BC_IFUNCF) - setbc_op(&bc[0], BC_FUNCF); - for (i = 1; i < sizebc; i++) { - BCOp op = bc_op(bc[i]); - if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP) - setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP); - } - } -} - -/* Unpatch the bytecode modified by a root trace. */ -static void trace_unpatch(jit_State *J, GCtrace *T) -{ - BCOp op = bc_op(T->startins); - BCIns *pc = mref(T->startpc, BCIns); - UNUSED(J); - if (op == BC_JMP) - return; /* No need to unpatch branches in parent traces (yet). 
*/ - switch (bc_op(*pc)) { - case BC_JFORL: - lua_assert(traceref(J, bc_d(*pc)) == T); - *pc = T->startins; - pc += bc_j(T->startins); - lua_assert(bc_op(*pc) == BC_JFORI); - setbc_op(pc, BC_FORI); - break; - case BC_JITERL: - case BC_JLOOP: - lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op)); - *pc = T->startins; - break; - case BC_JMP: - lua_assert(op == BC_ITERL); - pc += bc_j(*pc)+2; - if (bc_op(*pc) == BC_JITERL) { - lua_assert(traceref(J, bc_d(*pc)) == T); - *pc = T->startins; - } - break; - case BC_JFUNCF: - lua_assert(op == BC_FUNCF); - *pc = T->startins; - break; - default: /* Already unpatched. */ - break; - } -} - -/* Flush a root trace. */ -static void trace_flushroot(jit_State *J, GCtrace *T) -{ - GCproto *pt = &gcref(T->startpt)->pt; - lua_assert(T->root == 0 && pt != NULL); - /* First unpatch any modified bytecode. */ - trace_unpatch(J, T); - /* Unlink root trace from chain anchored in prototype. */ - if (pt->trace == T->traceno) { /* Trace is first in chain. Easy. */ - pt->trace = T->nextroot; - } else if (pt->trace) { /* Otherwise search in chain of root traces. */ - GCtrace *T2 = traceref(J, pt->trace); - if (T2) { - for (; T2->nextroot; T2 = traceref(J, T2->nextroot)) - if (T2->nextroot == T->traceno) { - T2->nextroot = T->nextroot; /* Unlink from chain. */ - break; - } - } - } -} - -/* Flush a trace. Only root traces are considered. */ -void lj_trace_flush(jit_State *J, TraceNo traceno) -{ - if (traceno > 0 && traceno < J->sizetrace) { - GCtrace *T = traceref(J, traceno); - if (T && T->root == 0) - trace_flushroot(J, T); - } -} - -/* Flush all traces associated with a prototype. */ -void lj_trace_flushproto(global_State *g, GCproto *pt) -{ - while (pt->trace != 0) - trace_flushroot(G2J(g), traceref(G2J(g), pt->trace)); -} - -/* Flush all traces. */ -int lj_trace_flushall(lua_State *L) -{ - jit_State *J = L2J(L); - ptrdiff_t i; - if ((J2G(J)->hookmask & HOOK_GC)) - return 1; - for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) { - GCtrace *T = traceref(J, i); - if (T) { - if (T->root == 0) - trace_flushroot(J, T); - lj_gdbjit_deltrace(J, T); - T->traceno = 0; - setgcrefnull(J->trace[i]); - } - } - J->cur.traceno = 0; - J->freetrace = 0; - /* Clear penalty cache. */ - memset(J->penalty, 0, sizeof(J->penalty)); - /* Free the whole machine code and invalidate all exit stub groups. */ - lj_mcode_free(J); - memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup)); - lj_vmevent_send(L, TRACE, - setstrV(L, L->top++, lj_str_newlit(L, "flush")); - ); - return 0; -} - -/* Initialize JIT compiler state. */ -void lj_trace_initstate(global_State *g) -{ - jit_State *J = G2J(g); - TValue *tv; - /* Initialize SIMD constants. */ - tv = LJ_KSIMD(J, LJ_KSIMD_ABS); - tv[0].u64 = U64x(7fffffff,ffffffff); - tv[1].u64 = U64x(7fffffff,ffffffff); - tv = LJ_KSIMD(J, LJ_KSIMD_NEG); - tv[0].u64 = U64x(80000000,00000000); - tv[1].u64 = U64x(80000000,00000000); -} - -/* Free everything associated with the JIT compiler state. */ -void lj_trace_freestate(global_State *g) -{ - jit_State *J = G2J(g); -#ifdef LUA_USE_ASSERT - { /* This assumes all traces have already been freed. 
*/ - ptrdiff_t i; - for (i = 1; i < (ptrdiff_t)J->sizetrace; i++) - lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL); - } -#endif - lj_mcode_free(J); - lj_ir_k64_freeall(J); - lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry); - lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot); - lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns); - lj_mem_freevec(g, J->trace, J->sizetrace, GCRef); -} - -/* -- Penalties and blacklisting ------------------------------------------ */ - -/* Blacklist a bytecode instruction. */ -static void blacklist_pc(GCproto *pt, BCIns *pc) -{ - setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP); - pt->flags |= PROTO_ILOOP; -} - -/* Penalize a bytecode instruction. */ -static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e) -{ - uint32_t i, val = PENALTY_MIN; - for (i = 0; i < PENALTY_SLOTS; i++) - if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */ - /* First try to bump its hotcount several times. */ - val = ((uint32_t)J->penalty[i].val << 1) + - LJ_PRNG_BITS(J, PENALTY_RNDBITS); - if (val > PENALTY_MAX) { - blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */ - return; - } - goto setpenalty; - } - /* Assign a new penalty cache slot. */ - i = J->penaltyslot; - J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1); - setmref(J->penalty[i].pc, pc); -setpenalty: - J->penalty[i].val = (uint16_t)val; - J->penalty[i].reason = e; - hotcount_set(J2GG(J), pc+1, val); -} - -/* -- Trace compiler state machine ---------------------------------------- */ - -/* Start tracing. */ -static void trace_start(jit_State *J) -{ - lua_State *L; - TraceNo traceno; - - if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */ - if (J->parent == 0 && J->exitno == 0) { - /* Lazy bytecode patching to disable hotcount events. */ - lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL || - bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF); - setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP); - J->pt->flags |= PROTO_ILOOP; - } - J->state = LJ_TRACE_IDLE; /* Silently ignored. */ - return; - } - - /* Get a new trace number. */ - traceno = trace_findfree(J); - if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */ - lua_assert((J2G(J)->hookmask & HOOK_GC) == 0); - lj_trace_flushall(J->L); - J->state = LJ_TRACE_IDLE; /* Silently ignored. */ - return; - } - setgcrefp(J->trace[traceno], &J->cur); - - /* Setup enough of the current trace to be able to send the vmevent. */ - memset(&J->cur, 0, sizeof(GCtrace)); - J->cur.traceno = traceno; - J->cur.nins = J->cur.nk = REF_BASE; - J->cur.ir = J->irbuf; - J->cur.snap = J->snapbuf; - J->cur.snapmap = J->snapmapbuf; - J->mergesnap = 0; - J->needsnap = 0; - J->bcskip = 0; - J->guardemit.irt = 0; - J->postproc = LJ_POST_NONE; - lj_resetsplit(J); - J->retryrec = 0; - setgcref(J->cur.startpt, obj2gco(J->pt)); - - L = J->L; - lj_vmevent_send(L, TRACE, - setstrV(L, L->top++, lj_str_newlit(L, "start")); - setintV(L->top++, traceno); - setfuncV(L, L->top++, J->fn); - setintV(L->top++, proto_bcpos(J->pt, J->pc)); - if (J->parent) { - setintV(L->top++, J->parent); - setintV(L->top++, J->exitno); - } - ); - lj_record_setup(J); -} - -/* Stop tracing. */ -static void trace_stop(jit_State *J) -{ - BCIns *pc = mref(J->cur.startpc, BCIns); - BCOp op = bc_op(J->cur.startins); - GCproto *pt = &gcref(J->cur.startpt)->pt; - TraceNo traceno = J->cur.traceno; - GCtrace *T = trace_save_alloc(J); /* Do this first. May throw OOM. 
*/ - lua_State *L; - - switch (op) { - case BC_FORL: - setbc_op(pc+bc_j(J->cur.startins), BC_JFORI); /* Patch FORI, too. */ - /* fallthrough */ - case BC_LOOP: - case BC_ITERL: - case BC_FUNCF: - /* Patch bytecode of starting instruction in root trace. */ - setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP); - setbc_d(pc, traceno); - addroot: - /* Add to root trace chain in prototype. */ - J->cur.nextroot = pt->trace; - pt->trace = (TraceNo1)traceno; - break; - case BC_RET: - case BC_RET0: - case BC_RET1: - *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno); - goto addroot; - case BC_JMP: - /* Patch exit branch in parent to side trace entry. */ - lua_assert(J->parent != 0 && J->cur.root != 0); - lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode); - /* Avoid compiling a side trace twice (stack resizing uses parent exit). */ - traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE; - /* Add to side trace chain in root trace. */ - { - GCtrace *root = traceref(J, J->cur.root); - root->nchild++; - J->cur.nextside = root->nextside; - root->nextside = (TraceNo1)traceno; - } - break; - case BC_CALLM: - case BC_CALL: - case BC_ITERC: - /* Trace stitching: patch link of previous trace. */ - traceref(J, J->exitno)->link = traceno; - break; - default: - lua_assert(0); - break; - } - - /* Commit new mcode only after all patching is done. */ - lj_mcode_commit(J, J->cur.mcode); - J->postproc = LJ_POST_NONE; - trace_save(J, T); - - L = J->L; - lj_vmevent_send(L, TRACE, - setstrV(L, L->top++, lj_str_newlit(L, "stop")); - setintV(L->top++, traceno); - setfuncV(L, L->top++, J->fn); - ); -} - -/* Start a new root trace for down-recursion. */ -static int trace_downrec(jit_State *J) -{ - /* Restart recording at the return instruction. */ - lua_assert(J->pt != NULL); - lua_assert(bc_isret(bc_op(*J->pc))); - if (bc_op(*J->pc) == BC_RETM) - return 0; /* NYI: down-recursion with RETM. */ - J->parent = 0; - J->exitno = 0; - J->state = LJ_TRACE_RECORD; - trace_start(J); - return 1; -} - -/* Abort tracing. */ -static int trace_abort(jit_State *J) -{ - lua_State *L = J->L; - TraceError e = LJ_TRERR_RECERR; - TraceNo traceno; - - J->postproc = LJ_POST_NONE; - lj_mcode_abort(J); - if (tvisnumber(L->top-1)) - e = (TraceError)numberVint(L->top-1); - if (e == LJ_TRERR_MCODELM) { - L->top--; /* Remove error object */ - J->state = LJ_TRACE_ASM; - return 1; /* Retry ASM with new MCode area. */ - } - /* Penalize or blacklist starting bytecode instruction. */ - if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) { - if (J->exitno == 0) { - BCIns *startpc = mref(J->cur.startpc, BCIns); - if (e == LJ_TRERR_RETRY) - hotcount_set(J2GG(J), startpc+1, 1); /* Immediate retry. */ - else - penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e); - } else { - traceref(J, J->exitno)->link = J->exitno; /* Self-link is blacklisted. */ - } - } - - /* Is there anything to abort? */ - traceno = J->cur.traceno; - if (traceno) { - ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */ - J->cur.link = 0; - J->cur.linktype = LJ_TRLINK_NONE; - lj_vmevent_send(L, TRACE, - TValue *frame; - const BCIns *pc; - GCfunc *fn; - setstrV(L, L->top++, lj_str_newlit(L, "abort")); - setintV(L->top++, traceno); - /* Find original Lua function call to generate a better error message. */ - frame = J->L->base-1; - pc = J->pc; - while (!isluafunc(frame_func(frame))) { - pc = (frame_iscont(frame) ? 
frame_contpc(frame) : frame_pc(frame)) - 1; - frame = frame_prev(frame); - } - fn = frame_func(frame); - setfuncV(L, L->top++, fn); - setintV(L->top++, proto_bcpos(funcproto(fn), pc)); - copyTV(L, L->top++, restorestack(L, errobj)); - copyTV(L, L->top++, &J->errinfo); - ); - /* Drop aborted trace after the vmevent (which may still access it). */ - setgcrefnull(J->trace[traceno]); - if (traceno < J->freetrace) - J->freetrace = traceno; - J->cur.traceno = 0; - } - L->top--; /* Remove error object */ - if (e == LJ_TRERR_DOWNREC) - return trace_downrec(J); - else if (e == LJ_TRERR_MCODEAL) - lj_trace_flushall(L); - return 0; -} - -/* Perform pending re-patch of a bytecode instruction. */ -static LJ_AINLINE void trace_pendpatch(jit_State *J, int force) -{ - if (LJ_UNLIKELY(J->patchpc)) { - if (force || J->bcskip == 0) { - *J->patchpc = J->patchins; - J->patchpc = NULL; - } else { - J->bcskip = 0; - } - } -} - -/* State machine for the trace compiler. Protected callback. */ -static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud) -{ - jit_State *J = (jit_State *)ud; - UNUSED(dummy); - do { - retry: - switch (J->state) { - case LJ_TRACE_START: - J->state = LJ_TRACE_RECORD; /* trace_start() may change state. */ - trace_start(J); - lj_dispatch_update(J2G(J)); - break; - - case LJ_TRACE_RECORD: - trace_pendpatch(J, 0); - setvmstate(J2G(J), RECORD); - lj_vmevent_send_(L, RECORD, - /* Save/restore tmptv state for trace recorder. */ - TValue savetv = J2G(J)->tmptv; - TValue savetv2 = J2G(J)->tmptv2; - setintV(L->top++, J->cur.traceno); - setfuncV(L, L->top++, J->fn); - setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1); - setintV(L->top++, J->framedepth); - , - J2G(J)->tmptv = savetv; - J2G(J)->tmptv2 = savetv2; - ); - lj_record_ins(J); - break; - - case LJ_TRACE_END: - trace_pendpatch(J, 1); - J->loopref = 0; - if ((J->flags & JIT_F_OPT_LOOP) && - J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) { - setvmstate(J2G(J), OPT); - lj_opt_dce(J); - if (lj_opt_loop(J)) { /* Loop optimization failed? */ - J->cur.link = 0; - J->cur.linktype = LJ_TRLINK_NONE; - J->loopref = J->cur.nins; - J->state = LJ_TRACE_RECORD; /* Try to continue recording. */ - break; - } - J->loopref = J->chain[IR_LOOP]; /* Needed by assembler. */ - } - lj_opt_split(J); - lj_opt_sink(J); - if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE; - J->state = LJ_TRACE_ASM; - break; - - case LJ_TRACE_ASM: - setvmstate(J2G(J), ASM); - lj_asm_trace(J, &J->cur); - trace_stop(J); - setvmstate(J2G(J), INTERP); - J->state = LJ_TRACE_IDLE; - lj_dispatch_update(J2G(J)); - return NULL; - - default: /* Trace aborted asynchronously. */ - setintV(L->top++, (int32_t)LJ_TRERR_RECERR); - /* fallthrough */ - case LJ_TRACE_ERR: - trace_pendpatch(J, 1); - if (trace_abort(J)) - goto retry; - setvmstate(J2G(J), INTERP); - J->state = LJ_TRACE_IDLE; - lj_dispatch_update(J2G(J)); - return NULL; - } - } while (J->state > LJ_TRACE_RECORD); - return NULL; -} - -/* -- Event handling ------------------------------------------------------ */ - -/* A bytecode instruction is about to be executed. Record it. */ -void lj_trace_ins(jit_State *J, const BCIns *pc) -{ - /* Note: J->L must already be set. pc is the true bytecode PC here. */ - J->pc = pc; - J->fn = curr_func(J->L); - J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL; - while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0) - J->state = LJ_TRACE_ERR; -} - -/* A hotcount triggered. Start recording a root trace. 
*/ -void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc) -{ - /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */ - ERRNO_SAVE - /* Reset hotcount. */ - hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP); - /* Only start a new trace if not recording or inside __gc call or vmevent. */ - if (J->state == LJ_TRACE_IDLE && - !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) { - J->parent = 0; /* Root trace. */ - J->exitno = 0; - J->state = LJ_TRACE_START; - lj_trace_ins(J, pc-1); - } - ERRNO_RESTORE -} - -/* Check for a hot side exit. If yes, start recording a side trace. */ -static void trace_hotside(jit_State *J, const BCIns *pc) -{ - SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno]; - if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) && - isluafunc(curr_func(J->L)) && - snap->count != SNAPCOUNT_DONE && - ++snap->count >= J->param[JIT_P_hotexit]) { - lua_assert(J->state == LJ_TRACE_IDLE); - /* J->parent is non-zero for a side trace. */ - J->state = LJ_TRACE_START; - lj_trace_ins(J, pc); - } -} - -/* Stitch a new trace to the previous trace. */ -void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc) -{ - /* Only start a new trace if not recording or inside __gc call or vmevent. */ - if (J->state == LJ_TRACE_IDLE && - !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) { - J->parent = 0; /* Have to treat it like a root trace. */ - /* J->exitno is set to the invoking trace. */ - J->state = LJ_TRACE_START; - lj_trace_ins(J, pc); - } -} - - -/* Tiny struct to pass data to protected call. */ -typedef struct ExitDataCP { - jit_State *J; - void *exptr; /* Pointer to exit state. */ - const BCIns *pc; /* Restart interpreter at this PC. */ -} ExitDataCP; - -/* Need to protect lj_snap_restore because it may throw. */ -static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud) -{ - ExitDataCP *exd = (ExitDataCP *)ud; - cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ - exd->pc = lj_snap_restore(exd->J, exd->exptr); - UNUSED(dummy); - return NULL; -} - -#ifndef LUAJIT_DISABLE_VMEVENT -/* Push all registers from exit state. */ -static void trace_exit_regs(lua_State *L, ExitState *ex) -{ - int32_t i; - setintV(L->top++, RID_NUM_GPR); - setintV(L->top++, RID_NUM_FPR); - for (i = 0; i < RID_NUM_GPR; i++) { - if (sizeof(ex->gpr[i]) == sizeof(int32_t)) - setintV(L->top++, (int32_t)ex->gpr[i]); - else - setnumV(L->top++, (lua_Number)ex->gpr[i]); - } -#if !LJ_SOFTFP - for (i = 0; i < RID_NUM_FPR; i++) { - setnumV(L->top, ex->fpr[i]); - if (LJ_UNLIKELY(tvisnan(L->top))) - setnanV(L->top); - L->top++; - } -#endif -} -#endif - -#ifdef EXITSTATE_PCREG -/* Determine trace number from pc of exit instruction. */ -static TraceNo trace_exit_find(jit_State *J, MCode *pc) -{ - TraceNo traceno; - for (traceno = 1; traceno < J->sizetrace; traceno++) { - GCtrace *T = traceref(J, traceno); - if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode)) - return traceno; - } - lua_assert(0); - return 0; -} -#endif - -/* A trace exited. Restore interpreter state. */ -int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr) -{ - ERRNO_SAVE - lua_State *L = J->L; - ExitState *ex = (ExitState *)exptr; - ExitDataCP exd; - int errcode; - const BCIns *pc; - void *cf; - GCtrace *T; -#ifdef EXITSTATE_PCREG - J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]); -#endif - T = traceref(J, J->parent); UNUSED(T); -#ifdef EXITSTATE_CHECKEXIT - if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. 
*/ - lua_assert(T->root != 0); - J->exitno = T->ir[REF_BASE].op2; - J->parent = T->ir[REF_BASE].op1; - T = traceref(J, J->parent); - } -#endif - lua_assert(T != NULL && J->exitno < T->nsnap); - exd.J = J; - exd.exptr = exptr; - errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp); - if (errcode) - return -errcode; /* Return negated error code. */ - - if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE))) - lj_vmevent_send(L, TEXIT, - lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK); - setintV(L->top++, J->parent); - setintV(L->top++, J->exitno); - trace_exit_regs(L, ex); - ); - - pc = exd.pc; - cf = cframe_raw(L->cframe); - setcframe_pc(cf, pc); - if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) { - /* Just exit to interpreter. */ - } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) { - if (!(G(L)->hookmask & HOOK_GC)) - lj_gc_step(L); /* Exited because of GC: drive GC forward. */ - } else { - trace_hotside(J, pc); - } - if (bc_op(*pc) == BC_JLOOP) { - BCIns *retpc = &traceref(J, bc_d(*pc))->startins; - if (bc_isret(bc_op(*retpc))) { - if (J->state == LJ_TRACE_RECORD) { - J->patchins = *pc; - J->patchpc = (BCIns *)pc; - *J->patchpc = *retpc; - J->bcskip = 1; - } else { - pc = retpc; - setcframe_pc(cf, pc); - } - } - } - /* Return MULTRES or 0. */ - ERRNO_RESTORE - switch (bc_op(*pc)) { - case BC_CALLM: case BC_CALLMT: - return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) + LJ_FR2); - case BC_RETM: - return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc)); - case BC_TSETM: - return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc)); - default: - if (bc_op(*pc) >= BC_FUNCF) - return (int)((BCReg)(L->top - L->base) + 1); - return 0; - } -} - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.h deleted file mode 100644 index 9eaf91b0e33..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_trace.h +++ /dev/null @@ -1,54 +0,0 @@ -/* -** Trace management. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_TRACE_H -#define _LJ_TRACE_H - -#include "lj_obj.h" - -#if LJ_HASJIT -#include "lj_jit.h" -#include "lj_dispatch.h" - -/* Trace errors. */ -typedef enum { -#define TREDEF(name, msg) LJ_TRERR_##name, -#include "lj_traceerr.h" - LJ_TRERR__MAX -} TraceError; - -LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e); -LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e); - -/* Trace management. */ -LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T); -LJ_FUNC void lj_trace_reenableproto(GCproto *pt); -LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt); -LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno); -LJ_FUNC int lj_trace_flushall(lua_State *L); -LJ_FUNC void lj_trace_initstate(global_State *g); -LJ_FUNC void lj_trace_freestate(global_State *g); - -/* Event handling. */ -LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc); -LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc); -LJ_FUNCA void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc); -LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr); - -/* Signal asynchronous abort of trace or end of trace. 
*/ -#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE) -#define lj_trace_end(J) (J->state = LJ_TRACE_END) - -#else - -#define lj_trace_flushall(L) (UNUSED(L), 0) -#define lj_trace_initstate(g) UNUSED(g) -#define lj_trace_freestate(g) UNUSED(g) -#define lj_trace_abort(g) UNUSED(g) -#define lj_trace_end(J) UNUSED(J) - -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_traceerr.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_traceerr.h deleted file mode 100644 index 12e90d0395c..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_traceerr.h +++ /dev/null @@ -1,63 +0,0 @@ -/* -** Trace compiler error messages. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* This file may be included multiple times with different TREDEF macros. */ - -/* Recording. */ -TREDEF(RECERR, "error thrown or hook called during recording") -TREDEF(TRACEUV, "trace too short") -TREDEF(TRACEOV, "trace too long") -TREDEF(STACKOV, "trace too deep") -TREDEF(SNAPOV, "too many snapshots") -TREDEF(BLACKL, "blacklisted") -TREDEF(RETRY, "retry recording") -TREDEF(NYIBC, "NYI: bytecode %d") - -/* Recording loop ops. */ -TREDEF(LLEAVE, "leaving loop in root trace") -TREDEF(LINNER, "inner loop in root trace") -TREDEF(LUNROLL, "loop unroll limit reached") - -/* Recording calls/returns. */ -TREDEF(BADTYPE, "bad argument type") -TREDEF(CJITOFF, "JIT compilation disabled for function") -TREDEF(CUNROLL, "call unroll limit reached") -TREDEF(DOWNREC, "down-recursion, restarting") -TREDEF(NYICF, "NYI: C function %s") -TREDEF(NYIFF, "NYI: FastFunc %s") -TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s") -TREDEF(NYIRETL, "NYI: return to lower frame") - -/* Recording indexed load/store. */ -TREDEF(STORENN, "store with nil or NaN key") -TREDEF(NOMM, "missing metamethod") -TREDEF(IDXLOOP, "looping index lookup") -TREDEF(NYITMIX, "NYI: mixed sparse/dense table") - -/* Recording C data operations. */ -TREDEF(NOCACHE, "symbol not in cache") -TREDEF(NYICONV, "NYI: unsupported C type conversion") -TREDEF(NYICALL, "NYI: unsupported C function type") - -/* Optimizations. */ -TREDEF(GFAIL, "guard would always fail") -TREDEF(PHIOV, "too many PHIs") -TREDEF(TYPEINS, "persistent type instability") - -/* Assembler. */ -TREDEF(MCODEAL, "failed to allocate mcode memory") -TREDEF(MCODEOV, "machine code too long") -TREDEF(MCODELM, "hit mcode limit (retrying)") -TREDEF(SPILLOV, "too many spill slots") -TREDEF(BADRA, "inconsistent register allocation") -TREDEF(NYIIR, "NYI: cannot assemble IR instruction %d") -TREDEF(NYIPHI, "NYI: PHI shuffling too complex") -TREDEF(NYICOAL, "NYI: register coalescing too complex") - -#undef TREDEF - -/* Detecting unused error messages: - awk -F, '/^TREDEF/ { gsub(/TREDEF./, ""); printf "grep -q LJ_TRERR_%s *.[ch] || echo %s\n", $1, $1}' lj_traceerr.h | sh -*/ diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.c deleted file mode 100644 index d401a3d8e85..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.c +++ /dev/null @@ -1,34 +0,0 @@ -/* -** Userdata handling. -** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -*/ - -#define lj_udata_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_gc.h" -#include "lj_udata.h" - -GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env) -{ - GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata); - global_State *g = G(L); - newwhite(g, ud); /* Not finalized. */ - ud->gct = ~LJ_TUDATA; - ud->udtype = UDTYPE_USERDATA; - ud->len = sz; - /* NOBARRIER: The GCudata is new (marked white). */ - setgcrefnull(ud->metatable); - setgcref(ud->env, obj2gco(env)); - /* Chain to userdata list (after main thread). */ - setgcrefr(ud->nextgc, mainthread(g)->nextgc); - setgcref(mainthread(g)->nextgc, obj2gco(ud)); - return ud; -} - -void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud) -{ - lj_mem_free(g, ud, sizeudata(ud)); -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.h deleted file mode 100644 index 676e970f488..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_udata.h +++ /dev/null @@ -1,14 +0,0 @@ -/* -** Userdata handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_UDATA_H -#define _LJ_UDATA_H - -#include "lj_obj.h" - -LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env); -LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vm.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vm.h deleted file mode 100644 index b31e22f70f7..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vm.h +++ /dev/null @@ -1,114 +0,0 @@ -/* -** Assembler VM interface definitions. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_VM_H -#define _LJ_VM_H - -#include "lj_obj.h" - -/* Entry points for ASM parts of VM. */ -LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1); -LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); -typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud); -LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud, - lua_CPFunction cp); -LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); -LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode); -LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe); -LJ_ASMF void lj_vm_unwind_c_eh(void); -LJ_ASMF void lj_vm_unwind_ff_eh(void); -#if LJ_TARGET_X86ORX64 -LJ_ASMF void lj_vm_unwind_rethrow(void); -#endif - -/* Miscellaneous functions. */ -#if LJ_TARGET_X86ORX64 -LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]); -#endif -#if LJ_TARGET_PPC -void lj_vm_cachesync(void *start, void *end); -#endif -LJ_ASMF double lj_vm_foldarith(double x, double y, int op); -#if LJ_HASJIT -LJ_ASMF double lj_vm_foldfpm(double x, int op); -#endif -#if !LJ_ARCH_HASFPU -/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */ -#endif - -/* Dispatch targets for recording and hooks. */ -LJ_ASMF void lj_vm_record(void); -LJ_ASMF void lj_vm_inshook(void); -LJ_ASMF void lj_vm_rethook(void); -LJ_ASMF void lj_vm_callhook(void); -LJ_ASMF void lj_vm_profhook(void); - -/* Trace exit handling. */ -LJ_ASMF void lj_vm_exit_handler(void); -LJ_ASMF void lj_vm_exit_interp(void); - -/* Internal math helper functions. 
*/ -#if LJ_TARGET_PPC || LJ_TARGET_ARM64 -#define lj_vm_floor floor -#define lj_vm_ceil ceil -#else -LJ_ASMF double lj_vm_floor(double); -LJ_ASMF double lj_vm_ceil(double); -#if LJ_TARGET_ARM -LJ_ASMF double lj_vm_floor_sf(double); -LJ_ASMF double lj_vm_ceil_sf(double); -#endif -#endif -#ifdef LUAJIT_NO_LOG2 -LJ_ASMF double lj_vm_log2(double); -#else -#define lj_vm_log2 log2 -#endif - -#if LJ_HASJIT -#if LJ_TARGET_X86ORX64 -LJ_ASMF void lj_vm_floor_sse(void); -LJ_ASMF void lj_vm_ceil_sse(void); -LJ_ASMF void lj_vm_trunc_sse(void); -LJ_ASMF void lj_vm_powi_sse(void); -#define lj_vm_powi NULL -#else -LJ_ASMF double lj_vm_powi(double, int32_t); -#endif -#if LJ_TARGET_PPC || LJ_TARGET_ARM64 -#define lj_vm_trunc trunc -#else -LJ_ASMF double lj_vm_trunc(double); -#if LJ_TARGET_ARM -LJ_ASMF double lj_vm_trunc_sf(double); -#endif -#endif -#ifdef LUAJIT_NO_EXP2 -LJ_ASMF double lj_vm_exp2(double); -#else -#define lj_vm_exp2 exp2 -#endif -LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t); -#if LJ_HASFFI -LJ_ASMF int lj_vm_errno(void); -#endif -#endif - -/* Continuations for metamethods. */ -LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */ -LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */ -LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */ -LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */ -LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */ -LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */ -LJ_ASMF void lj_cont_stitch(void); /* Trace stitching. */ - -/* Start of the ASM code. */ -LJ_ASMF char lj_vm_asm_begin[]; - -/* Bytecode offsets are relative to lj_vm_asm_begin. */ -#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs))) - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.c deleted file mode 100644 index 87ebcfbd286..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.c +++ /dev/null @@ -1,58 +0,0 @@ -/* -** VM event handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#include - -#define lj_vmevent_c -#define LUA_CORE - -#include "lj_obj.h" -#include "lj_str.h" -#include "lj_tab.h" -#include "lj_state.h" -#include "lj_dispatch.h" -#include "lj_vm.h" -#include "lj_vmevent.h" - -ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev) -{ - global_State *g = G(L); - GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY); - cTValue *tv = lj_tab_getstr(tabV(registry(L)), s); - if (tvistab(tv)) { - int hash = VMEVENT_HASH(ev); - tv = lj_tab_getint(tabV(tv), hash); - if (tv && tvisfunc(tv)) { - lj_state_checkstack(L, LUA_MINSTACK); - setfuncV(L, L->top++, funcV(tv)); - if (LJ_FR2) setnilV(L->top++); - return savestack(L, L->top); - } - } - g->vmevmask &= ~VMEVENT_MASK(ev); /* No handler: cache this fact. */ - return 0; -} - -void lj_vmevent_call(lua_State *L, ptrdiff_t argbase) -{ - global_State *g = G(L); - uint8_t oldmask = g->vmevmask; - uint8_t oldh = hook_save(g); - int status; - g->vmevmask = 0; /* Disable all events. */ - hook_vmevent(g); - status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0); - if (LJ_UNLIKELY(status)) { - /* Really shouldn't use stderr here, but where else to complain? */ - L->top--; - fputs("VM handler failed: ", stderr); - fputs(tvisstr(L->top) ? 
strVdata(L->top) : "?", stderr); - fputc('\n', stderr); - } - hook_restore(g, oldh); - if (g->vmevmask != VMEVENT_NOCACHE) - g->vmevmask = oldmask; /* Restore event mask, but not if not modified. */ -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.h deleted file mode 100644 index 231e00ecdf9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmevent.h +++ /dev/null @@ -1,59 +0,0 @@ -/* -** VM event handling. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LJ_VMEVENT_H -#define _LJ_VMEVENT_H - -#include "lj_obj.h" - -/* Registry key for VM event handler table. */ -#define LJ_VMEVENTS_REGKEY "_VMEVENTS" -#define LJ_VMEVENTS_HSIZE 4 - -#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7)) -#define VMEVENT_HASH(ev) ((int)(ev) & ~7) -#define VMEVENT_HASHIDX(h) ((int)(h) << 3) -#define VMEVENT_NOCACHE 255 - -#define VMEVENT_DEF(name, hash) \ - LJ_VMEVENT_##name##_, \ - LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3) - -/* VM event IDs. */ -typedef enum { - VMEVENT_DEF(BC, 0x00003883), - VMEVENT_DEF(TRACE, 0xb2d91467), - VMEVENT_DEF(RECORD, 0x9284bf4f), - VMEVENT_DEF(TEXIT, 0xb29df2b0), - LJ_VMEVENT__MAX -} VMEvent; - -#ifdef LUAJIT_DISABLE_VMEVENT -#define lj_vmevent_send(L, ev, args) UNUSED(L) -#define lj_vmevent_send_(L, ev, args, post) UNUSED(L) -#else -#define lj_vmevent_send(L, ev, args) \ - if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \ - ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \ - if (argbase) { \ - args \ - lj_vmevent_call(L, argbase); \ - } \ - } -#define lj_vmevent_send_(L, ev, args, post) \ - if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \ - ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \ - if (argbase) { \ - args \ - lj_vmevent_call(L, argbase); \ - post \ - } \ - } - -LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev); -LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase); -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmmath.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmmath.c deleted file mode 100644 index ecad2950d93..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lj_vmmath.c +++ /dev/null @@ -1,152 +0,0 @@ -/* -** Math helper functions for assembler VM. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#define lj_vmmath_c -#define LUA_CORE - -#include -#include - -#include "lj_obj.h" -#include "lj_ir.h" -#include "lj_vm.h" - -/* -- Wrapper functions --------------------------------------------------- */ - -#if LJ_TARGET_X86 && __ELF__ && __PIC__ -/* Wrapper functions to deal with the ELF/x86 PIC disaster. 
*/ -LJ_FUNCA double lj_wrap_log(double x) { return log(x); } -LJ_FUNCA double lj_wrap_log10(double x) { return log10(x); } -LJ_FUNCA double lj_wrap_exp(double x) { return exp(x); } -LJ_FUNCA double lj_wrap_sin(double x) { return sin(x); } -LJ_FUNCA double lj_wrap_cos(double x) { return cos(x); } -LJ_FUNCA double lj_wrap_tan(double x) { return tan(x); } -LJ_FUNCA double lj_wrap_asin(double x) { return asin(x); } -LJ_FUNCA double lj_wrap_acos(double x) { return acos(x); } -LJ_FUNCA double lj_wrap_atan(double x) { return atan(x); } -LJ_FUNCA double lj_wrap_sinh(double x) { return sinh(x); } -LJ_FUNCA double lj_wrap_cosh(double x) { return cosh(x); } -LJ_FUNCA double lj_wrap_tanh(double x) { return tanh(x); } -LJ_FUNCA double lj_wrap_atan2(double x, double y) { return atan2(x, y); } -LJ_FUNCA double lj_wrap_pow(double x, double y) { return pow(x, y); } -LJ_FUNCA double lj_wrap_fmod(double x, double y) { return fmod(x, y); } -#endif - -/* -- Helper functions for generated machine code ------------------------- */ - -double lj_vm_foldarith(double x, double y, int op) -{ - switch (op) { - case IR_ADD - IR_ADD: return x+y; break; - case IR_SUB - IR_ADD: return x-y; break; - case IR_MUL - IR_ADD: return x*y; break; - case IR_DIV - IR_ADD: return x/y; break; - case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y; break; - case IR_POW - IR_ADD: return pow(x, y); break; - case IR_NEG - IR_ADD: return -x; break; - case IR_ABS - IR_ADD: return fabs(x); break; -#if LJ_HASJIT - case IR_ATAN2 - IR_ADD: return atan2(x, y); break; - case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break; - case IR_MIN - IR_ADD: return x > y ? y : x; break; - case IR_MAX - IR_ADD: return x < y ? y : x; break; -#endif - default: return x; - } -} - -#if LJ_HASJIT - -#ifdef LUAJIT_NO_LOG2 -double lj_vm_log2(double a) -{ - return log(a) * 1.4426950408889634074; -} -#endif - -#ifdef LUAJIT_NO_EXP2 -double lj_vm_exp2(double a) -{ - return exp(a * 0.6931471805599453); -} -#endif - -#if !(LJ_TARGET_ARM || LJ_TARGET_ARM64 || LJ_TARGET_PPC) -int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b) -{ - uint32_t y, ua, ub; - lua_assert(b != 0); /* This must be checked before using this function. */ - ua = a < 0 ? (uint32_t)-a : (uint32_t)a; - ub = b < 0 ? (uint32_t)-b : (uint32_t)b; - y = ua % ub; - if (y != 0 && (a^b) < 0) y = y - ub; - if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y; - return (int32_t)y; -} -#endif - -#if !LJ_TARGET_X86ORX64 -/* Unsigned x^k. */ -static double lj_vm_powui(double x, uint32_t k) -{ - double y; - lua_assert(k != 0); - for (; (k & 1) == 0; k >>= 1) x *= x; - y = x; - if ((k >>= 1) != 0) { - for (;;) { - x *= x; - if (k == 1) break; - if (k & 1) y *= x; - k >>= 1; - } - y *= x; - } - return y; -} - -/* Signed x^k. */ -double lj_vm_powi(double x, int32_t k) -{ - if (k > 1) - return lj_vm_powui(x, (uint32_t)k); - else if (k == 1) - return x; - else if (k == 0) - return 1.0; - else - return 1.0 / lj_vm_powui(x, (uint32_t)-k); -} -#endif - -/* Computes fpm(x) for extended math functions. 
*/ -double lj_vm_foldfpm(double x, int fpm) -{ - switch (fpm) { - case IRFPM_FLOOR: return lj_vm_floor(x); - case IRFPM_CEIL: return lj_vm_ceil(x); - case IRFPM_TRUNC: return lj_vm_trunc(x); - case IRFPM_SQRT: return sqrt(x); - case IRFPM_EXP: return exp(x); - case IRFPM_EXP2: return lj_vm_exp2(x); - case IRFPM_LOG: return log(x); - case IRFPM_LOG2: return lj_vm_log2(x); - case IRFPM_LOG10: return log10(x); - case IRFPM_SIN: return sin(x); - case IRFPM_COS: return cos(x); - case IRFPM_TAN: return tan(x); - default: lua_assert(0); - } - return 0; -} - -#if LJ_HASFFI -int lj_vm_errno(void) -{ - return errno; -} -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ljamalg.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ljamalg.c deleted file mode 100644 index be0c52d743e..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ljamalg.c +++ /dev/null @@ -1,96 +0,0 @@ -/* -** LuaJIT core and libraries amalgamation. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -/* -+--------------------------------------------------------------------------+ -| WARNING: Compiling the amalgamation needs a lot of virtual memory | -| (around 300 MB with GCC 4.x)! If you don't have enough physical memory | -| your machine will start swapping to disk and the compile will not finish | -| within a reasonable amount of time. | -| So either compile on a bigger machine or use the non-amalgamated build. | -+--------------------------------------------------------------------------+ -*/ - -#define ljamalg_c -#define LUA_CORE - -/* To get the mremap prototype. Must be defined before any system includes. */ -#if defined(__linux__) && !defined(_GNU_SOURCE) -#define _GNU_SOURCE -#endif - -#ifndef WINVER -#define WINVER 0x0501 -#endif - -#include "lua.h" -#include "lauxlib.h" - -#include "lj_gc.c" -#include "lj_err.c" -#include "lj_char.c" -#include "lj_bc.c" -#include "lj_obj.c" -#include "lj_buf.c" -#include "lj_str.c" -#include "lj_tab.c" -#include "lj_func.c" -#include "lj_udata.c" -#include "lj_meta.c" -#include "lj_debug.c" -#include "lj_state.c" -#include "lj_dispatch.c" -#include "lj_vmevent.c" -#include "lj_vmmath.c" -#include "lj_strscan.c" -#include "lj_strfmt.c" -#include "lj_api.c" -#include "lj_profile.c" -#include "lj_lex.c" -#include "lj_parse.c" -#include "lj_bcread.c" -#include "lj_bcwrite.c" -#include "lj_load.c" -#include "lj_ctype.c" -#include "lj_cdata.c" -#include "lj_cconv.c" -#include "lj_ccall.c" -#include "lj_ccallback.c" -#include "lj_carith.c" -#include "lj_clib.c" -#include "lj_cparse.c" -#include "lj_lib.c" -#include "lj_ir.c" -#include "lj_opt_mem.c" -#include "lj_opt_fold.c" -#include "lj_opt_narrow.c" -#include "lj_opt_dce.c" -#include "lj_opt_loop.c" -#include "lj_opt_split.c" -#include "lj_opt_sink.c" -#include "lj_mcode.c" -#include "lj_snap.c" -#include "lj_record.c" -#include "lj_crecord.c" -#include "lj_ffrecord.c" -#include "lj_asm.c" -#include "lj_trace.c" -#include "lj_gdbjit.c" -#include "lj_alloc.c" - -#include "lib_aux.c" -#include "lib_base.c" -#include "lib_math.c" -#include "lib_string.c" -#include "lib_table.c" -#include "lib_io.c" -#include "lib_os.c" -#include "lib_package.c" -#include "lib_debug.c" -#include "lib_bit.c" -#include "lib_jit.c" -#include "lib_ffi.c" -#include "lib_init.c" - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.h deleted file mode 100644 index 64a930e3e38..00000000000 --- 
a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.h +++ /dev/null @@ -1,406 +0,0 @@ -/* -** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $ -** Lua - An Extensible Extension Language -** Lua.org, PUC-Rio, Brazil (http://www.lua.org) -** See Copyright Notice at the end of this file -*/ - - -#ifndef lua_h -#define lua_h - -#include -#include - - -#include "luaconf.h" - - -#define LUA_VERSION "Lua 5.1" -#define LUA_RELEASE "Lua 5.1.4" -#define LUA_VERSION_NUM 501 -#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio" -#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes" - - -/* mark for precompiled code (`Lua') */ -#define LUA_SIGNATURE "\033Lua" - -/* option for multiple returns in `lua_pcall' and `lua_call' */ -#define LUA_MULTRET (-1) - - -/* -** pseudo-indices -*/ -#define LUA_REGISTRYINDEX (-10000) -#define LUA_ENVIRONINDEX (-10001) -#define LUA_GLOBALSINDEX (-10002) -#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) - - -/* thread status; 0 is OK */ -#define LUA_YIELD 1 -#define LUA_ERRRUN 2 -#define LUA_ERRSYNTAX 3 -#define LUA_ERRMEM 4 -#define LUA_ERRERR 5 - - -typedef struct lua_State lua_State; - -typedef int (*lua_CFunction) (lua_State *L); - - -/* -** functions that read/write blocks when loading/dumping Lua chunks -*/ -typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); - -typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); - - -/* -** prototype for memory-allocation functions -*/ -typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); - - -/* -** basic types -*/ -#define LUA_TNONE (-1) - -#define LUA_TNIL 0 -#define LUA_TBOOLEAN 1 -#define LUA_TLIGHTUSERDATA 2 -#define LUA_TNUMBER 3 -#define LUA_TSTRING 4 -#define LUA_TTABLE 5 -#define LUA_TFUNCTION 6 -#define LUA_TUSERDATA 7 -#define LUA_TTHREAD 8 - - - -/* minimum Lua stack available to a C function */ -#define LUA_MINSTACK 20 - - -/* -** generic extra include file -*/ -#if defined(LUA_USER_H) -#include LUA_USER_H -#endif - - -/* type of numbers in Lua */ -typedef LUA_NUMBER lua_Number; - - -/* type for integer functions */ -typedef LUA_INTEGER lua_Integer; - - -/* communication with LuaJiTTeX */ -LUA_API int luajittex_choose_hash_function; - - -/* -** state manipulation -*/ -LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); -LUA_API void (lua_close) (lua_State *L); -LUA_API lua_State *(lua_newthread) (lua_State *L); - -LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); - - -/* -** basic stack manipulation -*/ -LUA_API int (lua_gettop) (lua_State *L); -LUA_API void (lua_settop) (lua_State *L, int idx); -LUA_API void (lua_pushvalue) (lua_State *L, int idx); -LUA_API void (lua_remove) (lua_State *L, int idx); -LUA_API void (lua_insert) (lua_State *L, int idx); -LUA_API void (lua_replace) (lua_State *L, int idx); -LUA_API int (lua_checkstack) (lua_State *L, int sz); - -LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n); - - -/* -** access functions (stack -> C) -*/ - -LUA_API int (lua_isnumber) (lua_State *L, int idx); -LUA_API int (lua_isstring) (lua_State *L, int idx); -LUA_API int (lua_iscfunction) (lua_State *L, int idx); -LUA_API int (lua_isuserdata) (lua_State *L, int idx); -LUA_API int (lua_type) (lua_State *L, int idx); -LUA_API const char *(lua_typename) (lua_State *L, int tp); - -LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); -LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); -LUA_API int (lua_lessthan) (lua_State *L, int idx1, int 
idx2); - -LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); -LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx); -LUA_API int (lua_toboolean) (lua_State *L, int idx); -LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); -LUA_API size_t (lua_objlen) (lua_State *L, int idx); -LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); -LUA_API void *(lua_touserdata) (lua_State *L, int idx); -LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); -LUA_API const void *(lua_topointer) (lua_State *L, int idx); - - -/* -** push functions (C -> stack) -*/ -LUA_API void (lua_pushnil) (lua_State *L); -LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n); -LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); -LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); -LUA_API void (lua_pushstring) (lua_State *L, const char *s); -LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, - va_list argp); -LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); -LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); -LUA_API void (lua_pushboolean) (lua_State *L, int b); -LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); -LUA_API int (lua_pushthread) (lua_State *L); - - -/* -** get functions (Lua -> stack) -*/ -LUA_API void (lua_gettable) (lua_State *L, int idx); -LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); -LUA_API void (lua_rawget) (lua_State *L, int idx); -LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); -LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); -LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); -LUA_API int (lua_getmetatable) (lua_State *L, int objindex); -LUA_API void (lua_getfenv) (lua_State *L, int idx); - - -/* -** set functions (stack -> Lua) -*/ -LUA_API void (lua_settable) (lua_State *L, int idx); -LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); -LUA_API void (lua_rawset) (lua_State *L, int idx); -LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); -LUA_API int (lua_setmetatable) (lua_State *L, int objindex); -LUA_API int (lua_setfenv) (lua_State *L, int idx); - - -/* -** `load' and `call' functions (load and run Lua code) -*/ -LUA_API void (lua_call) (lua_State *L, int nargs, int nresults); -LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); -LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); -LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, - const char *chunkname); - -LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data); - - -/* -** coroutine functions -*/ -LUA_API int (lua_yield) (lua_State *L, int nresults); -LUA_API int (lua_resume) (lua_State *L, int narg); -LUA_API int (lua_status) (lua_State *L); - -/* -** garbage-collection function and options -*/ - -#define LUA_GCSTOP 0 -#define LUA_GCRESTART 1 -#define LUA_GCCOLLECT 2 -#define LUA_GCCOUNT 3 -#define LUA_GCCOUNTB 4 -#define LUA_GCSTEP 5 -#define LUA_GCSETPAUSE 6 -#define LUA_GCSETSTEPMUL 7 - -LUA_API int (lua_gc) (lua_State *L, int what, int data); - - -/* -** miscellaneous functions -*/ - -LUA_API int (lua_error) (lua_State *L); - -LUA_API int (lua_next) (lua_State *L, int idx); - -LUA_API void (lua_concat) (lua_State *L, int n); - -LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); -LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); - - - -/* -** 
=============================================================== -** some useful macros -** =============================================================== -*/ - -#define lua_pop(L,n) lua_settop(L, -(n)-1) - -#define lua_newtable(L) lua_createtable(L, 0, 0) - -#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) - -#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) - -#define lua_strlen(L,i) lua_objlen(L, (i)) - -#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) -#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) -#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) -#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) -#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN) -#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) -#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) -#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) - -#define lua_pushliteral(L, s) \ - lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) - -#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) -#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) - -#define lua_tostring(L,i) lua_tolstring(L, (i), NULL) - - - -/* -** compatibility macros and functions -*/ - -#define lua_open() luaL_newstate() - -#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) - -#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) - -#define lua_Chunkreader lua_Reader -#define lua_Chunkwriter lua_Writer - - -/* hack */ -LUA_API void lua_setlevel (lua_State *from, lua_State *to); - - -/* -** {====================================================================== -** Debug API -** ======================================================================= -*/ - - -/* -** Event codes -*/ -#define LUA_HOOKCALL 0 -#define LUA_HOOKRET 1 -#define LUA_HOOKLINE 2 -#define LUA_HOOKCOUNT 3 -#define LUA_HOOKTAILRET 4 - - -/* -** Event masks -*/ -#define LUA_MASKCALL (1 << LUA_HOOKCALL) -#define LUA_MASKRET (1 << LUA_HOOKRET) -#define LUA_MASKLINE (1 << LUA_HOOKLINE) -#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) - -typedef struct lua_Debug lua_Debug; /* activation record */ - - -/* Functions to be called by the debuger in specific events */ -typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); - - -LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); -LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); -LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); -LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); -LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); -LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n); -LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); -LUA_API lua_Hook lua_gethook (lua_State *L); -LUA_API int lua_gethookmask (lua_State *L); -LUA_API int lua_gethookcount (lua_State *L); - -/* From Lua 5.2. 
*/ -LUA_API void *lua_upvalueid (lua_State *L, int idx, int n); -LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2); -LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt, - const char *chunkname, const char *mode); - - -#define LUA_OPEQ 0 -#define LUA_OPLT 1 -#define LUA_OPLE 2 -#define LUA_OK 0 - -/* see http://comments.gmane.org/gmane.comp.programming.swig/18673 */ -# define lua_rawlen lua_objlen - - - -struct lua_Debug { - int event; - const char *name; /* (n) */ - const char *namewhat; /* (n) `global', `local', `field', `method' */ - const char *what; /* (S) `Lua', `C', `main', `tail' */ - const char *source; /* (S) */ - int currentline; /* (l) */ - int nups; /* (u) number of upvalues */ - int linedefined; /* (S) */ - int lastlinedefined; /* (S) */ - char short_src[LUA_IDSIZE]; /* (S) */ - /* private part */ - int i_ci; /* active function */ -}; - -/* }====================================================================== */ - - -/****************************************************************************** -* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved. -* -* Permission is hereby granted, free of charge, to any person obtaining -* a copy of this software and associated documentation files (the -* "Software"), to deal in the Software without restriction, including -* without limitation the rights to use, copy, modify, merge, publish, -* distribute, sublicense, and/or sell copies of the Software, and to -* permit persons to whom the Software is furnished to do so, subject to -* the following conditions: -* -* The above copyright notice and this permission notice shall be -* included in all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -******************************************************************************/ - - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.hpp b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.hpp deleted file mode 100644 index 07e9002dc5a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lua.hpp +++ /dev/null @@ -1,9 +0,0 @@ -// C++ wrapper for LuaJIT header files. - -extern "C" { -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" -#include "luajit.h" -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luaconf.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luaconf.h deleted file mode 100644 index 79f514825f8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luaconf.h +++ /dev/null @@ -1,156 +0,0 @@ -/* -** Configuration header. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef luaconf_h -#define luaconf_h - -#ifndef WINVER -#define WINVER 0x0501 -#endif -#include -#include - -/* Default path for loading Lua and C modules with require(). */ -#if defined(_WIN32) -/* -** In Windows, any exclamation mark ('!') in the path is replaced by the -** path of the directory of the executable file of the current process. 
-*/ -#define LUA_LDIR "!\\lua\\" -#define LUA_CDIR "!\\" -#define LUA_PATH_DEFAULT \ - ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" -#define LUA_CPATH_DEFAULT \ - ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll" -#else -/* -** Note to distribution maintainers: do NOT patch the following lines! -** Please read ../doc/install.html#distro and pass PREFIX=/usr instead. -*/ -#ifndef LUA_MULTILIB -#define LUA_MULTILIB "lib" -#endif -#ifndef LUA_LMULTILIB -#define LUA_LMULTILIB "lib" -#endif -#define LUA_LROOT "/usr/local" -#define LUA_LUADIR "/lua/5.1/" -#define LUA_LJDIR "/luajit-2.1.0-beta1/" - -#ifdef LUA_ROOT -#define LUA_JROOT LUA_ROOT -#define LUA_RLDIR LUA_ROOT "/share" LUA_LUADIR -#define LUA_RCDIR LUA_ROOT "/" LUA_MULTILIB LUA_LUADIR -#define LUA_RLPATH ";" LUA_RLDIR "?.lua;" LUA_RLDIR "?/init.lua" -#define LUA_RCPATH ";" LUA_RCDIR "?.so" -#else -#define LUA_JROOT LUA_LROOT -#define LUA_RLPATH -#define LUA_RCPATH -#endif - -#define LUA_JPATH ";" LUA_JROOT "/share" LUA_LJDIR "?.lua" -#define LUA_LLDIR LUA_LROOT "/share" LUA_LUADIR -#define LUA_LCDIR LUA_LROOT "/" LUA_LMULTILIB LUA_LUADIR -#define LUA_LLPATH ";" LUA_LLDIR "?.lua;" LUA_LLDIR "?/init.lua" -#define LUA_LCPATH1 ";" LUA_LCDIR "?.so" -#define LUA_LCPATH2 ";" LUA_LCDIR "loadall.so" - -#define LUA_PATH_DEFAULT "./?.lua" LUA_JPATH LUA_LLPATH LUA_RLPATH -#define LUA_CPATH_DEFAULT "./?.so" LUA_LCPATH1 LUA_RCPATH LUA_LCPATH2 -#endif - -/* Environment variable names for path overrides and initialization code. */ -#define LUA_PATH "LUA_PATH" -#define LUA_CPATH "LUA_CPATH" -#define LUA_INIT "LUA_INIT" - -/* Special file system characters. */ -#if defined(_WIN32) -#define LUA_DIRSEP "\\" -#else -#define LUA_DIRSEP "/" -#endif -#define LUA_PATHSEP ";" -#define LUA_PATH_MARK "?" -#define LUA_EXECDIR "!" -#define LUA_IGMARK "-" -#define LUA_PATH_CONFIG \ - LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \ - LUA_EXECDIR "\n" LUA_IGMARK - -/* Quoting in error messages. */ -#define LUA_QL(x) "'" x "'" -#define LUA_QS LUA_QL("%s") - -/* Various tunables. */ -#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */ -#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */ -#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */ -#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */ -#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */ - -/* Compatibility with older library function names. */ -#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */ -#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */ - -/* Configuration for the frontend (the luajit executable). */ -#if defined(luajit_c) -#define LUA_PROGNAME "luajit" /* Fallback frontend name. */ -#define LUA_PROMPT "> " /* Interactive prompt. */ -#define LUA_PROMPT2 ">> " /* Continuation prompt. */ -#define LUA_MAXINPUT 512 /* Max. input line length. */ -#endif - -/* Note: changing the following defines breaks the Lua 5.1 ABI. */ -#define LUA_INTEGER ptrdiff_t -#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */ -/* -** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using -** unreasonable amounts of stack space, but still retain ABI compatibility. -** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it. -*/ -#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ) - -/* The following defines are here only for compatibility with luaconf.h -** from the standard Lua distribution. They must not be changed for LuaJIT. 
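For a concrete reading of the path macros above (and assuming LUA_ROOT is left undefined, so the LUA_RLPATH/LUA_RCPATH pieces stay empty), the default POSIX search paths expand to:

    LUA_PATH_DEFAULT  = "./?.lua;/usr/local/share/luajit-2.1.0-beta1/?.lua;"
                        "/usr/local/share/lua/5.1/?.lua;/usr/local/share/lua/5.1/?/init.lua"
    LUA_CPATH_DEFAULT = "./?.so;/usr/local/lib/lua/5.1/?.so;/usr/local/lib/lua/5.1/loadall.so"

On Windows the defaults are anchored to '!' instead, which the runtime replaces with the directory of the running executable, as the comment above describes.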
-*/ -#define LUA_NUMBER_DOUBLE -#define LUA_NUMBER double -#define LUAI_UACNUMBER double -#define LUA_NUMBER_SCAN "%lf" -#define LUA_NUMBER_FMT "%.14g" -#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n)) -#define LUAI_MAXNUMBER2STR 32 -#define LUA_INTFRMLEN "l" -#define LUA_INTFRM_T long - -/* Linkage of public API functions. */ -#if defined(LUA_BUILD_AS_DLL) -#if defined(LUA_CORE) || defined(LUA_LIB) -#define LUA_API __declspec(dllexport) -#else -#define LUA_API __declspec(dllimport) -#endif -#else -#define LUA_API extern -#endif - -#define LUALIB_API LUA_API - -/* Support for internal assertions. */ -#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) -#include -#endif -#ifdef LUA_USE_ASSERT -#define lua_assert(x) assert(x) -#endif -#ifdef LUA_USE_APICHECK -#define luai_apicheck(L, o) { (void)L; assert(o); } -#else -#define luai_apicheck(L, o) { (void)L; } -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.c b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.c deleted file mode 100644 index 0ebc73005f8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.c +++ /dev/null @@ -1,570 +0,0 @@ -/* -** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -** -** Major portions taken verbatim or adapted from the Lua interpreter. -** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h -*/ - -#include -#include -#include - -#define luajit_c - -#include "lua.h" -#include "lauxlib.h" -#include "lualib.h" -#include "luajit.h" - -#include "lj_arch.h" - -#if LJ_TARGET_POSIX -#include -#define lua_stdin_is_tty() isatty(0) -#elif LJ_TARGET_WINDOWS -#include -#ifdef __BORLANDC__ -#define lua_stdin_is_tty() isatty(_fileno(stdin)) -#else -#define lua_stdin_is_tty() _isatty(_fileno(stdin)) -#endif -#else -#define lua_stdin_is_tty() 1 -#endif - -#if !LJ_TARGET_CONSOLE -#include -#endif - -static lua_State *globalL = NULL; -static const char *progname = LUA_PROGNAME; - -#if !LJ_TARGET_CONSOLE -static void lstop(lua_State *L, lua_Debug *ar) -{ - (void)ar; /* unused arg. */ - lua_sethook(L, NULL, 0, 0); - /* Avoid luaL_error -- a C hook doesn't add an extra frame. */ - luaL_where(L, 0); - lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1)); - lua_error(L); -} - -static void laction(int i) -{ - signal(i, SIG_DFL); /* if another SIGINT happens before lstop, - terminate process (default action) */ - lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1); -} -#endif - -static void print_usage(void) -{ - fputs("usage: ", stderr); - fputs(progname, stderr); - fputs(" [options]... [script [args]...].\n" - "Available options are:\n" - " -e chunk Execute string " LUA_QL("chunk") ".\n" - " -l name Require library " LUA_QL("name") ".\n" - " -b ... 
Save or list bytecode.\n" - " -j cmd Perform LuaJIT control command.\n" - " -O[opt] Control LuaJIT optimizations.\n" - " -i Enter interactive mode after executing " LUA_QL("script") ".\n" - " -v Show version information.\n" - " -E Ignore environment variables.\n" - " -- Stop handling options.\n" - " - Execute stdin and stop handling options.\n", stderr); - fflush(stderr); -} - -static void l_message(const char *pname, const char *msg) -{ - if (pname) { fputs(pname, stderr); fputc(':', stderr); fputc(' ', stderr); } - fputs(msg, stderr); fputc('\n', stderr); - fflush(stderr); -} - -static int report(lua_State *L, int status) -{ - if (status && !lua_isnil(L, -1)) { - const char *msg = lua_tostring(L, -1); - if (msg == NULL) msg = "(error object is not a string)"; - l_message(progname, msg); - lua_pop(L, 1); - } - return status; -} - -static int traceback(lua_State *L) -{ - if (!lua_isstring(L, 1)) { /* Non-string error object? Try metamethod. */ - if (lua_isnoneornil(L, 1) || - !luaL_callmeta(L, 1, "__tostring") || - !lua_isstring(L, -1)) - return 1; /* Return non-string error object. */ - lua_remove(L, 1); /* Replace object by result of __tostring metamethod. */ - } - luaL_traceback(L, L, lua_tostring(L, 1), 1); - return 1; -} - -static int docall(lua_State *L, int narg, int clear) -{ - int status; - int base = lua_gettop(L) - narg; /* function index */ - lua_pushcfunction(L, traceback); /* push traceback function */ - lua_insert(L, base); /* put it under chunk and args */ -#if !LJ_TARGET_CONSOLE - signal(SIGINT, laction); -#endif - status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base); -#if !LJ_TARGET_CONSOLE - signal(SIGINT, SIG_DFL); -#endif - lua_remove(L, base); /* remove traceback function */ - /* force a complete garbage collection in case of errors */ - if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); - return status; -} - -static void print_version(void) -{ - fputs(LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n", stdout); -} - -static void print_jit_status(lua_State *L) -{ - int n; - const char *s; - lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); - lua_getfield(L, -1, "jit"); /* Get jit.* module table. */ - lua_remove(L, -2); - lua_getfield(L, -1, "status"); - lua_remove(L, -2); - n = lua_gettop(L); - lua_call(L, 0, LUA_MULTRET); - fputs(lua_toboolean(L, n) ? 
"JIT: ON" : "JIT: OFF", stdout); - for (n++; (s = lua_tostring(L, n)); n++) { - putc(' ', stdout); - fputs(s, stdout); - } - putc('\n', stdout); -} - -static int getargs(lua_State *L, char **argv, int n) -{ - int narg; - int i; - int argc = 0; - while (argv[argc]) argc++; /* count total number of arguments */ - narg = argc - (n + 1); /* number of arguments to the script */ - luaL_checkstack(L, narg + 3, "too many arguments to script"); - for (i = n+1; i < argc; i++) - lua_pushstring(L, argv[i]); - lua_createtable(L, narg, n + 1); - for (i = 0; i < argc; i++) { - lua_pushstring(L, argv[i]); - lua_rawseti(L, -2, i - n); - } - return narg; -} - -static int dofile(lua_State *L, const char *name) -{ - int status = luaL_loadfile(L, name) || docall(L, 0, 1); - return report(L, status); -} - -static int dostring(lua_State *L, const char *s, const char *name) -{ - int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1); - return report(L, status); -} - -static int dolibrary(lua_State *L, const char *name) -{ - lua_getglobal(L, "require"); - lua_pushstring(L, name); - return report(L, docall(L, 1, 1)); -} - -static void write_prompt(lua_State *L, int firstline) -{ - const char *p; - lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2"); - p = lua_tostring(L, -1); - if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2; - fputs(p, stdout); - fflush(stdout); - lua_pop(L, 1); /* remove global */ -} - -static int incomplete(lua_State *L, int status) -{ - if (status == LUA_ERRSYNTAX) { - size_t lmsg; - const char *msg = lua_tolstring(L, -1, &lmsg); - const char *tp = msg + lmsg - (sizeof(LUA_QL("")) - 1); - if (strstr(msg, LUA_QL("")) == tp) { - lua_pop(L, 1); - return 1; - } - } - return 0; /* else... */ -} - -static int pushline(lua_State *L, int firstline) -{ - char buf[LUA_MAXINPUT]; - write_prompt(L, firstline); - if (fgets(buf, LUA_MAXINPUT, stdin)) { - size_t len = strlen(buf); - if (len > 0 && buf[len-1] == '\n') - buf[len-1] = '\0'; - if (firstline && buf[0] == '=') - lua_pushfstring(L, "return %s", buf+1); - else - lua_pushstring(L, buf); - return 1; - } - return 0; -} - -static int loadline(lua_State *L) -{ - int status; - lua_settop(L, 0); - if (!pushline(L, 1)) - return -1; /* no input */ - for (;;) { /* repeat until gets a complete line */ - status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin"); - if (!incomplete(L, status)) break; /* cannot try to add lines? */ - if (!pushline(L, 0)) /* no more input? */ - return -1; - lua_pushliteral(L, "\n"); /* add a new line... */ - lua_insert(L, -2); /* ...between the two lines */ - lua_concat(L, 3); /* join them */ - } - lua_remove(L, 1); /* remove line */ - return status; -} - -static void dotty(lua_State *L) -{ - int status; - const char *oldprogname = progname; - progname = NULL; - while ((status = loadline(L)) != -1) { - if (status == 0) status = docall(L, 0, 0); - report(L, status); - if (status == 0 && lua_gettop(L) > 0) { /* any result to print? 
*/ - lua_getglobal(L, "print"); - lua_insert(L, 1); - if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0) - l_message(progname, - lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)", - lua_tostring(L, -1))); - } - } - lua_settop(L, 0); /* clear stack */ - fputs("\n", stdout); - fflush(stdout); - progname = oldprogname; -} - -static int handle_script(lua_State *L, char **argv, int n) -{ - int status; - const char *fname; - int narg = getargs(L, argv, n); /* collect arguments */ - lua_setglobal(L, "arg"); - fname = argv[n]; - if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0) - fname = NULL; /* stdin */ - status = luaL_loadfile(L, fname); - lua_insert(L, -(narg+1)); - if (status == 0) - status = docall(L, narg, 0); - else - lua_pop(L, narg); - return report(L, status); -} - -/* Load add-on module. */ -static int loadjitmodule(lua_State *L) -{ - lua_getglobal(L, "require"); - lua_pushliteral(L, "jit."); - lua_pushvalue(L, -3); - lua_concat(L, 2); - if (lua_pcall(L, 1, 1, 0)) { - const char *msg = lua_tostring(L, -1); - if (msg && !strncmp(msg, "module ", 7)) - goto nomodule; - return report(L, 1); - } - lua_getfield(L, -1, "start"); - if (lua_isnil(L, -1)) { - nomodule: - l_message(progname, - "unknown luaJIT command or jit.* modules not installed"); - return 1; - } - lua_remove(L, -2); /* Drop module table. */ - return 0; -} - -/* Run command with options. */ -static int runcmdopt(lua_State *L, const char *opt) -{ - int narg = 0; - if (opt && *opt) { - for (;;) { /* Split arguments. */ - const char *p = strchr(opt, ','); - narg++; - if (!p) break; - if (p == opt) - lua_pushnil(L); - else - lua_pushlstring(L, opt, (size_t)(p - opt)); - opt = p + 1; - } - if (*opt) - lua_pushstring(L, opt); - else - lua_pushnil(L); - } - return report(L, lua_pcall(L, narg, 0, 0)); -} - -/* JIT engine control command: try jit library first or load add-on module. */ -static int dojitcmd(lua_State *L, const char *cmd) -{ - const char *opt = strchr(cmd, '='); - lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd)); - lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); - lua_getfield(L, -1, "jit"); /* Get jit.* module table. */ - lua_remove(L, -2); - lua_pushvalue(L, -2); - lua_gettable(L, -2); /* Lookup library function. */ - if (!lua_isfunction(L, -1)) { - lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */ - if (loadjitmodule(L)) - return 1; - } else { - lua_remove(L, -2); /* Drop jit.* table. */ - } - lua_remove(L, -2); /* Drop module name. */ - return runcmdopt(L, opt ? opt+1 : opt); -} - -/* Optimization flags. */ -static int dojitopt(lua_State *L, const char *opt) -{ - lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); - lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */ - lua_remove(L, -2); - lua_getfield(L, -1, "start"); - lua_remove(L, -2); - return runcmdopt(L, opt); -} - -/* Save or list bytecode. 
*/ -static int dobytecode(lua_State *L, char **argv) -{ - int narg = 0; - lua_pushliteral(L, "bcsave"); - if (loadjitmodule(L)) - return 1; - if (argv[0][2]) { - narg++; - argv[0][1] = '-'; - lua_pushstring(L, argv[0]+1); - } - for (argv++; *argv != NULL; narg++, argv++) - lua_pushstring(L, *argv); - return report(L, lua_pcall(L, narg, 0, 0)); -} - -/* check that argument has no extra characters at the end */ -#define notail(x) {if ((x)[2] != '\0') return -1;} - -#define FLAGS_INTERACTIVE 1 -#define FLAGS_VERSION 2 -#define FLAGS_EXEC 4 -#define FLAGS_OPTION 8 -#define FLAGS_NOENV 16 - -static int collectargs(char **argv, int *flags) -{ - int i; - for (i = 1; argv[i] != NULL; i++) { - if (argv[i][0] != '-') /* Not an option? */ - return i; - switch (argv[i][1]) { /* Check option. */ - case '-': - notail(argv[i]); - return (argv[i+1] != NULL ? i+1 : 0); - case '\0': - return i; - case 'i': - notail(argv[i]); - *flags |= FLAGS_INTERACTIVE; - /* fallthrough */ - case 'v': - notail(argv[i]); - *flags |= FLAGS_VERSION; - break; - case 'e': - *flags |= FLAGS_EXEC; - case 'j': /* LuaJIT extension */ - case 'l': - *flags |= FLAGS_OPTION; - if (argv[i][2] == '\0') { - i++; - if (argv[i] == NULL) return -1; - } - break; - case 'O': break; /* LuaJIT extension */ - case 'b': /* LuaJIT extension */ - if (*flags) return -1; - *flags |= FLAGS_EXEC; - return 0; - case 'E': - *flags |= FLAGS_NOENV; - break; - default: return -1; /* invalid option */ - } - } - return 0; -} - -static int runargs(lua_State *L, char **argv, int n) -{ - int i; - for (i = 1; i < n; i++) { - if (argv[i] == NULL) continue; - lua_assert(argv[i][0] == '-'); - switch (argv[i][1]) { /* option */ - case 'e': { - const char *chunk = argv[i] + 2; - if (*chunk == '\0') chunk = argv[++i]; - lua_assert(chunk != NULL); - if (dostring(L, chunk, "=(command line)") != 0) - return 1; - break; - } - case 'l': { - const char *filename = argv[i] + 2; - if (*filename == '\0') filename = argv[++i]; - lua_assert(filename != NULL); - if (dolibrary(L, filename)) - return 1; /* stop if file fails */ - break; - } - case 'j': { /* LuaJIT extension */ - const char *cmd = argv[i] + 2; - if (*cmd == '\0') cmd = argv[++i]; - lua_assert(cmd != NULL); - if (dojitcmd(L, cmd)) - return 1; - break; - } - case 'O': /* LuaJIT extension */ - if (dojitopt(L, argv[i] + 2)) - return 1; - break; - case 'b': /* LuaJIT extension */ - return dobytecode(L, argv+i); - default: break; - } - } - return 0; -} - -static int handle_luainit(lua_State *L) -{ -#if LJ_TARGET_CONSOLE - const char *init = NULL; -#else - const char *init = getenv(LUA_INIT); -#endif - if (init == NULL) - return 0; /* status OK */ - else if (init[0] == '@') - return dofile(L, init+1); - else - return dostring(L, init, "=" LUA_INIT); -} - -static struct Smain { - char **argv; - int argc; - int status; -} smain; - -static int pmain(lua_State *L) -{ - struct Smain *s = &smain; - char **argv = s->argv; - int script; - int flags = 0; - globalL = L; - if (argv[0] && argv[0][0]) progname = argv[0]; - LUAJIT_VERSION_SYM(); /* linker-enforced version check */ - script = collectargs(argv, &flags); - if (script < 0) { /* invalid args? 
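collectargs() only classifies the options and finds the script position; runargs() then executes them in order before the script itself runs. A few representative invocations, matching the usage text printed earlier:

    luajit -e "print(2^32)"            run a one-line chunk and exit
    luajit -lfoo script.lua            require("foo") first, then run the script
    luajit -O3 -jv script.lua          set optimization flags and load the jit.v module
    luajit -b script.lua script.out    hand the remaining arguments to jit.bcsave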
*/ - print_usage(); - s->status = 1; - return 0; - } - if ((flags & FLAGS_NOENV)) { - lua_pushboolean(L, 1); - lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV"); - } - lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ - luaL_openlibs(L); /* open libraries */ - lua_gc(L, LUA_GCRESTART, -1); - if (!(flags & FLAGS_NOENV)) { - s->status = handle_luainit(L); - if (s->status != 0) return 0; - } - if ((flags & FLAGS_VERSION)) print_version(); - s->status = runargs(L, argv, (script > 0) ? script : s->argc); - if (s->status != 0) return 0; - if (script) { - s->status = handle_script(L, argv, script); - if (s->status != 0) return 0; - } - if ((flags & FLAGS_INTERACTIVE)) { - print_jit_status(L); - dotty(L); - } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) { - if (lua_stdin_is_tty()) { - print_version(); - print_jit_status(L); - dotty(L); - } else { - dofile(L, NULL); /* executes stdin as a file */ - } - } - return 0; -} - -int main(int argc, char **argv) -{ - int status; - lua_State *L = lua_open(); /* create state */ - if (L == NULL) { - l_message(argv[0], "cannot create state: not enough memory"); - return EXIT_FAILURE; - } - smain.argc = argc; - smain.argv = argv; - status = lua_cpcall(L, pmain, NULL); - report(L, status); - lua_close(L); - return (status || smain.status) ? EXIT_FAILURE : EXIT_SUCCESS; -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.h deleted file mode 100644 index 960418519ee..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/luajit.h +++ /dev/null @@ -1,79 +0,0 @@ -/* -** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ -** -** Copyright (C) 2005-2015 Mike Pall. All rights reserved. -** -** Permission is hereby granted, free of charge, to any person obtaining -** a copy of this software and associated documentation files (the -** "Software"), to deal in the Software without restriction, including -** without limitation the rights to use, copy, modify, merge, publish, -** distribute, sublicense, and/or sell copies of the Software, and to -** permit persons to whom the Software is furnished to do so, subject to -** the following conditions: -** -** The above copyright notice and this permission notice shall be -** included in all copies or substantial portions of the Software. -** -** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -** -** [ MIT license: http://www.opensource.org/licenses/mit-license.php ] -*/ - -#ifndef _LUAJIT_H -#define _LUAJIT_H - -#include "lua.h" - -#define LUAJIT_VERSION "LuaJIT 2.1.0-beta1" -#define LUAJIT_VERSION_NUM 20100 /* Version 2.1.0 = 02.01.00. */ -#define LUAJIT_VERSION_SYM luaJIT_version_2_1_0_beta1 -#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2015 Mike Pall" -#define LUAJIT_URL "http://luajit.org/" - -/* Modes for luaJIT_setmode. */ -#define LUAJIT_MODE_MASK 0x00ff - -enum { - LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */ - LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */ - - LUAJIT_MODE_FUNC, /* Change mode for a function. 
*/ - LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */ - LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */ - - LUAJIT_MODE_TRACE, /* Flush a compiled trace. */ - - LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */ - - LUAJIT_MODE_MAX -}; - -/* Flags or'ed in to the mode. */ -#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */ -#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */ -#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */ - -/* LuaJIT public C API. */ - -/* Control the JIT engine. */ -LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode); - -/* Low-overhead profiling API. */ -typedef void (*luaJIT_profile_callback)(void *data, lua_State *L, - int samples, int vmstate); -LUA_API void luaJIT_profile_start(lua_State *L, const char *mode, - luaJIT_profile_callback cb, void *data); -LUA_API void luaJIT_profile_stop(lua_State *L); -LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt, - int depth, size_t *len); - -/* Enforce (dynamic) linker error for version mismatches. Call from main. */ -LUA_API void LUAJIT_VERSION_SYM(void); - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lualib.h b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lualib.h deleted file mode 100644 index 58dd3349aa9..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/lualib.h +++ /dev/null @@ -1,47 +0,0 @@ -/* -** Standard library header. -** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -*/ - -#ifndef _LUALIB_H -#define _LUALIB_H - -#include "lua.h" - -#define LUA_FILEHANDLE "FILE*" - -#define LUA_COLIBNAME "coroutine" -#define LUA_MATHLIBNAME "math" -#define LUA_STRLIBNAME "string" -#define LUA_TABLIBNAME "table" -#define LUA_IOLIBNAME "io" -#define LUA_OSLIBNAME "os" -#define LUA_LOADLIBNAME "package" -#define LUA_DBLIBNAME "debug" -#define LUA_BITLIBNAME "bit" -#define LUA_JITLIBNAME "jit" -#define LUA_FFILIBNAME "ffi" - -#define LUA_BITLIBNAME_32 "bit32" - -LUALIB_API int luaopen_base(lua_State *L); -LUALIB_API int luaopen_math(lua_State *L); -LUALIB_API int luaopen_string(lua_State *L); -LUALIB_API int luaopen_table(lua_State *L); -LUALIB_API int luaopen_io(lua_State *L); -LUALIB_API int luaopen_os(lua_State *L); -LUALIB_API int luaopen_package(lua_State *L); -LUALIB_API int luaopen_debug(lua_State *L); -LUALIB_API int luaopen_bit(lua_State *L); -LUALIB_API int luaopen_jit(lua_State *L); -LUALIB_API int luaopen_ffi(lua_State *L); - -LUALIB_API int luaopen_bit32(lua_State *L); - -LUALIB_API void luaL_openlibs(lua_State *L); - -#ifndef lua_assert -#define lua_assert(x) ((void)0) -#endif - -#endif diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/msvcbuild.bat b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/msvcbuild.bat deleted file mode 100755 index 0360d7e33b8..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/msvcbuild.bat +++ /dev/null @@ -1,114 +0,0 @@ -@rem Script to build LuaJIT with MSVC. -@rem Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -@rem -@rem Either open a "Visual Studio .NET Command Prompt" -@rem (Note that the Express Edition does not contain an x64 compiler) -@rem -or- -@rem Open a "Windows SDK Command Shell" and set the compiler environment: -@rem setenv /release /x86 -@rem -or- -@rem setenv /release /x64 -@rem -@rem Then cd to this directory and run this script. 
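Together, lua.h, lualib.h and luajit.h are all a host program needs for basic embedding. A minimal sketch using only declarations from these headers (error handling omitted; the luaJIT_setmode call is included purely to show the control API and simply switches the JIT engine off):

    #include "lua.h"
    #include "lualib.h"
    #include "luajit.h"

    int main(void)
    {
      lua_State *L = lua_open();   /* lua_open() maps to luaL_newstate() */
      if (L == NULL) return 1;
      luaL_openlibs(L);            /* standard libraries plus bit, jit, ffi */
      luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_OFF);
      lua_getglobal(L, "print");
      lua_pushliteral(L, "hello from the embedded VM");
      lua_call(L, 1, 0);           /* unprotected call; real code would use lua_pcall */
      lua_close(L);
      return 0;
    }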
- -@if not defined INCLUDE goto :FAIL - -@setlocal -@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE -@set LJLINK=link /nologo -@set LJMT=mt /nologo -@set LJLIB=lib /nologo /nodefaultlib -@set DASMDIR=..\dynasm -@set DASM=%DASMDIR%\dynasm.lua -@set LJDLLNAME=lua51.dll -@set LJLIBNAME=lua51.lib -@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - -%LJCOMPILE% host\minilua.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:minilua.exe minilua.obj -@if errorlevel 1 goto :BAD -if exist minilua.exe.manifest^ - %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe - -@set DASMFLAGS=-D WIN -D JIT -D FFI -D P64 -@set LJARCH=x64 -@minilua -@if errorlevel 8 goto :X64 -@set DASMFLAGS=-D WIN -D JIT -D FFI -@set LJARCH=x86 -@set LJCOMPILE=%LJCOMPILE% /arch:SSE2 -:X64 -minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x86.dasc -@if errorlevel 1 goto :BAD - -%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:buildvm.exe buildvm*.obj -@if errorlevel 1 goto :BAD -if exist buildvm.exe.manifest^ - %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe - -buildvm -m peobj -o lj_vm.obj -@if errorlevel 1 goto :BAD -buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m libdef -o lj_libdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m recdef -o lj_recdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m folddef -o lj_folddef.h lj_opt_fold.c -@if errorlevel 1 goto :BAD - -@if "%1" neq "debug" goto :NODEBUG -@shift -@set LJCOMPILE=%LJCOMPILE% /Zi -@set LJLINK=%LJLINK% /debug -:NODEBUG -@if "%1"=="amalg" goto :AMALGDLL -@if "%1"=="static" goto :STATIC -%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -@if errorlevel 1 goto :BAD -%LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj -@if errorlevel 1 goto :BAD -@goto :MTDLL -:STATIC -%LJCOMPILE% lj_*.c lib_*.c -@if errorlevel 1 goto :BAD -%LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj -@if errorlevel 1 goto :BAD -@goto :MTDLL -:AMALGDLL -%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c -@if errorlevel 1 goto :BAD -%LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj -@if errorlevel 1 goto :BAD -:MTDLL -if exist %LJDLLNAME%.manifest^ - %LJMT% -manifest %LJDLLNAME%.manifest -outputresource:%LJDLLNAME%;2 - -%LJCOMPILE% luajit.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:luajit.exe luajit.obj %LJLIBNAME% -@if errorlevel 1 goto :BAD -if exist luajit.exe.manifest^ - %LJMT% -manifest luajit.exe.manifest -outputresource:luajit.exe - -@del *.obj *.manifest minilua.exe buildvm.exe -@echo. -@echo === Successfully built LuaJIT for Windows/%LJARCH% === - -@goto :END -:BAD -@echo. -@echo ******************************************************* -@echo *** Build FAILED -- Please check the error messages *** -@echo ******************************************************* -@goto :END -:FAIL -@echo You must open a "Visual Studio .NET Command Prompt" to run this script -:END diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ps4build.bat b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ps4build.bat deleted file mode 100755 index 337a44fa72d..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/ps4build.bat +++ /dev/null @@ -1,103 +0,0 @@ -@rem Script to build LuaJIT with the PS4 SDK. -@rem Donated to the public domain. 
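The MSVC build is a two-stage bootstrap: the host compiler first produces minilua.exe (to run DynASM over vm_x86.dasc) and buildvm.exe (to emit lj_vm.obj and the generated lj_*def.h headers), and only then compiles the library and luajit.exe. The optional arguments are positional: "debug" must come first if present, optionally followed by "amalg" or "static"; with no arguments the script produces lua51.dll, while for example "msvcbuild.bat debug static" yields a static debug library (lua51.lib).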
-@rem -@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler) -@rem Then cd to this directory and run this script. - -@if not defined INCLUDE goto :FAIL -@if not defined SCE_ORBIS_SDK_DIR goto :FAIL - -@setlocal -@rem ---- Host compiler ---- -@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE -@set LJLINK=link /nologo -@set LJMT=mt /nologo -@set DASMDIR=..\dynasm -@set DASM=%DASMDIR%\dynasm.lua -@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - -%LJCOMPILE% host\minilua.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:minilua.exe minilua.obj -@if errorlevel 1 goto :BAD -if exist minilua.exe.manifest^ - %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe - -@rem Check for 64 bit host compiler. -@minilua -@if not errorlevel 8 goto :FAIL - -@set DASMFLAGS=-D P64 -D NO_UNWIND -minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x86.dasc -@if errorlevel 1 goto :BAD - -%LJCOMPILE% /I "." /I %DASMDIR% -DLUAJIT_TARGET=LUAJIT_ARCH_X64 -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_NO_UNWIND host\buildvm*.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:buildvm.exe buildvm*.obj -@if errorlevel 1 goto :BAD -if exist buildvm.exe.manifest^ - %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe - -buildvm -m elfasm -o lj_vm.s -@if errorlevel 1 goto :BAD -buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m libdef -o lj_libdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m recdef -o lj_recdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m folddef -o lj_folddef.h lj_opt_fold.c -@if errorlevel 1 goto :BAD - -@rem ---- Cross compiler ---- -@set LJCOMPILE="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-clang" -c -Wall -DLUAJIT_DISABLE_FFI -@set LJLIB="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-ar" rcus -@set INCLUDE="" - -orbis-as -o lj_vm.o lj_vm.s - -@if "%1" neq "debug" goto :NODEBUG -@shift -@set LJCOMPILE=%LJCOMPILE% -g -O0 -@set TARGETLIB=libluajitD.a -goto :BUILD -:NODEBUG -@set LJCOMPILE=%LJCOMPILE% -O2 -@set TARGETLIB=libluajit.a -:BUILD -del %TARGETLIB% -@if "%1"=="amalg" goto :AMALG -for %%f in (lj_*.c lib_*.c) do ( - %LJCOMPILE% %%f - @if errorlevel 1 goto :BAD -) - -%LJLIB% %TARGETLIB% lj_*.o lib_*.o -@if errorlevel 1 goto :BAD -@goto :NOAMALG -:AMALG -%LJCOMPILE% ljamalg.c -@if errorlevel 1 goto :BAD -%LJLIB% %TARGETLIB% ljamalg.o lj_vm.o -@if errorlevel 1 goto :BAD -:NOAMALG - -@del *.o *.obj *.manifest minilua.exe buildvm.exe -@echo. -@echo === Successfully built LuaJIT for PS4 === - -@goto :END -:BAD -@echo. -@echo ******************************************************* -@echo *** Build FAILED -- Please check the error messages *** -@echo ******************************************************* -@goto :END -:FAIL -@echo To run this script you must open a "Visual Studio .NET Command Prompt" -@echo (64 bit host compiler). The PS4 Orbis SDK must be installed, too. -:END diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/psvitabuild.bat b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/psvitabuild.bat deleted file mode 100755 index 3991dc65353..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/psvitabuild.bat +++ /dev/null @@ -1,93 +0,0 @@ -@rem Script to build LuaJIT with the PS Vita SDK. -@rem Donated to the public domain. 
-@rem -@rem Open a "Visual Studio .NET Command Prompt" (32 bit host compiler) -@rem Then cd to this directory and run this script. - -@if not defined INCLUDE goto :FAIL -@if not defined SCE_PSP2_SDK_DIR goto :FAIL - -@setlocal -@rem ---- Host compiler ---- -@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE -@set LJLINK=link /nologo -@set LJMT=mt /nologo -@set DASMDIR=..\dynasm -@set DASM=%DASMDIR%\dynasm.lua -@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - -%LJCOMPILE% host\minilua.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:minilua.exe minilua.obj -@if errorlevel 1 goto :BAD -if exist minilua.exe.manifest^ - %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe - -@rem Check for 32 bit host compiler. -@minilua -@if errorlevel 8 goto :FAIL - -@set DASMFLAGS=-D FPU -D HFABI -minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_arm.dasc -@if errorlevel 1 goto :BAD - -%LJCOMPILE% /I "." /I %DASMDIR% -DLUAJIT_TARGET=LUAJIT_ARCH_ARM -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLJ_TARGET_PSVITA=1 host\buildvm*.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:buildvm.exe buildvm*.obj -@if errorlevel 1 goto :BAD -if exist buildvm.exe.manifest^ - %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe - -buildvm -m elfasm -o lj_vm.s -@if errorlevel 1 goto :BAD -buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m libdef -o lj_libdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m recdef -o lj_recdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m folddef -o lj_folddef.h lj_opt_fold.c -@if errorlevel 1 goto :BAD - -@rem ---- Cross compiler ---- -@set LJCOMPILE="%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2snc" -c -w -DLUAJIT_DISABLE_FFI -DLUAJIT_USE_SYSMALLOC -@set LJLIB="%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2ld32" -r --output= -@set INCLUDE="" - -"%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2as" -o lj_vm.o lj_vm.s - -@if "%1" neq "debug" goto :NODEBUG -@shift -@set LJCOMPILE=%LJCOMPILE% -g -O0 -@set TARGETLIB=libluajitD.a -goto :BUILD -:NODEBUG -@set LJCOMPILE=%LJCOMPILE% -O2 -@set TARGETLIB=libluajit.a -:BUILD -del %TARGETLIB% - -%LJCOMPILE% ljamalg.c -@if errorlevel 1 goto :BAD -%LJLIB%%TARGETLIB% ljamalg.o lj_vm.o -@if errorlevel 1 goto :BAD - -@del *.o *.obj *.manifest minilua.exe buildvm.exe -@echo. -@echo === Successfully built LuaJIT for PS Vita === - -@goto :END -:BAD -@echo. -@echo ******************************************************* -@echo *** Build FAILED -- Please check the error messages *** -@echo ******************************************************* -@goto :END -:FAIL -@echo To run this script you must open a "Visual Studio .NET Command Prompt" -@echo (32 bit host compiler). The PS Vita SDK must be installed, too. -:END diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm.dasc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm.dasc deleted file mode 100644 index 0bd9b147ad3..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm.dasc +++ /dev/null @@ -1,4585 +0,0 @@ -|// Low-level VM code for ARM CPUs. -|// Bytecode interpreter, fast functions and helper functions. -|// Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -| -|.arch arm -|.section code_op, code_sub -| -|.actionlist build_actionlist -|.globals GLOB_ -|.globalnames globnames -|.externnames extnames -| -|// Note: The ragged indentation of the instructions is intentional. -|// The starting columns indicate data dependencies. -| -|//----------------------------------------------------------------------- -| -|// Fixed register assignments for the interpreter. -| -|// The following must be C callee-save. -|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding. -|.define KBASE, r5 // Constants of current Lua function. -|.define PC, r6 // Next PC. -|.define DISPATCH, r7 // Opcode dispatch table. -|.define LREG, r8 // Register holding lua_State (also in SAVE_L). -| -|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+. -|.define BASE, r9 // Base of current Lua stack frame. -| -|// The following temporaries are not saved across C calls, except for RA/RC. -|.define RA, r10 // Callee-save. -|.define RC, r11 // Callee-save. -|.define RB, r12 -|.define OP, r12 // Overlaps RB, must not be lr. -|.define INS, lr -| -|// Calling conventions. Also used as temporaries. -|.define CARG1, r0 -|.define CARG2, r1 -|.define CARG3, r2 -|.define CARG4, r3 -|.define CARG12, r0 // For 1st soft-fp double. -|.define CARG34, r2 // For 2nd soft-fp double. -| -|.define CRET1, r0 -|.define CRET2, r1 -| -|// Stack layout while in interpreter. Must match with lj_frame.h. -|.define SAVE_R4, [sp, #28] -|.define CFRAME_SPACE, #28 -|.define SAVE_ERRF, [sp, #24] -|.define SAVE_NRES, [sp, #20] -|.define SAVE_CFRAME, [sp, #16] -|.define SAVE_L, [sp, #12] -|.define SAVE_PC, [sp, #8] -|.define SAVE_MULTRES, [sp, #4] -|.define ARG5, [sp] -| -|.define TMPDhi, [sp, #4] -|.define TMPDlo, [sp] -|.define TMPD, [sp] -|.define TMPDp, sp -| -|.if FPU -|.macro saveregs -| push {r5, r6, r7, r8, r9, r10, r11, lr} -| vpush {d8-d15} -| sub sp, sp, CFRAME_SPACE+4 -| str r4, SAVE_R4 -|.endmacro -|.macro restoreregs_ret -| ldr r4, SAVE_R4 -| add sp, sp, CFRAME_SPACE+4 -| vpop {d8-d15} -| pop {r5, r6, r7, r8, r9, r10, r11, pc} -|.endmacro -|.else -|.macro saveregs -| push {r4, r5, r6, r7, r8, r9, r10, r11, lr} -| sub sp, sp, CFRAME_SPACE -|.endmacro -|.macro restoreregs_ret -| add sp, sp, CFRAME_SPACE -| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} -|.endmacro -|.endif -| -|// Type definitions. Some of these are only used for documentation. -|.type L, lua_State, LREG -|.type GL, global_State -|.type TVALUE, TValue -|.type GCOBJ, GCobj -|.type STR, GCstr -|.type TAB, GCtab -|.type LFUNC, GCfuncL -|.type CFUNC, GCfuncC -|.type PROTO, GCproto -|.type UPVAL, GCupval -|.type NODE, Node -|.type NARGS8, int -|.type TRACE, GCtrace -|.type SBUF, SBuf -| -|//----------------------------------------------------------------------- -| -|// Trap for not-yet-implemented parts. -|.macro NYI; ud; .endmacro -| -|//----------------------------------------------------------------------- -| -|// Access to frame relative to BASE. -|.define FRAME_FUNC, #-8 -|.define FRAME_PC, #-4 -| -|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro -|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro -|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro -|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro -|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro -| -|// Instruction fetch. -|.macro ins_NEXT1 -| ldrb OP, [PC] -|.endmacro -|.macro ins_NEXT2 -| ldr INS, [PC], #4 -|.endmacro -|// Instruction decode+dispatch. 
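The decode macros follow the 32-bit bytecode layout OP | A<<8 | C<<16 | B<<24 (with D occupying the upper 16 bits of two-operand instructions). Because MASKR8 is preloaded with 255*8, shifting the instruction right by 5, 13 or 21 lands the A, C or B field at bit 3, and a single AND then yields the operand already multiplied by 8, which is exactly the byte offset of an 8-byte stack slot; no extra scaling instruction is needed. decode_RD and decode_OP extract the raw 16-bit and 8-bit values instead.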
-|.macro ins_NEXT3 -| ldr OP, [DISPATCH, OP, lsl #2] -| decode_RA8 RA, INS -| decode_RD RC, INS -| bx OP -|.endmacro -|.macro ins_NEXT -| ins_NEXT1 -| ins_NEXT2 -| ins_NEXT3 -|.endmacro -| -|// Instruction footer. -|.if 1 -| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. -| .define ins_next, ins_NEXT -| .define ins_next_, ins_NEXT -| .define ins_next1, ins_NEXT1 -| .define ins_next2, ins_NEXT2 -| .define ins_next3, ins_NEXT3 -|.else -| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. -| // Affects only certain kinds of benchmarks (and only with -j off). -| .macro ins_next -| b ->ins_next -| .endmacro -| .macro ins_next1 -| .endmacro -| .macro ins_next2 -| .endmacro -| .macro ins_next3 -| b ->ins_next -| .endmacro -| .macro ins_next_ -| ->ins_next: -| ins_NEXT -| .endmacro -|.endif -| -|// Avoid register name substitution for field name. -#define field_pc pc -| -|// Call decode and dispatch. -|.macro ins_callt -| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC -| ldr PC, LFUNC:CARG3->field_pc -| ldrb OP, [PC] // STALL: load PC. early PC. -| ldr INS, [PC], #4 -| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP. -| decode_RA8 RA, INS -| add RA, RA, BASE -| bx OP -|.endmacro -| -|.macro ins_call -| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC -| str PC, [BASE, FRAME_PC] -| ins_callt // STALL: locked PC. -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Macros to test operand types. -|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro -|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro -|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro -|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro -|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro -|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro -| -|// Assumes DISPATCH is relative to GL. -#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) -#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) -| -#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) -| -|.macro hotcheck, delta -| lsr CARG1, PC, #1 -| and CARG1, CARG1, #126 -| sub CARG1, CARG1, #-GG_DISP2HOT -| ldrh CARG2, [DISPATCH, CARG1] -| subs CARG2, CARG2, #delta -| strh CARG2, [DISPATCH, CARG1] -|.endmacro -| -|.macro hotloop -| hotcheck HOTCOUNT_LOOP -| blo ->vm_hotloop -|.endmacro -| -|.macro hotcall -| hotcheck HOTCOUNT_CALL -| blo ->vm_hotcall -|.endmacro -| -|// Set current VM state. -|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro -|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro -| -|// Move table write barrier back. Overwrites mark and tmp. -|.macro barrierback, tab, mark, tmp -| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)] -| bic mark, mark, #LJ_GC_BLACK // black2gray(tab) -| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)] -| strb mark, tab->marked -| str tmp, tab->gclist -|.endmacro -| -|.macro .IOS, a, b -|.if IOS -| a, b -|.endif -|.endmacro -| -|//----------------------------------------------------------------------- - -#if !LJ_DUALNUM -#error "Only dual-number mode supported for ARM target" -#endif - -/* Generate subroutines used by opcodes and other parts of the VM. */ -/* The .code_sub section should be last to help static branch prediction. 
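hotcheck hashes the current PC into one of the 16-bit hot counters kept at GG_DISP2HOT relative to the dispatch table, decrements it by the given delta and writes it back; hotloop and hotcall branch to ->vm_hotloop or ->vm_hotcall when the counter underflows, which is the point where the tracing JIT takes over for a hot loop or call.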
*/ -static void build_subroutines(BuildCtx *ctx) -{ - |.code_sub - | - |//----------------------------------------------------------------------- - |//-- Return handling ---------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_returnp: - | // See vm_return. Also: RB = previous base. - | tst PC, #FRAME_P - | beq ->cont_dispatch - | - | // Return from pcall or xpcall fast func. - | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame. - | mvn CARG2, #~LJ_TTRUE - | mov BASE, RB - | // Prepending may overwrite the pcall frame, so do it at the end. - | str CARG2, [RA, FRAME_PC] // Prepend true to results. - | sub RA, RA, #8 - | - |->vm_returnc: - | adds RC, RC, #8 // RC = (nresults+1)*8. - | mov CRET1, #LUA_YIELD - | beq ->vm_unwind_c_eh - | str RC, SAVE_MULTRES - | ands CARG1, PC, #FRAME_TYPE - | beq ->BC_RET_Z // Handle regular return to Lua. - | - |->vm_return: - | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return - | // CARG1 = PC & FRAME_TYPE - | bic RB, PC, #FRAME_TYPEP - | cmp CARG1, #FRAME_C - | sub RB, BASE, RB // RB = previous base. - | bne ->vm_returnp - | - | str RB, L->base - | ldr KBASE, SAVE_NRES - | mv_vmstate CARG4, C - | sub BASE, BASE, #8 - | subs CARG3, RC, #8 - | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8 - | st_vmstate CARG4 - | beq >2 - |1: - | subs CARG3, CARG3, #8 - | ldrd CARG12, [RA], #8 - | strd CARG12, [BASE], #8 - | bne <1 - |2: - | cmp KBASE, RC // More/less results wanted? - | bne >6 - |3: - | str BASE, L->top // Store new top. - | - |->vm_leave_cp: - | ldr RC, SAVE_CFRAME // Restore previous C frame. - | mov CRET1, #0 // Ok return status for vm_pcall. - | str RC, L->cframe - | - |->vm_leave_unw: - | restoreregs_ret - | - |6: - | blt >7 // Less results wanted? - | // More results wanted. Check stack size and fill up results with nil. - | ldr CARG3, L->maxstack - | mvn CARG2, #~LJ_TNIL - | cmp BASE, CARG3 - | bhs >8 - | str CARG2, [BASE, #4] - | add RC, RC, #8 - | add BASE, BASE, #8 - | b <2 - | - |7: // Less results wanted. - | sub CARG1, RC, KBASE - | cmp KBASE, #0 // LUA_MULTRET+1 case? - | subne BASE, BASE, CARG1 // Either keep top or shrink it. - | b <3 - | - |8: // Corner case: need to grow stack for filling up results. - | // This can happen if: - | // - A C function grows the stack (a lot). - | // - The GC shrinks the stack in between. - | // - A return back from a lua_call() with (high) nresults adjustment. - | str BASE, L->top // Save current top held in BASE (yes). - | lsr CARG2, KBASE, #3 - | mov CARG1, L - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldr BASE, L->top // Need the (realloced) L->top in BASE. - | b <2 - | - |->vm_unwind_c: // Unwind C stack, return from vm_pcall. - | // (void *cframe, int errcode) - | mov sp, CARG1 - | mov CRET1, CARG2 - |->vm_unwind_c_eh: // Landing pad for external unwinder. - | ldr L, SAVE_L - | mv_vmstate CARG4, C - | ldr GL:CARG3, L->glref - | str CARG4, GL:CARG3->vmstate - | b ->vm_leave_unw - | - |->vm_unwind_ff: // Unwind C stack, return from ff pcall. - | // (void *cframe) - | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated. - | mov sp, CARG1 - |->vm_unwind_ff_eh: // Landing pad for external unwinder. - | ldr L, SAVE_L - | mov MASKR8, #255 - | mov RC, #16 // 2 results: false + error message. - | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. - | ldr BASE, L->base - | ldr DISPATCH, L->glref // Setup pointer to dispatch table. 
- | mvn CARG1, #~LJ_TFALSE - | sub RA, BASE, #8 // Results start at BASE-8. - | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame. - | add DISPATCH, DISPATCH, #GG_G2DISP - | mv_vmstate CARG2, INTERP - | str CARG1, [BASE, #-4] // Prepend false to error message. - | st_vmstate CARG2 - | b ->vm_returnc - | - |//----------------------------------------------------------------------- - |//-- Grow stack for calls ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_growstack_c: // Grow stack for C function. - | // CARG1 = L - | mov CARG2, #LUA_MINSTACK - | b >2 - | - |->vm_growstack_l: // Grow stack for Lua function. - | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC - | add RC, BASE, RC - | sub RA, RA, BASE - | mov CARG1, L - | str BASE, L->base - | add PC, PC, #4 // Must point after first instruction. - | str RC, L->top - | lsr CARG2, RA, #3 - |2: - | // L->base = new base, L->top = top - | str PC, SAVE_PC - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldr BASE, L->base - | ldr RC, L->top - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | sub NARGS8:RC, RC, BASE - | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC - | ins_callt // Just retry the call. - | - |//----------------------------------------------------------------------- - |//-- Entry points into the assembler VM --------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_resume: // Setup C frame and resume thread. - | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) - | saveregs - | mov L, CARG1 - | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table. - | mov BASE, CARG2 - | add DISPATCH, DISPATCH, #GG_G2DISP - | str L, SAVE_L - | mov PC, #FRAME_CP - | str CARG3, SAVE_NRES - | add CARG2, sp, #CFRAME_RESUME - | ldrb CARG1, L->status - | str CARG3, SAVE_ERRF - | str L, SAVE_PC // Any value outside of bytecode is ok. - | str CARG3, SAVE_CFRAME - | cmp CARG1, #0 - | str CARG2, L->cframe - | beq >3 - | - | // Resume after yield (like a return). - | str L, [DISPATCH, #DISPATCH_GL(cur_L)] - | mov RA, BASE - | ldr BASE, L->base - | ldr CARG1, L->top - | mov MASKR8, #255 - | strb CARG3, L->status - | sub RC, CARG1, BASE - | ldr PC, [BASE, FRAME_PC] - | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. - | mv_vmstate CARG2, INTERP - | add RC, RC, #8 - | ands CARG1, PC, #FRAME_TYPE - | st_vmstate CARG2 - | str RC, SAVE_MULTRES - | beq ->BC_RET_Z - | b ->vm_return - | - |->vm_pcall: // Setup protected C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) - | saveregs - | mov PC, #FRAME_CP - | str CARG4, SAVE_ERRF - | b >1 - | - |->vm_call: // Setup C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1) - | saveregs - | mov PC, #FRAME_C - | - |1: // Entry point for vm_pcall above (PC = ftype). - | ldr RC, L:CARG1->cframe - | str CARG3, SAVE_NRES - | mov L, CARG1 - | str CARG1, SAVE_L - | ldr DISPATCH, L->glref // Setup pointer to dispatch table. - | mov BASE, CARG2 - | str CARG1, SAVE_PC // Any value outside of bytecode is ok. - | str RC, SAVE_CFRAME - | add DISPATCH, DISPATCH, #GG_G2DISP - | str sp, L->cframe // Add our C frame to cframe chain. - | - |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). - | str L, [DISPATCH, #DISPATCH_GL(cur_L)] - | ldr RB, L->base // RB = old base (for vmeta_call). 
- | ldr CARG1, L->top - | mov MASKR8, #255 - | add PC, PC, BASE - | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. - | sub PC, PC, RB // PC = frame delta + frame type - | mv_vmstate CARG2, INTERP - | sub NARGS8:RC, CARG1, BASE - | st_vmstate CARG2 - | - |->vm_call_dispatch: - | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC - | ldrd CARG34, [BASE, FRAME_FUNC] - | checkfunc CARG4, ->vmeta_call - | - |->vm_call_dispatch_f: - | ins_call - | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC - | - |->vm_cpcall: // Setup protected C frame, call C. - | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) - | saveregs - | mov L, CARG1 - | ldr RA, L:CARG1->stack - | str CARG1, SAVE_L - | ldr DISPATCH, L->glref // Setup pointer to dispatch table. - | ldr RB, L->top - | str CARG1, SAVE_PC // Any value outside of bytecode is ok. - | ldr RC, L->cframe - | add DISPATCH, DISPATCH, #GG_G2DISP - | sub RA, RA, RB // Compute -savestack(L, L->top). - | mov RB, #0 - | str RA, SAVE_NRES // Neg. delta means cframe w/o frame. - | str RB, SAVE_ERRF // No error function. - | str RC, SAVE_CFRAME - | str sp, L->cframe // Add our C frame to cframe chain. - | str L, [DISPATCH, #DISPATCH_GL(cur_L)] - | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud) - | movs BASE, CRET1 - | mov PC, #FRAME_CP - | bne <3 // Else continue with the call. - | b ->vm_leave_cp // No base? Just remove C frame. - | - |//----------------------------------------------------------------------- - |//-- Metamethod handling ------------------------------------------------ - |//----------------------------------------------------------------------- - | - |//-- Continuation dispatch ---------------------------------------------- - | - |->cont_dispatch: - | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8 - | ldr LFUNC:CARG3, [RB, FRAME_FUNC] - | ldr CARG1, [BASE, #-16] // Get continuation. - | mov CARG4, BASE - | mov BASE, RB // Restore caller BASE. - |.if FFI - | cmp CARG1, #1 - |.endif - | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC]. - | ldr CARG3, LFUNC:CARG3->field_pc - | mvn INS, #~LJ_TNIL - | add CARG2, RA, RC - | str INS, [CARG2, #-4] // Ensure one valid arg. - |.if FFI - | bls >1 - |.endif - | ldr KBASE, [CARG3, #PC2PROTO(k)] - | // BASE = base, RA = resultptr, CARG4 = meta base - | bx CARG1 - | - |.if FFI - |1: - | beq ->cont_ffi_callback // cont = 1: return from FFI callback. - | // cont = 0: tailcall from C function. 
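The metamethod paths below all build the kind of frame that cont_dispatch later unwinds: the two words just below the metamethod's base hold the continuation and the saved PC (the [cont|PC] slot read at offsets -16 and -12), the metamethod function and its arguments sit above the base, and the FRAME_CONT type bits in the frame link route the eventual return through the stored continuation rather than through a plain Lua return.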
- | sub CARG4, CARG4, #16 - | sub RC, CARG4, BASE - | b ->vm_call_tail - |.endif - | - |->cont_cat: // RA = resultptr, CARG4 = meta base - | ldr INS, [PC, #-4] - | sub CARG2, CARG4, #16 - | ldrd CARG34, [RA] - | str BASE, L->base - | decode_RB8 RC, INS - | decode_RA8 RA, INS - | add CARG1, BASE, RC - | subs CARG1, CARG2, CARG1 - | strdne CARG34, [CARG2] - | movne CARG3, CARG1 - | bne ->BC_CAT_Z - | strd CARG34, [BASE, RA] - | b ->cont_nop - | - |//-- Table indexing metamethods ----------------------------------------- - | - |->vmeta_tgets1: - | add CARG2, BASE, RB - | b >2 - | - |->vmeta_tgets: - | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv) - | mvn CARG4, #~LJ_TTAB - | str TAB:RB, [CARG2] - | str CARG4, [CARG2, #4] - |2: - | mvn CARG4, #~LJ_TSTR - | str STR:RC, TMPDlo - | str CARG4, TMPDhi - | mov CARG3, TMPDp - | b >1 - | - |->vmeta_tgetb: // RC = index - | decode_RB8 RB, INS - | str RC, TMPDlo - | mvn CARG4, #~LJ_TISNUM - | add CARG2, BASE, RB - | str CARG4, TMPDhi - | mov CARG3, TMPDp - | b >1 - | - |->vmeta_tgetv: - | add CARG2, BASE, RB - | add CARG3, BASE, RC - |1: - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) - | // Returns TValue * (finished) or NULL (metamethod). - | .IOS ldr BASE, L->base - | cmp CRET1, #0 - | beq >3 - | ldrd CARG34, [CRET1] - | ins_next1 - | ins_next2 - | strd CARG34, [BASE, RA] - | ins_next3 - | - |3: // Call __index metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k - | rsb CARG1, BASE, #FRAME_CONT - | ldr BASE, L->top - | mov NARGS8:RC, #16 // 2 args for func(t, k). - | str PC, [BASE, #-12] // [cont|PC] - | add PC, CARG1, BASE - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. - | b ->vm_call_dispatch_f - | - |->vmeta_tgetr: - | .IOS mov RC, BASE - | bl extern lj_tab_getinth // (GCtab *t, int32_t key) - | // Returns cTValue * or NULL. - | .IOS mov BASE, RC - | cmp CRET1, #0 - | ldrdne CARG12, [CRET1] - | mvneq CARG2, #~LJ_TNIL - | b ->BC_TGETR_Z - | - |//----------------------------------------------------------------------- - | - |->vmeta_tsets1: - | add CARG2, BASE, RB - | b >2 - | - |->vmeta_tsets: - | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv) - | mvn CARG4, #~LJ_TTAB - | str TAB:RB, [CARG2] - | str CARG4, [CARG2, #4] - |2: - | mvn CARG4, #~LJ_TSTR - | str STR:RC, TMPDlo - | str CARG4, TMPDhi - | mov CARG3, TMPDp - | b >1 - | - |->vmeta_tsetb: // RC = index - | decode_RB8 RB, INS - | str RC, TMPDlo - | mvn CARG4, #~LJ_TISNUM - | add CARG2, BASE, RB - | str CARG4, TMPDhi - | mov CARG3, TMPDp - | b >1 - | - |->vmeta_tsetv: - | add CARG2, BASE, RB - | add CARG3, BASE, RC - |1: - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) - | // Returns TValue * (finished) or NULL (metamethod). - | .IOS ldr BASE, L->base - | cmp CRET1, #0 - | ldrd CARG34, [BASE, RA] - | beq >3 - | ins_next1 - | // NOBARRIER: lj_meta_tset ensures the table is not black. - | strd CARG34, [CRET1] - | ins_next2 - | ins_next3 - | - |3: // Call __newindex metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) - | rsb CARG1, BASE, #FRAME_CONT - | ldr BASE, L->top - | mov NARGS8:RC, #24 // 3 args for func(t, k, v). - | strd CARG34, [BASE, #16] // Copy value to third argument. - | str PC, [BASE, #-12] // [cont|PC] - | add PC, CARG1, BASE - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. 
- | b ->vm_call_dispatch_f - | - |->vmeta_tsetr: - | str BASE, L->base - | .IOS mov RC, BASE - | str PC, SAVE_PC - | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) - | // Returns TValue *. - | .IOS mov BASE, RC - | b ->BC_TSETR_Z - | - |//-- Comparison metamethods --------------------------------------------- - | - |->vmeta_comp: - | mov CARG1, L - | sub PC, PC, #4 - | mov CARG2, RA - | str BASE, L->base - | mov CARG3, RC - | str PC, SAVE_PC - | decode_OP CARG4, INS - | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) - | // Returns 0/1 or TValue * (metamethod). - |3: - | .IOS ldr BASE, L->base - | cmp CRET1, #1 - | bhi ->vmeta_binop - |4: - | ldrh RB, [PC, #2] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | subhs PC, RB, #0x20000 - |->cont_nop: - | ins_next - | - |->cont_ra: // RA = resultptr - | ldr INS, [PC, #-4] - | ldrd CARG12, [RA] - | decode_RA8 CARG3, INS - | strd CARG12, [BASE, CARG3] - | b ->cont_nop - | - |->cont_condt: // RA = resultptr - | ldr CARG2, [RA, #4] - | mvn CARG1, #~LJ_TTRUE - | cmp CARG1, CARG2 // Branch if result is true. - | b <4 - | - |->cont_condf: // RA = resultptr - | ldr CARG2, [RA, #4] - | checktp CARG2, LJ_TFALSE // Branch if result is false. - | b <4 - | - |->vmeta_equal: - | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. - | sub PC, PC, #4 - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) - | // Returns 0/1 or TValue * (metamethod). - | b <3 - | - |->vmeta_equal_cd: - |.if FFI - | sub PC, PC, #4 - | str BASE, L->base - | mov CARG1, L - | mov CARG2, INS - | str PC, SAVE_PC - | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) - | // Returns 0/1 or TValue * (metamethod). - | b <3 - |.endif - | - |->vmeta_istype: - | sub PC, PC, #4 - | str BASE, L->base - | mov CARG1, L - | lsr CARG2, RA, #3 - | mov CARG3, RC - | str PC, SAVE_PC - | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) - | .IOS ldr BASE, L->base - | b ->cont_nop - | - |//-- Arithmetic metamethods --------------------------------------------- - | - |->vmeta_arith_vn: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | add CARG3, BASE, RB - | add CARG4, KBASE, RC - | b >1 - | - |->vmeta_arith_nv: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | add CARG4, BASE, RB - | add CARG3, KBASE, RC - | b >1 - | - |->vmeta_unm: - | ldr INS, [PC, #-8] - | sub PC, PC, #4 - | add CARG3, BASE, RC - | add CARG4, BASE, RC - | b >1 - | - |->vmeta_arith_vv: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | add CARG3, BASE, RB - | add CARG4, BASE, RC - |1: - | decode_OP OP, INS - | add CARG2, BASE, RA - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | str OP, ARG5 - | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) - | // Returns NULL (finished) or TValue * (metamethod). - | .IOS ldr BASE, L->base - | cmp CRET1, #0 - | beq ->cont_nop - | - | // Call metamethod for binary op. - |->vmeta_binop: - | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 - | sub CARG2, CRET1, BASE - | str PC, [CRET1, #-12] // [cont|PC] - | add PC, CARG2, #FRAME_CONT - | mov BASE, CRET1 - | mov NARGS8:RC, #16 // 2 args for func(o1, o2). - | b ->vm_call_dispatch - | - |->vmeta_len: - | add CARG2, BASE, RC - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_len // (lua_State *L, TValue *o) - | // Returns NULL (retry) or TValue * (metamethod base). 
- | .IOS ldr BASE, L->base -#if LJ_52 - | cmp CRET1, #0 - | bne ->vmeta_binop // Binop call for compatibility. - | ldr TAB:CARG1, [BASE, RC] - | b ->BC_LEN_Z -#else - | b ->vmeta_binop // Binop call for compatibility. -#endif - | - |//-- Call metamethod ---------------------------------------------------- - | - |->vmeta_call: // Resolve and call __call metamethod. - | // RB = old base, BASE = new base, RC = nargs*8 - | mov CARG1, L - | str RB, L->base // This is the callers base! - | sub CARG2, BASE, #8 - | str PC, SAVE_PC - | add CARG3, BASE, NARGS8:RC - | .IOS mov RA, BASE - | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | .IOS mov BASE, RA - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. - | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. - | ins_call - | - |->vmeta_callt: // Resolve __call for BC_CALLT. - | // BASE = old base, RA = new base, RC = nargs*8 - | mov CARG1, L - | str BASE, L->base - | sub CARG2, RA, #8 - | str PC, SAVE_PC - | add CARG3, RA, NARGS8:RC - | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | .IOS ldr BASE, L->base - | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here. - | ldr PC, [BASE, FRAME_PC] - | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. - | b ->BC_CALLT2_Z - | - |//-- Argument coercion for 'for' statement ------------------------------ - | - |->vmeta_for: - | mov CARG1, L - | str BASE, L->base - | mov CARG2, RA - | str PC, SAVE_PC - | bl extern lj_meta_for // (lua_State *L, TValue *base) - | .IOS ldr BASE, L->base - |.if JIT - | ldrb OP, [PC, #-4] - |.endif - | ldr INS, [PC, #-4] - |.if JIT - | cmp OP, #BC_JFORI - |.endif - | decode_RA8 RA, INS - | decode_RD RC, INS - |.if JIT - | beq =>BC_JFORI - |.endif - | b =>BC_FORI - | - |//----------------------------------------------------------------------- - |//-- Fast functions ----------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro .ffunc, name - |->ff_ .. name: - |.endmacro - | - |.macro .ffunc_1, name - |->ff_ .. name: - | ldrd CARG12, [BASE] - | cmp NARGS8:RC, #8 - | blo ->fff_fallback - |.endmacro - | - |.macro .ffunc_2, name - |->ff_ .. name: - | ldrd CARG12, [BASE] - | ldrd CARG34, [BASE, #8] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - |.endmacro - | - |.macro .ffunc_n, name - | .ffunc_1 name - | checktp CARG2, LJ_TISNUM - | bhs ->fff_fallback - |.endmacro - | - |.macro .ffunc_nn, name - | .ffunc_2 name - | checktp CARG2, LJ_TISNUM - | cmnlo CARG4, #-LJ_TISNUM - | bhs ->fff_fallback - |.endmacro - | - |.macro .ffunc_d, name - | .ffunc name - | ldr CARG2, [BASE, #4] - | cmp NARGS8:RC, #8 - | vldr d0, [BASE] - | blo ->fff_fallback - | checktp CARG2, LJ_TISNUM - | bhs ->fff_fallback - |.endmacro - | - |.macro .ffunc_dd, name - | .ffunc name - | ldr CARG2, [BASE, #4] - | ldr CARG4, [BASE, #12] - | cmp NARGS8:RC, #16 - | vldr d0, [BASE] - | vldr d1, [BASE, #8] - | blo ->fff_fallback - | checktp CARG2, LJ_TISNUM - | cmnlo CARG4, #-LJ_TISNUM - | bhs ->fff_fallback - |.endmacro - | - |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2. 
- |.macro ffgccheck - | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)] - | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)] - | cmp CARG1, CARG2 - | blge ->fff_gcstep - |.endmacro - | - |//-- Base library: checks ----------------------------------------------- - | - |.ffunc_1 assert - | checktp CARG2, LJ_TTRUE - | bhi ->fff_fallback - | ldr PC, [BASE, FRAME_PC] - | strd CARG12, [BASE, #-8] - | mov RB, BASE - | subs RA, NARGS8:RC, #8 - | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8. - | beq ->fff_res // Done if exactly 1 argument. - |1: - | ldrd CARG12, [RB, #8] - | subs RA, RA, #8 - | strd CARG12, [RB], #8 - | bne <1 - | b ->fff_res - | - |.ffunc type - | ldr CARG2, [BASE, #4] - | cmp NARGS8:RC, #8 - | blo ->fff_fallback - | checktp CARG2, LJ_TISNUM - | mvnlo CARG2, #~LJ_TISNUM - | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1 - | lsl CARG4, CARG4, #3 - | ldrd CARG12, [CFUNC:CARG3, CARG4] - | b ->fff_restv - | - |//-- Base library: getters and setters --------------------------------- - | - |.ffunc_1 getmetatable - | checktp CARG2, LJ_TTAB - | cmnne CARG2, #-LJ_TUDATA - | bne >6 - |1: // Field metatable must be at same offset for GCtab and GCudata! - | ldr TAB:RB, TAB:CARG1->metatable - |2: - | mvn CARG2, #~LJ_TNIL - | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])] - | cmp TAB:RB, #0 - | beq ->fff_restv - | ldr CARG3, TAB:RB->hmask - | ldr CARG4, STR:RC->hash - | ldr NODE:INS, TAB:RB->node - | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask - | add CARG3, CARG3, CARG3, lsl #1 - | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 - |3: // Rearranged logic, because we expect _not_ to find the key. - | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS. - | ldrd CARG12, NODE:INS->val - | ldr NODE:INS, NODE:INS->next - | checktp CARG4, LJ_TSTR - | cmpeq CARG3, STR:RC - | beq >5 - | cmp NODE:INS, #0 - | bne <3 - |4: - | mov CARG1, RB // Use metatable as default result. - | mvn CARG2, #~LJ_TTAB - | b ->fff_restv - |5: - | checktp CARG2, LJ_TNIL - | bne ->fff_restv - | b <4 - | - |6: - | checktp CARG2, LJ_TISNUM - | mvnhs CARG2, CARG2 - | movlo CARG2, #~LJ_TISNUM - | add CARG4, DISPATCH, CARG2, lsl #2 - | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])] - | b <2 - | - |.ffunc_2 setmetatable - | // Fast path: no mt for table yet and not clearing the mt. - | checktp CARG2, LJ_TTAB - | ldreq TAB:RB, TAB:CARG1->metatable - | checktpeq CARG4, LJ_TTAB - | ldrbeq CARG4, TAB:CARG1->marked - | cmpeq TAB:RB, #0 - | bne ->fff_fallback - | tst CARG4, #LJ_GC_BLACK // isblack(table) - | str TAB:CARG3, TAB:CARG1->metatable - | beq ->fff_restv - | barrierback TAB:CARG1, CARG4, CARG3 - | b ->fff_restv - | - |.ffunc rawget - | ldrd CARG34, [BASE] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - | mov CARG2, CARG3 - | checktab CARG4, ->fff_fallback - | mov CARG1, L - | add CARG3, BASE, #8 - | .IOS mov RA, BASE - | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) - | // Returns cTValue *. - | .IOS mov BASE, RA - | ldrd CARG12, [CRET1] - | b ->fff_restv - | - |//-- Base library: conversions ------------------------------------------ - | - |.ffunc tonumber - | // Only handles the number case inline (without a base argument). - | ldrd CARG12, [BASE] - | cmp NARGS8:RC, #8 - | bne ->fff_fallback - | checktp CARG2, LJ_TISNUM - | bls ->fff_restv - | b ->fff_fallback - | - |.ffunc_1 tostring - | // Only handles the string or number case inline. 
- | checktp CARG2, LJ_TSTR - | // A __tostring method in the string base metatable is ignored. - | beq ->fff_restv - | // Handle numbers inline, unless a number base metatable is present. - | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])] - | str BASE, L->base - | checktp CARG2, LJ_TISNUM - | cmpls CARG4, #0 - | str PC, SAVE_PC // Redundant (but a defined value). - | bhi ->fff_fallback - | ffgccheck - | mov CARG1, L - | mov CARG2, BASE - | bl extern lj_strfmt_number // (lua_State *L, cTValue *o) - | // Returns GCstr *. - | ldr BASE, L->base - | mvn CARG2, #~LJ_TSTR - | b ->fff_restv - | - |//-- Base library: iterators ------------------------------------------- - | - |.ffunc_1 next - | mvn CARG4, #~LJ_TNIL - | checktab CARG2, ->fff_fallback - | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil. - | ldr PC, [BASE, FRAME_PC] - | mov CARG2, CARG1 - | str BASE, L->base // Add frame since C call can throw. - | mov CARG1, L - | str BASE, L->top // Dummy frame length is ok. - | add CARG3, BASE, #8 - | str PC, SAVE_PC - | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) - | // Returns 0 at end of traversal. - | .IOS ldr BASE, L->base - | cmp CRET1, #0 - | mvneq CRET2, #~LJ_TNIL - | beq ->fff_restv // End of traversal: return nil. - | ldrd CARG12, [BASE, #8] // Copy key and value to results. - | ldrd CARG34, [BASE, #16] - | mov RC, #(2+1)*8 - | strd CARG12, [BASE, #-8] - | strd CARG34, [BASE] - | b ->fff_res - | - |.ffunc_1 pairs - | checktab CARG2, ->fff_fallback -#if LJ_52 - | ldr TAB:RB, TAB:CARG1->metatable -#endif - | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0] - | ldr PC, [BASE, FRAME_PC] -#if LJ_52 - | cmp TAB:RB, #0 - | bne ->fff_fallback -#endif - | mvn CARG2, #~LJ_TNIL - | mov RC, #(3+1)*8 - | strd CFUNC:CARG34, [BASE, #-8] - | str CARG2, [BASE, #12] - | b ->fff_res - | - |.ffunc_2 ipairs_aux - | checktp CARG2, LJ_TTAB - | checktpeq CARG4, LJ_TISNUM - | bne ->fff_fallback - | ldr RB, TAB:CARG1->asize - | ldr RC, TAB:CARG1->array - | add CARG3, CARG3, #1 - | ldr PC, [BASE, FRAME_PC] - | cmp CARG3, RB - | add RC, RC, CARG3, lsl #3 - | strd CARG34, [BASE, #-8] - | ldrdlo CARG12, [RC] - | mov RC, #(0+1)*8 - | bhs >2 // Not in array part? - |1: - | checktp CARG2, LJ_TNIL - | movne RC, #(2+1)*8 - | strdne CARG12, [BASE] - | b ->fff_res - |2: // Check for empty hash part first. Otherwise call C function. - | ldr RB, TAB:CARG1->hmask - | mov CARG2, CARG3 - | cmp RB, #0 - | beq ->fff_res - | .IOS mov RA, BASE - | bl extern lj_tab_getinth // (GCtab *t, int32_t key) - | // Returns cTValue * or NULL. - | .IOS mov BASE, RA - | cmp CRET1, #0 - | beq ->fff_res - | ldrd CARG12, [CRET1] - | b <1 - | - |.ffunc_1 ipairs - | checktab CARG2, ->fff_fallback -#if LJ_52 - | ldr TAB:RB, TAB:CARG1->metatable -#endif - | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0] - | ldr PC, [BASE, FRAME_PC] -#if LJ_52 - | cmp TAB:RB, #0 - | bne ->fff_fallback -#endif - | mov CARG1, #0 - | mvn CARG2, #~LJ_TISNUM - | mov RC, #(3+1)*8 - | strd CFUNC:CARG34, [BASE, #-8] - | strd CARG12, [BASE, #8] - | b ->fff_res - | - |//-- Base library: catch errors ---------------------------------------- - | - |.ffunc pcall - | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)] - | cmp NARGS8:RC, #8 - | blo ->fff_fallback - | tst RA, #HOOK_ACTIVE // Remember active hook before pcall. 
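The ipairs()/ipairs_aux fast path above walks the array part directly and only falls back to lj_tab_getinth for the hash part; the observable behavior matches the stock Lua 5.1 iterator. A minimal C-API sketch of that behavior (reference-style, not the VM code; assumes only lua.h/lauxlib.h):

#include "lua.h"
#include "lauxlib.h"

/* Returns i+1 and t[i+1]; returns nothing once the slot is nil,
** which terminates the generic-for loop. */
static int ipairs_aux_sketch(lua_State *L)
{
  int i = luaL_checkint(L, 2) + 1;   /* previous index is argument #2 */
  luaL_checktype(L, 1, LUA_TTABLE);
  lua_pushinteger(L, i);
  lua_rawgeti(L, 1, i);              /* raw read, like the fast path above */
  return lua_isnil(L, -1) ? 0 : 2;
}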
- | mov RB, BASE - | add BASE, BASE, #8 - | moveq PC, #8+FRAME_PCALL - | movne PC, #8+FRAME_PCALLH - | sub NARGS8:RC, NARGS8:RC, #8 - | b ->vm_call_dispatch - | - |.ffunc_2 xpcall - | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)] - | checkfunc CARG4, ->fff_fallback // Traceback must be a function. - | mov RB, BASE - | strd CARG12, [BASE, #8] // Swap function and traceback. - | strd CARG34, [BASE] - | tst RA, #HOOK_ACTIVE // Remember active hook before pcall. - | add BASE, BASE, #16 - | moveq PC, #16+FRAME_PCALL - | movne PC, #16+FRAME_PCALLH - | sub NARGS8:RC, NARGS8:RC, #16 - | b ->vm_call_dispatch - | - |//-- Coroutine library -------------------------------------------------- - | - |.macro coroutine_resume_wrap, resume - |.if resume - |.ffunc_1 coroutine_resume - | checktp CARG2, LJ_TTHREAD - | bne ->fff_fallback - |.else - |.ffunc coroutine_wrap_aux - | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr - |.endif - | ldr PC, [BASE, FRAME_PC] - | str BASE, L->base - | ldr CARG2, L:CARG1->top - | ldrb RA, L:CARG1->status - | ldr RB, L:CARG1->base - | add CARG3, CARG2, NARGS8:RC - | add CARG4, CARG2, RA - | str PC, SAVE_PC - | cmp CARG4, RB - | beq ->fff_fallback - | ldr CARG4, L:CARG1->maxstack - | ldr RB, L:CARG1->cframe - | cmp RA, #LUA_YIELD - | cmpls CARG3, CARG4 - | cmpls RB, #0 - | bhi ->fff_fallback - |1: - |.if resume - | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC. - | add BASE, BASE, #8 - | sub NARGS8:RC, NARGS8:RC, #8 - |.endif - | str CARG3, L:CARG1->top - | str BASE, L->top - |2: // Move args to coroutine. - | ldrd CARG34, [BASE, RB] - | cmp RB, NARGS8:RC - | strdne CARG34, [CARG2, RB] - | add RB, RB, #8 - | bne <2 - | - | mov CARG3, #0 - | mov L:RA, L:CARG1 - | mov CARG4, #0 - | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) - | // Returns thread status. - |4: - | ldr CARG3, L:RA->base - | mv_vmstate CARG2, INTERP - | ldr CARG4, L:RA->top - | cmp CRET1, #LUA_YIELD - | ldr BASE, L->base - | str L, [DISPATCH, #DISPATCH_GL(cur_L)] - | st_vmstate CARG2 - | bhi >8 - | subs RC, CARG4, CARG3 - | ldr CARG1, L->maxstack - | add CARG2, BASE, RC - | beq >6 // No results? - | cmp CARG2, CARG1 - | mov RB, #0 - | bhi >9 // Need to grow stack? - | - | sub CARG4, RC, #8 - | str CARG3, L:RA->top // Clear coroutine stack. - |5: // Move results from coroutine. - | ldrd CARG12, [CARG3, RB] - | cmp RB, CARG4 - | strd CARG12, [BASE, RB] - | add RB, RB, #8 - | bne <5 - |6: - |.if resume - | mvn CARG3, #~LJ_TTRUE - | add RC, RC, #16 - |7: - | str CARG3, [BASE, #-4] // Prepend true/false to results. - | sub RA, BASE, #8 - |.else - | mov RA, BASE - | add RC, RC, #8 - |.endif - | ands CARG1, PC, #FRAME_TYPE - | str PC, SAVE_PC - | str RC, SAVE_MULTRES - | beq ->BC_RET_Z - | b ->vm_return - | - |8: // Coroutine returned with error (at co->top-1). - |.if resume - | ldrd CARG12, [CARG4, #-8]! - | mvn CARG3, #~LJ_TFALSE - | mov RC, #(2+1)*8 - | str CARG4, L:RA->top // Remove error from coroutine stack. - | strd CARG12, [BASE] // Copy error message. - | b <7 - |.else - | mov CARG1, L - | mov CARG2, L:RA - | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) - | // Never returns. - |.endif - | - |9: // Handle stack expansion on return from yield. 
- | mov CARG1, L - | lsr CARG2, RC, #3 - | bl extern lj_state_growstack // (lua_State *L, int n) - | mov CRET1, #0 - | b <4 - |.endmacro - | - | coroutine_resume_wrap 1 // coroutine.resume - | coroutine_resume_wrap 0 // coroutine.wrap - | - |.ffunc coroutine_yield - | ldr CARG1, L->cframe - | add CARG2, BASE, NARGS8:RC - | str BASE, L->base - | tst CARG1, #CFRAME_RESUME - | str CARG2, L->top - | mov CRET1, #LUA_YIELD - | mov CARG3, #0 - | beq ->fff_fallback - | str CARG3, L->cframe - | strb CRET1, L->status - | b ->vm_leave_unw - | - |//-- Math library ------------------------------------------------------- - | - |.macro math_round, func - | .ffunc_1 math_ .. func - | checktp CARG2, LJ_TISNUM - | beq ->fff_restv - | bhi ->fff_fallback - | // Round FP value and normalize result. - | lsl CARG3, CARG2, #1 - | adds RB, CARG3, #0x00200000 - | bpl >2 // |x| < 1? - | mvn CARG4, #0x3e0 - | subs RB, CARG4, RB, asr #21 - | lsl CARG4, CARG2, #11 - | lsl CARG3, CARG1, #11 - | orr CARG4, CARG4, #0x80000000 - | rsb INS, RB, #32 - | orr CARG4, CARG4, CARG1, lsr #21 - | bls >3 // |x| >= 2^31? - | orr CARG3, CARG3, CARG4, lsl INS - | lsr CARG1, CARG4, RB - |.if "func" == "floor" - | tst CARG3, CARG2, asr #31 - | addne CARG1, CARG1, #1 - |.else - | bics CARG3, CARG3, CARG2, asr #31 - | addsne CARG1, CARG1, #1 - | ldrdvs CARG12, >9 - | bvs ->fff_restv - |.endif - | cmp CARG2, #0 - | rsblt CARG1, CARG1, #0 - |1: - | mvn CARG2, #~LJ_TISNUM - | b ->fff_restv - | - |2: // |x| < 1 - | bcs ->fff_restv // |x| is not finite. - | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo - |.if "func" == "floor" - | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1 - | moveq CARG1, #0 - | mvnne CARG1, #0 - |.else - | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 0 : 1 - | moveq CARG1, #0 - | movne CARG1, #1 - |.endif - | mvn CARG2, #~LJ_TISNUM - | b ->fff_restv - | - |3: // |x| >= 2^31. Check for x == -(2^31). - | cmpeq CARG4, #0x80000000 - |.if "func" == "floor" - | cmpeq CARG3, #0 - |.endif - | bne >4 - | cmp CARG2, #0 - | movmi CARG1, #0x80000000 - | bmi <1 - |4: - | bl ->vm_..func.._sf - | b ->fff_restv - |.endmacro - | - | math_round floor - | math_round ceil - | - |.align 8 - |9: - | .long 0x00000000, 0x41e00000 // 2^31. - | - |.ffunc_1 math_abs - | checktp CARG2, LJ_TISNUM - | bhi ->fff_fallback - | bicne CARG2, CARG2, #0x80000000 - | bne ->fff_restv - | cmp CARG1, #0 - | rsbslt CARG1, CARG1, #0 - | ldrdvs CARG12, <9 - | // Fallthrough. - | - |->fff_restv: - | // CARG12 = TValue result. - | ldr PC, [BASE, FRAME_PC] - | strd CARG12, [BASE, #-8] - |->fff_res1: - | // PC = return. - | mov RC, #(1+1)*8 - |->fff_res: - | // RC = (nresults+1)*8, PC = return. - | ands CARG1, PC, #FRAME_TYPE - | ldreq INS, [PC, #-4] - | str RC, SAVE_MULTRES - | sub RA, BASE, #8 - | bne ->vm_return - | decode_RB8 RB, INS - |5: - | cmp RB, RC // More results expected? - | bhi >6 - | decode_RA8 CARG1, INS - | ins_next1 - | ins_next2 - | // Adjust BASE. KBASE is assumed to be set for the calling frame. - | sub BASE, RA, CARG1 - | ins_next3 - | - |6: // Fill up results with nil. - | add CARG2, RA, RC - | mvn CARG1, #~LJ_TNIL - | add RC, RC, #8 - | str CARG1, [CARG2, #-4] - | b <5 - | - |.macro math_extern, func - |.if HFABI - | .ffunc_d math_ .. func - |.else - | .ffunc_n math_ .. func - |.endif - | .IOS mov RA, BASE - | bl extern func - | .IOS mov BASE, RA - |.if HFABI - | b ->fff_resd - |.else - | b ->fff_restv - |.endif - |.endmacro - | - |.macro math_extern2, func - |.if HFABI - | .ffunc_dd math_ .. 
func - |.else - | .ffunc_nn math_ .. func - |.endif - | .IOS mov RA, BASE - | bl extern func - | .IOS mov BASE, RA - |.if HFABI - | b ->fff_resd - |.else - | b ->fff_restv - |.endif - |.endmacro - | - |.if FPU - | .ffunc_d math_sqrt - | vsqrt.f64 d0, d0 - |->fff_resd: - | ldr PC, [BASE, FRAME_PC] - | vstr d0, [BASE, #-8] - | b ->fff_res1 - |.else - | math_extern sqrt - |.endif - | - |.ffunc math_log - |.if HFABI - | ldr CARG2, [BASE, #4] - | cmp NARGS8:RC, #8 // Need exactly 1 argument. - | vldr d0, [BASE] - | bne ->fff_fallback - |.else - | ldrd CARG12, [BASE] - | cmp NARGS8:RC, #8 // Need exactly 1 argument. - | bne ->fff_fallback - |.endif - | checktp CARG2, LJ_TISNUM - | bhs ->fff_fallback - | .IOS mov RA, BASE - | bl extern log - | .IOS mov BASE, RA - |.if HFABI - | b ->fff_resd - |.else - | b ->fff_restv - |.endif - | - | math_extern log10 - | math_extern exp - | math_extern sin - | math_extern cos - | math_extern tan - | math_extern asin - | math_extern acos - | math_extern atan - | math_extern sinh - | math_extern cosh - | math_extern tanh - | math_extern2 pow - | math_extern2 atan2 - | math_extern2 fmod - | - |.if HFABI - | .ffunc math_ldexp - | ldr CARG4, [BASE, #4] - | ldrd CARG12, [BASE, #8] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - | vldr d0, [BASE] - | checktp CARG4, LJ_TISNUM - | bhs ->fff_fallback - | checktp CARG2, LJ_TISNUM - | bne ->fff_fallback - | .IOS mov RA, BASE - | bl extern ldexp // (double x, int exp) - | .IOS mov BASE, RA - | b ->fff_resd - |.else - |.ffunc_2 math_ldexp - | checktp CARG2, LJ_TISNUM - | bhs ->fff_fallback - | checktp CARG4, LJ_TISNUM - | bne ->fff_fallback - | .IOS mov RA, BASE - | bl extern ldexp // (double x, int exp) - | .IOS mov BASE, RA - | b ->fff_restv - |.endif - | - |.if HFABI - |.ffunc_d math_frexp - | mov CARG1, sp - | .IOS mov RA, BASE - | bl extern frexp - | .IOS mov BASE, RA - | ldr CARG3, [sp] - | mvn CARG4, #~LJ_TISNUM - | ldr PC, [BASE, FRAME_PC] - | vstr d0, [BASE, #-8] - | mov RC, #(2+1)*8 - | strd CARG34, [BASE] - | b ->fff_res - |.else - |.ffunc_n math_frexp - | mov CARG3, sp - | .IOS mov RA, BASE - | bl extern frexp - | .IOS mov BASE, RA - | ldr CARG3, [sp] - | mvn CARG4, #~LJ_TISNUM - | ldr PC, [BASE, FRAME_PC] - | strd CARG12, [BASE, #-8] - | mov RC, #(2+1)*8 - | strd CARG34, [BASE] - | b ->fff_res - |.endif - | - |.if HFABI - |.ffunc_d math_modf - | sub CARG1, BASE, #8 - | ldr PC, [BASE, FRAME_PC] - | .IOS mov RA, BASE - | bl extern modf - | .IOS mov BASE, RA - | mov RC, #(2+1)*8 - | vstr d0, [BASE] - | b ->fff_res - |.else - |.ffunc_n math_modf - | sub CARG3, BASE, #8 - | ldr PC, [BASE, FRAME_PC] - | .IOS mov RA, BASE - | bl extern modf - | .IOS mov BASE, RA - | mov RC, #(2+1)*8 - | strd CARG12, [BASE] - | b ->fff_res - |.endif - | - |.macro math_minmax, name, cond, fcond - |.if FPU - | .ffunc_1 name - | add RB, BASE, RC - | checktp CARG2, LJ_TISNUM - | add RA, BASE, #8 - | bne >4 - |1: // Handle integers. - | ldrd CARG34, [RA] - | cmp RA, RB - | bhs ->fff_restv - | checktp CARG4, LJ_TISNUM - | bne >3 - | cmp CARG1, CARG3 - | add RA, RA, #8 - | mov..cond CARG1, CARG3 - | b <1 - |3: // Convert intermediate result to number and continue below. - | vmov s4, CARG1 - | bhi ->fff_fallback - | vldr d1, [RA] - | vcvt.f64.s32 d0, s4 - | b >6 - | - |4: - | vldr d0, [BASE] - | bhi ->fff_fallback - |5: // Handle numbers. 
- | ldrd CARG34, [RA] - | vldr d1, [RA] - | cmp RA, RB - | bhs ->fff_resd - | checktp CARG4, LJ_TISNUM - | bhs >7 - |6: - | vcmp.f64 d0, d1 - | vmrs - | add RA, RA, #8 - | vmov..fcond.f64 d0, d1 - | b <5 - |7: // Convert integer to number and continue above. - | vmov s4, CARG3 - | bhi ->fff_fallback - | vcvt.f64.s32 d1, s4 - | b <6 - | - |.else - | - | .ffunc_1 name - | checktp CARG2, LJ_TISNUM - | mov RA, #8 - | bne >4 - |1: // Handle integers. - | ldrd CARG34, [BASE, RA] - | cmp RA, RC - | bhs ->fff_restv - | checktp CARG4, LJ_TISNUM - | bne >3 - | cmp CARG1, CARG3 - | add RA, RA, #8 - | mov..cond CARG1, CARG3 - | b <1 - |3: // Convert intermediate result to number and continue below. - | bhi ->fff_fallback - | bl extern __aeabi_i2d - | ldrd CARG34, [BASE, RA] - | b >6 - | - |4: - | bhi ->fff_fallback - |5: // Handle numbers. - | ldrd CARG34, [BASE, RA] - | cmp RA, RC - | bhs ->fff_restv - | checktp CARG4, LJ_TISNUM - | bhs >7 - |6: - | bl extern __aeabi_cdcmple - | add RA, RA, #8 - | mov..fcond CARG1, CARG3 - | mov..fcond CARG2, CARG4 - | b <5 - |7: // Convert integer to number and continue above. - | bhi ->fff_fallback - | strd CARG12, TMPD - | mov CARG1, CARG3 - | bl extern __aeabi_i2d - | ldrd CARG34, TMPD - | b <6 - |.endif - |.endmacro - | - | math_minmax math_min, gt, hi - | math_minmax math_max, lt, lo - | - |//-- String library ----------------------------------------------------- - | - |.ffunc string_byte // Only handle the 1-arg case here. - | ldrd CARG12, [BASE] - | ldr PC, [BASE, FRAME_PC] - | cmp NARGS8:RC, #8 - | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument. - | bne ->fff_fallback - | ldr CARG3, STR:CARG1->len - | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end). - | mvn CARG2, #~LJ_TISNUM - | cmp CARG3, #0 - | moveq RC, #(0+1)*8 - | movne RC, #(1+1)*8 - | strd CARG12, [BASE, #-8] - | b ->fff_res - | - |.ffunc string_char // Only handle the 1-arg case here. - | ffgccheck - | ldrd CARG12, [BASE] - | ldr PC, [BASE, FRAME_PC] - | cmp NARGS8:RC, #8 // Need exactly 1 argument. - | checktpeq CARG2, LJ_TISNUM - | bicseq CARG4, CARG1, #255 - | mov CARG3, #1 - | bne ->fff_fallback - | str CARG1, TMPD - | mov CARG2, TMPDp // Points to stack. Little-endian. - |->fff_newstr: - | // CARG2 = str, CARG3 = len. - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_str_new // (lua_State *L, char *str, size_t l) - |->fff_resstr: - | // Returns GCstr *. 
- | ldr BASE, L->base - | mvn CARG2, #~LJ_TSTR - | b ->fff_restv - | - |.ffunc string_sub - | ffgccheck - | ldrd CARG12, [BASE] - | ldrd CARG34, [BASE, #16] - | cmp NARGS8:RC, #16 - | mvn RB, #0 - | beq >1 - | blo ->fff_fallback - | checktp CARG4, LJ_TISNUM - | mov RB, CARG3 - | bne ->fff_fallback - |1: - | ldrd CARG34, [BASE, #8] - | checktp CARG2, LJ_TSTR - | ldreq CARG2, STR:CARG1->len - | checktpeq CARG4, LJ_TISNUM - | bne ->fff_fallback - | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end - | add CARG4, CARG2, #1 - | cmp CARG3, #0 // if (start < 0) start += len+1 - | addlt CARG3, CARG3, CARG4 - | cmp CARG3, #1 // if (start < 1) start = 1 - | movlt CARG3, #1 - | cmp RB, #0 // if (end < 0) end += len+1 - | addlt RB, RB, CARG4 - | bic RB, RB, RB, asr #31 // if (end < 0) end = 0 - | cmp RB, CARG2 // if (end > len) end = len - | add CARG1, STR:CARG1, #sizeof(GCstr)-1 - | movgt RB, CARG2 - | add CARG2, CARG1, CARG3 - | subs CARG3, RB, CARG3 // len = end - start - | add CARG3, CARG3, #1 // len += 1 - | bge ->fff_newstr - |->fff_emptystr: - | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty) - | mvn CARG2, #~LJ_TSTR - | b ->fff_restv - | - |.macro ffstring_op, name - | .ffunc string_ .. name - | ffgccheck - | ldr CARG3, [BASE, #4] - | cmp NARGS8:RC, #8 - | ldr STR:CARG2, [BASE] - | blo ->fff_fallback - | sub SBUF:CARG1, DISPATCH, #-DISPATCH_GL(tmpbuf) - | checkstr CARG3, ->fff_fallback - | ldr CARG4, SBUF:CARG1->b - | str BASE, L->base - | str PC, SAVE_PC - | str L, SBUF:CARG1->L - | str CARG4, SBUF:CARG1->p - | bl extern lj_buf_putstr_ .. name - | bl extern lj_buf_tostr - | b ->fff_resstr - |.endmacro - | - |ffstring_op reverse - |ffstring_op lower - |ffstring_op upper - | - |//-- Bit library -------------------------------------------------------- - | - |// FP number to bit conversion for soft-float. Clobbers r0-r3. - |->vm_tobit_fb: - | bhi ->fff_fallback - |->vm_tobit: - | lsl RB, CARG2, #1 - | adds RB, RB, #0x00200000 - | movpl CARG1, #0 // |x| < 1? - | bxpl lr - | mvn CARG4, #0x3e0 - | subs RB, CARG4, RB, asr #21 - | bmi >1 // |x| >= 2^32? 
- | lsl CARG4, CARG2, #11 - | orr CARG4, CARG4, #0x80000000 - | orr CARG4, CARG4, CARG1, lsr #21 - | cmp CARG2, #0 - | lsr CARG1, CARG4, RB - | rsblt CARG1, CARG1, #0 - | bx lr - |1: - | add RB, RB, #21 - | lsr CARG4, CARG1, RB - | rsb RB, RB, #20 - | lsl CARG1, CARG2, #12 - | cmp CARG2, #0 - | orr CARG1, CARG4, CARG1, lsl RB - | rsblt CARG1, CARG1, #0 - | bx lr - | - |.macro .ffunc_bit, name - | .ffunc_1 bit_..name - | checktp CARG2, LJ_TISNUM - | blne ->vm_tobit_fb - |.endmacro - | - |.ffunc_bit tobit - | mvn CARG2, #~LJ_TISNUM - | b ->fff_restv - | - |.macro .ffunc_bit_op, name, ins - | .ffunc_bit name - | mov CARG3, CARG1 - | mov RA, #8 - |1: - | ldrd CARG12, [BASE, RA] - | cmp RA, NARGS8:RC - | add RA, RA, #8 - | bge >2 - | checktp CARG2, LJ_TISNUM - | blne ->vm_tobit_fb - | ins CARG3, CARG3, CARG1 - | b <1 - |.endmacro - | - |.ffunc_bit_op band, and - |.ffunc_bit_op bor, orr - |.ffunc_bit_op bxor, eor - | - |2: - | mvn CARG4, #~LJ_TISNUM - | ldr PC, [BASE, FRAME_PC] - | strd CARG34, [BASE, #-8] - | b ->fff_res1 - | - |.ffunc_bit bswap - | eor CARG3, CARG1, CARG1, ror #16 - | bic CARG3, CARG3, #0x00ff0000 - | ror CARG1, CARG1, #8 - | mvn CARG2, #~LJ_TISNUM - | eor CARG1, CARG1, CARG3, lsr #8 - | b ->fff_restv - | - |.ffunc_bit bnot - | mvn CARG1, CARG1 - | mvn CARG2, #~LJ_TISNUM - | b ->fff_restv - | - |.macro .ffunc_bit_sh, name, ins, shmod - | .ffunc bit_..name - | ldrd CARG12, [BASE, #8] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - | checktp CARG2, LJ_TISNUM - | blne ->vm_tobit_fb - |.if shmod == 0 - | and RA, CARG1, #31 - |.else - | rsb RA, CARG1, #0 - |.endif - | ldrd CARG12, [BASE] - | checktp CARG2, LJ_TISNUM - | blne ->vm_tobit_fb - | ins CARG1, CARG1, RA - | mvn CARG2, #~LJ_TISNUM - | b ->fff_restv - |.endmacro - | - |.ffunc_bit_sh lshift, lsl, 0 - |.ffunc_bit_sh rshift, lsr, 0 - |.ffunc_bit_sh arshift, asr, 0 - |.ffunc_bit_sh rol, ror, 1 - |.ffunc_bit_sh ror, ror, 0 - | - |//----------------------------------------------------------------------- - | - |->fff_fallback: // Call fast function fallback handler. - | // BASE = new base, RC = nargs*8 - | ldr CARG3, [BASE, FRAME_FUNC] - | ldr CARG2, L->maxstack - | add CARG1, BASE, NARGS8:RC - | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC. - | str CARG1, L->top - | ldr CARG3, CFUNC:CARG3->f - | str BASE, L->base - | add CARG1, CARG1, #8*LUA_MINSTACK - | str PC, SAVE_PC // Redundant (but a defined value). - | cmp CARG1, CARG2 - | mov CARG1, L - | bhi >5 // Need to grow stack. - | blx CARG3 // (lua_State *L) - | // Either throws an error, or recovers and returns -1, 0 or nresults+1. - | ldr BASE, L->base - | cmp CRET1, #0 - | lsl RC, CRET1, #3 - | sub RA, BASE, #8 - | bgt ->fff_res // Returned nresults+1? - |1: // Returned 0 or -1: retry fast path. - | ldr CARG1, L->top - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | sub NARGS8:RC, CARG1, BASE - | bne ->vm_call_tail // Returned -1? - | ins_callt // Returned 0: retry fast path. - | - |// Reconstruct previous base for vmeta_call during tailcall. - |->vm_call_tail: - | ands CARG1, PC, #FRAME_TYPE - | bic CARG2, PC, #FRAME_TYPEP - | ldreq INS, [PC, #-4] - | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8. - | addeq CARG2, CARG2, #8 - | sub RB, BASE, CARG2 - | b ->vm_call_dispatch // Resolve again for tailcall. - | - |5: // Grow stack for fallback handler. - | mov CARG2, #LUA_MINSTACK - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldr BASE, L->base - | cmp CARG1, CARG1 // Set zero-flag to force retry. 
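Note how bit.rol is built from the rotate-right instruction above: the shmod == 1 variant negates the shift count first, since rotating left by n equals rotating right by (32 - n) mod 32. The same operation in portable C (a sketch, not part of the library):

#include <stdint.h>

/* 32-bit rotate left; the masking mirrors the "and RA, CARG1, #31"
** handling of shift counts in the fast functions above. */
static uint32_t rol32(uint32_t x, uint32_t n)
{
  n &= 31;
  return (x << n) | (x >> ((32 - n) & 31));
}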
- | b <1 - | - |->fff_gcstep: // Call GC step function. - | // BASE = new base, RC = nargs*8 - | mov RA, lr - | str BASE, L->base - | add CARG2, BASE, NARGS8:RC - | str PC, SAVE_PC // Redundant (but a defined value). - | str CARG2, L->top - | mov CARG1, L - | bl extern lj_gc_step // (lua_State *L) - | ldr BASE, L->base - | mov lr, RA // Help return address predictor. - | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] - | bx lr - | - |//----------------------------------------------------------------------- - |//-- Special dispatch targets ------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_record: // Dispatch target for recording phase. - |.if JIT - | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] - | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent. - | bne >5 - | // Decrement the hookcount for consistency, but always do the call. - | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] - | tst CARG1, #HOOK_ACTIVE - | bne >1 - | sub CARG2, CARG2, #1 - | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT - | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] - | b >1 - |.endif - | - |->vm_rethook: // Dispatch target for return hooks. - | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] - | tst CARG1, #HOOK_ACTIVE // Hook already active? - | beq >1 - |5: // Re-dispatch to static ins. - | decode_OP OP, INS - | add OP, DISPATCH, OP, lsl #2 - | ldr pc, [OP, #GG_DISP2STATIC] - | - |->vm_inshook: // Dispatch target for instr/line hooks. - | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] - | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] - | tst CARG1, #HOOK_ACTIVE // Hook already active? - | bne <5 - | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT - | beq <5 - | subs CARG2, CARG2, #1 - | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] - | beq >1 - | tst CARG1, #LUA_MASKLINE - | beq <5 - |1: - | mov CARG1, L - | str BASE, L->base - | mov CARG2, PC - | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. - | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) - |3: - | ldr BASE, L->base - |4: // Re-dispatch to static ins. - | ldrb OP, [PC, #-4] - | ldr INS, [PC, #-4] - | add OP, DISPATCH, OP, lsl #2 - | ldr OP, [OP, #GG_DISP2STATIC] - | decode_RA8 RA, INS - | decode_RD RC, INS - | bx OP - | - |->cont_hook: // Continue from hook yield. - | ldr CARG1, [CARG4, #-24] - | add PC, PC, #4 - | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins. - | b <4 - | - |->vm_hotloop: // Hot loop counter underflow. - |.if JIT - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L). - | sub CARG1, DISPATCH, #-GG_DISP2J - | str PC, SAVE_PC - | ldr CARG3, LFUNC:CARG3->field_pc - | mov CARG2, PC - | str L, [DISPATCH, #DISPATCH_J(L)] - | ldrb CARG3, [CARG3, #PC2PROTO(framesize)] - | str BASE, L->base - | add CARG3, BASE, CARG3, lsl #3 - | str CARG3, L->top - | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc) - | b <3 - |.endif - | - |->vm_callhook: // Dispatch target for call hooks. - | mov CARG2, PC - |.if JIT - | b >1 - |.endif - | - |->vm_hotcall: // Hot call counter underflow. - |.if JIT - | orr CARG2, PC, #1 - |1: - |.endif - | add CARG4, BASE, RC - | str PC, SAVE_PC - | mov CARG1, L - | str BASE, L->base - | sub RA, RA, BASE - | str CARG4, L->top - | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) - | // Returns ASMFunction. - | ldr BASE, L->base - | ldr CARG4, L->top - | mov CARG2, #0 - | add RA, BASE, RA - | sub NARGS8:RC, CARG4, BASE - | str CARG2, SAVE_PC // Invalidate for subsequent line hook. 
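The ->vm_inshook dispatch above reduces to a small decision before calling lj_dispatch_ins(); restated in C for readability (a sketch that assumes the flag definitions from lua.h and the internal lj_dispatch.h are in scope, mirroring the exact test order of the assembler code):

#include <stdint.h>
#include "lua.h"          /* LUA_MASKLINE, LUA_MASKCOUNT */
#include "lj_dispatch.h"  /* HOOK_ACTIVE (LuaJIT-internal) */

/* 1 = call the hook dispatcher, 0 = just re-dispatch the static opcode. */
static int ins_hook_check(uint8_t hookmask, uint32_t *hookcount)
{
  if (hookmask & HOOK_ACTIVE)
    return 0;                                  /* hook already running */
  if (!(hookmask & (LUA_MASKLINE | LUA_MASKCOUNT)))
    return 0;
  if (--*hookcount == 0)
    return 1;                                  /* count hook fires */
  return (hookmask & LUA_MASKLINE) ? 1 : 0;    /* line hook fires */
}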
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | ldr INS, [PC, #-4] - | bx CRET1 - | - |->cont_stitch: // Trace stitching. - |.if JIT - | // RA = resultptr, CARG4 = meta base - | ldr RB, SAVE_MULTRES - | ldr INS, [PC, #-4] - | ldr CARG3, [CARG4, #-24] // Save previous trace number. - | subs RB, RB, #8 - | decode_RA8 RC, INS // Call base. - | beq >2 - |1: // Move results down. - | ldrd CARG12, [RA] - | add RA, RA, #8 - | subs RB, RB, #8 - | strd CARG12, [BASE, RC] - | add RC, RC, #8 - | bne <1 - |2: - | decode_RA8 RA, INS - | decode_RB8 RB, INS - | add RA, RA, RB - | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)] - |3: - | cmp RA, RC - | mvn CARG2, #~LJ_TNIL - | bhi >9 // More results wanted? - | - | ldr TRACE:RA, [CARG1, CARG3, lsl #2] - | cmp TRACE:RA, #0 - | beq ->cont_nop - | ldrh RC, TRACE:RA->link - | cmp RC, CARG3 - | beq ->cont_nop // Blacklisted. - | cmp RC, #0 - | bne =>BC_JLOOP // Jump to stitched trace. - | - | // Stitch a new trace to the previous trace. - | str CARG3, [DISPATCH, #DISPATCH_J(exitno)] - | str L, [DISPATCH, #DISPATCH_J(L)] - | str BASE, L->base - | sub CARG1, DISPATCH, #-GG_DISP2J - | mov CARG2, PC - | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) - | ldr BASE, L->base - | b ->cont_nop - | - |9: // Fill up results with nil. - | strd CARG12, [BASE, RC] - | add RC, RC, #8 - | b <3 - |.endif - | - |->vm_profhook: // Dispatch target for profiler hook. -#if LJ_HASPROFILE - | mov CARG1, L - | str BASE, L->base - | mov CARG2, PC - | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) - | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. - | ldr BASE, L->base - | sub PC, PC, #4 - | b ->cont_nop -#endif - | - |//----------------------------------------------------------------------- - |//-- Trace exit handler ------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_exit_handler: - |.if JIT - | sub sp, sp, #12 - | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12} - | ldr CARG1, [sp, #64] // Load original value of lr. - | ldr DISPATCH, [lr] // Load DISPATCH. - | add CARG3, sp, #64 // Recompute original value of sp. - | mv_vmstate CARG4, EXIT - | str CARG3, [sp, #52] // Store sp in RID_SP - | st_vmstate CARG4 - | ldr CARG2, [CARG1, #-4]! // Get exit instruction. - | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC. - | str CARG1, [sp, #60] - |.if FPU - | vpush {d0-d15} - |.endif - | lsl CARG2, CARG2, #8 - | add CARG1, CARG1, CARG2, asr #6 - | ldr CARG2, [lr, #4] // Load exit stub group offset. - | sub CARG1, CARG1, lr - | ldr L, [DISPATCH, #DISPATCH_GL(cur_L)] - | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number. - | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)] - | str CARG1, [DISPATCH, #DISPATCH_J(exitno)] - | mov CARG4, #0 - | str BASE, L->base - | str L, [DISPATCH, #DISPATCH_J(L)] - | str CARG4, [DISPATCH, #DISPATCH_GL(jit_base)] - | sub CARG1, DISPATCH, #-GG_DISP2J - | mov CARG2, sp - | bl extern lj_trace_exit // (jit_State *J, ExitState *ex) - | // Returns MULTRES (unscaled) or negated error code. - | ldr CARG2, L->cframe - | ldr BASE, L->base - | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated. - | mov sp, CARG2 - | ldr PC, SAVE_PC // Get SAVE_PC. - | str L, SAVE_L // Set SAVE_L (on-trace resume/yield). - | b >1 - |.endif - |->vm_exit_interp: - | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set. - |.if JIT - | ldr L, SAVE_L - |1: - | cmp CARG1, #0 - | blt >9 // Check for error from exit. 
- | lsl RC, CARG1, #3 - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | str RC, SAVE_MULTRES - | mov CARG3, #0 - | str BASE, L->base - | ldr CARG2, LFUNC:CARG2->field_pc - | str CARG3, [DISPATCH, #DISPATCH_GL(jit_base)] - | mv_vmstate CARG4, INTERP - | ldr KBASE, [CARG2, #PC2PROTO(k)] - | // Modified copy of ins_next which handles function header dispatch, too. - | ldrb OP, [PC] - | mov MASKR8, #255 - | ldr INS, [PC], #4 - | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. - | st_vmstate CARG4 - | cmp OP, #BC_FUNCC+2 // Fast function? - | bhs >4 - |2: - | cmp OP, #BC_FUNCF // Function header? - | ldr OP, [DISPATCH, OP, lsl #2] - | decode_RA8 RA, INS - | lsrlo RC, INS, #16 // No: Decode operands A*8 and D. - | subhs RC, RC, #8 - | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8 - | ldrhs CARG3, [BASE, FRAME_FUNC] - | bx OP - | - |4: // Check frame below fast function. - | ldr CARG1, [BASE, FRAME_PC] - | ands CARG2, CARG1, #FRAME_TYPE - | bne <2 // Trace stitching continuation? - | // Otherwise set KBASE for Lua function below fast function. - | ldr CARG3, [CARG1, #-4] - | decode_RA8 CARG1, CARG3 - | sub CARG2, BASE, CARG1 - | ldr LFUNC:CARG3, [CARG2, #-16] - | ldr CARG3, LFUNC:CARG3->field_pc - | ldr KBASE, [CARG3, #PC2PROTO(k)] - | b <2 - | - |9: // Rethrow error from the right C frame. - | rsb CARG2, CARG1, #0 - | mov CARG1, L - | bl extern lj_err_throw // (lua_State *L, int errcode) - |.endif - | - |//----------------------------------------------------------------------- - |//-- Math helper functions ---------------------------------------------- - |//----------------------------------------------------------------------- - | - |// FP value rounding. Called from JIT code. - |// - |// double lj_vm_floor/ceil/trunc(double x); - |.macro vm_round, func, hf - |.if hf == 1 - | vmov CARG1, CARG2, d0 - |.endif - | lsl CARG3, CARG2, #1 - | adds RB, CARG3, #0x00200000 - | bpl >2 // |x| < 1? - | mvn CARG4, #0x3cc - | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0. - | bxlo lr // |x| >= 2^52: done. - | mvn CARG4, #1 - | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask - | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask - | subs RB, RB, #32 - | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask - | orrpl CARG3, CARG3, CARG4 - | mvnpl CARG4, #1 - | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask - |.if "func" == "floor" - | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0) - |.else - | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0) - |.endif - |.if hf == 1 - | vmoveq d0, CARG1, CARG2 - |.endif - | bxeq lr // iszero: done. - | mvn CARG4, #1 - | cmp RB, #0 - | lslpl CARG3, CARG4, RB - | mvnmi CARG3, #0 - | add RB, RB, #32 - | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask - | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry - |.if hf == 1 - | vmov d0, CARG1, CARG2 - |.endif - | bx lr - | - |2: // |x| < 1: - | bxcs lr // |x| is not finite. - | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo - |.if "func" == "floor" - | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0) - |.else - | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0) - |.endif - | mov CARG1, #0 // lo = 0 - | and CARG2, CARG2, #0x80000000 - | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 
0.0 : 1.0) - | orrne CARG2, CARG2, CARG4 - |.if hf == 1 - | vmov d0, CARG1, CARG2 - |.endif - | bx lr - |.endmacro - | - |9: - | .long 0x3ff00000 // hiword(+1.0) - | - |->vm_floor: - |.if HFABI - | vm_round floor, 1 - |.endif - |->vm_floor_sf: - | vm_round floor, 0 - | - |->vm_ceil: - |.if HFABI - | vm_round ceil, 1 - |.endif - |->vm_ceil_sf: - | vm_round ceil, 0 - | - |.macro vm_trunc, hf - |.if JIT - |.if hf == 1 - | vmov CARG1, CARG2, d0 - |.endif - | lsl CARG3, CARG2, #1 - | adds RB, CARG3, #0x00200000 - | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0. - | movpl CARG1, #0 - |.if hf == 1 - | vmovpl d0, CARG1, CARG2 - |.endif - | bxpl lr - | mvn CARG4, #0x3cc - | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0. - | bxlo lr // |x| >= 2^52: already done. - | mvn CARG4, #1 - | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask - | subs RB, RB, #32 - | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask - |.if hf == 1 - | vmov d0, CARG1, CARG2 - |.endif - | bx lr - |.endif - |.endmacro - | - |->vm_trunc: - |.if HFABI - | vm_trunc 1 - |.endif - |->vm_trunc_sf: - | vm_trunc 0 - | - | // double lj_vm_mod(double dividend, double divisor); - |->vm_mod: - |.if FPU - | // Special calling convention. Also, RC (r11) is not preserved. - | vdiv.f64 d0, d6, d7 - | mov RC, lr - | vmov CARG1, CARG2, d0 - | bl ->vm_floor_sf - | vmov d0, CARG1, CARG2 - | vmul.f64 d0, d0, d7 - | mov lr, RC - | vsub.f64 d6, d6, d0 - | bx lr - |.else - | push {r0, r1, r2, r3, r4, lr} - | bl extern __aeabi_ddiv - | bl ->vm_floor_sf - | ldrd CARG34, [sp, #8] - | bl extern __aeabi_dmul - | ldrd CARG34, [sp] - | eor CARG2, CARG2, #0x80000000 - | bl extern __aeabi_dadd - | add sp, sp, #20 - | pop {pc} - |.endif - | - | // int lj_vm_modi(int dividend, int divisor); - |->vm_modi: - | ands RB, CARG1, #0x80000000 - | rsbmi CARG1, CARG1, #0 // a = |dividend| - | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor). - | cmp CARG2, #0 - | rsbmi CARG2, CARG2, #0 // b = |divisor| - | subs CARG4, CARG2, #1 - | cmpne CARG1, CARG2 - | moveq CARG1, #0 // if (b == 1 || a == b) a = 0 - | tsthi CARG2, CARG4 - | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1 - | bls >1 - | // Use repeated subtraction to get the remainder. - | clz CARG3, CARG1 - | clz CARG4, CARG2 - | sub CARG4, CARG4, CARG3 - | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8 - | addne pc, pc, CARG3, lsl #3 // Duff's device. - | nop - { - int i; - for (i = 31; i >= 0; i--) { - | cmp CARG1, CARG2, lsl #i - | subhs CARG1, CARG1, CARG2, lsl #i - } - } - |1: - | cmp CARG1, #0 - | cmpne RB, #0 - | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b - | eors CARG2, CARG1, RB, lsl #1 - | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y - | bx lr - | - |//----------------------------------------------------------------------- - |//-- Miscellaneous functions -------------------------------------------- - |//----------------------------------------------------------------------- - | - |//----------------------------------------------------------------------- - |//-- FFI helper functions ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Handler for callback functions. - |// Saveregs already performed. Callback slot number in [sp], g in r12. 
- |->vm_ffi_callback: - |.if FFI - |.type CTSTATE, CTState, PC - | ldr CTSTATE, GL:r12->ctype_state - | add DISPATCH, r12, #GG_G2DISP - |.if FPU - | str r4, SAVE_R4 - | add r4, sp, CFRAME_SPACE+4+8*8 - | vstmdb r4!, {d8-d15} - |.endif - |.if HFABI - | add r12, CTSTATE, #offsetof(CTState, cb.fpr[8]) - |.endif - | strd CARG34, CTSTATE->cb.gpr[2] - | strd CARG12, CTSTATE->cb.gpr[0] - |.if HFABI - | vstmdb r12!, {d0-d7} - |.endif - | ldr CARG4, [sp] - | add CARG3, sp, #CFRAME_SIZE - | mov CARG1, CTSTATE - | lsr CARG4, CARG4, #3 - | str CARG3, CTSTATE->cb.stack - | mov CARG2, sp - | str CARG4, CTSTATE->cb.slot - | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok. - | bl extern lj_ccallback_enter // (CTState *cts, void *cf) - | // Returns lua_State *. - | ldr BASE, L:CRET1->base - | mv_vmstate CARG2, INTERP - | ldr RC, L:CRET1->top - | mov MASKR8, #255 - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | mov L, CRET1 - | sub RC, RC, BASE - | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. - | st_vmstate CARG2 - | ins_callt - |.endif - | - |->cont_ffi_callback: // Return from FFI callback. - |.if FFI - | ldr CTSTATE, [DISPATCH, #DISPATCH_GL(ctype_state)] - | str BASE, L->base - | str CARG4, L->top - | str L, CTSTATE->L - | mov CARG1, CTSTATE - | mov CARG2, RA - | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) - | ldrd CARG12, CTSTATE->cb.gpr[0] - |.if HFABI - | vldr d0, CTSTATE->cb.fpr[0] - |.endif - | b ->vm_leave_unw - |.endif - | - |->vm_ffi_call: // Call C function via FFI. - | // Caveat: needs special frame unwinding, see below. - |.if FFI - | .type CCSTATE, CCallState, r4 - | push {CCSTATE, r5, r11, lr} - | mov CCSTATE, CARG1 - | ldr CARG1, CCSTATE:CARG1->spadj - | ldrb CARG2, CCSTATE->nsp - | add CARG3, CCSTATE, #offsetof(CCallState, stack) - |.if HFABI - | add RB, CCSTATE, #offsetof(CCallState, fpr[0]) - |.endif - | mov r11, sp - | sub sp, sp, CARG1 // Readjust stack. - | subs CARG2, CARG2, #1 - |.if HFABI - | vldm RB, {d0-d7} - |.endif - | ldr RB, CCSTATE->func - | bmi >2 - |1: // Copy stack slots. - | ldr CARG4, [CARG3, CARG2, lsl #2] - | str CARG4, [sp, CARG2, lsl #2] - | subs CARG2, CARG2, #1 - | bpl <1 - |2: - | ldrd CARG12, CCSTATE->gpr[0] - | ldrd CARG34, CCSTATE->gpr[2] - | blx RB - | mov sp, r11 - |.if HFABI - | add r12, CCSTATE, #offsetof(CCallState, fpr[4]) - |.endif - | strd CRET1, CCSTATE->gpr[0] - |.if HFABI - | vstmdb r12!, {d0-d3} - |.endif - | pop {CCSTATE, r5, r11, pc} - |.endif - |// Note: vm_ffi_call must be the last function in this object file! - | - |//----------------------------------------------------------------------- -} - -/* Generate the code for a single instruction. */ -static void build_ins(BuildCtx *ctx, BCOp op, int defop) -{ - int vk = 0; - |=>defop: - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - /* Remember: all ops branch for a true comparison, fall through otherwise. */ - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: - | // RA = src1*8, RC = src2, JMP with RC = target - | lsl RC, RC, #3 - | ldrd CARG12, [RA, BASE]! - | ldrh RB, [PC, #2] - | ldrd CARG34, [RC, BASE]! - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | checktp CARG2, LJ_TISNUM - | bne >3 - | checktp CARG4, LJ_TISNUM - | bne >4 - | cmp CARG1, CARG3 - if (op == BC_ISLT) { - | sublt PC, RB, #0x20000 - } else if (op == BC_ISGE) { - | subge PC, RB, #0x20000 - } else if (op == BC_ISLE) { - | suble PC, RB, #0x20000 - } else { - | subgt PC, RB, #0x20000 - } - |1: - | ins_next - | - |3: // CARG12 is not an integer. 
- |.if FPU - | vldr d0, [RA] - | bhi ->vmeta_comp - | // d0 is a number. - | checktp CARG4, LJ_TISNUM - | vldr d1, [RC] - | blo >5 - | bhi ->vmeta_comp - | // d0 is a number, CARG3 is an integer. - | vmov s4, CARG3 - | vcvt.f64.s32 d1, s4 - | b >5 - |4: // CARG1 is an integer, CARG34 is not an integer. - | vldr d1, [RC] - | bhi ->vmeta_comp - | // CARG1 is an integer, d1 is a number. - | vmov s4, CARG1 - | vcvt.f64.s32 d0, s4 - |5: // d0 and d1 are numbers. - | vcmp.f64 d0, d1 - | vmrs - | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. - if (op == BC_ISLT) { - | sublo PC, RB, #0x20000 - } else if (op == BC_ISGE) { - | subhs PC, RB, #0x20000 - } else if (op == BC_ISLE) { - | subls PC, RB, #0x20000 - } else { - | subhi PC, RB, #0x20000 - } - | b <1 - |.else - | bhi ->vmeta_comp - | // CARG12 is a number. - | checktp CARG4, LJ_TISNUM - | movlo RA, RB // Save RB. - | blo >5 - | bhi ->vmeta_comp - | // CARG12 is a number, CARG3 is an integer. - | mov CARG1, CARG3 - | mov RC, RA - | mov RA, RB // Save RB. - | bl extern __aeabi_i2d - | mov CARG3, CARG1 - | mov CARG4, CARG2 - | ldrd CARG12, [RC] // Restore first operand. - | b >5 - |4: // CARG1 is an integer, CARG34 is not an integer. - | bhi ->vmeta_comp - | // CARG1 is an integer, CARG34 is a number. - | mov RA, RB // Save RB. - | bl extern __aeabi_i2d - | ldrd CARG34, [RC] // Restore second operand. - |5: // CARG12 and CARG34 are numbers. - | bl extern __aeabi_cdcmple - | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. - if (op == BC_ISLT) { - | sublo PC, RA, #0x20000 - } else if (op == BC_ISGE) { - | subhs PC, RA, #0x20000 - } else if (op == BC_ISLE) { - | subls PC, RA, #0x20000 - } else { - | subhi PC, RA, #0x20000 - } - | b <1 - |.endif - break; - - case BC_ISEQV: case BC_ISNEV: - vk = op == BC_ISEQV; - | // RA = src1*8, RC = src2, JMP with RC = target - | lsl RC, RC, #3 - | ldrd CARG12, [RA, BASE]! - | ldrh RB, [PC, #2] - | ldrd CARG34, [RC, BASE]! - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | checktp CARG2, LJ_TISNUM - | cmnls CARG4, #-LJ_TISNUM - if (vk) { - | bls ->BC_ISEQN_Z - } else { - | bls ->BC_ISNEN_Z - } - | // Either or both types are not numbers. - |.if FFI - | checktp CARG2, LJ_TCDATA - | checktpne CARG4, LJ_TCDATA - | beq ->vmeta_equal_cd - |.endif - | cmp CARG2, CARG4 // Compare types. - | bne >2 // Not the same type? - | checktp CARG2, LJ_TISPRI - | bhs >1 // Same type and primitive type? - | - | // Same types and not a primitive type. Compare GCobj or pvalue. - | cmp CARG1, CARG3 - if (vk) { - | bne >3 // Different GCobjs or pvalues? - |1: // Branch if same. - | sub PC, RB, #0x20000 - |2: // Different. - | ins_next - |3: - | checktp CARG2, LJ_TISTABUD - | bhi <2 // Different objects and not table/ud? - } else { - | beq >1 // Same GCobjs or pvalues? - | checktp CARG2, LJ_TISTABUD - | bhi >2 // Different objects and not table/ud? - } - | // Different tables or userdatas. Need to check __eq metamethod. - | // Field metatable must be at same offset for GCtab and GCudata! - | ldr TAB:RA, TAB:CARG1->metatable - | cmp TAB:RA, #0 - if (vk) { - | beq <2 // No metatable? - } else { - | beq >2 // No metatable? - } - | ldrb RA, TAB:RA->nomm - | mov CARG4, #1-vk // ne = 0 or 1. - | mov CARG2, CARG1 - | tst RA, #1<<MM_eq - | beq ->vmeta_equal // 'no __eq' flag not set? - if (vk) { - | b <2 - } else { - |2: // Branch if different. - | sub PC, RB, #0x20000 - |1: // Same.
- | ins_next - } - break; - - case BC_ISEQS: case BC_ISNES: - vk = op == BC_ISEQS; - | // RA = src*8, RC = str_const (~), JMP with RC = target - | mvn RC, RC - | ldrd CARG12, [BASE, RA] - | ldrh RB, [PC, #2] - | ldr STR:CARG3, [KBASE, RC, lsl #2] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | checktp CARG2, LJ_TSTR - |.if FFI - | bne >7 - | cmp CARG1, CARG3 - |.else - | cmpeq CARG1, CARG3 - |.endif - if (vk) { - | subeq PC, RB, #0x20000 - |1: - } else { - |1: - | subne PC, RB, #0x20000 - } - | ins_next - | - |.if FFI - |7: - | checktp CARG2, LJ_TCDATA - | bne <1 - | b ->vmeta_equal_cd - |.endif - break; - - case BC_ISEQN: case BC_ISNEN: - vk = op == BC_ISEQN; - | // RA = src*8, RC = num_const (~), JMP with RC = target - | lsl RC, RC, #3 - | ldrd CARG12, [RA, BASE]! - | ldrh RB, [PC, #2] - | ldrd CARG34, [RC, KBASE]! - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - if (vk) { - |->BC_ISEQN_Z: - } else { - |->BC_ISNEN_Z: - } - | checktp CARG2, LJ_TISNUM - | bne >3 - | checktp CARG4, LJ_TISNUM - | bne >4 - | cmp CARG1, CARG3 - if (vk) { - | subeq PC, RB, #0x20000 - |1: - } else { - |1: - | subne PC, RB, #0x20000 - } - |2: - | ins_next - | - |3: // CARG12 is not an integer. - |.if FFI - | bhi >7 - |.else - if (!vk) { - | subhi PC, RB, #0x20000 - } - | bhi <2 - |.endif - |.if FPU - | checktp CARG4, LJ_TISNUM - | vmov s4, CARG3 - | vldr d0, [RA] - | vldrlo d1, [RC] - | vcvths.f64.s32 d1, s4 - | b >5 - |4: // CARG1 is an integer, d1 is a number. - | vmov s4, CARG1 - | vldr d1, [RC] - | vcvt.f64.s32 d0, s4 - |5: // d0 and d1 are numbers. - | vcmp.f64 d0, d1 - | vmrs - if (vk) { - | subeq PC, RB, #0x20000 - } else { - | subne PC, RB, #0x20000 - } - | b <2 - |.else - | // CARG12 is a number. - | checktp CARG4, LJ_TISNUM - | movlo RA, RB // Save RB. - | blo >5 - | // CARG12 is a number, CARG3 is an integer. - | mov CARG1, CARG3 - | mov RC, RA - |4: // CARG1 is an integer, CARG34 is a number. - | mov RA, RB // Save RB. - | bl extern __aeabi_i2d - | ldrd CARG34, [RC] // Restore other operand. - |5: // CARG12 and CARG34 are numbers. 
- | bl extern __aeabi_cdcmpeq - if (vk) { - | subeq PC, RA, #0x20000 - } else { - | subne PC, RA, #0x20000 - } - | b <2 - |.endif - | - |.if FFI - |7: - | checktp CARG2, LJ_TCDATA - | bne <1 - | b ->vmeta_equal_cd - |.endif - break; - - case BC_ISEQP: case BC_ISNEP: - vk = op == BC_ISEQP; - | // RA = src*8, RC = primitive_type (~), JMP with RC = target - | ldrd CARG12, [BASE, RA] - | ldrh RB, [PC, #2] - | add PC, PC, #4 - | mvn RC, RC - | add RB, PC, RB, lsl #2 - |.if FFI - | checktp CARG2, LJ_TCDATA - | beq ->vmeta_equal_cd - |.endif - | cmp CARG2, RC - if (vk) { - | subeq PC, RB, #0x20000 - } else { - | subne PC, RB, #0x20000 - } - | ins_next - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: - | // RA = dst*8 or unused, RC = src, JMP with RC = target - | add RC, BASE, RC, lsl #3 - | ldrh RB, [PC, #2] - | ldrd CARG12, [RC] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | checktp CARG2, LJ_TTRUE - if (op == BC_ISTC || op == BC_IST) { - | subls PC, RB, #0x20000 - if (op == BC_ISTC) { - | strdls CARG12, [BASE, RA] - } - } else { - | subhi PC, RB, #0x20000 - if (op == BC_ISFC) { - | strdhi CARG12, [BASE, RA] - } - } - | ins_next - break; - - case BC_ISTYPE: - | // RA = src*8, RC = -type - | ldrd CARG12, [BASE, RA] - | ins_next1 - | cmn CARG2, RC - | ins_next2 - | bne ->vmeta_istype - | ins_next3 - break; - case BC_ISNUM: - | // RA = src*8, RC = -(TISNUM-1) - | ldrd CARG12, [BASE, RA] - | ins_next1 - | checktp CARG2, LJ_TISNUM - | ins_next2 - | bhs ->vmeta_istype - | ins_next3 - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_MOV: - | // RA = dst*8, RC = src - | lsl RC, RC, #3 - | ins_next1 - | ldrd CARG12, [BASE, RC] - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - break; - case BC_NOT: - | // RA = dst*8, RC = src - | add RC, BASE, RC, lsl #3 - | ins_next1 - | ldr CARG1, [RC, #4] - | add RA, BASE, RA - | ins_next2 - | checktp CARG1, LJ_TTRUE - | mvnls CARG2, #~LJ_TFALSE - | mvnhi CARG2, #~LJ_TTRUE - | str CARG2, [RA, #4] - | ins_next3 - break; - case BC_UNM: - | // RA = dst*8, RC = src - | lsl RC, RC, #3 - | ldrd CARG12, [BASE, RC] - | ins_next1 - | ins_next2 - | checktp CARG2, LJ_TISNUM - | bhi ->vmeta_unm - | eorne CARG2, CARG2, #0x80000000 - | bne >5 - | rsbseq CARG1, CARG1, #0 - | ldrdvs CARG12, >9 - |5: - | strd CARG12, [BASE, RA] - | ins_next3 - | - |.align 8 - |9: - | .long 0x00000000, 0x41e00000 // 2^31. - break; - case BC_LEN: - | // RA = dst*8, RC = src - | lsl RC, RC, #3 - | ldrd CARG12, [BASE, RC] - | checkstr CARG2, >2 - | ldr CARG1, STR:CARG1->len - |1: - | mvn CARG2, #~LJ_TISNUM - | ins_next1 - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - |2: - | checktab CARG2, ->vmeta_len -#if LJ_52 - | ldr TAB:CARG3, TAB:CARG1->metatable - | cmp TAB:CARG3, #0 - | bne >9 - |3: -#endif - |->BC_LEN_Z: - | .IOS mov RC, BASE - | bl extern lj_tab_len // (GCtab *t) - | // Returns uint32_t (but less than 2^31). 
- | .IOS mov BASE, RC - | b <1 -#if LJ_52 - |9: - | ldrb CARG4, TAB:CARG3->nomm - | tst CARG4, #1<vmeta_len -#endif - break; - - /* -- Binary ops -------------------------------------------------------- */ - - |.macro ins_arithcheck, cond, ncond, target - ||if (vk == 1) { - | cmn CARG4, #-LJ_TISNUM - | cmn..cond CARG2, #-LJ_TISNUM - ||} else { - | cmn CARG2, #-LJ_TISNUM - | cmn..cond CARG4, #-LJ_TISNUM - ||} - | b..ncond target - |.endmacro - |.macro ins_arithcheck_int, target - | ins_arithcheck eq, ne, target - |.endmacro - |.macro ins_arithcheck_num, target - | ins_arithcheck lo, hs, target - |.endmacro - | - |.macro ins_arithpre - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - ||switch (vk) { - ||case 0: - | .if FPU - | ldrd CARG12, [RB, BASE]! - | ldrd CARG34, [RC, KBASE]! - | .else - | ldrd CARG12, [BASE, RB] - | ldrd CARG34, [KBASE, RC] - | .endif - || break; - ||case 1: - | .if FPU - | ldrd CARG34, [RB, BASE]! - | ldrd CARG12, [RC, KBASE]! - | .else - | ldrd CARG34, [BASE, RB] - | ldrd CARG12, [KBASE, RC] - | .endif - || break; - ||default: - | .if FPU - | ldrd CARG12, [RB, BASE]! - | ldrd CARG34, [RC, BASE]! - | .else - | ldrd CARG12, [BASE, RB] - | ldrd CARG34, [BASE, RC] - | .endif - || break; - ||} - |.endmacro - | - |.macro ins_arithpre_fpu, reg1, reg2 - |.if FPU - ||if (vk == 1) { - | vldr reg2, [RB] - | vldr reg1, [RC] - ||} else { - | vldr reg1, [RB] - | vldr reg2, [RC] - ||} - |.endif - |.endmacro - | - |.macro ins_arithpost_fpu, reg - | ins_next1 - | add RA, BASE, RA - | ins_next2 - | vstr reg, [RA] - | ins_next3 - |.endmacro - | - |.macro ins_arithfallback, ins - ||switch (vk) { - ||case 0: - | ins ->vmeta_arith_vn - || break; - ||case 1: - | ins ->vmeta_arith_nv - || break; - ||default: - | ins ->vmeta_arith_vv - || break; - ||} - |.endmacro - | - |.macro ins_arithdn, intins, fpins, fpcall - | ins_arithpre - |.if "intins" ~= "vm_modi" and not FPU - | ins_next1 - |.endif - | ins_arithcheck_int >5 - |.if "intins" == "smull" - | smull CARG1, RC, CARG3, CARG1 - | cmp RC, CARG1, asr #31 - | ins_arithfallback bne - |.elif "intins" == "vm_modi" - | movs CARG2, CARG3 - | ins_arithfallback beq - | bl ->vm_modi - | mvn CARG2, #~LJ_TISNUM - |.else - | intins CARG1, CARG1, CARG3 - | ins_arithfallback bvs - |.endif - |4: - |.if "intins" == "vm_modi" or FPU - | ins_next1 - |.endif - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - |5: // FP variant. 
- | ins_arithpre_fpu d6, d7 - | ins_arithfallback ins_arithcheck_num - |.if FPU - |.if "intins" == "vm_modi" - | bl fpcall - |.else - | fpins d6, d6, d7 - |.endif - | ins_arithpost_fpu d6 - |.else - | bl fpcall - |.if "intins" ~= "vm_modi" - | ins_next1 - |.endif - | b <4 - |.endif - |.endmacro - | - |.macro ins_arithfp, fpins, fpcall - | ins_arithpre - |.if "fpins" ~= "extern" or HFABI - | ins_arithpre_fpu d0, d1 - |.endif - | ins_arithfallback ins_arithcheck_num - |.if "fpins" == "extern" - | .IOS mov RC, BASE - | bl fpcall - | .IOS mov BASE, RC - |.elif FPU - | fpins d0, d0, d1 - |.else - | bl fpcall - |.endif - |.if ("fpins" ~= "extern" or HFABI) and FPU - | ins_arithpost_fpu d0 - |.else - | ins_next1 - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - |.endif - |.endmacro - - case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: - | ins_arithdn adds, vadd.f64, extern __aeabi_dadd - break; - case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: - | ins_arithdn subs, vsub.f64, extern __aeabi_dsub - break; - case BC_MULVN: case BC_MULNV: case BC_MULVV: - | ins_arithdn smull, vmul.f64, extern __aeabi_dmul - break; - case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: - | ins_arithfp vdiv.f64, extern __aeabi_ddiv - break; - case BC_MODVN: case BC_MODNV: case BC_MODVV: - | ins_arithdn vm_modi, vm_mod, ->vm_mod - break; - case BC_POW: - | // NYI: (partial) integer arithmetic. - | ins_arithfp extern, extern pow - break; - - case BC_CAT: - | decode_RB8 RC, INS - | decode_RC8 RB, INS - | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!) - | sub CARG3, RB, RC - | str BASE, L->base - | add CARG2, BASE, RB - |->BC_CAT_Z: - | // RA = dst*8, RC = src_start*8, CARG2 = top-1 - | mov CARG1, L - | str PC, SAVE_PC - | lsr CARG3, CARG3, #3 - | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) - | // Returns NULL (finished) or TValue * (metamethod). - | ldr BASE, L->base - | cmp CRET1, #0 - | bne ->vmeta_binop - | ldrd CARG34, [BASE, RC] - | ins_next1 - | ins_next2 - | strd CARG34, [BASE, RA] // Copy result to RA. - | ins_next3 - break; - - /* -- Constant ops ------------------------------------------------------ */ - - case BC_KSTR: - | // RA = dst*8, RC = str_const (~) - | mvn RC, RC - | ins_next1 - | ldr CARG1, [KBASE, RC, lsl #2] - | mvn CARG2, #~LJ_TSTR - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - break; - case BC_KCDATA: - |.if FFI - | // RA = dst*8, RC = cdata_const (~) - | mvn RC, RC - | ins_next1 - | ldr CARG1, [KBASE, RC, lsl #2] - | mvn CARG2, #~LJ_TCDATA - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - |.endif - break; - case BC_KSHORT: - | // RA = dst*8, (RC = int16_literal) - | mov CARG1, INS, asr #16 // Refetch sign-extended reg. 
- | mvn CARG2, #~LJ_TISNUM - | ins_next1 - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - break; - case BC_KNUM: - | // RA = dst*8, RC = num_const - | lsl RC, RC, #3 - | ins_next1 - | ldrd CARG12, [KBASE, RC] - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - break; - case BC_KPRI: - | // RA = dst*8, RC = primitive_type (~) - | add RA, BASE, RA - | mvn RC, RC - | ins_next1 - | ins_next2 - | str RC, [RA, #4] - | ins_next3 - break; - case BC_KNIL: - | // RA = base*8, RC = end - | add RA, BASE, RA - | add RC, BASE, RC, lsl #3 - | mvn CARG1, #~LJ_TNIL - | str CARG1, [RA, #4] - | add RA, RA, #8 - |1: - | str CARG1, [RA, #4] - | cmp RA, RC - | add RA, RA, #8 - | blt <1 - | ins_next_ - break; - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - | // RA = dst*8, RC = uvnum - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | lsl RC, RC, #2 - | add RC, RC, #offsetof(GCfuncL, uvptr) - | ldr UPVAL:CARG2, [LFUNC:CARG2, RC] - | ldr CARG2, UPVAL:CARG2->v - | ldrd CARG34, [CARG2] - | ins_next1 - | ins_next2 - | strd CARG34, [BASE, RA] - | ins_next3 - break; - case BC_USETV: - | // RA = uvnum*8, RC = src - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | lsr RA, RA, #1 - | add RA, RA, #offsetof(GCfuncL, uvptr) - | lsl RC, RC, #3 - | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] - | ldrd CARG34, [BASE, RC] - | ldrb RB, UPVAL:CARG2->marked - | ldrb RC, UPVAL:CARG2->closed - | ldr CARG2, UPVAL:CARG2->v - | tst RB, #LJ_GC_BLACK // isblack(uv) - | add RB, CARG4, #-LJ_TISGCV - | cmpne RC, #0 - | strd CARG34, [CARG2] - | bne >2 // Upvalue is closed and black? - |1: - | ins_next - | - |2: // Check if new value is collectable. - | cmn RB, #-(LJ_TNUMX - LJ_TISGCV) - | ldrbhi RC, GCOBJ:CARG3->gch.marked - | bls <1 // tvisgcv(v) - | sub CARG1, DISPATCH, #-GG_DISP2G - | tst RC, #LJ_GC_WHITES - | // Crossed a write barrier. Move the barrier forward. - |.if IOS - | beq <1 - | mov RC, BASE - | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | mov BASE, RC - |.else - | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv) - |.endif - | b <1 - break; - case BC_USETS: - | // RA = uvnum*8, RC = str_const (~) - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | lsr RA, RA, #1 - | add RA, RA, #offsetof(GCfuncL, uvptr) - | mvn RC, RC - | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] - | ldr STR:CARG3, [KBASE, RC, lsl #2] - | ldrb RB, UPVAL:CARG2->marked - | ldrb RC, UPVAL:CARG2->closed - | ldr CARG2, UPVAL:CARG2->v - | mvn CARG4, #~LJ_TSTR - | tst RB, #LJ_GC_BLACK // isblack(uv) - | ldrb RB, STR:CARG3->marked - | strd CARG34, [CARG2] - | bne >2 - |1: - | ins_next - | - |2: // Check if string is white and ensure upvalue is closed. - | tst RB, #LJ_GC_WHITES // iswhite(str) - | cmpne RC, #0 - | sub CARG1, DISPATCH, #-GG_DISP2G - | // Crossed a write barrier. Move the barrier forward. 
- |.if IOS - | beq <1 - | mov RC, BASE - | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | mov BASE, RC - |.else - | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv) - |.endif - | b <1 - break; - case BC_USETN: - | // RA = uvnum*8, RC = num_const - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | lsr RA, RA, #1 - | add RA, RA, #offsetof(GCfuncL, uvptr) - | lsl RC, RC, #3 - | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] - | ldrd CARG34, [KBASE, RC] - | ldr CARG2, UPVAL:CARG2->v - | ins_next1 - | ins_next2 - | strd CARG34, [CARG2] - | ins_next3 - break; - case BC_USETP: - | // RA = uvnum*8, RC = primitive_type (~) - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | lsr RA, RA, #1 - | add RA, RA, #offsetof(GCfuncL, uvptr) - | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] - | mvn RC, RC - | ldr CARG2, UPVAL:CARG2->v - | ins_next1 - | ins_next2 - | str RC, [CARG2, #4] - | ins_next3 - break; - - case BC_UCLO: - | // RA = level*8, RC = target - | ldr CARG3, L->openupval - | add RC, PC, RC, lsl #2 - | str BASE, L->base - | cmp CARG3, #0 - | sub PC, RC, #0x20000 - | beq >1 - | mov CARG1, L - | add CARG2, BASE, RA - | bl extern lj_func_closeuv // (lua_State *L, TValue *level) - | ldr BASE, L->base - |1: - | ins_next - break; - - case BC_FNEW: - | // RA = dst*8, RC = proto_const (~) (holding function prototype) - | mvn RC, RC - | str BASE, L->base - | ldr CARG2, [KBASE, RC, lsl #2] - | str PC, SAVE_PC - | ldr CARG3, [BASE, FRAME_FUNC] - | mov CARG1, L - | // (lua_State *L, GCproto *pt, GCfuncL *parent) - | bl extern lj_func_newL_gc - | // Returns GCfuncL *. - | ldr BASE, L->base - | mvn CARG2, #~LJ_TFUNC - | ins_next1 - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_TNEW: - case BC_TDUP: - | // RA = dst*8, RC = (hbits|asize) | tab_const (~) - if (op == BC_TDUP) { - | mvn RC, RC - } - | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)] - | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)] - | str BASE, L->base - | str PC, SAVE_PC - | cmp CARG3, CARG4 - | mov CARG1, L - | bhs >5 - |1: - if (op == BC_TNEW) { - | lsl CARG2, RC, #21 - | lsr CARG3, RC, #11 - | asr RC, CARG2, #21 - | lsr CARG2, CARG2, #21 - | cmn RC, #1 - | addeq CARG2, CARG2, #2 - | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) - | // Returns GCtab *. - } else { - | ldr CARG2, [KBASE, RC, lsl #2] - | bl extern lj_tab_dup // (lua_State *L, Table *kt) - | // Returns GCtab *. - } - | ldr BASE, L->base - | mvn CARG2, #~LJ_TTAB - | ins_next1 - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - |5: - | bl extern lj_gc_step_fixtop // (lua_State *L) - | mov CARG1, L - | b <1 - break; - - case BC_GGET: - | // RA = dst*8, RC = str_const (~) - case BC_GSET: - | // RA = dst*8, RC = str_const (~) - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | mvn RC, RC - | ldr TAB:CARG1, LFUNC:CARG2->env - | ldr STR:RC, [KBASE, RC, lsl #2] - if (op == BC_GGET) { - | b ->BC_TGETS_Z - } else { - | b ->BC_TSETS_Z - } - break; - - case BC_TGETV: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | // RA = dst*8, RB = table*8, RC = key*8 - | ldrd TAB:CARG12, [BASE, RB] - | ldrd CARG34, [BASE, RC] - | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12. - | checktp CARG4, LJ_TISNUM // Integer key? - | ldreq CARG4, TAB:CARG1->array - | ldreq CARG2, TAB:CARG1->asize - | bne >9 - | - | add CARG4, CARG4, CARG3, lsl #3 - | cmp CARG3, CARG2 // In array part? - | ldrdlo CARG34, [CARG4] - | bhs ->vmeta_tgetv - | ins_next1 // Overwrites RB! 
- | checktp CARG4, LJ_TNIL - | beq >5 - |1: - | ins_next2 - | strd CARG34, [BASE, RA] - | ins_next3 - | - |5: // Check for __index if table value is nil. - | ldr TAB:CARG2, TAB:CARG1->metatable - | cmp TAB:CARG2, #0 - | beq <1 // No metatable: done. - | ldrb CARG2, TAB:CARG2->nomm - | tst CARG2, #1<vmeta_tgetv - | - |9: - | checktp CARG4, LJ_TSTR // String key? - | moveq STR:RC, CARG3 - | beq ->BC_TGETS_Z - | b ->vmeta_tgetv - break; - case BC_TGETS: - | decode_RB8 RB, INS - | and RC, RC, #255 - | // RA = dst*8, RB = table*8, RC = str_const (~) - | ldrd CARG12, [BASE, RB] - | mvn RC, RC - | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC. - | checktab CARG2, ->vmeta_tgets1 - |->BC_TGETS_Z: - | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 - | ldr CARG3, TAB:CARG1->hmask - | ldr CARG4, STR:RC->hash - | ldr NODE:INS, TAB:CARG1->node - | mov TAB:RB, TAB:CARG1 - | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask - | add CARG3, CARG3, CARG3, lsl #1 - | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 - |1: - | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS. - | ldrd CARG34, NODE:INS->val - | ldr NODE:INS, NODE:INS->next - | checktp CARG2, LJ_TSTR - | cmpeq CARG1, STR:RC - | bne >4 - | checktp CARG4, LJ_TNIL - | beq >5 - |3: - | ins_next1 - | ins_next2 - | strd CARG34, [BASE, RA] - | ins_next3 - | - |4: // Follow hash chain. - | cmp NODE:INS, #0 - | bne <1 - | // End of hash chain: key not found, nil result. - | - |5: // Check for __index if table value is nil. - | ldr TAB:CARG1, TAB:RB->metatable - | mov CARG3, #0 // Optional clear of undef. value (during load stall). - | mvn CARG4, #~LJ_TNIL - | cmp TAB:CARG1, #0 - | beq <3 // No metatable: done. - | ldrb CARG2, TAB:CARG1->nomm - | tst CARG2, #1<vmeta_tgets - break; - case BC_TGETB: - | decode_RB8 RB, INS - | and RC, RC, #255 - | // RA = dst*8, RB = table*8, RC = index - | ldrd CARG12, [BASE, RB] - | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12. - | ldr CARG3, TAB:CARG1->asize - | ldr CARG4, TAB:CARG1->array - | lsl CARG2, RC, #3 - | cmp RC, CARG3 - | ldrdlo CARG34, [CARG4, CARG2] - | bhs ->vmeta_tgetb - | ins_next1 // Overwrites RB! - | checktp CARG4, LJ_TNIL - | beq >5 - |1: - | ins_next2 - | strd CARG34, [BASE, RA] - | ins_next3 - | - |5: // Check for __index if table value is nil. - | ldr TAB:CARG2, TAB:CARG1->metatable - | cmp TAB:CARG2, #0 - | beq <1 // No metatable: done. - | ldrb CARG2, TAB:CARG2->nomm - | tst CARG2, #1<vmeta_tgetb - break; - case BC_TGETR: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | // RA = dst*8, RB = table*8, RC = key*8 - | ldr TAB:CARG1, [BASE, RB] - | ldr CARG2, [BASE, RC] - | ldr CARG4, TAB:CARG1->array - | ldr CARG3, TAB:CARG1->asize - | add CARG4, CARG4, CARG2, lsl #3 - | cmp CARG2, CARG3 // In array part? - | bhs ->vmeta_tgetr - | ldrd CARG12, [CARG4] - |->BC_TGETR_Z: - | ins_next1 - | ins_next2 - | strd CARG12, [BASE, RA] - | ins_next3 - break; - - case BC_TSETV: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | // RA = src*8, RB = table*8, RC = key*8 - | ldrd TAB:CARG12, [BASE, RB] - | ldrd CARG34, [BASE, RC] - | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12. - | checktp CARG4, LJ_TISNUM // Integer key? - | ldreq CARG2, TAB:CARG1->array - | ldreq CARG4, TAB:CARG1->asize - | bne >9 - | - | add CARG2, CARG2, CARG3, lsl #3 - | cmp CARG3, CARG4 // In array part? - | ldrlo INS, [CARG2, #4] - | bhs ->vmeta_tsetv - | ins_next1 // Overwrites RB! 
- | checktp INS, LJ_TNIL - | ldrb INS, TAB:CARG1->marked - | ldrd CARG34, [BASE, RA] - | beq >5 - |1: - | tst INS, #LJ_GC_BLACK // isblack(table) - | strd CARG34, [CARG2] - | bne >7 - |2: - | ins_next2 - | ins_next3 - | - |5: // Check for __newindex if previous value is nil. - | ldr TAB:RA, TAB:CARG1->metatable - | cmp TAB:RA, #0 - | beq <1 // No metatable: done. - | ldrb RA, TAB:RA->nomm - | tst RA, #1<vmeta_tsetv - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:CARG1, INS, CARG3 - | b <2 - | - |9: - | checktp CARG4, LJ_TSTR // String key? - | moveq STR:RC, CARG3 - | beq ->BC_TSETS_Z - | b ->vmeta_tsetv - break; - case BC_TSETS: - | decode_RB8 RB, INS - | and RC, RC, #255 - | // RA = src*8, RB = table*8, RC = str_const (~) - | ldrd CARG12, [BASE, RB] - | mvn RC, RC - | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC. - | checktab CARG2, ->vmeta_tsets1 - |->BC_TSETS_Z: - | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 - | ldr CARG3, TAB:CARG1->hmask - | ldr CARG4, STR:RC->hash - | ldr NODE:INS, TAB:CARG1->node - | mov TAB:RB, TAB:CARG1 - | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask - | add CARG3, CARG3, CARG3, lsl #1 - | mov CARG4, #0 - | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 - | strb CARG4, TAB:RB->nomm // Clear metamethod cache. - |1: - | ldrd CARG12, NODE:INS->key - | ldr CARG4, NODE:INS->val.it - | ldr NODE:CARG3, NODE:INS->next - | checktp CARG2, LJ_TSTR - | cmpeq CARG1, STR:RC - | bne >5 - | ldrb CARG2, TAB:RB->marked - | checktp CARG4, LJ_TNIL // Key found, but nil value? - | ldrd CARG34, [BASE, RA] - | beq >4 - |2: - | tst CARG2, #LJ_GC_BLACK // isblack(table) - | strd CARG34, NODE:INS->val - | bne >7 - |3: - | ins_next - | - |4: // Check for __newindex if previous value is nil. - | ldr TAB:CARG1, TAB:RB->metatable - | cmp TAB:CARG1, #0 - | beq <2 // No metatable: done. - | ldrb CARG1, TAB:CARG1->nomm - | tst CARG1, #1<vmeta_tsets - | - |5: // Follow hash chain. - | movs NODE:INS, NODE:CARG3 - | bne <1 - | // End of hash chain: key not found, add a new one. - | - | // But check for __newindex first. - | ldr TAB:CARG1, TAB:RB->metatable - | mov CARG3, TMPDp - | str PC, SAVE_PC - | cmp TAB:CARG1, #0 // No metatable: continue. - | str BASE, L->base - | ldrbne CARG2, TAB:CARG1->nomm - | mov CARG1, L - | beq >6 - | tst CARG2, #1<vmeta_tsets // 'no __newindex' flag NOT set: check. - |6: - | mvn CARG4, #~LJ_TSTR - | str STR:RC, TMPDlo - | mov CARG2, TAB:RB - | str CARG4, TMPDhi - | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) - | // Returns TValue *. - | ldr BASE, L->base - | ldrd CARG34, [BASE, RA] - | strd CARG34, [CRET1] - | b <3 // No 2nd write barrier needed. - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, CARG2, CARG3 - | b <3 - break; - case BC_TSETB: - | decode_RB8 RB, INS - | and RC, RC, #255 - | // RA = src*8, RB = table*8, RC = index - | ldrd CARG12, [BASE, RB] - | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12. - | ldr CARG3, TAB:CARG1->asize - | ldr RB, TAB:CARG1->array - | lsl CARG2, RC, #3 - | cmp RC, CARG3 - | ldrdlo CARG34, [CARG2, RB]! - | bhs ->vmeta_tsetb - | ins_next1 // Overwrites RB! - | checktp CARG4, LJ_TNIL - | ldrb INS, TAB:CARG1->marked - | ldrd CARG34, [BASE, RA] - | beq >5 - |1: - | tst INS, #LJ_GC_BLACK // isblack(table) - | strd CARG34, [CARG2] - | bne >7 - |2: - | ins_next2 - | ins_next3 - | - |5: // Check for __newindex if previous value is nil. 
- | ldr TAB:RA, TAB:CARG1->metatable - | cmp TAB:RA, #0 - | beq <1 // No metatable: done. - | ldrb RA, TAB:RA->nomm - | tst RA, #1<vmeta_tsetb - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:CARG1, INS, CARG3 - | b <2 - break; - case BC_TSETR: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | // RA = src*8, RB = table*8, RC = key*8 - | ldr TAB:CARG2, [BASE, RB] - | ldr CARG3, [BASE, RC] - | ldrb INS, TAB:CARG2->marked - | ldr CARG1, TAB:CARG2->array - | ldr CARG4, TAB:CARG2->asize - | tst INS, #LJ_GC_BLACK // isblack(table) - | add CARG1, CARG1, CARG3, lsl #3 - | bne >7 - |2: - | cmp CARG3, CARG4 // In array part? - | bhs ->vmeta_tsetr - |->BC_TSETR_Z: - | ldrd CARG34, [BASE, RA] - | ins_next1 - | ins_next2 - | strd CARG34, [CARG1] - | ins_next3 - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:CARG2, INS, RB - | b <2 - break; - - case BC_TSETM: - | // RA = base*8 (table at base-1), RC = num_const (start index) - | add RA, BASE, RA - |1: - | ldr RB, SAVE_MULTRES - | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table. - | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word. - | subs RB, RB, #8 - | ldr CARG4, TAB:CARG2->asize - | beq >4 // Nothing to copy? - | add CARG3, CARG1, RB, lsr #3 - | cmp CARG3, CARG4 - | ldr CARG4, TAB:CARG2->array - | add RB, RA, RB - | bhi >5 - | add INS, CARG4, CARG1, lsl #3 - | ldrb CARG1, TAB:CARG2->marked - |3: // Copy result slots to table. - | ldrd CARG34, [RA], #8 - | strd CARG34, [INS], #8 - | cmp RA, RB - | blo <3 - | tst CARG1, #LJ_GC_BLACK // isblack(table) - | bne >7 - |4: - | ins_next - | - |5: // Need to resize array part. - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) - | // Must not reallocate the stack. - | .IOS ldr BASE, L->base - | b <1 - | - |7: // Possible table write barrier for any value. Skip valiswhite check. - | barrierback TAB:CARG2, CARG1, CARG3 - | b <4 - break; - - /* -- Calls and vararg handling ----------------------------------------- */ - - case BC_CALLM: - | // RA = base*8, (RB = nresults+1,) RC = extra_nargs - | ldr CARG1, SAVE_MULTRES - | decode_RC8 NARGS8:RC, INS - | add NARGS8:RC, NARGS8:RC, CARG1 - | b ->BC_CALL_Z - break; - case BC_CALL: - | decode_RC8 NARGS8:RC, INS - | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8 - |->BC_CALL_Z: - | mov RB, BASE // Save old BASE for vmeta_call. - | ldrd CARG34, [BASE, RA]! - | sub NARGS8:RC, NARGS8:RC, #8 - | add BASE, BASE, #8 - | checkfunc CARG4, ->vmeta_call - | ins_call - break; - - case BC_CALLMT: - | // RA = base*8, (RB = 0,) RC = extra_nargs - | ldr CARG1, SAVE_MULTRES - | add NARGS8:RC, CARG1, RC, lsl #3 - | b ->BC_CALLT1_Z - break; - case BC_CALLT: - | lsl NARGS8:RC, RC, #3 - | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 - |->BC_CALLT1_Z: - | ldrd LFUNC:CARG34, [RA, BASE]! - | sub NARGS8:RC, NARGS8:RC, #8 - | add RA, RA, #8 - | checkfunc CARG4, ->vmeta_callt - | ldr PC, [BASE, FRAME_PC] - |->BC_CALLT2_Z: - | mov RB, #0 - | ldrb CARG4, LFUNC:CARG3->ffid - | tst PC, #FRAME_TYPE - | bne >7 - |1: - | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC. - | cmp NARGS8:RC, #0 - | beq >3 - |2: - | ldrd CARG12, [RA, RB] - | add INS, RB, #8 - | cmp INS, NARGS8:RC - | strd CARG12, [BASE, RB] - | mov RB, INS - | bne <2 - |3: - | cmp CARG4, #1 // (> FF_C) Calling a fast function? 
- | bhi >5 - |4: - | ins_callt - | - |5: // Tailcall to a fast function with a Lua frame below. - | ldr INS, [PC, #-4] - | decode_RA8 RA, INS - | sub CARG1, BASE, RA - | ldr LFUNC:CARG1, [CARG1, #-16] - | ldr CARG1, LFUNC:CARG1->field_pc - | ldr KBASE, [CARG1, #PC2PROTO(k)] - | b <4 - | - |7: // Tailcall from a vararg function. - | eor PC, PC, #FRAME_VARG - | tst PC, #FRAME_TYPEP // Vararg frame below? - | movne CARG4, #0 // Clear ffid if no Lua function below. - | bne <1 - | sub BASE, BASE, PC - | ldr PC, [BASE, FRAME_PC] - | tst PC, #FRAME_TYPE - | movne CARG4, #0 // Clear ffid if no Lua function below. - | b <1 - break; - - case BC_ITERC: - | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1)) - | add RA, BASE, RA - | mov RB, BASE // Save old BASE for vmeta_call. - | ldrd CARG34, [RA, #-16] - | ldrd CARG12, [RA, #-8] - | add BASE, RA, #8 - | strd CARG34, [RA, #8] // Copy state. - | strd CARG12, [RA, #16] // Copy control var. - | // STALL: locked CARG34. - | ldrd LFUNC:CARG34, [RA, #-24] - | mov NARGS8:RC, #16 // Iterators get 2 arguments. - | // STALL: load CARG34. - | strd LFUNC:CARG34, [RA] // Copy callable. - | checkfunc CARG4, ->vmeta_call - | ins_call - break; - - case BC_ITERN: - | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1)) - |.if JIT - | // NYI: add hotloop, record BC_ITERN. - |.endif - | add RA, BASE, RA - | ldr TAB:RB, [RA, #-16] - | ldr CARG1, [RA, #-8] // Get index from control var. - | ldr INS, TAB:RB->asize - | ldr CARG2, TAB:RB->array - | add PC, PC, #4 - |1: // Traverse array part. - | subs RC, CARG1, INS - | add CARG3, CARG2, CARG1, lsl #3 - | bhs >5 // Index points after array part? - | ldrd CARG34, [CARG3] - | checktp CARG4, LJ_TNIL - | addeq CARG1, CARG1, #1 // Skip holes in array part. - | beq <1 - | ldrh RC, [PC, #-2] - | mvn CARG2, #~LJ_TISNUM - | strd CARG34, [RA, #8] - | add RC, PC, RC, lsl #2 - | add RB, CARG1, #1 - | strd CARG12, [RA] - | sub PC, RC, #0x20000 - | str RB, [RA, #-8] // Update control var. - |3: - | ins_next - | - |5: // Traverse hash part. - | ldr CARG4, TAB:RB->hmask - | ldr NODE:RB, TAB:RB->node - |6: - | add CARG1, RC, RC, lsl #1 - | cmp RC, CARG4 // End of iteration? Branch to ITERL+1. - | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8 - | bhi <3 - | ldrd CARG12, NODE:CARG3->val - | checktp CARG2, LJ_TNIL - | add RC, RC, #1 - | beq <6 // Skip holes in hash part. - | ldrh RB, [PC, #-2] - | add RC, RC, INS - | ldrd CARG34, NODE:CARG3->key - | str RC, [RA, #-8] // Update control var. - | strd CARG12, [RA, #8] - | add RC, PC, RB, lsl #2 - | sub PC, RC, #0x20000 - | strd CARG34, [RA] - | b <3 - break; - - case BC_ISNEXT: - | // RA = base*8, RC = target (points to ITERN) - | add RA, BASE, RA - | add RC, PC, RC, lsl #2 - | ldrd CFUNC:CARG12, [RA, #-24] - | ldr CARG3, [RA, #-12] - | ldr CARG4, [RA, #-4] - | checktp CARG2, LJ_TFUNC - | ldrbeq CARG1, CFUNC:CARG1->ffid - | checktpeq CARG3, LJ_TTAB - | checktpeq CARG4, LJ_TNIL - | cmpeq CARG1, #FF_next_N - | subeq PC, RC, #0x20000 - | bne >5 - | ins_next1 - | ins_next2 - | mov CARG1, #0 - | mvn CARG2, #0x00018000 - | strd CARG1, [RA, #-8] // Initialize control var. - |1: - | ins_next3 - |5: // Despecialize bytecode if any of the checks fail. - | mov CARG1, #BC_JMP - | mov OP, #BC_ITERC - | strb CARG1, [PC, #-4] - | sub PC, RC, #0x20000 - | strb OP, [PC] // Subsumes ins_next1. 
- | ins_next2 - | b <1 - break; - - case BC_VARG: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 - | ldr CARG1, [BASE, FRAME_PC] - | add RC, BASE, RC - | add RA, BASE, RA - | add RC, RC, #FRAME_VARG - | add CARG4, RA, RB - | sub CARG3, BASE, #8 // CARG3 = vtop - | sub RC, RC, CARG1 // RC = vbase - | // Note: RC may now be even _above_ BASE if nargs was < numparams. - | cmp RB, #0 - | sub CARG1, CARG3, RC - | beq >5 // Copy all varargs? - | sub CARG4, CARG4, #16 - |1: // Copy vararg slots to destination slots. - | cmp RC, CARG3 - | ldrdlo CARG12, [RC], #8 - | mvnhs CARG2, #~LJ_TNIL - | cmp RA, CARG4 - | strd CARG12, [RA], #8 - | blo <1 - |2: - | ins_next - | - |5: // Copy all varargs. - | ldr CARG4, L->maxstack - | cmp CARG1, #0 - | movle RB, #8 // MULTRES = (0+1)*8 - | addgt RB, CARG1, #8 - | add CARG2, RA, CARG1 - | str RB, SAVE_MULTRES - | ble <2 - | cmp CARG2, CARG4 - | bhi >7 - |6: - | ldrd CARG12, [RC], #8 - | strd CARG12, [RA], #8 - | cmp RC, CARG3 - | blo <6 - | b <2 - | - |7: // Grow stack for varargs. - | lsr CARG2, CARG1, #3 - | str RA, L->top - | mov CARG1, L - | str BASE, L->base - | sub RC, RC, BASE // Need delta, because BASE may change. - | str PC, SAVE_PC - | sub RA, RA, BASE - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldr BASE, L->base - | add RA, BASE, RA - | add RC, BASE, RC - | sub CARG3, BASE, #8 - | b <6 - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - | // RA = results*8, RC = extra results - | ldr CARG1, SAVE_MULTRES - | ldr PC, [BASE, FRAME_PC] - | add RA, BASE, RA - | add RC, CARG1, RC, lsl #3 - | b ->BC_RETM_Z - break; - - case BC_RET: - | // RA = results*8, RC = nresults+1 - | ldr PC, [BASE, FRAME_PC] - | lsl RC, RC, #3 - | add RA, BASE, RA - |->BC_RETM_Z: - | str RC, SAVE_MULTRES - |1: - | ands CARG1, PC, #FRAME_TYPE - | eor CARG2, PC, #FRAME_VARG - | bne ->BC_RETV2_Z - | - |->BC_RET_Z: - | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return - | ldr INS, [PC, #-4] - | subs CARG4, RC, #8 - | sub CARG3, BASE, #8 - | beq >3 - |2: - | ldrd CARG12, [RA], #8 - | add BASE, BASE, #8 - | subs CARG4, CARG4, #8 - | strd CARG12, [BASE, #-16] - | bne <2 - |3: - | decode_RA8 RA, INS - | sub CARG4, CARG3, RA - | decode_RB8 RB, INS - | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC] - |5: - | cmp RB, RC // More results expected? - | bhi >6 - | mov BASE, CARG4 - | ldr CARG2, LFUNC:CARG1->field_pc - | ins_next1 - | ins_next2 - | ldr KBASE, [CARG2, #PC2PROTO(k)] - | ins_next3 - | - |6: // Fill up results with nil. - | mvn CARG2, #~LJ_TNIL - | add BASE, BASE, #8 - | add RC, RC, #8 - | str CARG2, [BASE, #-12] - | b <5 - | - |->BC_RETV1_Z: // Non-standard return case. - | add RA, BASE, RA - |->BC_RETV2_Z: - | tst CARG2, #FRAME_TYPEP - | bne ->vm_return - | // Return from vararg function: relocate BASE down. 
- | sub BASE, BASE, CARG2 - | ldr PC, [BASE, FRAME_PC] - | b <1 - break; - - case BC_RET0: case BC_RET1: - | // RA = results*8, RC = nresults+1 - | ldr PC, [BASE, FRAME_PC] - | lsl RC, RC, #3 - | str RC, SAVE_MULTRES - | ands CARG1, PC, #FRAME_TYPE - | eor CARG2, PC, #FRAME_VARG - | ldreq INS, [PC, #-4] - | bne ->BC_RETV1_Z - if (op == BC_RET1) { - | ldrd CARG12, [BASE, RA] - } - | sub CARG4, BASE, #8 - | decode_RA8 RA, INS - if (op == BC_RET1) { - | strd CARG12, [CARG4] - } - | sub BASE, CARG4, RA - | decode_RB8 RB, INS - | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] - |5: - | cmp RB, RC - | bhi >6 - | ldr CARG2, LFUNC:CARG1->field_pc - | ins_next1 - | ins_next2 - | ldr KBASE, [CARG2, #PC2PROTO(k)] - | ins_next3 - | - |6: // Fill up results with nil. - | sub CARG2, CARG4, #4 - | mvn CARG3, #~LJ_TNIL - | str CARG3, [CARG2, RC] - | add RC, RC, #8 - | b <5 - break; - - /* -- Loops and branches ------------------------------------------------ */ - - |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4] - |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12] - |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20] - |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28] - - case BC_FORL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IFORL follows. - break; - - case BC_JFORI: - case BC_JFORL: -#if !LJ_HASJIT - break; -#endif - case BC_FORI: - case BC_IFORL: - | // RA = base*8, RC = target (after end of loop or start of loop) - vk = (op == BC_IFORL || op == BC_JFORL); - | ldrd CARG12, [RA, BASE]! - if (op != BC_JFORL) { - | add RC, PC, RC, lsl #2 - } - if (!vk) { - | ldrd CARG34, FOR_STOP - | checktp CARG2, LJ_TISNUM - | ldr RB, FOR_TSTEP - | bne >5 - | checktp CARG4, LJ_TISNUM - | ldr CARG4, FOR_STEP - | checktpeq RB, LJ_TISNUM - | bne ->vmeta_for - | cmp CARG4, #0 - | blt >4 - | cmp CARG1, CARG3 - } else { - | ldrd CARG34, FOR_STEP - | checktp CARG2, LJ_TISNUM - | bne >5 - | adds CARG1, CARG1, CARG3 - | ldr CARG4, FOR_STOP - if (op == BC_IFORL) { - | addvs RC, PC, #0x20000 // Overflow: prevent branch. - } else { - | bvs >2 // Overflow: do not enter mcode. - } - | cmp CARG3, #0 - | blt >4 - | cmp CARG1, CARG4 - } - |1: - if (op == BC_FORI) { - | subgt PC, RC, #0x20000 - } else if (op == BC_JFORI) { - | sub PC, RC, #0x20000 - | ldrhle RC, [PC, #-2] - } else if (op == BC_IFORL) { - | suble PC, RC, #0x20000 - } - if (vk) { - | strd CARG12, FOR_IDX - } - |2: - | ins_next1 - | ins_next2 - | strd CARG12, FOR_EXT - if (op == BC_JFORI || op == BC_JFORL) { - | ble =>BC_JLOOP - } - |3: - | ins_next3 - | - |4: // Invert check for negative step. - if (!vk) { - | cmp CARG3, CARG1 - } else { - | cmp CARG4, CARG1 - } - | b <1 - | - |5: // FP loop. 
- if (!vk) { - | cmnlo CARG4, #-LJ_TISNUM - | cmnlo RB, #-LJ_TISNUM - | bhs ->vmeta_for - |.if FPU - | vldr d0, FOR_IDX - | vldr d1, FOR_STOP - | cmp RB, #0 - | vstr d0, FOR_EXT - |.else - | cmp RB, #0 - | strd CARG12, FOR_EXT - | blt >8 - |.endif - } else { - |.if FPU - | vldr d0, FOR_IDX - | vldr d2, FOR_STEP - | vldr d1, FOR_STOP - | cmp CARG4, #0 - | vadd.f64 d0, d0, d2 - |.else - | cmp CARG4, #0 - | blt >8 - | bl extern __aeabi_dadd - | strd CARG12, FOR_IDX - | ldrd CARG34, FOR_STOP - | strd CARG12, FOR_EXT - |.endif - } - |6: - |.if FPU - | vcmpge.f64 d0, d1 - | vcmplt.f64 d1, d0 - | vmrs - |.else - | bl extern __aeabi_cdcmple - |.endif - if (vk) { - |.if FPU - | vstr d0, FOR_IDX - | vstr d0, FOR_EXT - |.endif - } - if (op == BC_FORI) { - | subhi PC, RC, #0x20000 - } else if (op == BC_JFORI) { - | sub PC, RC, #0x20000 - | ldrhls RC, [PC, #-2] - | bls =>BC_JLOOP - } else if (op == BC_IFORL) { - | subls PC, RC, #0x20000 - } else { - | bls =>BC_JLOOP - } - | ins_next1 - | ins_next2 - | b <3 - | - |.if not FPU - |8: // Invert check for negative step. - if (vk) { - | bl extern __aeabi_dadd - | strd CARG12, FOR_IDX - | strd CARG12, FOR_EXT - } - | mov CARG3, CARG1 - | mov CARG4, CARG2 - | ldrd CARG12, FOR_STOP - | b <6 - |.endif - break; - - case BC_ITERL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IITERL follows. - break; - - case BC_JITERL: -#if !LJ_HASJIT - break; -#endif - case BC_IITERL: - | // RA = base*8, RC = target - | ldrd CARG12, [RA, BASE]! - if (op == BC_JITERL) { - | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil. - | strdne CARG12, [RA, #-8] - | bne =>BC_JLOOP - } else { - | add RC, PC, RC, lsl #2 - | // STALL: load CARG12. - | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil. - | subne PC, RC, #0x20000 // Otherwise save control var + branch. - | strdne CARG12, [RA, #-8] - } - | ins_next - break; - - case BC_LOOP: - | // RA = base*8, RC = target (loop extent) - | // Note: RA/RC is only used by trace recorder to determine scope/extent - | // This opcode does NOT jump, it's only purpose is to detect a hot loop. - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_ILOOP follows. - break; - - case BC_ILOOP: - | // RA = base*8, RC = target (loop extent) - | ins_next - break; - - case BC_JLOOP: - |.if JIT - | // RA = base (ignored), RC = traceno - | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)] - | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0. - | ldr TRACE:RC, [CARG1, RC, lsl #2] - | st_vmstate CARG2 - | ldr RA, TRACE:RC->mcode - | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)] - | str L, [DISPATCH, #DISPATCH_GL(tmpbuf.L)] - | bx RA - |.endif - break; - - case BC_JMP: - | // RA = base*8 (only used by trace recorder), RC = target - | add RC, PC, RC, lsl #2 - | sub PC, RC, #0x20000 - | ins_next - break; - - /* -- Function headers -------------------------------------------------- */ - - case BC_FUNCF: - |.if JIT - | hotcall - |.endif - case BC_FUNCV: /* NYI: compiled vararg functions. */ - | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. - break; - - case BC_JFUNCF: -#if !LJ_HASJIT - break; -#endif - case BC_IFUNCF: - | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 - | ldr CARG1, L->maxstack - | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)] - | ldr KBASE, [PC, #-4+PC2PROTO(k)] - | cmp RA, CARG1 - | bhi ->vm_growstack_l - if (op != BC_JFUNCF) { - | ins_next1 - | ins_next2 - } - |2: - | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters. 
- | mvn CARG4, #~LJ_TNIL - | blo >3 - if (op == BC_JFUNCF) { - | decode_RD RC, INS - | b =>BC_JLOOP - } else { - | ins_next3 - } - | - |3: // Clear missing parameters. - | strd CARG34, [BASE, NARGS8:RC] - | add NARGS8:RC, NARGS8:RC, #8 - | b <2 - break; - - case BC_JFUNCV: -#if !LJ_HASJIT - break; -#endif - | NYI // NYI: compiled vararg functions - break; /* NYI: compiled vararg functions. */ - - case BC_IFUNCV: - | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 - | ldr CARG1, L->maxstack - | add CARG4, BASE, RC - | add RA, RA, RC - | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC. - | add CARG2, RC, #8+FRAME_VARG - | ldr KBASE, [PC, #-4+PC2PROTO(k)] - | cmp RA, CARG1 - | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG. - | bhs ->vm_growstack_l - | ldrb RB, [PC, #-4+PC2PROTO(numparams)] - | mov RA, BASE - | mov RC, CARG4 - | cmp RB, #0 - | add BASE, CARG4, #8 - | beq >3 - | mvn CARG3, #~LJ_TNIL - |1: - | cmp RA, RC // Less args than parameters? - | ldrdlo CARG12, [RA], #8 - | movhs CARG2, CARG3 - | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC). - |2: - | subs RB, RB, #1 - | strd CARG12, [CARG4, #8]! - | bne <1 - |3: - | ins_next - break; - - case BC_FUNCC: - case BC_FUNCCW: - | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8 - if (op == BC_FUNCC) { - | ldr CARG4, CFUNC:CARG3->f - } else { - | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)] - } - | add CARG2, RA, NARGS8:RC - | ldr CARG1, L->maxstack - | add RC, BASE, NARGS8:RC - | str BASE, L->base - | cmp CARG2, CARG1 - | str RC, L->top - if (op == BC_FUNCCW) { - | ldr CARG2, CFUNC:CARG3->f - } - | mv_vmstate CARG3, C - | mov CARG1, L - | bhi ->vm_growstack_c // Need to grow stack. - | st_vmstate CARG3 - | blx CARG4 // (lua_State *L [, lua_CFunction f]) - | // Returns nresults. - | ldr BASE, L->base - | mv_vmstate CARG3, INTERP - | ldr CRET2, L->top - | str L, [DISPATCH, #DISPATCH_GL(cur_L)] - | lsl RC, CRET1, #3 - | st_vmstate CARG3 - | ldr PC, [BASE, FRAME_PC] - | sub RA, CRET2, RC // RA = L->top - nresults*8 - | b ->vm_returnc - break; - - /* ---------------------------------------------------------------------- */ - - default: - fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); - exit(2); - break; - } -} - -static int build_backend(BuildCtx *ctx) -{ - int op; - - dasm_growpc(Dst, BC__MAX); - - build_subroutines(ctx); - - |.code_op - for (op = 0; op < BC__MAX; op++) - build_ins(ctx, (BCOp)op, op); - - return BC__MAX; -} - -/* Emit pseudo frame-info for all assembler functions. */ -static void emit_asm_debug(BuildCtx *ctx) -{ - int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); - int i; - switch (ctx->mode) { - case BUILD_elfasm: - fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n"); - fprintf(ctx->fp, - ".Lframe0:\n" - "\t.long .LECIE0-.LSCIE0\n" - ".LSCIE0:\n" - "\t.long 0xffffffff\n" - "\t.byte 0x1\n" - "\t.string \"\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 0xe\n" /* Return address is in lr. */ - "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */ - "\t.align 2\n" - ".LECIE0:\n\n"); - fprintf(ctx->fp, - ".LSFDE0:\n" - "\t.long .LEFDE0-.LASFDE0\n" - ".LASFDE0:\n" - "\t.long .Lframe0\n" - "\t.long .Lbegin\n" - "\t.long %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */ - fcofs, CFRAME_SIZE); - for (i = 11; i >= (LJ_ARCH_HASFPU ? 
5 : 4); i--) /* offset r4-r11 */ - fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i)); -#if LJ_ARCH_HASFPU - for (i = 15; i >= 8; i--) /* offset d8-d15 */ - fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 %d, %d\n", - 64+2*i, 10+2*(15-i)); - fprintf(ctx->fp, "\t.byte 0x84\n\t.uleb128 %d\n", 25); /* offset r4 */ -#endif - fprintf(ctx->fp, - "\t.align 2\n" - ".LEFDE0:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".LSFDE1:\n" - "\t.long .LEFDE1-.LASFDE1\n" - ".LASFDE1:\n" - "\t.long .Lframe0\n" - "\t.long lj_vm_ffi_call\n" - "\t.long %d\n" - "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ - "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */ - "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */ - "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */ - "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */ - "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */ - "\t.align 2\n" - ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); -#endif - break; - default: - break; - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm64.dasc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm64.dasc deleted file mode 100644 index f1251f2c48a..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_arm64.dasc +++ /dev/null @@ -1,3764 +0,0 @@ -|// Low-level VM code for ARM64 CPUs. -|// Bytecode interpreter, fast functions and helper functions. -|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -| -|.arch arm64 -|.section code_op, code_sub -| -|.actionlist build_actionlist -|.globals GLOB_ -|.globalnames globnames -|.externnames extnames -| -|// Note: The ragged indentation of the instructions is intentional. -|// The starting columns indicate data dependencies. -| -|//----------------------------------------------------------------------- -| -|// ARM64 registers and the AAPCS64 ABI 1.0 at a glance: -|// -|// x0-x17 temp, x19-x28 callee-saved, x29 fp, x30 lr -|// x18 is reserved on most platforms. Don't use it, save it or restore it. -|// x31 doesn't exist. Register number 31 either means xzr/wzr (zero) or sp, -|// depending on the instruction. -|// v0-v7 temp, v8-v15 callee-saved (only d8-d15 preserved), v16-v31 temp -|// -|// x0-x7/v0-v7 hold parameters and results. -| -|// Fixed register assignments for the interpreter. -| -|// The following must be C callee-save. -|.define BASE, x19 // Base of current Lua stack frame. -|.define KBASE, x20 // Constants of current Lua function. -|.define PC, x21 // Next PC. -|.define GLREG, x22 // Global state. -|.define LREG, x23 // Register holding lua_State (also in SAVE_L). -|.define TISNUM, x24 // Constant LJ_TISNUM << 47. -|.define TISNUMhi, x25 // Constant LJ_TISNUM << 15. -|.define TISNIL, x26 // Constant -1LL. -|.define fp, x29 // Yes, we have to maintain a frame pointer. -| -|.define ST_INTERP, w26 // Constant -1. -| -|// The following temporaries are not saved across C calls, except for RA/RC. -|.define RA, x27 -|.define RC, x28 -|.define RB, x17 -|.define RAw, w27 -|.define RCw, w28 -|.define RBw, w17 -|.define INS, x16 -|.define INSw, w16 -|.define ITYPE, x15 -|.define TMP0, x8 -|.define TMP1, x9 -|.define TMP2, x10 -|.define TMP3, x11 -|.define TMP0w, w8 -|.define TMP1w, w9 -|.define TMP2w, w10 -|.define TMP3w, w11 -| -|// Calling conventions. Also used as temporaries. 
-|.define CARG1, x0 -|.define CARG2, x1 -|.define CARG3, x2 -|.define CARG4, x3 -|.define CARG5, x4 -|.define CARG1w, w0 -|.define CARG2w, w1 -|.define CARG3w, w2 -|.define CARG4w, w3 -|.define CARG5w, w4 -| -|.define FARG1, d0 -|.define FARG2, d1 -| -|.define CRET1, x0 -|.define CRET1w, w0 -| -|// Stack layout while in interpreter. Must match with lj_frame.h. -| -|.define CFRAME_SPACE, 208 -|//----- 16 byte aligned, <-- sp entering interpreter -|// Unused [sp, #204] // 32 bit values -|.define SAVE_NRES, [sp, #200] -|.define SAVE_ERRF, [sp, #196] -|.define SAVE_MULTRES, [sp, #192] -|.define TMPD, [sp, #184] // 64 bit values -|.define SAVE_L, [sp, #176] -|.define SAVE_PC, [sp, #168] -|.define SAVE_CFRAME, [sp, #160] -|.define SAVE_FPR_, 96 // 96+8*8: 64 bit FPR saves -|.define SAVE_GPR_, 16 // 16+10*8: 64 bit GPR saves -|.define SAVE_LR, [sp, #8] -|.define SAVE_FP, [sp] -|//----- 16 byte aligned, <-- sp while in interpreter. -| -|.define TMPDofs, #184 -| -|.macro save_, gpr1, gpr2, fpr1, fpr2 -| stp d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(fpr1-8)*8] -| stp x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(gpr1-19)*8] -|.endmacro -|.macro rest_, gpr1, gpr2, fpr1, fpr2 -| ldp d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(fpr1-8)*8] -| ldp x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(gpr1-19)*8] -|.endmacro -| -|.macro saveregs -| stp fp, lr, [sp, #-CFRAME_SPACE]! -| add fp, sp, #0 -| stp x19, x20, [sp, # SAVE_GPR_] -| save_ 21, 22, 8, 9 -| save_ 23, 24, 10, 11 -| save_ 25, 26, 12, 13 -| save_ 27, 28, 14, 15 -|.endmacro -|.macro restoreregs -| ldp x19, x20, [sp, # SAVE_GPR_] -| rest_ 21, 22, 8, 9 -| rest_ 23, 24, 10, 11 -| rest_ 25, 26, 12, 13 -| rest_ 27, 28, 14, 15 -| ldp fp, lr, [sp], # CFRAME_SPACE -|.endmacro -| -|// Type definitions. Some of these are only used for documentation. -|.type L, lua_State, LREG -|.type GL, global_State, GLREG -|.type TVALUE, TValue -|.type GCOBJ, GCobj -|.type STR, GCstr -|.type TAB, GCtab -|.type LFUNC, GCfuncL -|.type CFUNC, GCfuncC -|.type PROTO, GCproto -|.type UPVAL, GCupval -|.type NODE, Node -|.type NARGS8, int -|.type TRACE, GCtrace -|.type SBUF, SBuf -| -|//----------------------------------------------------------------------- -| -|// Trap for not-yet-implemented parts. -|.macro NYI; brk; .endmacro -| -|//----------------------------------------------------------------------- -| -|// Access to frame relative to BASE. -|.define FRAME_FUNC, #-16 -|.define FRAME_PC, #-8 -| -|.macro decode_RA, dst, ins; ubfx dst, ins, #8, #8; .endmacro -|.macro decode_RB, dst, ins; ubfx dst, ins, #24, #8; .endmacro -|.macro decode_RC, dst, ins; ubfx dst, ins, #16, #8; .endmacro -|.macro decode_RD, dst, ins; ubfx dst, ins, #16, #16; .endmacro -|.macro decode_RC8RD, dst, src; ubfiz dst, src, #3, #8; .endmacro -| -|// Instruction decode+dispatch. -|.macro ins_NEXT -| ldr INSw, [PC], #4 -| add TMP1, GL, INS, uxtb #3 -| decode_RA RA, INS -| ldr TMP0, [TMP1, #GG_G2DISP] -| decode_RD RC, INS -| br TMP0 -|.endmacro -| -|// Instruction footer. -|.if 1 -| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. -| .define ins_next, ins_NEXT -| .define ins_next_, ins_NEXT -|.else -| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. -| // Affects only certain kinds of benchmarks (and only with -j off). -| .macro ins_next -| b ->ins_next -| .endmacro -| .macro ins_next_ -| ->ins_next: -| ins_NEXT -| .endmacro -|.endif -| -|// Call decode and dispatch. 
-|.macro ins_callt -| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC -| ldr PC, LFUNC:CARG3->pc -| ldr INSw, [PC], #4 -| add TMP1, GL, INS, uxtb #3 -| decode_RA RA, INS -| ldr TMP0, [TMP1, #GG_G2DISP] -| add RA, BASE, RA, lsl #3 -| br TMP0 -|.endmacro -| -|.macro ins_call -| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC -| str PC, [BASE, FRAME_PC] -| ins_callt -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Macros to check the TValue type and extract the GCobj. Branch on failure. -|.macro checktp, reg, tp, target -| asr ITYPE, reg, #47 -| cmn ITYPE, #-tp -| and reg, reg, #LJ_GCVMASK -| bne target -|.endmacro -|.macro checktp, dst, reg, tp, target -| asr ITYPE, reg, #47 -| cmn ITYPE, #-tp -| and dst, reg, #LJ_GCVMASK -| bne target -|.endmacro -|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro -|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro -|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro -|.macro checkint, reg, target -| cmp TISNUMhi, reg, lsr #32 -| bne target -|.endmacro -|.macro checknum, reg, target -| cmp TISNUMhi, reg, lsr #32 -| bls target -|.endmacro -|.macro checknumber, reg, target -| cmp TISNUMhi, reg, lsr #32 -| blo target -|.endmacro -| -|.macro mov_false, reg; movn reg, #0x8000, lsl #32; .endmacro -|.macro mov_true, reg; movn reg, #0x0001, lsl #48; .endmacro -| -#define GL_J(field) (GG_OFS(J) + (int)offsetof(jit_State, field)) -| -#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) -| -|.macro hotcheck, delta -| NYI -|.endmacro -| -|.macro hotloop -| hotcheck HOTCOUNT_LOOP -| blo ->vm_hotloop -|.endmacro -| -|.macro hotcall -| hotcheck HOTCOUNT_CALL -| blo ->vm_hotcall -|.endmacro -| -|// Set current VM state. -|.macro mv_vmstate, reg, st; movn reg, #LJ_VMST_..st; .endmacro -|.macro st_vmstate, reg; str reg, GL->vmstate; .endmacro -| -|// Move table write barrier back. Overwrites mark and tmp. -|.macro barrierback, tab, mark, tmp -| ldr tmp, GL->gc.grayagain -| and mark, mark, #~LJ_GC_BLACK // black2gray(tab) -| str tab, GL->gc.grayagain -| strb mark, tab->marked -| str tmp, tab->gclist -|.endmacro -| -|//----------------------------------------------------------------------- - -#if !LJ_DUALNUM -#error "Only dual-number mode supported for ARM64 target" -#endif - -/* Generate subroutines used by opcodes and other parts of the VM. */ -/* The .code_sub section should be last to help static branch prediction. */ -static void build_subroutines(BuildCtx *ctx) -{ - |.code_sub - | - |//----------------------------------------------------------------------- - |//-- Return handling ---------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_returnp: - | // See vm_return. Also: RB = previous base. - | tbz PC, #2, ->cont_dispatch // (PC & FRAME_P) == 0? - | - | // Return from pcall or xpcall fast func. - | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame. - | mov_true TMP0 - | mov BASE, RB - | // Prepending may overwrite the pcall frame, so do it at the end. - | str TMP0, [RA, #-8]! // Prepend true to results. - | - |->vm_returnc: - | adds RC, RC, #8 // RC = (nresults+1)*8. - | mov CRET1, #LUA_YIELD - | beq ->vm_unwind_c_eh - | str RCw, SAVE_MULTRES - | ands CARG1, PC, #FRAME_TYPE - | beq ->BC_RET_Z // Handle regular return to Lua. 
- | - |->vm_return: - | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return - | // CARG1 = PC & FRAME_TYPE - | and RB, PC, #~FRAME_TYPEP - | cmp CARG1, #FRAME_C - | sub RB, BASE, RB // RB = previous base. - | bne ->vm_returnp - | - | str RB, L->base - | ldrsw CARG2, SAVE_NRES // CARG2 = nresults+1. - | mv_vmstate TMP0w, C - | sub BASE, BASE, #16 - | subs TMP2, RC, #8 - | st_vmstate TMP0w - | beq >2 - |1: - | subs TMP2, TMP2, #8 - | ldr TMP0, [RA], #8 - | str TMP0, [BASE], #8 - | bne <1 - |2: - | cmp RC, CARG2, lsl #3 // More/less results wanted? - | bne >6 - |3: - | str BASE, L->top // Store new top. - | - |->vm_leave_cp: - | ldr RC, SAVE_CFRAME // Restore previous C frame. - | mov CRET1, #0 // Ok return status for vm_pcall. - | str RC, L->cframe - | - |->vm_leave_unw: - | restoreregs - | ret - | - |6: - | bgt >7 // Less results wanted? - | // More results wanted. Check stack size and fill up results with nil. - | ldr CARG3, L->maxstack - | cmp BASE, CARG3 - | bhs >8 - | str TISNIL, [BASE], #8 - | add RC, RC, #8 - | b <2 - | - |7: // Less results wanted. - | cbz CARG2, <3 // LUA_MULTRET+1 case? - | sub CARG1, RC, CARG2, lsl #3 - | sub BASE, BASE, CARG1 // Shrink top. - | b <3 - | - |8: // Corner case: need to grow stack for filling up results. - | // This can happen if: - | // - A C function grows the stack (a lot). - | // - The GC shrinks the stack in between. - | // - A return back from a lua_call() with (high) nresults adjustment. - | str BASE, L->top // Save current top held in BASE (yes). - | mov CARG1, L - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldr BASE, L->top // Need the (realloced) L->top in BASE. - | ldrsw CARG2, SAVE_NRES - | b <2 - | - |->vm_unwind_c: // Unwind C stack, return from vm_pcall. - | // (void *cframe, int errcode) - | mov sp, CARG1 - | mov CRET1, CARG2 - |->vm_unwind_c_eh: // Landing pad for external unwinder. - | ldr L, SAVE_L - | mv_vmstate TMP0w, C - | ldr GL, L->glref - | st_vmstate TMP0w - | b ->vm_leave_unw - | - |->vm_unwind_ff: // Unwind C stack, return from ff pcall. - | // (void *cframe) - | and sp, CARG1, #CFRAME_RAWMASK - |->vm_unwind_ff_eh: // Landing pad for external unwinder. - | ldr L, SAVE_L - | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 - | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 - | movn TISNIL, #0 - | mov RC, #16 // 2 results: false + error message. - | ldr BASE, L->base - | ldr GL, L->glref // Setup pointer to global state. - | mov_false TMP0 - | sub RA, BASE, #8 // Results start at BASE-8. - | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame. - | str TMP0, [BASE, #-8] // Prepend false to error message. - | st_vmstate ST_INTERP - | b ->vm_returnc - | - |//----------------------------------------------------------------------- - |//-- Grow stack for calls ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_growstack_c: // Grow stack for C function. - | // CARG1 = L - | mov CARG2, #LUA_MINSTACK - | b >2 - | - |->vm_growstack_l: // Grow stack for Lua function. - | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC - | add RC, BASE, RC - | sub RA, RA, BASE - | mov CARG1, L - | stp BASE, RC, L->base - | add PC, PC, #4 // Must point after first instruction. 
- | lsr CARG2, RA, #3 - |2: - | // L->base = new base, L->top = top - | str PC, SAVE_PC - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldp BASE, RC, L->base - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | sub NARGS8:RC, RC, BASE - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC - | ins_callt // Just retry the call. - | - |//----------------------------------------------------------------------- - |//-- Entry points into the assembler VM --------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_resume: // Setup C frame and resume thread. - | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) - | saveregs - | mov L, CARG1 - | ldr GL, L->glref // Setup pointer to global state. - | mov BASE, CARG2 - | str L, SAVE_L - | mov PC, #FRAME_CP - | str wzr, SAVE_NRES - | add TMP0, sp, #CFRAME_RESUME - | ldrb TMP1w, L->status - | str wzr, SAVE_ERRF - | str L, SAVE_PC // Any value outside of bytecode is ok. - | str xzr, SAVE_CFRAME - | str TMP0, L->cframe - | cbz TMP1w, >3 - | - | // Resume after yield (like a return). - | str L, GL->cur_L - | mov RA, BASE - | ldp BASE, CARG1, L->base - | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 - | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 - | ldr PC, [BASE, FRAME_PC] - | strb wzr, L->status - | movn TISNIL, #0 - | sub RC, CARG1, BASE - | ands CARG1, PC, #FRAME_TYPE - | add RC, RC, #8 - | st_vmstate ST_INTERP - | str RCw, SAVE_MULTRES - | beq ->BC_RET_Z - | b ->vm_return - | - |->vm_pcall: // Setup protected C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) - | saveregs - | mov PC, #FRAME_CP - | str CARG4w, SAVE_ERRF - | b >1 - | - |->vm_call: // Setup C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1) - | saveregs - | mov PC, #FRAME_C - | - |1: // Entry point for vm_pcall above (PC = ftype). - | ldr RC, L:CARG1->cframe - | str CARG3w, SAVE_NRES - | mov L, CARG1 - | str CARG1, SAVE_L - | ldr GL, L->glref // Setup pointer to global state. - | mov BASE, CARG2 - | str CARG1, SAVE_PC // Any value outside of bytecode is ok. - | str RC, SAVE_CFRAME - | str fp, L->cframe // Add our C frame to cframe chain. - | - |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). - | str L, GL->cur_L - | ldp RB, CARG1, L->base // RB = old base (for vmeta_call). - | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 - | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 - | add PC, PC, BASE - | movn TISNIL, #0 - | sub PC, PC, RB // PC = frame delta + frame type - | sub NARGS8:RC, CARG1, BASE - | st_vmstate ST_INTERP - | - |->vm_call_dispatch: - | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC - | ldr CARG3, [BASE, FRAME_FUNC] - | checkfunc CARG3, ->vmeta_call - | - |->vm_call_dispatch_f: - | ins_call - | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC - | - |->vm_cpcall: // Setup protected C frame, call C. - | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) - | saveregs - | mov L, CARG1 - | ldr RA, L:CARG1->stack - | str CARG1, SAVE_L - | ldr GL, L->glref // Setup pointer to global state. - | ldr RB, L->top - | str CARG1, SAVE_PC // Any value outside of bytecode is ok. - | ldr RC, L->cframe - | sub RA, RA, RB // Compute -savestack(L, L->top). - | str RAw, SAVE_NRES // Neg. delta means cframe w/o frame. - | str wzr, SAVE_ERRF // No error function. 
- | str RC, SAVE_CFRAME - | str fp, L->cframe // Add our C frame to cframe chain. - | str L, GL->cur_L - | blr CARG4 // (lua_State *L, lua_CFunction func, void *ud) - | mov BASE, CRET1 - | mov PC, #FRAME_CP - | cbnz BASE, <3 // Else continue with the call. - | b ->vm_leave_cp // No base? Just remove C frame. - | - |//----------------------------------------------------------------------- - |//-- Metamethod handling ------------------------------------------------ - |//----------------------------------------------------------------------- - | - |//-- Continuation dispatch ---------------------------------------------- - | - |->cont_dispatch: - | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8 - | ldr LFUNC:CARG3, [RB, FRAME_FUNC] - | ldr CARG1, [BASE, #-32] // Get continuation. - | mov CARG4, BASE - | mov BASE, RB // Restore caller BASE. - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - |.if FFI - | cmp CARG1, #1 - |.endif - | ldr PC, [CARG4, #-24] // Restore PC from [cont|PC]. - | ldr CARG3, LFUNC:CARG3->pc - | add TMP0, RA, RC - | str TISNIL, [TMP0, #-8] // Ensure one valid arg. - |.if FFI - | bls >1 - |.endif - | ldr KBASE, [CARG3, #PC2PROTO(k)] - | // BASE = base, RA = resultptr, CARG4 = meta base - | br CARG1 - | - |.if FFI - |1: - | beq ->cont_ffi_callback // cont = 1: return from FFI callback. - | // cont = 0: tailcall from C function. - | sub CARG4, CARG4, #32 - | sub RC, CARG4, BASE - | b ->vm_call_tail - |.endif - | - |->cont_cat: // RA = resultptr, CARG4 = meta base - | ldr INSw, [PC, #-4] - | sub CARG2, CARG4, #32 - | ldr TMP0, [RA] - | str BASE, L->base - | decode_RB RB, INS - | decode_RA RA, INS - | add TMP1, BASE, RB, lsl #3 - | subs TMP1, CARG2, TMP1 - | beq >1 - | str TMP0, [CARG2] - | lsr CARG3, TMP1, #3 - | b ->BC_CAT_Z - | - |1: - | str TMP0, [BASE, RA, lsl #3] - | b ->cont_nop - | - |//-- Table indexing metamethods ----------------------------------------- - | - |->vmeta_tgets1: - | movn CARG4, #~LJ_TSTR - | add CARG2, BASE, RB, lsl #3 - | add CARG4, STR:RC, CARG4, lsl #47 - | b >2 - | - |->vmeta_tgets: - | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48 - | str CARG2, GL->tmptv - | add CARG2, GL, #offsetof(global_State, tmptv) - |2: - | add CARG3, sp, TMPDofs - | str CARG4, TMPD - | b >1 - | - |->vmeta_tgetb: // RB = table, RC = index - | add RC, RC, TISNUM - | add CARG2, BASE, RB, lsl #3 - | add CARG3, sp, TMPDofs - | str RC, TMPD - | b >1 - | - |->vmeta_tgetv: // RB = table, RC = key - | add CARG2, BASE, RB, lsl #3 - | add CARG3, BASE, RC, lsl #3 - |1: - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) - | // Returns TValue * (finished) or NULL (metamethod). - | cbz CRET1, >3 - | ldr TMP0, [CRET1] - | str TMP0, [BASE, RA, lsl #3] - | ins_next - | - |3: // Call __index metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k - | sub TMP1, BASE, #FRAME_CONT - | ldr BASE, L->top - | mov NARGS8:RC, #16 // 2 args for func(t, k). - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. - | str PC, [BASE, #-24] // [cont|PC] - | sub PC, BASE, TMP1 - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | b ->vm_call_dispatch_f - | - |->vmeta_tgetr: - | sxtw CARG2, TMP1w - | bl extern lj_tab_getinth // (GCtab *t, int32_t key) - | // Returns cTValue * or NULL. 
- | mov TMP0, TISNIL - | cbz CRET1, ->BC_TGETR_Z - | ldr TMP0, [CRET1] - | b ->BC_TGETR_Z - | - |//----------------------------------------------------------------------- - | - |->vmeta_tsets1: - | movn CARG4, #~LJ_TSTR - | add CARG2, BASE, RB, lsl #3 - | add CARG4, STR:RC, CARG4, lsl #47 - | b >2 - | - |->vmeta_tsets: - | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48 - | str CARG2, GL->tmptv - | add CARG2, GL, #offsetof(global_State, tmptv) - |2: - | add CARG3, sp, TMPDofs - | str CARG4, TMPD - | b >1 - | - |->vmeta_tsetb: // RB = table, RC = index - | add RC, RC, TISNUM - | add CARG2, BASE, RB, lsl #3 - | add CARG3, sp, TMPDofs - | str RC, TMPD - | b >1 - | - |->vmeta_tsetv: - | add CARG2, BASE, RB, lsl #3 - | add CARG3, BASE, RC, lsl #3 - |1: - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) - | // Returns TValue * (finished) or NULL (metamethod). - | ldr TMP0, [BASE, RA, lsl #3] - | cbz CRET1, >3 - | // NOBARRIER: lj_meta_tset ensures the table is not black. - | str TMP0, [CRET1] - | ins_next - | - |3: // Call __newindex metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) - | sub TMP1, BASE, #FRAME_CONT - | ldr BASE, L->top - | mov NARGS8:RC, #24 // 3 args for func(t, k, v). - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. - | str TMP0, [BASE, #16] // Copy value to third argument. - | str PC, [BASE, #-24] // [cont|PC] - | sub PC, BASE, TMP1 - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | b ->vm_call_dispatch_f - | - |->vmeta_tsetr: - | sxtw CARG3, TMP1w - | str BASE, L->base - | str PC, SAVE_PC - | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) - | // Returns TValue *. - | b ->BC_TSETR_Z - | - |//-- Comparison metamethods --------------------------------------------- - | - |->vmeta_comp: - | add CARG2, BASE, RA, lsl #3 - | sub PC, PC, #4 - | add CARG3, BASE, RC, lsl #3 - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | uxtb CARG4w, INSw - | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) - | // Returns 0/1 or TValue * (metamethod). - |3: - | cmp CRET1, #1 - | bhi ->vmeta_binop - |4: - | ldrh RBw, [PC, #2] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | sub RB, RB, #0x20000 - | csel PC, PC, RB, lo - |->cont_nop: - | ins_next - | - |->cont_ra: // RA = resultptr - | ldr INSw, [PC, #-4] - | ldr TMP0, [RA] - | decode_RA TMP1, INS - | str TMP0, [BASE, TMP1, lsl #3] - | b ->cont_nop - | - |->cont_condt: // RA = resultptr - | ldr TMP0, [RA] - | mov_true TMP1 - | cmp TMP1, TMP0 // Branch if result is true. - | b <4 - | - |->cont_condf: // RA = resultptr - | ldr TMP0, [RA] - | mov_false TMP1 - | cmp TMP0, TMP1 // Branch if result is false. - | b <4 - | - |->vmeta_equal: - | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. - | and TAB:CARG3, CARG3, #LJ_GCVMASK - | sub PC, PC, #4 - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) - | // Returns 0/1 or TValue * (metamethod). - | b <3 - | - |->vmeta_equal_cd: - |.if FFI - | sub PC, PC, #4 - | str BASE, L->base - | mov CARG1, L - | mov CARG2, INS - | str PC, SAVE_PC - | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) - | // Returns 0/1 or TValue * (metamethod). 
- | b <3 - |.endif - | - |->vmeta_istype: - | sub PC, PC, #4 - | str BASE, L->base - | mov CARG1, L - | mov CARG2, RA - | mov CARG3, RC - | str PC, SAVE_PC - | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) - | b ->cont_nop - | - |//-- Arithmetic metamethods --------------------------------------------- - | - |->vmeta_arith_vn: - | add CARG3, BASE, RB, lsl #3 - | add CARG4, KBASE, RC, lsl #3 - | b >1 - | - |->vmeta_arith_nv: - | add CARG4, BASE, RB, lsl #3 - | add CARG3, KBASE, RC, lsl #3 - | b >1 - | - |->vmeta_unm: - | add CARG3, BASE, RC, lsl #3 - | mov CARG4, CARG3 - | b >1 - | - |->vmeta_arith_vv: - | add CARG3, BASE, RB, lsl #3 - | add CARG4, BASE, RC, lsl #3 - |1: - | uxtb CARG5w, INSw - | add CARG2, BASE, RA, lsl #3 - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) - | // Returns NULL (finished) or TValue * (metamethod). - | cbz CRET1, ->cont_nop - | - | // Call metamethod for binary op. - |->vmeta_binop: - | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 - | sub TMP1, CRET1, BASE - | str PC, [CRET1, #-24] // [cont|PC] - | add PC, TMP1, #FRAME_CONT - | mov BASE, CRET1 - | mov NARGS8:RC, #16 // 2 args for func(o1, o2). - | b ->vm_call_dispatch - | - |->vmeta_len: - | add CARG2, BASE, RC, lsl #3 -#if LJ_52 - | mov TAB:RC, TAB:CARG1 // Save table (ignored for other types). -#endif - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_len // (lua_State *L, TValue *o) - | // Returns NULL (retry) or TValue * (metamethod base). -#if LJ_52 - | cbnz CRET1, ->vmeta_binop // Binop call for compatibility. - | mov TAB:CARG1, TAB:RC - | b ->BC_LEN_Z -#else - | b ->vmeta_binop // Binop call for compatibility. -#endif - | - |//-- Call metamethod ---------------------------------------------------- - | - |->vmeta_call: // Resolve and call __call metamethod. - | // RB = old base, BASE = new base, RC = nargs*8 - | mov CARG1, L - | str RB, L->base // This is the callers base! - | sub CARG2, BASE, #16 - | str PC, SAVE_PC - | add CARG3, BASE, NARGS8:RC - | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. - | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | ins_call - | - |->vmeta_callt: // Resolve __call for BC_CALLT. - | // BASE = old base, RA = new base, RC = nargs*8 - | mov CARG1, L - | str BASE, L->base - | sub CARG2, RA, #16 - | str PC, SAVE_PC - | add CARG3, RA, NARGS8:RC - | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | ldr TMP1, [RA, FRAME_FUNC] // Guaranteed to be a function here. - | ldr PC, [BASE, FRAME_PC] - | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. 
- | and LFUNC:CARG3, TMP1, #LJ_GCVMASK - | b ->BC_CALLT2_Z - | - |//-- Argument coercion for 'for' statement ------------------------------ - | - |->vmeta_for: - | mov CARG1, L - | str BASE, L->base - | mov CARG2, RA - | str PC, SAVE_PC - | bl extern lj_meta_for // (lua_State *L, TValue *base) - | ldr INSw, [PC, #-4] - |.if JIT - | uxtb TMP0, INS - |.endif - | decode_RA RA, INS - | decode_RD RC, INS - |.if JIT - | cmp TMP0, #BC_JFORI - | beq =>BC_JFORI - |.endif - | b =>BC_FORI - | - |//----------------------------------------------------------------------- - |//-- Fast functions ----------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro .ffunc, name - |->ff_ .. name: - |.endmacro - | - |.macro .ffunc_1, name - |->ff_ .. name: - | ldr CARG1, [BASE] - | cmp NARGS8:RC, #8 - | blo ->fff_fallback - |.endmacro - | - |.macro .ffunc_2, name - |->ff_ .. name: - | ldp CARG1, CARG2, [BASE] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - |.endmacro - | - |.macro .ffunc_n, name - | .ffunc name - | ldr CARG1, [BASE] - | cmp NARGS8:RC, #8 - | ldr FARG1, [BASE] - | blo ->fff_fallback - | checknum CARG1, ->fff_fallback - |.endmacro - | - |.macro .ffunc_nn, name - | .ffunc name - | ldp CARG1, CARG2, [BASE] - | cmp NARGS8:RC, #16 - | ldp FARG1, FARG2, [BASE] - | blo ->fff_fallback - | checknum CARG1, ->fff_fallback - | checknum CARG2, ->fff_fallback - |.endmacro - | - |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2. - |.macro ffgccheck - | ldp CARG1, CARG2, GL->gc.total // Assumes threshold follows total. - | cmp CARG1, CARG2 - | blt >1 - | bl ->fff_gcstep - |1: - |.endmacro - | - |//-- Base library: checks ----------------------------------------------- - | - |.ffunc_1 assert - | ldr PC, [BASE, FRAME_PC] - | mov_false TMP1 - | cmp CARG1, TMP1 - | bhs ->fff_fallback - | str CARG1, [BASE, #-16] - | sub RB, BASE, #8 - | subs RA, NARGS8:RC, #8 - | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8. - | cbz RA, ->fff_res // Done if exactly 1 argument. - |1: - | ldr CARG1, [RB, #16] - | sub RA, RA, #8 - | str CARG1, [RB], #8 - | cbnz RA, <1 - | b ->fff_res - | - |.ffunc_1 type - | mov TMP0, #~LJ_TISNUM - | asr ITYPE, CARG1, #47 - | cmn ITYPE, #~LJ_TISNUM - | csinv TMP1, TMP0, ITYPE, lo - | add TMP1, TMP1, #offsetof(GCfuncC, upvalue)/8 - | ldr CARG1, [CFUNC:CARG3, TMP1, lsl #3] - | b ->fff_restv - | - |//-- Base library: getters and setters --------------------------------- - | - |.ffunc_1 getmetatable - | asr ITYPE, CARG1, #47 - | cmn ITYPE, #-LJ_TTAB - | ccmn ITYPE, #-LJ_TUDATA, #4, ne - | and TAB:CARG1, CARG1, #LJ_GCVMASK - | bne >6 - |1: // Field metatable must be at same offset for GCtab and GCudata! - | ldr TAB:RB, TAB:CARG1->metatable - |2: - | mov CARG1, TISNIL - | ldr STR:RC, GL->gcroot[GCROOT_MMNAME+MM_metatable] - | cbz TAB:RB, ->fff_restv - | ldr TMP1w, TAB:RB->hmask - | ldr TMP2w, STR:RC->hash - | ldr NODE:CARG3, TAB:RB->node - | and TMP1w, TMP1w, TMP2w // idx = str->hash & tab->hmask - | add TMP1, TMP1, TMP1, lsl #1 - | movn CARG4, #~LJ_TSTR - | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8 - | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for. - |3: // Rearranged logic, because we expect _not_ to find the key. - | ldp CARG1, TMP0, NODE:CARG3->val - | ldr NODE:CARG3, NODE:CARG3->next - | cmp TMP0, CARG4 - | beq >5 - | cbnz NODE:CARG3, <3 - |4: - | mov CARG1, RB // Use metatable as default result. 
- | movk CARG1, #(LJ_TTAB>>1)&0xffff, lsl #48 - | b ->fff_restv - |5: - | cmp TMP0, TISNIL - | bne ->fff_restv - | b <4 - | - |6: - | movn TMP0, #~LJ_TISNUM - | cmp ITYPE, TMP0 - | csel ITYPE, ITYPE, TMP0, hs - | sub TMP1, GL, ITYPE, lsl #3 - | ldr TAB:RB, [TMP1, #offsetof(global_State, gcroot[GCROOT_BASEMT])-8] - | b <2 - | - |.ffunc_2 setmetatable - | // Fast path: no mt for table yet and not clearing the mt. - | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback - | ldr TAB:TMP0, TAB:TMP1->metatable - | asr ITYPE, CARG2, #47 - | ldrb TMP2w, TAB:TMP1->marked - | cmn ITYPE, #-LJ_TTAB - | and TAB:CARG2, CARG2, #LJ_GCVMASK - | ccmp TAB:TMP0, #0, #0, eq - | bne ->fff_fallback - | str TAB:CARG2, TAB:TMP1->metatable - | tbz TMP2w, #2, ->fff_restv // isblack(table) - | barrierback TAB:TMP1, TMP2w, TMP0 - | b ->fff_restv - | - |.ffunc rawget - | ldr CARG2, [BASE] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - | checktab CARG2, ->fff_fallback - | mov CARG1, L - | add CARG3, BASE, #8 - | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) - | // Returns cTValue *. - | ldr CARG1, [CRET1] - | b ->fff_restv - | - |//-- Base library: conversions ------------------------------------------ - | - |.ffunc tonumber - | // Only handles the number case inline (without a base argument). - | ldr CARG1, [BASE] - | cmp NARGS8:RC, #8 - | bne ->fff_fallback - | checknumber CARG1, ->fff_fallback - | b ->fff_restv - | - |.ffunc_1 tostring - | // Only handles the string or number case inline. - | asr ITYPE, CARG1, #47 - | cmn ITYPE, #-LJ_TSTR - | // A __tostring method in the string base metatable is ignored. - | beq ->fff_restv - | // Handle numbers inline, unless a number base metatable is present. - | ldr TMP1, GL->gcroot[GCROOT_BASEMT_NUM] - | str BASE, L->base - | cmn ITYPE, #-LJ_TISNUM - | ccmp TMP1, #0, #0, ls - | str PC, SAVE_PC // Redundant (but a defined value). - | bne ->fff_fallback - | ffgccheck - | mov CARG1, L - | mov CARG2, BASE - | bl extern lj_strfmt_number // (lua_State *L, cTValue *o) - | // Returns GCstr *. - | movn TMP1, #~LJ_TSTR - | ldr BASE, L->base - | add CARG1, CARG1, TMP1, lsl #47 - | b ->fff_restv - | - |//-- Base library: iterators ------------------------------------------- - | - |.ffunc_1 next - | checktp CARG2, CARG1, LJ_TTAB, ->fff_fallback - | str TISNIL, [BASE, NARGS8:RC] // Set missing 2nd arg to nil. - | ldr PC, [BASE, FRAME_PC] - | stp BASE, BASE, L->base // Add frame since C call can throw. - | mov CARG1, L - | add CARG3, BASE, #8 - | str PC, SAVE_PC - | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) - | // Returns 0 at end of traversal. - | str TISNIL, [BASE, #-16] - | cbz CRET1, ->fff_res1 // End of traversal: return nil. - | ldp CARG1, CARG2, [BASE, #8] // Copy key and value to results. 
- | mov RC, #(2+1)*8 - | stp CARG1, CARG2, [BASE, #-16] - | b ->fff_res - | - |.ffunc_1 pairs - | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback -#if LJ_52 - | ldr TAB:CARG2, TAB:TMP1->metatable -#endif - | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0] - | ldr PC, [BASE, FRAME_PC] -#if LJ_52 - | cbnz TAB:CARG2, ->fff_fallback -#endif - | mov RC, #(3+1)*8 - | stp CARG1, TISNIL, [BASE, #-8] - | str CFUNC:CARG4, [BASE, #-16] - | b ->fff_res - | - |.ffunc_2 ipairs_aux - | checktab CARG1, ->fff_fallback - | checkint CARG2, ->fff_fallback - | ldr TMP1w, TAB:CARG1->asize - | ldr CARG3, TAB:CARG1->array - | ldr TMP0w, TAB:CARG1->hmask - | add CARG2w, CARG2w, #1 - | cmp CARG2w, TMP1w - | ldr PC, [BASE, FRAME_PC] - | add TMP2, CARG2, TISNUM - | mov RC, #(0+1)*8 - | str TMP2, [BASE, #-16] - | bhs >2 // Not in array part? - | ldr TMP0, [CARG3, CARG2, lsl #3] - |1: - | mov TMP1, #(2+1)*8 - | cmp TMP0, TISNIL - | str TMP0, [BASE, #-8] - | csel RC, RC, TMP1, eq - | b ->fff_res - |2: // Check for empty hash part first. Otherwise call C function. - | cbz TMP0w, ->fff_res - | bl extern lj_tab_getinth // (GCtab *t, int32_t key) - | // Returns cTValue * or NULL. - | cbz CRET1, ->fff_res - | ldr TMP0, [CRET1] - | b <1 - | - |.ffunc_1 ipairs - | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback -#if LJ_52 - | ldr TAB:CARG2, TAB:TMP1->metatable -#endif - | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0] - | ldr PC, [BASE, FRAME_PC] -#if LJ_52 - | cbnz TAB:CARG2, ->fff_fallback -#endif - | mov RC, #(3+1)*8 - | stp CARG1, TISNUM, [BASE, #-8] - | str CFUNC:CARG4, [BASE, #-16] - | b ->fff_res - | - |//-- Base library: catch errors ---------------------------------------- - | - |.ffunc pcall - | ldrb TMP0w, GL->hookmask - | subs NARGS8:RC, NARGS8:RC, #8 - | blo ->fff_fallback - | mov RB, BASE - | add BASE, BASE, #16 - | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1 - | add PC, TMP0, #16+FRAME_PCALL - | beq ->vm_call_dispatch - |1: - | add TMP2, BASE, NARGS8:RC - |2: - | ldr TMP0, [TMP2, #-16] - | str TMP0, [TMP2, #-8]! - | cmp TMP2, BASE - | bne <2 - | b ->vm_call_dispatch - | - |.ffunc xpcall - | ldp CARG1, CARG2, [BASE] - | ldrb TMP0w, GL->hookmask - | subs NARGS8:RC, NARGS8:RC, #16 - | blo ->fff_fallback - | mov RB, BASE - | add BASE, BASE, #24 - | asr ITYPE, CARG2, #47 - | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1 - | cmn ITYPE, #-LJ_TFUNC - | add PC, TMP0, #24+FRAME_PCALL - | bne ->fff_fallback // Traceback must be a function. - | stp CARG2, CARG1, [RB] // Swap function and traceback. - | cbz NARGS8:RC, ->vm_call_dispatch - | b <1 - | - |//-- Coroutine library -------------------------------------------------- - | - |.macro coroutine_resume_wrap, resume - |.if resume - |.ffunc_1 coroutine_resume - | checktp CARG1, LJ_TTHREAD, ->fff_fallback - |.else - |.ffunc coroutine_wrap_aux - | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr - | and L:CARG1, CARG1, #LJ_GCVMASK - |.endif - | ldr PC, [BASE, FRAME_PC] - | str BASE, L->base - | ldp RB, CARG2, L:CARG1->base - | ldrb TMP1w, L:CARG1->status - | add TMP0, CARG2, TMP1 - | str PC, SAVE_PC - | cmp TMP0, RB - | beq ->fff_fallback - | cmp TMP1, #LUA_YIELD - | add TMP0, CARG2, #8 - | csel CARG2, CARG2, TMP0, hs - | ldr CARG4, L:CARG1->maxstack - | add CARG3, CARG2, NARGS8:RC - | ldr RB, L:CARG1->cframe - | ccmp CARG3, CARG4, #2, ls - | ccmp RB, #0, #2, ls - | bhi ->fff_fallback - |.if resume - | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC. 
- | add BASE, BASE, #8 - | sub NARGS8:RC, NARGS8:RC, #8 - |.endif - | str CARG3, L:CARG1->top - | str BASE, L->top - | cbz NARGS8:RC, >3 - |2: // Move args to coroutine. - | ldr TMP0, [BASE, RB] - | cmp RB, NARGS8:RC - | str TMP0, [CARG2, RB] - | add RB, RB, #8 - | bne <2 - |3: - | mov CARG3, #0 - | mov L:RA, L:CARG1 - | mov CARG4, #0 - | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) - | // Returns thread status. - |4: - | ldp CARG3, CARG4, L:RA->base - | cmp CRET1, #LUA_YIELD - | ldr BASE, L->base - | str L, GL->cur_L - | st_vmstate ST_INTERP - | bhi >8 - | sub RC, CARG4, CARG3 - | ldr CARG1, L->maxstack - | add CARG2, BASE, RC - | cbz RC, >6 // No results? - | cmp CARG2, CARG1 - | mov RB, #0 - | bhi >9 // Need to grow stack? - | - | sub CARG4, RC, #8 - | str CARG3, L:RA->top // Clear coroutine stack. - |5: // Move results from coroutine. - | ldr TMP0, [CARG3, RB] - | cmp RB, CARG4 - | str TMP0, [BASE, RB] - | add RB, RB, #8 - | bne <5 - |6: - |.if resume - | mov_true TMP1 - | add RC, RC, #16 - |7: - | str TMP1, [BASE, #-8] // Prepend true/false to results. - | sub RA, BASE, #8 - |.else - | mov RA, BASE - | add RC, RC, #8 - |.endif - | ands CARG1, PC, #FRAME_TYPE - | str PC, SAVE_PC - | str RCw, SAVE_MULTRES - | beq ->BC_RET_Z - | b ->vm_return - | - |8: // Coroutine returned with error (at co->top-1). - |.if resume - | ldr TMP0, [CARG4, #-8]! - | mov_false TMP1 - | mov RC, #(2+1)*8 - | str CARG4, L:RA->top // Remove error from coroutine stack. - | str TMP0, [BASE] // Copy error message. - | b <7 - |.else - | mov CARG1, L - | mov CARG2, L:RA - | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) - | // Never returns. - |.endif - | - |9: // Handle stack expansion on return from yield. - | mov CARG1, L - | lsr CARG2, RC, #3 - | bl extern lj_state_growstack // (lua_State *L, int n) - | mov CRET1, #0 - | b <4 - |.endmacro - | - | coroutine_resume_wrap 1 // coroutine.resume - | coroutine_resume_wrap 0 // coroutine.wrap - | - |.ffunc coroutine_yield - | ldr TMP0, L->cframe - | add TMP1, BASE, NARGS8:RC - | mov CRET1, #LUA_YIELD - | stp BASE, TMP1, L->base - | tbz TMP0, #0, ->fff_fallback - | str xzr, L->cframe - | strb CRET1w, L->status - | b ->vm_leave_unw - | - |//-- Math library ------------------------------------------------------- - | - |.macro math_round, func, round - | .ffunc math_ .. func - | ldr CARG1, [BASE] - | cmp NARGS8:RC, #8 - | ldr d0, [BASE] - | blo ->fff_fallback - | cmp TISNUMhi, CARG1, lsr #32 - | beq ->fff_restv - | blo ->fff_fallback - | round d0, d0 - | b ->fff_resn - |.endmacro - | - | math_round floor, frintm - | math_round ceil, frintp - | - |.ffunc_1 math_abs - | checknumber CARG1, ->fff_fallback - | and CARG1, CARG1, #U64x(7fffffff,ffffffff) - | bne ->fff_restv - | eor CARG2w, CARG1w, CARG1w, asr #31 - | movz CARG3, #0x41e0, lsl #48 // 2^31. - | subs CARG1w, CARG2w, CARG1w, asr #31 - | add CARG1, CARG1, TISNUM - | csel CARG1, CARG1, CARG3, pl - | // Fallthrough. - | - |->fff_restv: - | // CARG1 = TValue result. - | ldr PC, [BASE, FRAME_PC] - | str CARG1, [BASE, #-16] - |->fff_res1: - | // PC = return. - | mov RC, #(1+1)*8 - |->fff_res: - | // RC = (nresults+1)*8, PC = return. - | ands CARG1, PC, #FRAME_TYPE - | str RCw, SAVE_MULTRES - | sub RA, BASE, #16 - | bne ->vm_return - | ldr INSw, [PC, #-4] - | decode_RB RB, INS - |5: - | cmp RC, RB, lsl #3 // More results expected? - | blo >6 - | decode_RA TMP1, INS - | // Adjust BASE. KBASE is assumed to be set for the calling frame. 
- | sub BASE, RA, TMP1, lsl #3 - | ins_next - | - |6: // Fill up results with nil. - | add TMP1, RA, RC - | add RC, RC, #8 - | str TISNIL, [TMP1, #-8] - | b <5 - | - |.macro math_extern, func - | .ffunc_n math_ .. func - | bl extern func - | b ->fff_resn - |.endmacro - | - |.macro math_extern2, func - | .ffunc_nn math_ .. func - | bl extern func - | b ->fff_resn - |.endmacro - | - |.ffunc_n math_sqrt - | fsqrt d0, d0 - |->fff_resn: - | ldr PC, [BASE, FRAME_PC] - | str d0, [BASE, #-16] - | b ->fff_res1 - | - |.ffunc math_log - | ldr CARG1, [BASE] - | cmp NARGS8:RC, #8 - | ldr FARG1, [BASE] - | bne ->fff_fallback // Need exactly 1 argument. - | checknum CARG1, ->fff_fallback - | bl extern log - | b ->fff_resn - | - | math_extern log10 - | math_extern exp - | math_extern sin - | math_extern cos - | math_extern tan - | math_extern asin - | math_extern acos - | math_extern atan - | math_extern sinh - | math_extern cosh - | math_extern tanh - | math_extern2 pow - | math_extern2 atan2 - | math_extern2 fmod - | - |.ffunc_2 math_ldexp - | ldr FARG1, [BASE] - | checknum CARG1, ->fff_fallback - | checkint CARG2, ->fff_fallback - | sxtw CARG1, CARG2w - | bl extern ldexp // (double x, int exp) - | b ->fff_resn - | - |.ffunc_n math_frexp - | add CARG1, sp, TMPDofs - | bl extern frexp - | ldr CARG2w, TMPD - | ldr PC, [BASE, FRAME_PC] - | str d0, [BASE, #-16] - | mov RC, #(2+1)*8 - | add CARG2, CARG2, TISNUM - | str CARG2, [BASE, #-8] - | b ->fff_res - | - |.ffunc_n math_modf - | sub CARG1, BASE, #16 - | ldr PC, [BASE, FRAME_PC] - | bl extern modf - | mov RC, #(2+1)*8 - | str d0, [BASE, #-8] - | b ->fff_res - | - |.macro math_minmax, name, cond, fcond - | .ffunc_1 name - | add RB, BASE, RC - | add RA, BASE, #8 - | checkint CARG1, >4 - |1: // Handle integers. - | ldr CARG2, [RA] - | cmp RA, RB - | bhs ->fff_restv - | checkint CARG2, >3 - | cmp CARG1w, CARG2w - | add RA, RA, #8 - | csel CARG1, CARG2, CARG1, cond - | b <1 - |3: // Convert intermediate result to number and continue below. - | scvtf d0, CARG1w - | blo ->fff_fallback - | ldr d1, [RA] - | b >6 - | - |4: - | ldr d0, [BASE] - | blo ->fff_fallback - |5: // Handle numbers. - | ldr CARG2, [RA] - | ldr d1, [RA] - | cmp RA, RB - | bhs ->fff_resn - | checknum CARG2, >7 - |6: - | fcmp d0, d1 - | add RA, RA, #8 - | fcsel d0, d1, d0, fcond - | b <5 - |7: // Convert integer to number and continue above. - | scvtf d1, CARG2w - | blo ->fff_fallback - | b <6 - |.endmacro - | - | math_minmax math_min, gt, hi - | math_minmax math_max, lt, lo - | - |//-- String library ----------------------------------------------------- - | - |.ffunc string_byte // Only handle the 1-arg case here. - | ldp PC, CARG1, [BASE, FRAME_PC] - | cmp NARGS8:RC, #8 - | asr ITYPE, CARG1, #47 - | ccmn ITYPE, #-LJ_TSTR, #0, eq - | and STR:CARG1, CARG1, #LJ_GCVMASK - | bne ->fff_fallback - | ldrb TMP0w, STR:CARG1[1] // Access is always ok (NUL at end). - | ldr CARG3w, STR:CARG1->len - | add TMP0, TMP0, TISNUM - | str TMP0, [BASE, #-16] - | mov RC, #(0+1)*8 - | cbz CARG3, ->fff_res - | b ->fff_res1 - | - |.ffunc string_char // Only handle the 1-arg case here. - | ffgccheck - | ldp PC, CARG1, [BASE, FRAME_PC] - | cmp CARG1w, #255 - | ccmp NARGS8:RC, #8, #0, ls // Need exactly 1 argument. - | bne ->fff_fallback - | checkint CARG1, ->fff_fallback - | mov CARG3, #1 - | mov CARG2, BASE // Points to stack. Little-endian. - |->fff_newstr: - | // CARG2 = str, CARG3 = len. 
- | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_str_new // (lua_State *L, char *str, size_t l) - |->fff_resstr: - | // Returns GCstr *. - | ldr BASE, L->base - | movn TMP1, #~LJ_TSTR - | add CARG1, CARG1, TMP1, lsl #47 - | b ->fff_restv - | - |.ffunc string_sub - | ffgccheck - | ldr CARG1, [BASE] - | ldr CARG3, [BASE, #16] - | cmp NARGS8:RC, #16 - | movn RB, #0 - | beq >1 - | blo ->fff_fallback - | checkint CARG3, ->fff_fallback - | sxtw RB, CARG3w - |1: - | ldr CARG2, [BASE, #8] - | checkstr CARG1, ->fff_fallback - | ldr TMP1w, STR:CARG1->len - | checkint CARG2, ->fff_fallback - | sxtw CARG2, CARG2w - | // CARG1 = str, TMP1 = str->len, CARG2 = start, RB = end - | add TMP2, RB, TMP1 - | cmp RB, #0 - | add TMP0, CARG2, TMP1 - | csinc RB, RB, TMP2, ge // if (end < 0) end += len+1 - | cmp CARG2, #0 - | csinc CARG2, CARG2, TMP0, ge // if (start < 0) start += len+1 - | cmp RB, #0 - | csel RB, RB, xzr, ge // if (end < 0) end = 0 - | cmp CARG2, #1 - | csinc CARG2, CARG2, xzr, ge // if (start < 1) start = 1 - | cmp RB, TMP1 - | csel RB, RB, TMP1, le // if (end > len) end = len - | add CARG1, STR:CARG1, #sizeof(GCstr)-1 - | subs CARG3, RB, CARG2 // len = end - start - | add CARG2, CARG1, CARG2 - | add CARG3, CARG3, #1 // len += 1 - | bge ->fff_newstr - | add STR:CARG1, GL, #offsetof(global_State, strempty) - | movn TMP1, #~LJ_TSTR - | add CARG1, CARG1, TMP1, lsl #47 - | b ->fff_restv - | - |.macro ffstring_op, name - | .ffunc string_ .. name - | ffgccheck - | ldr CARG2, [BASE] - | cmp NARGS8:RC, #8 - | asr ITYPE, CARG2, #47 - | ccmn ITYPE, #-LJ_TSTR, #0, hs - | and STR:CARG2, CARG2, #LJ_GCVMASK - | bne ->fff_fallback - | ldr TMP0, GL->tmpbuf.b - | add SBUF:CARG1, GL, #offsetof(global_State, tmpbuf) - | str BASE, L->base - | str PC, SAVE_PC - | str L, GL->tmpbuf.L - | str TMP0, GL->tmpbuf.p - | bl extern lj_buf_putstr_ .. name - | bl extern lj_buf_tostr - | b ->fff_resstr - |.endmacro - | - |ffstring_op reverse - |ffstring_op lower - |ffstring_op upper - | - |//-- Bit library -------------------------------------------------------- - | - |// FP number to bit conversion for soft-float. Clobbers CARG1-CARG3 - |->vm_tobit_fb: - | bls ->fff_fallback - | add CARG2, CARG1, CARG1 - | mov CARG3, #1076 - | sub CARG3, CARG3, CARG2, lsr #53 - | cmp CARG3, #53 - | bhi >1 - | and CARG2, CARG2, #U64x(001fffff,ffffffff) - | orr CARG2, CARG2, #U64x(00200000,00000000) - | cmp CARG1, #0 - | lsr CARG2, CARG2, CARG3 - | cneg CARG1w, CARG2w, mi - | br lr - |1: - | mov CARG1w, #0 - | br lr - | - |.macro .ffunc_bit, name - | .ffunc_1 bit_..name - | adr lr, >1 - | checkint CARG1, ->vm_tobit_fb - |1: - |.endmacro - | - |.macro .ffunc_bit_op, name, ins - | .ffunc_bit name - | mov RA, #8 - | mov TMP0w, CARG1w - | adr lr, >2 - |1: - | ldr CARG1, [BASE, RA] - | cmp RA, NARGS8:RC - | add RA, RA, #8 - | bge >9 - | checkint CARG1, ->vm_tobit_fb - |2: - | ins TMP0w, TMP0w, CARG1w - | b <1 - |.endmacro - | - |.ffunc_bit_op band, and - |.ffunc_bit_op bor, orr - |.ffunc_bit_op bxor, eor - | - |.ffunc_bit tobit - | mov TMP0w, CARG1w - |9: // Label reused by .ffunc_bit_op users. 
- | add CARG1, TMP0, TISNUM - | b ->fff_restv - | - |.ffunc_bit bswap - | rev TMP0w, CARG1w - | add CARG1, TMP0, TISNUM - | b ->fff_restv - | - |.ffunc_bit bnot - | mvn TMP0w, CARG1w - | add CARG1, TMP0, TISNUM - | b ->fff_restv - | - |.macro .ffunc_bit_sh, name, ins, shmod - | .ffunc bit_..name - | ldp TMP0, CARG1, [BASE] - | cmp NARGS8:RC, #16 - | blo ->fff_fallback - | adr lr, >1 - | checkint CARG1, ->vm_tobit_fb - |1: - |.if shmod == 0 - | mov TMP1, CARG1 - |.else - | neg TMP1, CARG1 - |.endif - | mov CARG1, TMP0 - | adr lr, >2 - | checkint CARG1, ->vm_tobit_fb - |2: - | ins TMP0w, CARG1w, TMP1w - | add CARG1, TMP0, TISNUM - | b ->fff_restv - |.endmacro - | - |.ffunc_bit_sh lshift, lsl, 0 - |.ffunc_bit_sh rshift, lsr, 0 - |.ffunc_bit_sh arshift, asr, 0 - |.ffunc_bit_sh rol, ror, 1 - |.ffunc_bit_sh ror, ror, 0 - | - |//----------------------------------------------------------------------- - | - |->fff_fallback: // Call fast function fallback handler. - | // BASE = new base, RC = nargs*8 - | ldp CFUNC:CARG3, PC, [BASE, FRAME_FUNC] // Fallback may overwrite PC. - | ldr TMP2, L->maxstack - | add TMP1, BASE, NARGS8:RC - | stp BASE, TMP1, L->base - | and CFUNC:CARG3, CARG3, #LJ_GCVMASK - | add TMP1, TMP1, #8*LUA_MINSTACK - | ldr CARG3, CFUNC:CARG3->f - | str PC, SAVE_PC // Redundant (but a defined value). - | cmp TMP1, TMP2 - | mov CARG1, L - | bhi >5 // Need to grow stack. - | blr CARG3 // (lua_State *L) - | // Either throws an error, or recovers and returns -1, 0 or nresults+1. - | ldr BASE, L->base - | cmp CRET1w, #0 - | lsl RC, CRET1, #3 - | sub RA, BASE, #16 - | bgt ->fff_res // Returned nresults+1? - |1: // Returned 0 or -1: retry fast path. - | ldr CARG1, L->top - | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] - | sub NARGS8:RC, CARG1, BASE - | bne ->vm_call_tail // Returned -1? - | and CFUNC:CARG3, CARG3, #LJ_GCVMASK - | ins_callt // Returned 0: retry fast path. - | - |// Reconstruct previous base for vmeta_call during tailcall. - |->vm_call_tail: - | ands TMP0, PC, #FRAME_TYPE - | and TMP1, PC, #~FRAME_TYPEP - | bne >3 - | ldrb RAw, [PC, #-3] - | lsl RA, RA, #3 - | add TMP1, RA, #16 - |3: - | sub RB, BASE, TMP1 - | b ->vm_call_dispatch // Resolve again for tailcall. - | - |5: // Grow stack for fallback handler. - | mov CARG2, #LUA_MINSTACK - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldr BASE, L->base - | cmp CARG1, CARG1 // Set zero-flag to force retry. - | b <1 - | - |->fff_gcstep: // Call GC step function. - | // BASE = new base, RC = nargs*8 - | add CARG2, BASE, NARGS8:RC // Calculate L->top. - | mov RA, lr - | stp BASE, CARG2, L->base - | str PC, SAVE_PC // Redundant (but a defined value). - | mov CARG1, L - | bl extern lj_gc_step // (lua_State *L) - | ldp BASE, CARG2, L->base - | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] - | mov lr, RA // Help return address predictor. - | sub NARGS8:RC, CARG2, BASE // Calculate nargs*8. - | and CFUNC:CARG3, CARG3, #LJ_GCVMASK - | ret - | - |//----------------------------------------------------------------------- - |//-- Special dispatch targets ------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_record: // Dispatch target for recording phase. - | NYI - | - |->vm_rethook: // Dispatch target for return hooks. - | ldrb TMP2w, GL->hookmask - | tbz TMP2w, #HOOK_ACTIVE_SHIFT, >1 // Hook already active? - |5: // Re-dispatch to static ins. - | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC] - | br TMP0 - | - |->vm_inshook: // Dispatch target for instr/line hooks. 
- | ldrb TMP2w, GL->hookmask - | ldr TMP3w, GL->hookcount - | tbnz TMP2w, #HOOK_ACTIVE_SHIFT, <5 // Hook already active? - | tst TMP2w, #LUA_MASKLINE|LUA_MASKCOUNT - | beq <5 - | sub TMP3w, TMP3w, #1 - | str TMP3w, GL->hookcount - | cbz TMP3w, >1 - | tbz TMP2w, #LUA_HOOKLINE, <5 - |1: - | mov CARG1, L - | str BASE, L->base - | mov CARG2, PC - | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. - | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) - |3: - | ldr BASE, L->base - |4: // Re-dispatch to static ins. - | ldr INSw, [PC, #-4] - | add TMP1, GL, INS, uxtb #3 - | decode_RA RA, INS - | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC] - | decode_RD RC, INS - | br TMP0 - | - |->cont_hook: // Continue from hook yield. - | ldr CARG1, [CARG4, #-40] - | add PC, PC, #4 - | str CARG1w, SAVE_MULTRES // Restore MULTRES for *M ins. - | b <4 - | - |->vm_hotloop: // Hot loop counter underflow. - | NYI - | - |->vm_callhook: // Dispatch target for call hooks. - | mov CARG2, PC - |.if JIT - | b >1 - |.endif - | - |->vm_hotcall: // Hot call counter underflow. - |.if JIT - | orr CARG2, PC, #1 - |1: - |.endif - | add TMP1, BASE, NARGS8:RC - | str PC, SAVE_PC - | mov CARG1, L - | sub RA, RA, BASE - | stp BASE, TMP1, L->base - | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) - | // Returns ASMFunction. - | ldp BASE, TMP1, L->base - | str xzr, SAVE_PC // Invalidate for subsequent line hook. - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | add RA, BASE, RA - | sub NARGS8:RC, TMP1, BASE - | ldr INSw, [PC, #-4] - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | br CRET1 - | - |->cont_stitch: // Trace stitching. - | NYI - | - |->vm_profhook: // Dispatch target for profiler hook. -#if LJ_HASPROFILE - | mov CARG1, L - | str BASE, L->base - | mov CARG2, PC - | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) - | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. 
- | ldr BASE, L->base - | sub PC, PC, #4 - | b ->cont_nop -#endif - | - |//----------------------------------------------------------------------- - |//-- Trace exit handler ------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_exit_handler: - | NYI - |->vm_exit_interp: - | NYI - | - |//----------------------------------------------------------------------- - |//-- Math helper functions ---------------------------------------------- - |//----------------------------------------------------------------------- - | - | // int lj_vm_modi(int dividend, int divisor); - |->vm_modi: - | eor CARG4w, CARG1w, CARG2w - | cmp CARG4w, #0 - | eor CARG3w, CARG1w, CARG1w, asr #31 - | eor CARG4w, CARG2w, CARG2w, asr #31 - | sub CARG3w, CARG3w, CARG1w, asr #31 - | sub CARG4w, CARG4w, CARG2w, asr #31 - | udiv CARG1w, CARG3w, CARG4w - | msub CARG1w, CARG1w, CARG4w, CARG3w - | ccmp CARG1w, #0, #4, mi - | sub CARG3w, CARG1w, CARG4w - | csel CARG1w, CARG1w, CARG3w, eq - | eor CARG3w, CARG1w, CARG2w - | cmp CARG3w, #0 - | cneg CARG1w, CARG1w, mi - | ret - | - |//----------------------------------------------------------------------- - |//-- Miscellaneous functions -------------------------------------------- - |//----------------------------------------------------------------------- - | - |//----------------------------------------------------------------------- - |//-- FFI helper functions ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Handler for callback functions. - |// Saveregs already performed. Callback slot number in [sp], g in r12. - |->vm_ffi_callback: - |.if FFI - |.type CTSTATE, CTState, PC - | saveregs - | ldr CTSTATE, GL:x10->ctype_state - | mov GL, x10 - | add x10, sp, # CFRAME_SPACE - | str w9, CTSTATE->cb.slot - | stp x0, x1, CTSTATE->cb.gpr[0] - | stp d0, d1, CTSTATE->cb.fpr[0] - | stp x2, x3, CTSTATE->cb.gpr[2] - | stp d2, d3, CTSTATE->cb.fpr[2] - | stp x4, x5, CTSTATE->cb.gpr[4] - | stp d4, d5, CTSTATE->cb.fpr[4] - | stp x6, x7, CTSTATE->cb.gpr[6] - | stp d6, d7, CTSTATE->cb.fpr[6] - | str x10, CTSTATE->cb.stack - | mov CARG1, CTSTATE - | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok. - | mov CARG2, sp - | bl extern lj_ccallback_enter // (CTState *cts, void *cf) - | // Returns lua_State *. - | ldp BASE, RC, L:CRET1->base - | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 - | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 - | movn TISNIL, #0 - | mov L, CRET1 - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | sub RC, RC, BASE - | st_vmstate ST_INTERP - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | ins_callt - |.endif - | - |->cont_ffi_callback: // Return from FFI callback. - |.if FFI - | ldr CTSTATE, GL->ctype_state - | stp BASE, CARG4, L->base - | str L, CTSTATE->L - | mov CARG1, CTSTATE - | mov CARG2, RA - | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) - | ldp x0, x1, CTSTATE->cb.gpr[0] - | ldp d0, d1, CTSTATE->cb.fpr[0] - | b ->vm_leave_unw - |.endif - | - |->vm_ffi_call: // Call C function via FFI. - | // Caveat: needs special frame unwinding, see below. - |.if FFI - | .type CCSTATE, CCallState, x19 - | stp fp, lr, [sp, #-32]! 
- | add fp, sp, #0 - | str CCSTATE, [sp, #16] - | mov CCSTATE, x0 - | ldr TMP0w, CCSTATE:x0->spadj - | ldrb TMP1w, CCSTATE->nsp - | add TMP2, CCSTATE, #offsetof(CCallState, stack) - | subs TMP1, TMP1, #1 - | ldr TMP3, CCSTATE->func - | sub sp, fp, TMP0 - | bmi >2 - |1: // Copy stack slots - | ldr TMP0, [TMP2, TMP1, lsl #3] - | str TMP0, [sp, TMP1, lsl #3] - | subs TMP1, TMP1, #1 - | bpl <1 - |2: - | ldp x0, x1, CCSTATE->gpr[0] - | ldp d0, d1, CCSTATE->fpr[0] - | ldp x2, x3, CCSTATE->gpr[2] - | ldp d2, d3, CCSTATE->fpr[2] - | ldp x4, x5, CCSTATE->gpr[4] - | ldp d4, d5, CCSTATE->fpr[4] - | ldp x6, x7, CCSTATE->gpr[6] - | ldp d6, d7, CCSTATE->fpr[6] - | ldr x8, CCSTATE->retp - | blr TMP3 - | mov sp, fp - | stp x0, x1, CCSTATE->gpr[0] - | stp d0, d1, CCSTATE->fpr[0] - | stp d2, d3, CCSTATE->fpr[2] - | ldr CCSTATE, [sp, #16] - | ldp fp, lr, [sp], #32 - | ret - |.endif - |// Note: vm_ffi_call must be the last function in this object file! - | - |//----------------------------------------------------------------------- -} - -/* Generate the code for a single instruction. */ -static void build_ins(BuildCtx *ctx, BCOp op, int defop) -{ - int vk = 0; - |=>defop: - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - /* Remember: all ops branch for a true comparison, fall through otherwise. */ - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: - | // RA = src1, RC = src2, JMP with RC = target - | ldr CARG1, [BASE, RA, lsl #3] - | ldrh RBw, [PC, #2] - | ldr CARG2, [BASE, RC, lsl #3] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | sub RB, RB, #0x20000 - | checkint CARG1, >3 - | checkint CARG2, >4 - | cmp CARG1w, CARG2w - if (op == BC_ISLT) { - | csel PC, RB, PC, lt - } else if (op == BC_ISGE) { - | csel PC, RB, PC, ge - } else if (op == BC_ISLE) { - | csel PC, RB, PC, le - } else { - | csel PC, RB, PC, gt - } - |1: - | ins_next - | - |3: // RA not int. - | ldr FARG1, [BASE, RA, lsl #3] - | blo ->vmeta_comp - | ldr FARG2, [BASE, RC, lsl #3] - | cmp TISNUMhi, CARG2, lsr #32 - | bhi >5 - | bne ->vmeta_comp - | // RA number, RC int. - | scvtf FARG2, CARG2w - | b >5 - | - |4: // RA int, RC not int - | ldr FARG2, [BASE, RC, lsl #3] - | blo ->vmeta_comp - | // RA int, RC number. - | scvtf FARG1, CARG1w - | - |5: // RA number, RC number - | fcmp FARG1, FARG2 - | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. - if (op == BC_ISLT) { - | csel PC, RB, PC, lo - } else if (op == BC_ISGE) { - | csel PC, RB, PC, hs - } else if (op == BC_ISLE) { - | csel PC, RB, PC, ls - } else { - | csel PC, RB, PC, hi - } - | b <1 - break; - - case BC_ISEQV: case BC_ISNEV: - vk = op == BC_ISEQV; - | // RA = src1, RC = src2, JMP with RC = target - | ldr CARG1, [BASE, RA, lsl #3] - | add RC, BASE, RC, lsl #3 - | ldrh RBw, [PC, #2] - | ldr CARG3, [RC] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | sub RB, RB, #0x20000 - | asr ITYPE, CARG3, #47 - | cmn ITYPE, #-LJ_TISNUM - if (vk) { - | bls ->BC_ISEQN_Z - } else { - | bls ->BC_ISNEN_Z - } - | // RC is not a number. - | asr TMP0, CARG1, #47 - |.if FFI - | // Check if RC or RA is a cdata. - | cmn ITYPE, #-LJ_TCDATA - | ccmn TMP0, #-LJ_TCDATA, #4, ne - | beq ->vmeta_equal_cd - |.endif - | cmp CARG1, CARG3 - | bne >2 - | // Tag and value are equal. - if (vk) { - |->BC_ISEQV_Z: - | mov PC, RB // Perform branch. - } - |1: - | ins_next - | - |2: // Check if the tags are the same and it's a table or userdata. 
- | cmp ITYPE, TMP0 - | ccmn ITYPE, #-LJ_TISTABUD, #2, eq - if (vk) { - | bhi <1 - } else { - | bhi ->BC_ISEQV_Z // Reuse code from opposite instruction. - } - | // Different tables or userdatas. Need to check __eq metamethod. - | // Field metatable must be at same offset for GCtab and GCudata! - | and TAB:CARG2, CARG1, #LJ_GCVMASK - | ldr TAB:TMP2, TAB:CARG2->metatable - if (vk) { - | cbz TAB:TMP2, <1 // No metatable? - | ldrb TMP1w, TAB:TMP2->nomm - | mov CARG4, #0 // ne = 0 - | tbnz TMP1w, #MM_eq, <1 // 'no __eq' flag set: done. - } else { - | cbz TAB:TMP2, ->BC_ISEQV_Z // No metatable? - | ldrb TMP1w, TAB:TMP2->nomm - | mov CARG4, #1 // ne = 1. - | tbnz TMP1w, #MM_eq, ->BC_ISEQV_Z // 'no __eq' flag set: done. - } - | b ->vmeta_equal - break; - - case BC_ISEQS: case BC_ISNES: - vk = op == BC_ISEQS; - | // RA = src, RC = str_const (~), JMP with RC = target - | ldr CARG1, [BASE, RA, lsl #3] - | mvn RC, RC - | ldrh RBw, [PC, #2] - | ldr CARG2, [KBASE, RC, lsl #3] - | add PC, PC, #4 - | movn TMP0, #~LJ_TSTR - |.if FFI - | asr ITYPE, CARG1, #47 - |.endif - | add RB, PC, RB, lsl #2 - | add CARG2, CARG2, TMP0, lsl #47 - | sub RB, RB, #0x20000 - |.if FFI - | cmn ITYPE, #-LJ_TCDATA - | beq ->vmeta_equal_cd - |.endif - | cmp CARG1, CARG2 - if (vk) { - | csel PC, RB, PC, eq - } else { - | csel PC, RB, PC, ne - } - | ins_next - break; - - case BC_ISEQN: case BC_ISNEN: - vk = op == BC_ISEQN; - | // RA = src, RC = num_const (~), JMP with RC = target - | ldr CARG1, [BASE, RA, lsl #3] - | add RC, KBASE, RC, lsl #3 - | ldrh RBw, [PC, #2] - | ldr CARG3, [RC] - | add PC, PC, #4 - | add RB, PC, RB, lsl #2 - | sub RB, RB, #0x20000 - if (vk) { - |->BC_ISEQN_Z: - } else { - |->BC_ISNEN_Z: - } - | checkint CARG1, >4 - | checkint CARG3, >6 - | cmp CARG1w, CARG3w - |1: - if (vk) { - | csel PC, RB, PC, eq - |2: - } else { - |2: - | csel PC, RB, PC, ne - } - |3: - | ins_next - | - |4: // RA not int. - |.if FFI - | blo >7 - |.else - | blo <2 - |.endif - | ldr FARG1, [BASE, RA, lsl #3] - | ldr FARG2, [RC] - | cmp TISNUMhi, CARG3, lsr #32 - | bne >5 - | // RA number, RC int. - | scvtf FARG2, CARG3w - |5: - | // RA number, RC number. 
- | fcmp FARG1, FARG2 - | b <1 - | - |6: // RA int, RC number - | ldr FARG2, [RC] - | scvtf FARG1, CARG1w - | fcmp FARG1, FARG2 - | b <1 - | - |.if FFI - |7: - | asr ITYPE, CARG1, #47 - | cmn ITYPE, #-LJ_TCDATA - | bne <2 - | b ->vmeta_equal_cd - |.endif - break; - - case BC_ISEQP: case BC_ISNEP: - vk = op == BC_ISEQP; - | // RA = src, RC = primitive_type (~), JMP with RC = target - | ldr TMP0, [BASE, RA, lsl #3] - | ldrh RBw, [PC, #2] - | add PC, PC, #4 - | add RC, RC, #1 - | add RB, PC, RB, lsl #2 - |.if FFI - | asr ITYPE, TMP0, #47 - | cmn ITYPE, #-LJ_TCDATA - | beq ->vmeta_equal_cd - | cmn RC, ITYPE - |.else - | cmn RC, TMP0, asr #47 - |.endif - | sub RB, RB, #0x20000 - if (vk) { - | csel PC, RB, PC, eq - } else { - | csel PC, RB, PC, ne - } - | ins_next - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: - | // RA = dst or unused, RC = src, JMP with RC = target - | ldrh RBw, [PC, #2] - | ldr TMP0, [BASE, RC, lsl #3] - | add PC, PC, #4 - | mov_false TMP1 - | add RB, PC, RB, lsl #2 - | cmp TMP0, TMP1 - | sub RB, RB, #0x20000 - if (op == BC_ISTC || op == BC_IST) { - if (op == BC_ISTC) { - | csel RA, RA, RC, lo - } - | csel PC, RB, PC, lo - } else { - if (op == BC_ISFC) { - | csel RA, RA, RC, hs - } - | csel PC, RB, PC, hs - } - if (op == BC_ISTC || op == BC_ISFC) { - | str TMP0, [BASE, RA, lsl #3] - } - | ins_next - break; - - case BC_ISTYPE: - | // RA = src, RC = -type - | ldr TMP0, [BASE, RA, lsl #3] - | cmn RC, TMP0, asr #47 - | bne ->vmeta_istype - | ins_next - break; - case BC_ISNUM: - | // RA = src, RC = -(TISNUM-1) - | ldr TMP0, [BASE, RA] - | checknum TMP0, ->vmeta_istype - | ins_next - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_MOV: - | // RA = dst, RC = src - | ldr TMP0, [BASE, RC, lsl #3] - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_NOT: - | // RA = dst, RC = src - | ldr TMP0, [BASE, RC, lsl #3] - | mov_false TMP1 - | mov_true TMP2 - | cmp TMP0, TMP1 - | csel TMP0, TMP1, TMP2, lo - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_UNM: - | // RA = dst, RC = src - | ldr TMP0, [BASE, RC, lsl #3] - | asr ITYPE, TMP0, #47 - | cmn ITYPE, #-LJ_TISNUM - | bhi ->vmeta_unm - | eor TMP0, TMP0, #U64x(80000000,00000000) - | bne >5 - | negs TMP0w, TMP0w - | movz CARG3, #0x41e0, lsl #48 // 2^31. - | add TMP0, TMP0, TISNUM - | csel TMP0, TMP0, CARG3, vc - |5: - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_LEN: - | // RA = dst, RC = src - | ldr CARG1, [BASE, RC, lsl #3] - | asr ITYPE, CARG1, #47 - | cmn ITYPE, #-LJ_TSTR - | and CARG1, CARG1, #LJ_GCVMASK - | bne >2 - | ldr CARG1w, STR:CARG1->len - |1: - | add CARG1, CARG1, TISNUM - | str CARG1, [BASE, RA, lsl #3] - | ins_next - | - |2: - | cmn ITYPE, #-LJ_TTAB - | bne ->vmeta_len -#if LJ_52 - | ldr TAB:CARG2, TAB:CARG1->metatable - | cbnz TAB:CARG2, >9 - |3: -#endif - |->BC_LEN_Z: - | bl extern lj_tab_len // (GCtab *t) - | // Returns uint32_t (but less than 2^31). - | b <1 - | -#if LJ_52 - |9: - | ldrb TMP1w, TAB:CARG2->nomm - | tbnz TMP1w, #MM_len, <3 // 'no __len' flag set: done. 
- | b ->vmeta_len -#endif - break; - - /* -- Binary ops -------------------------------------------------------- */ - - |.macro ins_arithcheck_int, target - | checkint CARG1, target - | checkint CARG2, target - |.endmacro - | - |.macro ins_arithcheck_num, target - | checknum CARG1, target - | checknum CARG2, target - |.endmacro - | - |.macro ins_arithcheck_nzdiv, target - | cbz CARG2w, target - |.endmacro - | - |.macro ins_arithhead - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - ||if (vk == 1) { - | and RC, RC, #255 - | decode_RB RB, INS - ||} else { - | decode_RB RB, INS - | and RC, RC, #255 - ||} - |.endmacro - | - |.macro ins_arithload, reg1, reg2 - | // RA = dst, RB = src1, RC = src2 | num_const - ||switch (vk) { - ||case 0: - | ldr reg1, [BASE, RB, lsl #3] - | ldr reg2, [KBASE, RC, lsl #3] - || break; - ||case 1: - | ldr reg1, [KBASE, RC, lsl #3] - | ldr reg2, [BASE, RB, lsl #3] - || break; - ||default: - | ldr reg1, [BASE, RB, lsl #3] - | ldr reg2, [BASE, RC, lsl #3] - || break; - ||} - |.endmacro - | - |.macro ins_arithfallback, ins - ||switch (vk) { - ||case 0: - | ins ->vmeta_arith_vn - || break; - ||case 1: - | ins ->vmeta_arith_nv - || break; - ||default: - | ins ->vmeta_arith_vv - || break; - ||} - |.endmacro - | - |.macro ins_arithmod, res, reg1, reg2 - | fdiv d2, reg1, reg2 - | frintm d2, d2 - | fmsub res, d2, reg2, reg1 - |.endmacro - | - |.macro ins_arithdn, intins, fpins - | ins_arithhead - | ins_arithload CARG1, CARG2 - | ins_arithcheck_int >5 - |.if "intins" == "smull" - | smull CARG1, CARG1w, CARG2w - | cmp CARG1, CARG1, sxtw - | mov CARG1w, CARG1w - | ins_arithfallback bne - |.elif "intins" == "ins_arithmodi" - | ins_arithfallback ins_arithcheck_nzdiv - | bl ->vm_modi - |.else - | intins CARG1w, CARG1w, CARG2w - | ins_arithfallback bvs - |.endif - | add CARG1, CARG1, TISNUM - | str CARG1, [BASE, RA, lsl #3] - |4: - | ins_next - | - |5: // FP variant. - | ins_arithload FARG1, FARG2 - | ins_arithfallback ins_arithcheck_num - | fpins FARG1, FARG1, FARG2 - | str FARG1, [BASE, RA, lsl #3] - | b <4 - |.endmacro - | - |.macro ins_arithfp, fpins - | ins_arithhead - | ins_arithload CARG1, CARG2 - | ins_arithload FARG1, FARG2 - | ins_arithfallback ins_arithcheck_num - |.if "fpins" == "fpow" - | bl extern pow - |.else - | fpins FARG1, FARG1, FARG2 - |.endif - | str FARG1, [BASE, RA, lsl #3] - | ins_next - |.endmacro - - case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: - | ins_arithdn adds, fadd - break; - case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: - | ins_arithdn subs, fsub - break; - case BC_MULVN: case BC_MULNV: case BC_MULVV: - | ins_arithdn smull, fmul - break; - case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: - | ins_arithfp fdiv - break; - case BC_MODVN: case BC_MODNV: case BC_MODVV: - | ins_arithdn ins_arithmodi, ins_arithmod - break; - case BC_POW: - | // NYI: (partial) integer arithmetic. - | ins_arithfp fpow - break; - - case BC_CAT: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = dst, RB = src_start, RC = src_end - | str BASE, L->base - | sub CARG3, RC, RB - | add CARG2, BASE, RC, lsl #3 - |->BC_CAT_Z: - | // RA = dst, CARG2 = top-1, CARG3 = left - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) - | // Returns NULL (finished) or TValue * (metamethod). - | ldrb RBw, [PC, #-1] - | ldr BASE, L->base - | cbnz CRET1, ->vmeta_binop - | ldr TMP0, [BASE, RB, lsl #3] - | str TMP0, [BASE, RA, lsl #3] // Copy result to RA. 
- | ins_next - break; - - /* -- Constant ops ------------------------------------------------------ */ - - case BC_KSTR: - | // RA = dst, RC = str_const (~) - | mvn RC, RC - | ldr TMP0, [KBASE, RC, lsl #3] - | movn TMP1, #~LJ_TSTR - | add TMP0, TMP0, TMP1, lsl #47 - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_KCDATA: - |.if FFI - | // RA = dst, RC = cdata_const (~) - | mvn RC, RC - | ldr TMP0, [KBASE, RC, lsl #3] - | movn TMP1, #~LJ_TCDATA - | add TMP0, TMP0, TMP1, lsl #47 - | str TMP0, [BASE, RA, lsl #3] - | ins_next - |.endif - break; - case BC_KSHORT: - | // RA = dst, RC = int16_literal - | sxth RCw, RCw - | add TMP0, RC, TISNUM - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_KNUM: - | // RA = dst, RC = num_const - | ldr TMP0, [KBASE, RC, lsl #3] - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_KPRI: - | // RA = dst, RC = primitive_type (~) - | mvn TMP0, RC, lsl #47 - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_KNIL: - | // RA = base, RC = end - | add RA, BASE, RA, lsl #3 - | add RC, BASE, RC, lsl #3 - | str TISNIL, [RA], #8 - |1: - | cmp RA, RC - | str TISNIL, [RA], #8 - | blt <1 - | ins_next_ - break; - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - | // RA = dst, RC = uvnum - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | add RC, RC, #offsetof(GCfuncL, uvptr)/8 - | and LFUNC:CARG2, CARG2, #LJ_GCVMASK - | ldr UPVAL:CARG2, [LFUNC:CARG2, RC, lsl #3] - | ldr CARG2, UPVAL:CARG2->v - | ldr TMP0, [CARG2] - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - case BC_USETV: - | // RA = uvnum, RC = src - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | add RA, RA, #offsetof(GCfuncL, uvptr)/8 - | and LFUNC:CARG2, CARG2, #LJ_GCVMASK - | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3] - | ldr CARG3, [BASE, RC, lsl #3] - | ldr CARG2, UPVAL:CARG1->v - | ldrb TMP2w, UPVAL:CARG1->marked - | ldrb TMP0w, UPVAL:CARG1->closed - | asr ITYPE, CARG3, #47 - | str CARG3, [CARG2] - | add ITYPE, ITYPE, #-LJ_TISGCV - | tst TMP2w, #LJ_GC_BLACK // isblack(uv) - | ccmp TMP0w, #0, #4, ne // && uv->closed - | ccmn ITYPE, #-(LJ_TNUMX - LJ_TISGCV), #0, ne // && tvisgcv(v) - | bhi >2 - |1: - | ins_next - | - |2: // Check if new value is white. - | and GCOBJ:CARG3, CARG3, #LJ_GCVMASK - | ldrb TMP1w, GCOBJ:CARG3->gch.marked - | tst TMP1w, #LJ_GC_WHITES // iswhite(str) - | beq <1 - | // Crossed a write barrier. Move the barrier forward. - | mov CARG1, GL - | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | b <1 - break; - case BC_USETS: - | // RA = uvnum, RC = str_const (~) - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | add RA, RA, #offsetof(GCfuncL, uvptr)/8 - | mvn RC, RC - | and LFUNC:CARG2, CARG2, #LJ_GCVMASK - | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3] - | ldr STR:CARG3, [KBASE, RC, lsl #3] - | movn TMP0, #~LJ_TSTR - | ldr CARG2, UPVAL:CARG1->v - | ldrb TMP2w, UPVAL:CARG1->marked - | add TMP0, STR:CARG3, TMP0, lsl #47 - | ldrb TMP1w, STR:CARG3->marked - | str TMP0, [CARG2] - | tbnz TMP2w, #2, >2 // isblack(uv) - |1: - | ins_next - | - |2: // Check if string is white and ensure upvalue is closed. - | ldrb TMP0w, UPVAL:CARG1->closed - | tst TMP1w, #LJ_GC_WHITES // iswhite(str) - | ccmp TMP0w, #0, #0, ne - | beq <1 - | // Crossed a write barrier. Move the barrier forward. 
- | mov CARG1, GL - | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | b <1 - break; - case BC_USETN: - | // RA = uvnum, RC = num_const - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | add RA, RA, #offsetof(GCfuncL, uvptr)/8 - | and LFUNC:CARG2, CARG2, #LJ_GCVMASK - | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3] - | ldr TMP0, [KBASE, RC, lsl #3] - | ldr CARG2, UPVAL:CARG2->v - | str TMP0, [CARG2] - | ins_next - break; - case BC_USETP: - | // RA = uvnum, RC = primitive_type (~) - | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] - | add RA, RA, #offsetof(GCfuncL, uvptr)/8 - | and LFUNC:CARG2, CARG2, #LJ_GCVMASK - | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3] - | mvn TMP0, RC, lsl #47 - | ldr CARG2, UPVAL:CARG2->v - | str TMP0, [CARG2] - | ins_next - break; - - case BC_UCLO: - | // RA = level, RC = target - | ldr CARG3, L->openupval - | add RC, PC, RC, lsl #2 - | str BASE, L->base - | sub PC, RC, #0x20000 - | cbz CARG3, >1 - | mov CARG1, L - | add CARG2, BASE, RA, lsl #3 - | bl extern lj_func_closeuv // (lua_State *L, TValue *level) - | ldr BASE, L->base - |1: - | ins_next - break; - - case BC_FNEW: - | // RA = dst, RC = proto_const (~) (holding function prototype) - | mvn RC, RC - | str BASE, L->base - | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] - | str PC, SAVE_PC - | ldr CARG2, [KBASE, RC, lsl #3] - | mov CARG1, L - | and LFUNC:CARG3, CARG3, #LJ_GCVMASK - | // (lua_State *L, GCproto *pt, GCfuncL *parent) - | bl extern lj_func_newL_gc - | // Returns GCfuncL *. - | ldr BASE, L->base - | movn TMP0, #~LJ_TFUNC - | add CRET1, CRET1, TMP0, lsl #47 - | str CRET1, [BASE, RA, lsl #3] - | ins_next - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_TNEW: - case BC_TDUP: - | // RA = dst, RC = (hbits|asize) | tab_const (~) - | ldp CARG3, CARG4, GL->gc.total // Assumes threshold follows total. - | str BASE, L->base - | str PC, SAVE_PC - | mov CARG1, L - | cmp CARG3, CARG4 - | bhs >5 - |1: - if (op == BC_TNEW) { - | and CARG2, RC, #0x7ff - | lsr CARG3, RC, #11 - | cmp CARG2, #0x7ff - | mov TMP0, #0x801 - | csel CARG2, CARG2, TMP0, ne - | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) - | // Returns GCtab *. - } else { - | mvn RC, RC - | ldr CARG2, [KBASE, RC, lsl #3] - | bl extern lj_tab_dup // (lua_State *L, Table *kt) - | // Returns GCtab *. - } - | ldr BASE, L->base - | movk CRET1, #(LJ_TTAB>>1)&0xffff, lsl #48 - | str CRET1, [BASE, RA, lsl #3] - | ins_next - | - |5: - | bl extern lj_gc_step_fixtop // (lua_State *L) - | mov CARG1, L - | b <1 - break; - - case BC_GGET: - | // RA = dst, RC = str_const (~) - case BC_GSET: - | // RA = dst, RC = str_const (~) - | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] - | mvn RC, RC - | and LFUNC:CARG1, CARG1, #LJ_GCVMASK - | ldr TAB:CARG2, LFUNC:CARG1->env - | ldr STR:RC, [KBASE, RC, lsl #3] - if (op == BC_GGET) { - | b ->BC_TGETS_Z - } else { - | b ->BC_TSETS_Z - } - break; - - case BC_TGETV: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = dst, RB = table, RC = key - | ldr CARG2, [BASE, RB, lsl #3] - | ldr TMP1, [BASE, RC, lsl #3] - | checktab CARG2, ->vmeta_tgetv - | checkint TMP1, >9 // Integer key? - | ldr CARG3, TAB:CARG2->array - | ldr CARG1w, TAB:CARG2->asize - | add CARG3, CARG3, TMP1, uxtw #3 - | cmp TMP1w, CARG1w // In array part? - | bhs ->vmeta_tgetv - | ldr TMP0, [CARG3] - | cmp TMP0, TISNIL - | beq >5 - |1: - | str TMP0, [BASE, RA, lsl #3] - | ins_next - | - |5: // Check for __index if table value is nil. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, <1 // No metatable: done. 
- | ldrb TMP1w, TAB:CARG1->nomm - | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done. - | b ->vmeta_tgetv - | - |9: - | asr ITYPE, TMP1, #47 - | cmn ITYPE, #-LJ_TSTR // String key? - | bne ->vmeta_tgetv - | and STR:RC, TMP1, #LJ_GCVMASK - | b ->BC_TGETS_Z - break; - case BC_TGETS: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = dst, RB = table, RC = str_const (~) - | ldr CARG2, [BASE, RB, lsl #3] - | mvn RC, RC - | ldr STR:RC, [KBASE, RC, lsl #3] - | checktab CARG2, ->vmeta_tgets1 - |->BC_TGETS_Z: - | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = dst - | ldr TMP1w, TAB:CARG2->hmask - | ldr TMP2w, STR:RC->hash - | ldr NODE:CARG3, TAB:CARG2->node - | and TMP1w, TMP1w, TMP2w // idx = str->hash & tab->hmask - | add TMP1, TMP1, TMP1, lsl #1 - | movn CARG4, #~LJ_TSTR - | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8 - | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for. - |1: - | ldp TMP0, CARG1, NODE:CARG3->val - | ldr NODE:CARG3, NODE:CARG3->next - | cmp CARG1, CARG4 - | bne >4 - | cmp TMP0, TISNIL - | beq >5 - |3: - | str TMP0, [BASE, RA, lsl #3] - | ins_next - | - |4: // Follow hash chain. - | cbnz NODE:CARG3, <1 - | // End of hash chain: key not found, nil result. - | mov TMP0, TISNIL - | - |5: // Check for __index if table value is nil. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, <3 // No metatable: done. - | ldrb TMP1w, TAB:CARG1->nomm - | tbnz TMP1w, #MM_index, <3 // 'no __index' flag set: done. - | b ->vmeta_tgets - break; - case BC_TGETB: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = dst, RB = table, RC = index - | ldr CARG2, [BASE, RB, lsl #3] - | checktab CARG2, ->vmeta_tgetb - | ldr CARG3, TAB:CARG2->array - | ldr CARG1w, TAB:CARG2->asize - | add CARG3, CARG3, RC, lsl #3 - | cmp RCw, CARG1w // In array part? - | bhs ->vmeta_tgetb - | ldr TMP0, [CARG3] - | cmp TMP0, TISNIL - | beq >5 - |1: - | str TMP0, [BASE, RA, lsl #3] - | ins_next - | - |5: // Check for __index if table value is nil. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, <1 // No metatable: done. - | ldrb TMP1w, TAB:CARG1->nomm - | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done. - | b ->vmeta_tgetb - break; - case BC_TGETR: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = dst, RB = table, RC = key - | ldr CARG1, [BASE, RB, lsl #3] - | ldr TMP1, [BASE, RC, lsl #3] - | and TAB:CARG1, CARG1, #LJ_GCVMASK - | ldr CARG3, TAB:CARG1->array - | ldr TMP2w, TAB:CARG1->asize - | add CARG3, CARG3, TMP1w, uxtw #3 - | cmp TMP1w, TMP2w // In array part? - | bhs ->vmeta_tgetr - | ldr TMP0, [CARG3] - |->BC_TGETR_Z: - | str TMP0, [BASE, RA, lsl #3] - | ins_next - break; - - case BC_TSETV: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = src, RB = table, RC = key - | ldr CARG2, [BASE, RB, lsl #3] - | ldr TMP1, [BASE, RC, lsl #3] - | checktab CARG2, ->vmeta_tsetv - | checkint TMP1, >9 // Integer key? - | ldr CARG3, TAB:CARG2->array - | ldr CARG1w, TAB:CARG2->asize - | add CARG3, CARG3, TMP1, uxtw #3 - | cmp TMP1w, CARG1w // In array part? - | bhs ->vmeta_tsetv - | ldr TMP1, [CARG3] - | ldr TMP0, [BASE, RA, lsl #3] - | ldrb TMP2w, TAB:CARG2->marked - | cmp TMP1, TISNIL // Previous value is nil? - | beq >5 - |1: - | str TMP0, [CARG3] - | tbnz TMP2w, #2, >7 // isblack(table) - |2: - | ins_next - | - |5: // Check for __newindex if previous value is nil. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, <1 // No metatable: done. - | ldrb TMP1w, TAB:CARG1->nomm - | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done. 
- | b ->vmeta_tsetv - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:CARG2, TMP2w, TMP1 - | b <2 - | - |9: - | asr ITYPE, TMP1, #47 - | cmn ITYPE, #-LJ_TSTR // String key? - | bne ->vmeta_tsetv - | and STR:RC, TMP1, #LJ_GCVMASK - | b ->BC_TSETS_Z - break; - case BC_TSETS: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = dst, RB = table, RC = str_const (~) - | ldr CARG2, [BASE, RB, lsl #3] - | mvn RC, RC - | ldr STR:RC, [KBASE, RC, lsl #3] - | checktab CARG2, ->vmeta_tsets1 - |->BC_TSETS_Z: - | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = src - | ldr TMP1w, TAB:CARG2->hmask - | ldr TMP2w, STR:RC->hash - | ldr NODE:CARG3, TAB:CARG2->node - | and TMP1w, TMP1w, TMP2w // idx = str->hash & tab->hmask - | add TMP1, TMP1, TMP1, lsl #1 - | movn CARG4, #~LJ_TSTR - | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8 - | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for. - | strb wzr, TAB:CARG2->nomm // Clear metamethod cache. - |1: - | ldp TMP1, CARG1, NODE:CARG3->val - | ldr NODE:TMP3, NODE:CARG3->next - | ldrb TMP2w, TAB:CARG2->marked - | cmp CARG1, CARG4 - | bne >5 - | ldr TMP0, [BASE, RA, lsl #3] - | cmp TMP1, TISNIL // Previous value is nil? - | beq >4 - |2: - | str TMP0, NODE:CARG3->val - | tbnz TMP2w, #2, >7 // isblack(table) - |3: - | ins_next - | - |4: // Check for __newindex if previous value is nil. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, <2 // No metatable: done. - | ldrb TMP1w, TAB:CARG1->nomm - | tbnz TMP1w, #MM_newindex, <2 // 'no __newindex' flag set: done. - | b ->vmeta_tsets - | - |5: // Follow hash chain. - | mov NODE:CARG3, NODE:TMP3 - | cbnz NODE:TMP3, <1 - | // End of hash chain: key not found, add a new one. - | - | // But check for __newindex first. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, >6 // No metatable: continue. - | ldrb TMP1w, TAB:CARG1->nomm - | // 'no __newindex' flag NOT set: check. - | tbz TMP1w, #MM_newindex, ->vmeta_tsets - |6: - | movn TMP1, #~LJ_TSTR - | str PC, SAVE_PC - | add TMP0, STR:RC, TMP1, lsl #47 - | str BASE, L->base - | mov CARG1, L - | str TMP0, TMPD - | add CARG3, sp, TMPDofs - | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) - | // Returns TValue *. - | ldr BASE, L->base - | ldr TMP0, [BASE, RA, lsl #3] - | str TMP0, [CRET1] - | b <3 // No 2nd write barrier needed. - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:CARG2, TMP2w, TMP1 - | b <3 - break; - case BC_TSETB: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = src, RB = table, RC = index - | ldr CARG2, [BASE, RB, lsl #3] - | checktab CARG2, ->vmeta_tsetb - | ldr CARG3, TAB:CARG2->array - | ldr CARG1w, TAB:CARG2->asize - | add CARG3, CARG3, RC, lsl #3 - | cmp RCw, CARG1w // In array part? - | bhs ->vmeta_tsetb - | ldr TMP1, [CARG3] - | ldr TMP0, [BASE, RA, lsl #3] - | ldrb TMP2w, TAB:CARG2->marked - | cmp TMP1, TISNIL // Previous value is nil? - | beq >5 - |1: - | str TMP0, [CARG3] - | tbnz TMP2w, #2, >7 // isblack(table) - |2: - | ins_next - | - |5: // Check for __newindex if previous value is nil. - | ldr TAB:CARG1, TAB:CARG2->metatable - | cbz TAB:CARG1, <1 // No metatable: done. - | ldrb TMP1w, TAB:CARG1->nomm - | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done. - | b ->vmeta_tsetb - | - |7: // Possible table write barrier for the value. Skip valiswhite check. 
- | barrierback TAB:CARG2, TMP2w, TMP1 - | b <2 - break; - case BC_TSETR: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = src, RB = table, RC = key - | ldr CARG2, [BASE, RB, lsl #3] - | ldr TMP1, [BASE, RC, lsl #3] - | and TAB:CARG2, CARG2, #LJ_GCVMASK - | ldr CARG1, TAB:CARG2->array - | ldrb TMP2w, TAB:CARG2->marked - | ldr CARG4w, TAB:CARG2->asize - | add CARG1, CARG1, TMP1, uxtw #3 - | tbnz TMP2w, #2, >7 // isblack(table) - |2: - | cmp TMP1w, CARG4w // In array part? - | bhs ->vmeta_tsetr - |->BC_TSETR_Z: - | ldr TMP0, [BASE, RA, lsl #3] - | str TMP0, [CARG1] - | ins_next - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:CARG2, TMP2w, TMP0 - | b <2 - break; - - case BC_TSETM: - | // RA = base (table at base-1), RC = num_const (start index) - | add RA, BASE, RA, lsl #3 - |1: - | ldr RBw, SAVE_MULTRES - | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table. - | ldr TMP1, [KBASE, RC, lsl #3] // Integer constant is in lo-word. - | sub RB, RB, #8 - | cbz RB, >4 // Nothing to copy? - | and TAB:CARG2, CARG2, #LJ_GCVMASK - | ldr CARG1w, TAB:CARG2->asize - | add CARG3w, TMP1w, RBw, lsr #3 - | ldr CARG4, TAB:CARG2->array - | cmp CARG3, CARG1 - | add RB, RA, RB - | bhi >5 - | add TMP1, CARG4, TMP1w, uxtw #3 - | ldrb TMP2w, TAB:CARG2->marked - |3: // Copy result slots to table. - | ldr TMP0, [RA], #8 - | str TMP0, [TMP1], #8 - | cmp RA, RB - | blo <3 - | tbnz TMP2w, #2, >7 // isblack(table) - |4: - | ins_next - | - |5: // Need to resize array part. - | str BASE, L->base - | mov CARG1, L - | str PC, SAVE_PC - | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) - | // Must not reallocate the stack. - | b <1 - | - |7: // Possible table write barrier for any value. Skip valiswhite check. - | barrierback TAB:CARG2, TMP2w, TMP1 - | b <4 - break; - - /* -- Calls and vararg handling ----------------------------------------- */ - - case BC_CALLM: - | // RA = base, (RB = nresults+1,) RC = extra_nargs - | ldr TMP0w, SAVE_MULTRES - | decode_RC8RD NARGS8:RC, RC - | add NARGS8:RC, NARGS8:RC, TMP0 - | b ->BC_CALL_Z - break; - case BC_CALL: - | decode_RC8RD NARGS8:RC, RC - | // RA = base, (RB = nresults+1,) RC = (nargs+1)*8 - |->BC_CALL_Z: - | mov RB, BASE // Save old BASE for vmeta_call. - | add BASE, BASE, RA, lsl #3 - | ldr CARG3, [BASE] - | sub NARGS8:RC, NARGS8:RC, #8 - | add BASE, BASE, #16 - | checkfunc CARG3, ->vmeta_call - | ins_call - break; - - case BC_CALLMT: - | // RA = base, (RB = 0,) RC = extra_nargs - | ldr TMP0w, SAVE_MULTRES - | add NARGS8:RC, TMP0, RC, lsl #3 - | b ->BC_CALLT1_Z - break; - case BC_CALLT: - | lsl NARGS8:RC, RC, #3 - | // RA = base, (RB = 0,) RC = (nargs+1)*8 - |->BC_CALLT1_Z: - | add RA, BASE, RA, lsl #3 - | ldr TMP1, [RA] - | sub NARGS8:RC, NARGS8:RC, #8 - | add RA, RA, #16 - | checktp CARG3, TMP1, LJ_TFUNC, ->vmeta_callt - | ldr PC, [BASE, FRAME_PC] - |->BC_CALLT2_Z: - | mov RB, #0 - | ldrb TMP2w, LFUNC:CARG3->ffid - | tst PC, #FRAME_TYPE - | bne >7 - |1: - | str TMP1, [BASE, FRAME_FUNC] // Copy function down, but keep PC. - | cbz NARGS8:RC, >3 - |2: - | ldr TMP0, [RA, RB] - | add TMP1, RB, #8 - | cmp TMP1, NARGS8:RC - | str TMP0, [BASE, RB] - | mov RB, TMP1 - | bne <2 - |3: - | cmp TMP2, #1 // (> FF_C) Calling a fast function? - | bhi >5 - |4: - | ins_callt - | - |5: // Tailcall to a fast function with a Lua frame below. 
- | ldrb RAw, [PC, #-3] - | sub CARG1, BASE, RA, lsl #3 - | ldr LFUNC:CARG1, [CARG1, #-32] - | and LFUNC:CARG1, CARG1, #LJ_GCVMASK - | ldr CARG1, LFUNC:CARG1->pc - | ldr KBASE, [CARG1, #PC2PROTO(k)] - | b <4 - | - |7: // Tailcall from a vararg function. - | eor PC, PC, #FRAME_VARG - | tst PC, #FRAME_TYPEP // Vararg frame below? - | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below. - | bne <1 - | sub BASE, BASE, PC - | ldr PC, [BASE, FRAME_PC] - | tst PC, #FRAME_TYPE - | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below. - | b <1 - break; - - case BC_ITERC: - | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) - | add RA, BASE, RA, lsl #3 - | ldr CARG3, [RA, #-24] - | mov RB, BASE // Save old BASE for vmeta_call. - | ldp CARG1, CARG2, [RA, #-16] - | add BASE, RA, #16 - | mov NARGS8:RC, #16 // Iterators get 2 arguments. - | str CARG3, [RA] // Copy callable. - | stp CARG1, CARG2, [RA, #16] // Copy state and control var. - | checkfunc CARG3, ->vmeta_call - | ins_call - break; - - case BC_ITERN: - | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) - |.if JIT - | // NYI: add hotloop, record BC_ITERN. - |.endif - | add RA, BASE, RA, lsl #3 - | ldr TAB:RB, [RA, #-16] - | ldrh TMP3w, [PC, #2] - | ldr CARG1w, [RA, #-8] // Get index from control var. - | add PC, PC, #4 - | add TMP3, PC, TMP3, lsl #2 - | and TAB:RB, RB, #LJ_GCVMASK - | sub TMP3, TMP3, #0x20000 - | ldr TMP1w, TAB:RB->asize - | ldr CARG2, TAB:RB->array - |1: // Traverse array part. - | subs RC, CARG1, TMP1 - | add CARG3, CARG2, CARG1, lsl #3 - | bhs >5 // Index points after array part? - | ldr TMP0, [CARG3] - | cmp TMP0, TISNIL - | cinc CARG1, CARG1, eq // Skip holes in array part. - | beq <1 - | add CARG1, CARG1, TISNUM - | stp CARG1, TMP0, [RA] - | add CARG1, CARG1, #1 - |3: - | str CARG1w, [RA, #-8] // Update control var. - | mov PC, TMP3 - |4: - | ins_next - | - |5: // Traverse hash part. - | ldr TMP2w, TAB:RB->hmask - | ldr NODE:RB, TAB:RB->node - |6: - | add CARG1, RC, RC, lsl #1 - | cmp RC, TMP2 // End of iteration? Branch to ITERN+1. - | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8 - | bhi <4 - | ldp TMP0, CARG1, NODE:CARG3->val - | cmp TMP0, TISNIL - | add RC, RC, #1 - | beq <6 // Skip holes in hash part. - | stp CARG1, TMP0, [RA] - | add CARG1, RC, TMP1 - | b <3 - break; - - case BC_ISNEXT: - | // RA = base, RC = target (points to ITERN) - | add RA, BASE, RA, lsl #3 - | ldr CFUNC:CARG1, [RA, #-24] - | add RC, PC, RC, lsl #2 - | ldp TAB:CARG3, CARG4, [RA, #-16] - | sub RC, RC, #0x20000 - | checkfunc CFUNC:CARG1, >5 - | asr TMP0, TAB:CARG3, #47 - | ldrb TMP1w, CFUNC:CARG1->ffid - | cmn TMP0, #-LJ_TTAB - | ccmp CARG4, TISNIL, #0, eq - | ccmp TMP1w, #FF_next_N, #0, eq - | bne >5 - | mov TMP0w, #0xfffe7fff - | lsl TMP0, TMP0, #32 - | str TMP0, [RA, #-8] // Initialize control var. - |1: - | mov PC, RC - | ins_next - | - |5: // Despecialize bytecode if any of the checks fail. - | mov TMP0, #BC_JMP - | mov TMP1, #BC_ITERC - | strb TMP0w, [PC, #-4] - | strb TMP1w, [RC] - | b <1 - break; - - case BC_VARG: - | decode_RB RB, INS - | and RC, RC, #255 - | // RA = base, RB = (nresults+1), RC = numparams - | ldr TMP1, [BASE, FRAME_PC] - | add RC, BASE, RC, lsl #3 - | add RA, BASE, RA, lsl #3 - | add RC, RC, #FRAME_VARG - | add TMP2, RA, RB, lsl #3 - | sub RC, RC, TMP1 // RC = vbase - | // Note: RC may now be even _above_ BASE if nargs was < numparams. - | sub TMP3, BASE, #16 // TMP3 = vtop - | cbz RB, >5 - | sub TMP2, TMP2, #16 - |1: // Copy vararg slots to destination slots. 
- | cmp RC, TMP3 - | ldr TMP0, [RC], #8 - | csel TMP0, TMP0, TISNIL, lo - | cmp RA, TMP2 - | str TMP0, [RA], #8 - | blo <1 - |2: - | ins_next - | - |5: // Copy all varargs. - | ldr TMP0, L->maxstack - | subs TMP2, TMP3, RC - | csel RB, xzr, TMP2, le // MULTRES = (max(vtop-vbase,0)+1)*8 - | add RB, RB, #8 - | add TMP1, RA, TMP2 - | str RBw, SAVE_MULTRES - | ble <2 // Nothing to copy. - | cmp TMP1, TMP0 - | bhi >7 - |6: - | ldr TMP0, [RC], #8 - | str TMP0, [RA], #8 - | cmp RC, TMP3 - | blo <6 - | b <2 - | - |7: // Grow stack for varargs. - | lsr CARG2, TMP2, #3 - | stp BASE, RA, L->base - | mov CARG1, L - | sub RC, RC, BASE // Need delta, because BASE may change. - | str PC, SAVE_PC - | bl extern lj_state_growstack // (lua_State *L, int n) - | ldp BASE, RA, L->base - | add RC, BASE, RC - | sub TMP3, BASE, #16 - | b <6 - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - | // RA = results, RC = extra results - | ldr TMP0w, SAVE_MULTRES - | ldr PC, [BASE, FRAME_PC] - | add RA, BASE, RA, lsl #3 - | add RC, TMP0, RC, lsl #3 - | b ->BC_RETM_Z - break; - - case BC_RET: - | // RA = results, RC = nresults+1 - | ldr PC, [BASE, FRAME_PC] - | lsl RC, RC, #3 - | add RA, BASE, RA, lsl #3 - |->BC_RETM_Z: - | str RCw, SAVE_MULTRES - |1: - | ands CARG1, PC, #FRAME_TYPE - | eor CARG2, PC, #FRAME_VARG - | bne ->BC_RETV2_Z - | - |->BC_RET_Z: - | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return - | ldr INSw, [PC, #-4] - | subs TMP1, RC, #8 - | sub CARG3, BASE, #16 - | beq >3 - |2: - | ldr TMP0, [RA], #8 - | add BASE, BASE, #8 - | sub TMP1, TMP1, #8 - | str TMP0, [BASE, #-24] - | cbnz TMP1, <2 - |3: - | decode_RA RA, INS - | sub CARG4, CARG3, RA, lsl #3 - | decode_RB RB, INS - | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC] - |5: - | cmp RC, RB, lsl #3 // More results expected? - | blo >6 - | and LFUNC:CARG1, CARG1, #LJ_GCVMASK - | mov BASE, CARG4 - | ldr CARG2, LFUNC:CARG1->pc - | ldr KBASE, [CARG2, #PC2PROTO(k)] - | ins_next - | - |6: // Fill up results with nil. - | add BASE, BASE, #8 - | add RC, RC, #8 - | str TISNIL, [BASE, #-24] - | b <5 - | - |->BC_RETV1_Z: // Non-standard return case. - | add RA, BASE, RA, lsl #3 - |->BC_RETV2_Z: - | tst CARG2, #FRAME_TYPEP - | bne ->vm_return - | // Return from vararg function: relocate BASE down. - | sub BASE, BASE, CARG2 - | ldr PC, [BASE, FRAME_PC] - | b <1 - break; - - case BC_RET0: case BC_RET1: - | // RA = results, RC = nresults+1 - | ldr PC, [BASE, FRAME_PC] - | lsl RC, RC, #3 - | str RCw, SAVE_MULTRES - | ands CARG1, PC, #FRAME_TYPE - | eor CARG2, PC, #FRAME_VARG - | bne ->BC_RETV1_Z - | ldr INSw, [PC, #-4] - if (op == BC_RET1) { - | ldr TMP0, [BASE, RA, lsl #3] - } - | sub CARG4, BASE, #16 - | decode_RA RA, INS - | sub BASE, CARG4, RA, lsl #3 - if (op == BC_RET1) { - | str TMP0, [CARG4], #8 - } - | decode_RB RB, INS - | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] - |5: - | cmp RC, RB, lsl #3 - | blo >6 - | and LFUNC:CARG1, CARG1, #LJ_GCVMASK - | ldr CARG2, LFUNC:CARG1->pc - | ldr KBASE, [CARG2, #PC2PROTO(k)] - | ins_next - | - |6: // Fill up results with nil. - | add RC, RC, #8 - | str TISNIL, [CARG4], #8 - | b <5 - break; - - /* -- Loops and branches ------------------------------------------------ */ - - |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4] - |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12] - |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20] - |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28] - - case BC_FORL: - |.if JIT - | hotloop - |.endif - | // Fall through. 
Assumes BC_IFORL follows. - break; - - case BC_JFORI: - case BC_JFORL: -#if !LJ_HASJIT - break; -#endif - case BC_FORI: - case BC_IFORL: - | // RA = base, RC = target (after end of loop or start of loop) - vk = (op == BC_IFORL || op == BC_JFORL); - | add RA, BASE, RA, lsl #3 - | ldp CARG1, CARG2, FOR_IDX // CARG1 = IDX, CARG2 = STOP - | ldr CARG3, FOR_STEP // CARG3 = STEP - if (op != BC_JFORL) { - | add RC, PC, RC, lsl #2 - | sub RC, RC, #0x20000 - } - | checkint CARG1, >5 - if (!vk) { - | checkint CARG2, ->vmeta_for - | checkint CARG3, ->vmeta_for - | tbnz CARG3w, #31, >4 - | cmp CARG1w, CARG2w - } else { - | adds CARG1w, CARG1w, CARG3w - | bvs >2 - | add TMP0, CARG1, TISNUM - | tbnz CARG3w, #31, >4 - | cmp CARG1w, CARG2w - } - |1: - if (op == BC_FORI) { - | csel PC, RC, PC, gt - } else if (op == BC_JFORI) { - | ldrh RCw, [RC, #-2] - } else if (op == BC_IFORL) { - | csel PC, RC, PC, le - } - if (vk) { - | str TMP0, FOR_IDX - | str TMP0, FOR_EXT - } else { - | str CARG1, FOR_EXT - } - if (op == BC_JFORI || op == BC_JFORL) { - | ble =>BC_JLOOP - } - |2: - | ins_next - | - |4: // Invert check for negative step. - | cmp CARG2w, CARG1w - | b <1 - | - |5: // FP loop. - | ldp d0, d1, FOR_IDX - | blo ->vmeta_for - if (!vk) { - | checknum CARG2, ->vmeta_for - | checknum CARG3, ->vmeta_for - | str d0, FOR_EXT - } else { - | ldr d2, FOR_STEP - | fadd d0, d0, d2 - } - | tbnz CARG3, #63, >7 - | fcmp d0, d1 - |6: - if (vk) { - | str d0, FOR_IDX - | str d0, FOR_EXT - } - if (op == BC_FORI) { - | csel PC, RC, PC, hi - } else if (op == BC_JFORI) { - | ldrh RCw, [RC, #-2] - | bls =>BC_JLOOP - } else if (op == BC_IFORL) { - | csel PC, RC, PC, ls - } else { - | bls =>BC_JLOOP - } - | b <2 - | - |7: // Invert check for negative step. - | fcmp d1, d0 - | b <6 - break; - - case BC_ITERL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IITERL follows. - break; - - case BC_JITERL: -#if !LJ_HASJIT - break; -#endif - case BC_IITERL: - | // RA = base, RC = target - | ldr CARG1, [BASE, RA, lsl #3] - | add TMP1, BASE, RA, lsl #3 - | cmp CARG1, TISNIL - | beq >1 // Stop if iterator returned nil. - if (op == BC_JITERL) { - | str CARG1, [TMP1, #-8] - | b =>BC_JLOOP - } else { - | add TMP0, PC, RC, lsl #2 // Otherwise save control var + branch. - | sub PC, TMP0, #0x20000 - | str CARG1, [TMP1, #-8] - } - |1: - | ins_next - break; - - case BC_LOOP: - | // RA = base, RC = target (loop extent) - | // Note: RA/RC is only used by trace recorder to determine scope/extent - | // This opcode does NOT jump, it's only purpose is to detect a hot loop. - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_ILOOP follows. - break; - - case BC_ILOOP: - | // RA = base, RC = target (loop extent) - | ins_next - break; - - case BC_JLOOP: - |.if JIT - | NYI - |.endif - break; - - case BC_JMP: - | // RA = base (only used by trace recorder), RC = target - | add RC, PC, RC, lsl #2 - | sub PC, RC, #0x20000 - | ins_next - break; - - /* -- Function headers -------------------------------------------------- */ - - case BC_FUNCF: - |.if JIT - | hotcall - |.endif - case BC_FUNCV: /* NYI: compiled vararg functions. */ - | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. 
- break; - - case BC_JFUNCF: -#if !LJ_HASJIT - break; -#endif - case BC_IFUNCF: - | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 - | ldr CARG1, L->maxstack - | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)] - | ldr KBASE, [PC, #-4+PC2PROTO(k)] - | cmp RA, CARG1 - | bhi ->vm_growstack_l - |2: - | cmp NARGS8:RC, TMP1, lsl #3 // Check for missing parameters. - | blo >3 - if (op == BC_JFUNCF) { - | decode_RD RC, INS - | b =>BC_JLOOP - } else { - | ins_next - } - | - |3: // Clear missing parameters. - | str TISNIL, [BASE, NARGS8:RC] - | add NARGS8:RC, NARGS8:RC, #8 - | b <2 - break; - - case BC_JFUNCV: -#if !LJ_HASJIT - break; -#endif - | NYI // NYI: compiled vararg functions - break; /* NYI: compiled vararg functions. */ - - case BC_IFUNCV: - | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 - | ldr CARG1, L->maxstack - | add TMP2, BASE, RC - | add RA, RA, RC - | add TMP0, RC, #16+FRAME_VARG - | str LFUNC:CARG3, [TMP2], #8 // Store (untagged) copy of LFUNC. - | ldr KBASE, [PC, #-4+PC2PROTO(k)] - | cmp RA, CARG1 - | str TMP0, [TMP2], #8 // Store delta + FRAME_VARG. - | bhs ->vm_growstack_l - | sub RC, TMP2, #16 - | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)] - | mov RA, BASE - | mov BASE, TMP2 - | cbz TMP1, >2 - |1: - | cmp RA, RC // Less args than parameters? - | bhs >3 - | ldr TMP0, [RA] - | sub TMP1, TMP1, #1 - | str TISNIL, [RA], #8 // Clear old fixarg slot (help the GC). - | str TMP0, [TMP2], #8 - | cbnz TMP1, <1 - |2: - | ins_next - | - |3: - | sub TMP1, TMP1, #1 - | str TISNIL, [TMP2], #8 - | cbz TMP1, <2 - | b <3 - break; - - case BC_FUNCC: - case BC_FUNCCW: - | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8 - if (op == BC_FUNCC) { - | ldr CARG4, CFUNC:CARG3->f - } else { - | ldr CARG4, GL->wrapf - } - | add CARG2, RA, NARGS8:RC - | ldr CARG1, L->maxstack - | add RC, BASE, NARGS8:RC - | cmp CARG2, CARG1 - | stp BASE, RC, L->base - if (op == BC_FUNCCW) { - | ldr CARG2, CFUNC:CARG3->f - } - | mv_vmstate TMP0w, C - | mov CARG1, L - | bhi ->vm_growstack_c // Need to grow stack. - | st_vmstate TMP0w - | blr CARG4 // (lua_State *L [, lua_CFunction f]) - | // Returns nresults. - | ldp BASE, TMP1, L->base - | str L, GL->cur_L - | sbfiz RC, CRET1, #3, #32 - | st_vmstate ST_INTERP - | ldr PC, [BASE, FRAME_PC] - | sub RA, TMP1, RC // RA = L->top - nresults*8 - | b ->vm_returnc - break; - - /* ---------------------------------------------------------------------- */ - - default: - fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); - exit(2); - break; - } -} - -static int build_backend(BuildCtx *ctx) -{ - int op; - - dasm_growpc(Dst, BC__MAX); - - build_subroutines(ctx); - - |.code_op - for (op = 0; op < BC__MAX; op++) - build_ins(ctx, (BCOp)op, op); - - return BC__MAX; -} - -/* Emit pseudo frame-info for all assembler functions. */ -static void emit_asm_debug(BuildCtx *ctx) -{ - int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); - int i, cf = CFRAME_SIZE >> 3; - switch (ctx->mode) { - case BUILD_elfasm: - fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n"); - fprintf(ctx->fp, - ".Lframe0:\n" - "\t.long .LECIE0-.LSCIE0\n" - ".LSCIE0:\n" - "\t.long 0xffffffff\n" - "\t.byte 0x1\n" - "\t.string \"\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -8\n" - "\t.byte 30\n" /* Return address is in lr. 
*/ - "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */ - "\t.align 3\n" - ".LECIE0:\n\n"); - fprintf(ctx->fp, - ".LSFDE0:\n" - "\t.long .LEFDE0-.LASFDE0\n" - ".LASFDE0:\n" - "\t.long .Lframe0\n" - "\t.quad .Lbegin\n" - "\t.quad %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x9d\n\t.uleb128 %d\n" /* offset fp */ - "\t.byte 0x9e\n\t.uleb128 %d\n", /* offset lr */ - fcofs, CFRAME_SIZE, cf, cf-1); - for (i = 19; i <= 28; i++) /* offset x19-x28 */ - fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, cf-i+17); - for (i = 8; i <= 15; i++) /* offset d8-d15 */ - fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n", - 64+i, cf-i-4); - fprintf(ctx->fp, - "\t.align 3\n" - ".LEFDE0:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".LSFDE1:\n" - "\t.long .LEFDE1-.LASFDE1\n" - ".LASFDE1:\n" - "\t.long .Lframe0\n" - "\t.quad lj_vm_ffi_call\n" - "\t.quad %d\n" - "\t.byte 0xe\n\t.uleb128 32\n" /* def_cfa_offset */ - "\t.byte 0x9d\n\t.uleb128 4\n" /* offset fp */ - "\t.byte 0x9e\n\t.uleb128 3\n" /* offset lr */ - "\t.byte 0x93\n\t.uleb128 2\n" /* offset x19 */ - "\t.align 3\n" - ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); -#endif - fprintf(ctx->fp, "\t.section .eh_frame,\"a\",%%progbits\n"); - fprintf(ctx->fp, - ".Lframe1:\n" - "\t.long .LECIE1-.LSCIE1\n" - ".LSCIE1:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zPR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -8\n" - "\t.byte 30\n" /* Return address is in lr. */ - "\t.uleb128 6\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.long lj_err_unwind_dwarf-.\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */ - "\t.align 3\n" - ".LECIE1:\n\n"); - fprintf(ctx->fp, - ".LSFDE2:\n" - "\t.long .LEFDE2-.LASFDE2\n" - ".LASFDE2:\n" - "\t.long .LASFDE2-.Lframe1\n" - "\t.long .Lbegin-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x9d\n\t.uleb128 %d\n" /* offset fp */ - "\t.byte 0x9e\n\t.uleb128 %d\n", /* offset lr */ - fcofs, CFRAME_SIZE, cf, cf-1); - for (i = 19; i <= 28; i++) /* offset x19-x28 */ - fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, cf-i+17); - for (i = 8; i <= 15; i++) /* offset d8-d15 */ - fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n", - 64+i, cf-i-4); - fprintf(ctx->fp, - "\t.align 3\n" - ".LEFDE2:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".Lframe2:\n" - "\t.long .LECIE2-.LSCIE2\n" - ".LSCIE2:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -8\n" - "\t.byte 30\n" /* Return address is in lr. 
*/ - "\t.uleb128 1\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */ - "\t.align 3\n" - ".LECIE2:\n\n"); - fprintf(ctx->fp, - ".LSFDE3:\n" - "\t.long .LEFDE3-.LASFDE3\n" - ".LASFDE3:\n" - "\t.long .LASFDE3-.Lframe2\n" - "\t.long lj_vm_ffi_call-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 32\n" /* def_cfa_offset */ - "\t.byte 0x9d\n\t.uleb128 4\n" /* offset fp */ - "\t.byte 0x9e\n\t.uleb128 3\n" /* offset lr */ - "\t.byte 0x93\n\t.uleb128 2\n" /* offset x19 */ - "\t.align 3\n" - ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); -#endif - break; - default: - break; - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_mips.dasc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_mips.dasc deleted file mode 100644 index 7cfdf4b18b5..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_mips.dasc +++ /dev/null @@ -1,4355 +0,0 @@ -|// Low-level VM code for MIPS CPUs. -|// Bytecode interpreter, fast functions and helper functions. -|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -| -|.arch mips -|.section code_op, code_sub -| -|.actionlist build_actionlist -|.globals GLOB_ -|.globalnames globnames -|.externnames extnames -| -|// Note: The ragged indentation of the instructions is intentional. -|// The starting columns indicate data dependencies. -| -|//----------------------------------------------------------------------- -| -|// Fixed register assignments for the interpreter. -|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra -| -|// The following must be C callee-save (but BASE is often refetched). -|.define BASE, r16 // Base of current Lua stack frame. -|.define KBASE, r17 // Constants of current Lua function. -|.define PC, r18 // Next PC. -|.define DISPATCH, r19 // Opcode dispatch table. -|.define LREG, r20 // Register holding lua_State (also in SAVE_L). -|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8. -|// NYI: r22 currently unused. -| -|.define JGL, r30 // On-trace: global_State + 32768. -| -|// Constants for type-comparisons, stores and conversions. C callee-save. -|.define TISNIL, r30 -|.define TOBIT, f30 // 2^52 + 2^51. -| -|// The following temporaries are not saved across C calls, except for RA. -|.define RA, r23 // Callee-save. -|.define RB, r8 -|.define RC, r9 -|.define RD, r10 -|.define INS, r11 -| -|.define AT, r1 // Assembler temporary. -|.define TMP0, r12 -|.define TMP1, r13 -|.define TMP2, r14 -|.define TMP3, r15 -| -|// Calling conventions. -|.define CFUNCADDR, r25 -|.define CARG1, r4 -|.define CARG2, r5 -|.define CARG3, r6 -|.define CARG4, r7 -| -|.define CRET1, r2 -|.define CRET2, r3 -| -|.define FARG1, f12 -|.define FARG2, f14 -| -|.define FRET1, f0 -|.define FRET2, f2 -| -|// Stack layout while in interpreter. Must match with lj_frame.h. -|.define CFRAME_SPACE, 112 // Delta for sp. -| -|.define SAVE_ERRF, 124(sp) // 32 bit C frame info. -|.define SAVE_NRES, 120(sp) -|.define SAVE_CFRAME, 116(sp) -|.define SAVE_L, 112(sp) -|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter. -|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves. -|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves. -|.define SAVE_PC, 20(sp) -|.define ARG5, 16(sp) -|.define CSAVE_4, 12(sp) -|.define CSAVE_3, 8(sp) -|.define CSAVE_2, 4(sp) -|.define CSAVE_1, 0(sp) -|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by callee. 
-| -|.define ARG5_OFS, 16 -|.define SAVE_MULTRES, ARG5 -| -|.macro saveregs -| addiu sp, sp, -CFRAME_SPACE -| sw ra, SAVE_GPR_+9*4(sp) -| sw r30, SAVE_GPR_+8*4(sp) -| sdc1 f30, SAVE_FPR_+5*8(sp) -| sw r23, SAVE_GPR_+7*4(sp) -| sw r22, SAVE_GPR_+6*4(sp) -| sdc1 f28, SAVE_FPR_+4*8(sp) -| sw r21, SAVE_GPR_+5*4(sp) -| sw r20, SAVE_GPR_+4*4(sp) -| sdc1 f26, SAVE_FPR_+3*8(sp) -| sw r19, SAVE_GPR_+3*4(sp) -| sw r18, SAVE_GPR_+2*4(sp) -| sdc1 f24, SAVE_FPR_+2*8(sp) -| sw r17, SAVE_GPR_+1*4(sp) -| sw r16, SAVE_GPR_+0*4(sp) -| sdc1 f22, SAVE_FPR_+1*8(sp) -| sdc1 f20, SAVE_FPR_+0*8(sp) -|.endmacro -| -|.macro restoreregs_ret -| lw ra, SAVE_GPR_+9*4(sp) -| lw r30, SAVE_GPR_+8*4(sp) -| ldc1 f30, SAVE_FPR_+5*8(sp) -| lw r23, SAVE_GPR_+7*4(sp) -| lw r22, SAVE_GPR_+6*4(sp) -| ldc1 f28, SAVE_FPR_+4*8(sp) -| lw r21, SAVE_GPR_+5*4(sp) -| lw r20, SAVE_GPR_+4*4(sp) -| ldc1 f26, SAVE_FPR_+3*8(sp) -| lw r19, SAVE_GPR_+3*4(sp) -| lw r18, SAVE_GPR_+2*4(sp) -| ldc1 f24, SAVE_FPR_+2*8(sp) -| lw r17, SAVE_GPR_+1*4(sp) -| lw r16, SAVE_GPR_+0*4(sp) -| ldc1 f22, SAVE_FPR_+1*8(sp) -| ldc1 f20, SAVE_FPR_+0*8(sp) -| jr ra -| addiu sp, sp, CFRAME_SPACE -|.endmacro -| -|// Type definitions. Some of these are only used for documentation. -|.type L, lua_State, LREG -|.type GL, global_State -|.type TVALUE, TValue -|.type GCOBJ, GCobj -|.type STR, GCstr -|.type TAB, GCtab -|.type LFUNC, GCfuncL -|.type CFUNC, GCfuncC -|.type PROTO, GCproto -|.type UPVAL, GCupval -|.type NODE, Node -|.type NARGS8, int -|.type TRACE, GCtrace -|.type SBUF, SBuf -| -|//----------------------------------------------------------------------- -| -|// Trap for not-yet-implemented parts. -|.macro NYI; .long 0xf0f0f0f0; .endmacro -| -|// Macros to mark delay slots. -|.macro ., a; a; .endmacro -|.macro ., a,b; a,b; .endmacro -|.macro ., a,b,c; a,b,c; .endmacro -| -|//----------------------------------------------------------------------- -| -|// Endian-specific defines. -|.define FRAME_PC, LJ_ENDIAN_SELECT(-4,-8) -|.define FRAME_FUNC, LJ_ENDIAN_SELECT(-8,-4) -|.define HI, LJ_ENDIAN_SELECT(4,0) -|.define LO, LJ_ENDIAN_SELECT(0,4) -|.define OFS_RD, LJ_ENDIAN_SELECT(2,0) -|.define OFS_RA, LJ_ENDIAN_SELECT(1,2) -|.define OFS_OP, LJ_ENDIAN_SELECT(0,3) -| -|// Instruction decode. -|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro -|.macro decode_OP4a, dst, ins; andi dst, ins, 0xff; .endmacro -|.macro decode_OP4b, dst; sll dst, dst, 2; .endmacro -|.macro decode_RC4a, dst, ins; srl dst, ins, 14; .endmacro -|.macro decode_RC4b, dst; andi dst, dst, 0x3fc; .endmacro -|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro -|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro -|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro -|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro -|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro -|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro -|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro -|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro -| -|// Instruction fetch. -|.macro ins_NEXT1 -| lw INS, 0(PC) -| addiu PC, PC, 4 -|.endmacro -|// Instruction decode+dispatch. -|.macro ins_NEXT2 -| decode_OP4a TMP1, INS -| decode_OP4b TMP1 -| addu TMP0, DISPATCH, TMP1 -| decode_RD8a RD, INS -| lw AT, 0(TMP0) -| decode_RA8a RA, INS -| decode_RD8b RD -| jr AT -| decode_RA8b RA -|.endmacro -|.macro ins_NEXT -| ins_NEXT1 -| ins_NEXT2 -|.endmacro -| -|// Instruction footer. -|.if 1 -| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. 
-| .define ins_next, ins_NEXT -| .define ins_next_, ins_NEXT -| .define ins_next1, ins_NEXT1 -| .define ins_next2, ins_NEXT2 -|.else -| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. -| // Affects only certain kinds of benchmarks (and only with -j off). -| .macro ins_next -| b ->ins_next -| .endmacro -| .macro ins_next1 -| .endmacro -| .macro ins_next2 -| b ->ins_next -| .endmacro -| .macro ins_next_ -| ->ins_next: -| ins_NEXT -| .endmacro -|.endif -| -|// Call decode and dispatch. -|.macro ins_callt -| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC -| lw PC, LFUNC:RB->pc -| lw INS, 0(PC) -| addiu PC, PC, 4 -| decode_OP4a TMP1, INS -| decode_RA8a RA, INS -| decode_OP4b TMP1 -| decode_RA8b RA -| addu TMP0, DISPATCH, TMP1 -| lw TMP0, 0(TMP0) -| jr TMP0 -| addu RA, RA, BASE -|.endmacro -| -|.macro ins_call -| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC -| sw PC, FRAME_PC(BASE) -| ins_callt -|.endmacro -| -|//----------------------------------------------------------------------- -| -|.macro branch_RD -| srl TMP0, RD, 1 -| lui AT, (-(BCBIAS_J*4 >> 16) & 65535) -| addu TMP0, TMP0, AT -| addu PC, PC, TMP0 -|.endmacro -| -|// Assumes DISPATCH is relative to GL. -#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) -#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) -#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch)) -#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name) -| -#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) -| -|.macro load_got, func -| lw CFUNCADDR, DISPATCH_GOT(func)(DISPATCH) -|.endmacro -|// Much faster. Sadly, there's no easy way to force the required code layout. -|// .macro call_intern, func; bal extern func; .endmacro -|.macro call_intern, func; jalr CFUNCADDR; .endmacro -|.macro call_extern; jalr CFUNCADDR; .endmacro -|.macro jmp_extern; jr CFUNCADDR; .endmacro -| -|.macro hotcheck, delta, target -| srl TMP1, PC, 1 -| andi TMP1, TMP1, 126 -| addu TMP1, TMP1, DISPATCH -| lhu TMP2, GG_DISP2HOT(TMP1) -| addiu TMP2, TMP2, -delta -| bltz TMP2, target -|. sh TMP2, GG_DISP2HOT(TMP1) -|.endmacro -| -|.macro hotloop -| hotcheck HOTCOUNT_LOOP, ->vm_hotloop -|.endmacro -| -|.macro hotcall -| hotcheck HOTCOUNT_CALL, ->vm_hotcall -|.endmacro -| -|// Set current VM state. Uses TMP0. -|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro -|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro -| -|// Move table write barrier back. Overwrites mark and tmp. -|.macro barrierback, tab, mark, tmp, target -| lw tmp, DISPATCH_GL(gc.grayagain)(DISPATCH) -| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab) -| sw tab, DISPATCH_GL(gc.grayagain)(DISPATCH) -| sb mark, tab->marked -| b target -|. sw tmp, tab->gclist -|.endmacro -| -|//----------------------------------------------------------------------- - -/* Generate subroutines used by opcodes and other parts of the VM. */ -/* The .code_sub section should be last to help static branch prediction. */ -static void build_subroutines(BuildCtx *ctx) -{ - |.code_sub - | - |//----------------------------------------------------------------------- - |//-- Return handling ---------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_returnp: - | // See vm_return. Also: TMP2 = previous base. - | andi AT, PC, FRAME_P - | beqz AT, ->cont_dispatch - |. 
li TMP1, LJ_TTRUE - | - | // Return from pcall or xpcall fast func. - | lw PC, FRAME_PC(TMP2) // Fetch PC of previous frame. - | move BASE, TMP2 // Restore caller base. - | // Prepending may overwrite the pcall frame, so do it at the end. - | sw TMP1, FRAME_PC(RA) // Prepend true to results. - | addiu RA, RA, -8 - | - |->vm_returnc: - | addiu RD, RD, 8 // RD = (nresults+1)*8. - | andi TMP0, PC, FRAME_TYPE - | beqz RD, ->vm_unwind_c_eh - |. li CRET1, LUA_YIELD - | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua. - |. move MULTRES, RD - | - |->vm_return: - | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return - | // TMP0 = PC & FRAME_TYPE - | li TMP2, -8 - | xori AT, TMP0, FRAME_C - | and TMP2, PC, TMP2 - | bnez AT, ->vm_returnp - | subu TMP2, BASE, TMP2 // TMP2 = previous base. - | - | addiu TMP1, RD, -8 - | sw TMP2, L->base - | li_vmstate C - | lw TMP2, SAVE_NRES - | addiu BASE, BASE, -8 - | st_vmstate - | beqz TMP1, >2 - |. sll TMP2, TMP2, 3 - |1: - | addiu TMP1, TMP1, -8 - | ldc1 f0, 0(RA) - | addiu RA, RA, 8 - | sdc1 f0, 0(BASE) - | bnez TMP1, <1 - |. addiu BASE, BASE, 8 - | - |2: - | bne TMP2, RD, >6 - |3: - |. sw BASE, L->top // Store new top. - | - |->vm_leave_cp: - | lw TMP0, SAVE_CFRAME // Restore previous C frame. - | move CRET1, r0 // Ok return status for vm_pcall. - | sw TMP0, L->cframe - | - |->vm_leave_unw: - | restoreregs_ret - | - |6: - | lw TMP1, L->maxstack - | slt AT, TMP2, RD - | bnez AT, >7 // Less results wanted? - | // More results wanted. Check stack size and fill up results with nil. - |. slt AT, BASE, TMP1 - | beqz AT, >8 - |. nop - | sw TISNIL, HI(BASE) - | addiu RD, RD, 8 - | b <2 - |. addiu BASE, BASE, 8 - | - |7: // Less results wanted. - | subu TMP0, RD, TMP2 - | subu TMP0, BASE, TMP0 // Either keep top or shrink it. - | b <3 - |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case? - | - |8: // Corner case: need to grow stack for filling up results. - | // This can happen if: - | // - A C function grows the stack (a lot). - | // - The GC shrinks the stack in between. - | // - A return back from a lua_call() with (high) nresults adjustment. - | load_got lj_state_growstack - | move MULTRES, RD - | srl CARG2, TMP2, 3 - | call_intern lj_state_growstack // (lua_State *L, int n) - |. move CARG1, L - | lw TMP2, SAVE_NRES - | lw BASE, L->top // Need the (realloced) L->top in BASE. - | move RD, MULTRES - | b <2 - |. sll TMP2, TMP2, 3 - | - |->vm_unwind_c: // Unwind C stack, return from vm_pcall. - | // (void *cframe, int errcode) - | move sp, CARG1 - | move CRET1, CARG2 - |->vm_unwind_c_eh: // Landing pad for external unwinder. - | lw L, SAVE_L - | li TMP0, ~LJ_VMST_C - | lw GL:TMP1, L->glref - | b ->vm_leave_unw - |. sw TMP0, GL:TMP1->vmstate - | - |->vm_unwind_ff: // Unwind C stack, return from ff pcall. - | // (void *cframe) - | li AT, -4 - | and sp, CARG1, AT - |->vm_unwind_ff_eh: // Landing pad for external unwinder. - | lw L, SAVE_L - | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | li TISNIL, LJ_TNIL - | lw BASE, L->base - | lw DISPATCH, L->glref // Setup pointer to dispatch table. - | mtc1 TMP3, TOBIT - | li TMP1, LJ_TFALSE - | li_vmstate INTERP - | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame. - | cvt.d.s TOBIT, TOBIT - | addiu RA, BASE, -8 // Results start at BASE-8. - | addiu DISPATCH, DISPATCH, GG_G2DISP - | sw TMP1, HI(RA) // Prepend false to error message. - | st_vmstate - | b ->vm_returnc - |. li RD, 16 // 2 results: false + error message. 
- | - |//----------------------------------------------------------------------- - |//-- Grow stack for calls ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_growstack_c: // Grow stack for C function. - | b >2 - |. li CARG2, LUA_MINSTACK - | - |->vm_growstack_l: // Grow stack for Lua function. - | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC - | addu RC, BASE, RC - | subu RA, RA, BASE - | sw BASE, L->base - | addiu PC, PC, 4 // Must point after first instruction. - | sw RC, L->top - | srl CARG2, RA, 3 - |2: - | // L->base = new base, L->top = top - | load_got lj_state_growstack - | sw PC, SAVE_PC - | call_intern lj_state_growstack // (lua_State *L, int n) - |. move CARG1, L - | lw BASE, L->base - | lw RC, L->top - | lw LFUNC:RB, FRAME_FUNC(BASE) - | subu RC, RC, BASE - | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC - | ins_callt // Just retry the call. - | - |//----------------------------------------------------------------------- - |//-- Entry points into the assembler VM --------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_resume: // Setup C frame and resume thread. - | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) - | saveregs - | move L, CARG1 - | lw DISPATCH, L->glref // Setup pointer to dispatch table. - | move BASE, CARG2 - | lbu TMP1, L->status - | sw L, SAVE_L - | li PC, FRAME_CP - | addiu TMP0, sp, CFRAME_RESUME - | addiu DISPATCH, DISPATCH, GG_G2DISP - | sw r0, SAVE_NRES - | sw r0, SAVE_ERRF - | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. - | sw r0, SAVE_CFRAME - | beqz TMP1, >3 - |. sw TMP0, L->cframe - | - | // Resume after yield (like a return). - | sw L, DISPATCH_GL(cur_L)(DISPATCH) - | move RA, BASE - | lw BASE, L->base - | lw TMP1, L->top - | lw PC, FRAME_PC(BASE) - | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | subu RD, TMP1, BASE - | mtc1 TMP3, TOBIT - | sb r0, L->status - | cvt.d.s TOBIT, TOBIT - | li_vmstate INTERP - | addiu RD, RD, 8 - | st_vmstate - | move MULTRES, RD - | andi TMP0, PC, FRAME_TYPE - | beqz TMP0, ->BC_RET_Z - |. li TISNIL, LJ_TNIL - | b ->vm_return - |. nop - | - |->vm_pcall: // Setup protected C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) - | saveregs - | sw CARG4, SAVE_ERRF - | b >1 - |. li PC, FRAME_CP - | - |->vm_call: // Setup C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1) - | saveregs - | li PC, FRAME_C - | - |1: // Entry point for vm_pcall above (PC = ftype). - | lw TMP1, L:CARG1->cframe - | move L, CARG1 - | sw CARG3, SAVE_NRES - | lw DISPATCH, L->glref // Setup pointer to dispatch table. - | sw CARG1, SAVE_L - | move BASE, CARG2 - | addiu DISPATCH, DISPATCH, GG_G2DISP - | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. - | sw TMP1, SAVE_CFRAME - | sw sp, L->cframe // Add our C frame to cframe chain. - | - |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). - | sw L, DISPATCH_GL(cur_L)(DISPATCH) - | lw TMP2, L->base // TMP2 = old base (used in vmeta_call). - | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 
- | lw TMP1, L->top - | mtc1 TMP3, TOBIT - | addu PC, PC, BASE - | subu NARGS8:RC, TMP1, BASE - | subu PC, PC, TMP2 // PC = frame delta + frame type - | cvt.d.s TOBIT, TOBIT - | li_vmstate INTERP - | li TISNIL, LJ_TNIL - | st_vmstate - | - |->vm_call_dispatch: - | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC - | lw TMP0, FRAME_PC(BASE) - | li AT, LJ_TFUNC - | bne TMP0, AT, ->vmeta_call - |. lw LFUNC:RB, FRAME_FUNC(BASE) - | - |->vm_call_dispatch_f: - | ins_call - | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC - | - |->vm_cpcall: // Setup protected C frame, call C. - | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) - | saveregs - | move L, CARG1 - | lw TMP0, L:CARG1->stack - | sw CARG1, SAVE_L - | lw TMP1, L->top - | lw DISPATCH, L->glref // Setup pointer to dispatch table. - | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. - | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). - | lw TMP1, L->cframe - | addiu DISPATCH, DISPATCH, GG_G2DISP - | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. - | sw r0, SAVE_ERRF // No error function. - | sw TMP1, SAVE_CFRAME - | sw sp, L->cframe // Add our C frame to cframe chain. - | sw L, DISPATCH_GL(cur_L)(DISPATCH) - | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud) - |. move CFUNCADDR, CARG4 - | move BASE, CRET1 - | bnez CRET1, <3 // Else continue with the call. - |. li PC, FRAME_CP - | b ->vm_leave_cp // No base? Just remove C frame. - |. nop - | - |//----------------------------------------------------------------------- - |//-- Metamethod handling ------------------------------------------------ - |//----------------------------------------------------------------------- - | - |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the - |// stack, so BASE doesn't need to be reloaded across these calls. - | - |//-- Continuation dispatch ---------------------------------------------- - | - |->cont_dispatch: - | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8 - | lw TMP0, -16+LO(BASE) // Continuation. - | move RB, BASE - | move BASE, TMP2 // Restore caller BASE. - | lw LFUNC:TMP1, FRAME_FUNC(TMP2) - |.if FFI - | sltiu AT, TMP0, 2 - |.endif - | lw PC, -16+HI(RB) // Restore PC from [cont|PC]. - | addu TMP2, RA, RD - | lw TMP1, LFUNC:TMP1->pc - |.if FFI - | bnez AT, >1 - |.endif - |. sw TISNIL, -8+HI(TMP2) // Ensure one valid arg. - | // BASE = base, RA = resultptr, RB = meta base - | jr TMP0 // Jump to continuation. - |. lw KBASE, PC2PROTO(k)(TMP1) - | - |.if FFI - |1: - | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback. - | // cont = 0: tailcall from C function. - |. addiu TMP1, RB, -16 - | b ->vm_call_tail - |. subu RC, TMP1, BASE - |.endif - | - |->cont_cat: // RA = resultptr, RB = meta base - | lw INS, -4(PC) - | addiu CARG2, RB, -16 - | ldc1 f0, 0(RA) - | decode_RB8a MULTRES, INS - | decode_RA8a RA, INS - | decode_RB8b MULTRES - | decode_RA8b RA - | addu TMP1, BASE, MULTRES - | sw BASE, L->base - | subu CARG3, CARG2, TMP1 - | bne TMP1, CARG2, ->BC_CAT_Z - |. sdc1 f0, 0(CARG2) - | addu RA, BASE, RA - | b ->cont_nop - |. sdc1 f0, 0(RA) - | - |//-- Table indexing metamethods ----------------------------------------- - | - |->vmeta_tgets1: - | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) - | li TMP0, LJ_TSTR - | sw STR:RC, LO(CARG3) - | b >1 - |. 
sw TMP0, HI(CARG3) - | - |->vmeta_tgets: - | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv) - | li TMP0, LJ_TTAB - | sw TAB:RB, LO(CARG2) - | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2) - | sw TMP0, HI(CARG2) - | li TMP1, LJ_TSTR - | sw STR:RC, LO(CARG3) - | b >1 - |. sw TMP1, HI(CARG3) - | - |->vmeta_tgetb: // TMP0 = index - | mtc1 TMP0, f0 - | cvt.d.w f0, f0 - | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) - | sdc1 f0, 0(CARG3) - | - |->vmeta_tgetv: - |1: - | load_got lj_meta_tget - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) - |. move CARG1, L - | // Returns TValue * (finished) or NULL (metamethod). - | beqz CRET1, >3 - |. addiu TMP1, BASE, -FRAME_CONT - | ldc1 f0, 0(CRET1) - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - | - |3: // Call __index metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k - | lw BASE, L->top - | sw PC, -16+HI(BASE) // [cont|PC] - | subu PC, BASE, TMP1 - | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. - | b ->vm_call_dispatch_f - |. li NARGS8:RC, 16 // 2 args for func(t, k). - | - |->vmeta_tgetr: - | load_got lj_tab_getinth - | call_intern lj_tab_getinth // (GCtab *t, int32_t key) - |. nop - | // Returns cTValue * or NULL. - | beqz CRET1, >1 - |. nop - | b ->BC_TGETR_Z - |. ldc1 f0, 0(CRET1) - | - |//----------------------------------------------------------------------- - | - |->vmeta_tsets1: - | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) - | li TMP0, LJ_TSTR - | sw STR:RC, LO(CARG3) - | b >1 - |. sw TMP0, HI(CARG3) - | - |->vmeta_tsets: - | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv) - | li TMP0, LJ_TTAB - | sw TAB:RB, LO(CARG2) - | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2) - | sw TMP0, HI(CARG2) - | li TMP1, LJ_TSTR - | sw STR:RC, LO(CARG3) - | b >1 - |. sw TMP1, HI(CARG3) - | - |->vmeta_tsetb: // TMP0 = index - | mtc1 TMP0, f0 - | cvt.d.w f0, f0 - | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) - | sdc1 f0, 0(CARG3) - | - |->vmeta_tsetv: - |1: - | load_got lj_meta_tset - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) - |. move CARG1, L - | // Returns TValue * (finished) or NULL (metamethod). - | beqz CRET1, >3 - |. ldc1 f0, 0(RA) - | // NOBARRIER: lj_meta_tset ensures the table is not black. - | ins_next1 - | sdc1 f0, 0(CRET1) - | ins_next2 - | - |3: // Call __newindex metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) - | addiu TMP1, BASE, -FRAME_CONT - | lw BASE, L->top - | sw PC, -16+HI(BASE) // [cont|PC] - | subu PC, BASE, TMP1 - | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. - | sdc1 f0, 16(BASE) // Copy value to third argument. - | b ->vm_call_dispatch_f - |. li NARGS8:RC, 24 // 3 args for func(t, k, v) - | - |->vmeta_tsetr: - | load_got lj_tab_setinth - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) - |. move CARG1, L - | // Returns TValue *. - | b ->BC_TSETR_Z - |. nop - | - |//-- Comparison metamethods --------------------------------------------- - | - |->vmeta_comp: - | // CARG2, CARG3 are already set by BC_ISLT/BC_ISGE/BC_ISLE/BC_ISGT. - | load_got lj_meta_comp - | addiu PC, PC, -4 - | sw BASE, L->base - | sw PC, SAVE_PC - | decode_OP1 CARG4, INS - | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) - |. move CARG1, L - | // Returns 0/1 or TValue * (metamethod). 
- |3: - | sltiu AT, CRET1, 2 - | beqz AT, ->vmeta_binop - | negu TMP2, CRET1 - |4: - | lhu RD, OFS_RD(PC) - | addiu PC, PC, 4 - | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535) - | sll RD, RD, 2 - | addu RD, RD, TMP1 - | and RD, RD, TMP2 - | addu PC, PC, RD - |->cont_nop: - | ins_next - | - |->cont_ra: // RA = resultptr - | lbu TMP1, -4+OFS_RA(PC) - | ldc1 f0, 0(RA) - | sll TMP1, TMP1, 3 - | addu TMP1, BASE, TMP1 - | b ->cont_nop - |. sdc1 f0, 0(TMP1) - | - |->cont_condt: // RA = resultptr - | lw TMP0, HI(RA) - | sltiu AT, TMP0, LJ_TISTRUECOND - | b <4 - |. negu TMP2, AT // Branch if result is true. - | - |->cont_condf: // RA = resultptr - | lw TMP0, HI(RA) - | sltiu AT, TMP0, LJ_TISTRUECOND - | b <4 - |. addiu TMP2, AT, -1 // Branch if result is false. - | - |->vmeta_equal: - | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. - | load_got lj_meta_equal - | addiu PC, PC, -4 - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) - |. move CARG1, L - | // Returns 0/1 or TValue * (metamethod). - | b <3 - |. nop - | - |->vmeta_equal_cd: - |.if FFI - | load_got lj_meta_equal_cd - | move CARG2, INS - | addiu PC, PC, -4 - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op) - |. move CARG1, L - | // Returns 0/1 or TValue * (metamethod). - | b <3 - |. nop - |.endif - | - |->vmeta_istype: - | load_got lj_meta_istype - | addiu PC, PC, -4 - | sw BASE, L->base - | srl CARG2, RA, 3 - | srl CARG3, RD, 3 - | sw PC, SAVE_PC - | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) - |. move CARG1, L - | b ->cont_nop - |. nop - | - |//-- Arithmetic metamethods --------------------------------------------- - | - |->vmeta_unm: - | move CARG4, CARG3 - | - |->vmeta_arith: - | load_got lj_meta_arith - | decode_OP1 TMP0, INS - | sw BASE, L->base - | sw PC, SAVE_PC - | move CARG2, RA - | sw TMP0, ARG5 - | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) - |. move CARG1, L - | // Returns NULL (finished) or TValue * (metamethod). - | beqz CRET1, ->cont_nop - |. nop - | - | // Call metamethod for binary op. - |->vmeta_binop: - | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 - | subu TMP1, CRET1, BASE - | sw PC, -16+HI(CRET1) // [cont|PC] - | move TMP2, BASE - | addiu PC, TMP1, FRAME_CONT - | move BASE, CRET1 - | b ->vm_call_dispatch - |. li NARGS8:RC, 16 // 2 args for func(o1, o2). - | - |->vmeta_len: - | // CARG2 already set by BC_LEN. -#if LJ_52 - | move MULTRES, CARG1 -#endif - | load_got lj_meta_len - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_meta_len // (lua_State *L, TValue *o) - |. move CARG1, L - | // Returns NULL (retry) or TValue * (metamethod base). -#if LJ_52 - | bnez CRET1, ->vmeta_binop // Binop call for compatibility. - |. nop - | b ->BC_LEN_Z - |. move CARG1, MULTRES -#else - | b ->vmeta_binop // Binop call for compatibility. - |. nop -#endif - | - |//-- Call metamethod ---------------------------------------------------- - | - |->vmeta_call: // Resolve and call __call metamethod. - | // TMP2 = old base, BASE = new base, RC = nargs*8 - | load_got lj_meta_call - | sw TMP2, L->base // This is the callers base! - | addiu CARG2, BASE, -8 - | sw PC, SAVE_PC - | addu CARG3, BASE, RC - | move MULTRES, NARGS8:RC - | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - |. move CARG1, L - | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. - | addiu NARGS8:RC, MULTRES, 8 // Got one more argument now. 
- | ins_call - | - |->vmeta_callt: // Resolve __call for BC_CALLT. - | // BASE = old base, RA = new base, RC = nargs*8 - | load_got lj_meta_call - | sw BASE, L->base - | addiu CARG2, RA, -8 - | sw PC, SAVE_PC - | addu CARG3, RA, RC - | move MULTRES, NARGS8:RC - | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - |. move CARG1, L - | lw TMP1, FRAME_PC(BASE) - | lw LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here. - | b ->BC_CALLT_Z - |. addiu NARGS8:RC, MULTRES, 8 // Got one more argument now. - | - |//-- Argument coercion for 'for' statement ------------------------------ - | - |->vmeta_for: - | load_got lj_meta_for - | sw BASE, L->base - | move CARG2, RA - | sw PC, SAVE_PC - | move MULTRES, INS - | call_intern lj_meta_for // (lua_State *L, TValue *base) - |. move CARG1, L - |.if JIT - | decode_OP1 TMP0, MULTRES - | li AT, BC_JFORI - |.endif - | decode_RA8a RA, MULTRES - | decode_RD8a RD, MULTRES - | decode_RA8b RA - |.if JIT - | beq TMP0, AT, =>BC_JFORI - |. decode_RD8b RD - | b =>BC_FORI - |. nop - |.else - | b =>BC_FORI - |. decode_RD8b RD - |.endif - | - |//----------------------------------------------------------------------- - |//-- Fast functions ----------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro .ffunc, name - |->ff_ .. name: - |.endmacro - | - |.macro .ffunc_1, name - |->ff_ .. name: - | beqz NARGS8:RC, ->fff_fallback - |. lw CARG3, HI(BASE) - | lw CARG1, LO(BASE) - |.endmacro - | - |.macro .ffunc_2, name - |->ff_ .. name: - | sltiu AT, NARGS8:RC, 16 - | lw CARG3, HI(BASE) - | bnez AT, ->fff_fallback - |. lw CARG4, 8+HI(BASE) - | lw CARG1, LO(BASE) - | lw CARG2, 8+LO(BASE) - |.endmacro - | - |.macro .ffunc_n, name // Caveat: has delay slot! - |->ff_ .. name: - | lw CARG3, HI(BASE) - | beqz NARGS8:RC, ->fff_fallback - |. ldc1 FARG1, 0(BASE) - | sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |.endmacro - | - |.macro .ffunc_nn, name // Caveat: has delay slot! - |->ff_ .. name: - | sltiu AT, NARGS8:RC, 16 - | lw CARG3, HI(BASE) - | bnez AT, ->fff_fallback - |. lw CARG4, 8+HI(BASE) - | ldc1 FARG1, 0(BASE) - | ldc1 FARG2, 8(BASE) - | sltiu TMP0, CARG3, LJ_TISNUM - | sltiu TMP1, CARG4, LJ_TISNUM - | and TMP0, TMP0, TMP1 - | beqz TMP0, ->fff_fallback - |.endmacro - | - |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot! - |.macro ffgccheck - | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH) - | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) - | subu AT, TMP0, TMP1 - | bgezal AT, ->fff_gcstep - |.endmacro - | - |//-- Base library: checks ----------------------------------------------- - | - |.ffunc_1 assert - | sltiu AT, CARG3, LJ_TISTRUECOND - | beqz AT, ->fff_fallback - |. addiu RA, BASE, -8 - | lw PC, FRAME_PC(BASE) - | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8. - | addu TMP2, RA, NARGS8:RC - | sw CARG3, HI(RA) - | addiu TMP1, BASE, 8 - | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument. - |. sw CARG1, LO(RA) - |1: - | ldc1 f0, 0(TMP1) - | sdc1 f0, -8(TMP1) - | bne TMP1, TMP2, <1 - |. addiu TMP1, TMP1, 8 - | b ->fff_res - |. nop - | - |.ffunc type - | lw CARG3, HI(BASE) - | li TMP1, LJ_TISNUM - | beqz NARGS8:RC, ->fff_fallback - |. sltiu TMP0, CARG3, LJ_TISNUM - | movz TMP1, CARG3, TMP0 - | not TMP1, TMP1 - | sll TMP1, TMP1, 3 - | addu TMP1, CFUNC:RB, TMP1 - | b ->fff_resn - |. 
ldc1 FRET1, CFUNC:TMP1->upvalue - | - |//-- Base library: getters and setters --------------------------------- - | - |.ffunc_1 getmetatable - | li AT, LJ_TTAB - | bne CARG3, AT, >6 - |. li AT, LJ_TUDATA - |1: // Field metatable must be at same offset for GCtab and GCudata! - | lw TAB:CARG1, TAB:CARG1->metatable - |2: - | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH) - | beqz TAB:CARG1, ->fff_restv - |. li CARG3, LJ_TNIL - | lw TMP0, TAB:CARG1->hmask - | li CARG3, LJ_TTAB // Use metatable as default result. - | lw TMP1, STR:RC->hash - | lw NODE:TMP2, TAB:CARG1->node - | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask - | sll TMP0, TMP1, 5 - | sll TMP1, TMP1, 3 - | subu TMP1, TMP0, TMP1 - | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) - | li AT, LJ_TSTR - |3: // Rearranged logic, because we expect _not_ to find the key. - | lw CARG4, offsetof(Node, key)+HI(NODE:TMP2) - | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) - | lw NODE:TMP3, NODE:TMP2->next - | bne CARG4, AT, >4 - |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2) - | beq TMP0, STR:RC, >5 - |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2) - |4: - | beqz NODE:TMP3, ->fff_restv // Not found, keep default result. - |. move NODE:TMP2, NODE:TMP3 - | b <3 - |. nop - |5: - | beq CARG2, TISNIL, ->fff_restv // Ditto for nil value. - |. nop - | move CARG3, CARG2 // Return value of mt.__metatable. - | b ->fff_restv - |. move CARG1, TMP1 - | - |6: - | beq CARG3, AT, <1 - |. sltiu TMP0, CARG3, LJ_TISNUM - | li TMP1, LJ_TISNUM - | movz TMP1, CARG3, TMP0 - | not TMP1, TMP1 - | sll TMP1, TMP1, 2 - | addu TMP1, DISPATCH, TMP1 - | b <2 - |. lw TAB:CARG1, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1) - | - |.ffunc_2 setmetatable - | // Fast path: no mt for table yet and not clearing the mt. - | li AT, LJ_TTAB - | bne CARG3, AT, ->fff_fallback - |. addiu CARG4, CARG4, -LJ_TTAB - | lw TAB:TMP1, TAB:CARG1->metatable - | lbu TMP3, TAB:CARG1->marked - | or AT, CARG4, TAB:TMP1 - | bnez AT, ->fff_fallback - |. andi AT, TMP3, LJ_GC_BLACK // isblack(table) - | beqz AT, ->fff_restv - |. sw TAB:CARG2, TAB:CARG1->metatable - | barrierback TAB:CARG1, TMP3, TMP0, ->fff_restv - | - |.ffunc rawget - | lw CARG4, HI(BASE) - | sltiu AT, NARGS8:RC, 16 - | lw TAB:CARG2, LO(BASE) - | load_got lj_tab_get - | addiu CARG4, CARG4, -LJ_TTAB - | or AT, AT, CARG4 - | bnez AT, ->fff_fallback - | addiu CARG3, BASE, 8 - | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) - |. move CARG1, L - | // Returns cTValue *. - | b ->fff_resn - |. ldc1 FRET1, 0(CRET1) - | - |//-- Base library: conversions ------------------------------------------ - | - |.ffunc tonumber - | // Only handles the number case inline (without a base argument). - | lw CARG1, HI(BASE) - | xori AT, NARGS8:RC, 8 - | sltiu CARG1, CARG1, LJ_TISNUM - | movn CARG1, r0, AT - | beqz CARG1, ->fff_fallback // Exactly one number argument. - |. ldc1 FRET1, 0(BASE) - | b ->fff_resn - |. nop - | - |.ffunc_1 tostring - | // Only handles the string or number case inline. - | li AT, LJ_TSTR - | // A __tostring method in the string base metatable is ignored. - | beq CARG3, AT, ->fff_restv // String key? - | // Handle numbers inline, unless a number base metatable is present. - |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH) - | sltiu TMP0, CARG3, LJ_TISNUM - | sltiu TMP1, TMP1, 1 - | and TMP0, TMP0, TMP1 - | beqz TMP0, ->fff_fallback - |. sw BASE, L->base // Add frame since C call can throw. - | ffgccheck - |. sw PC, SAVE_PC // Redundant (but a defined value). 
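
For reference, the inline hash lookup in the getmetatable fast path above addresses a hash node as node + idx*32 - idx*8, i.e. idx*24, the node size on this 32-bit layout. A minimal stand-alone C sketch of that addressing (hypothetical helper, not part of the VM source):

#include <stdint.h>

/* Stand-in for the assembler sequence above:
** idx = str->hash & tab->hmask; node = tab->node + idx*sizeof(Node),
** where idx*24 is formed as idx*32 - idx*8 to avoid a multiply.
*/
static uintptr_t hash_slot(uintptr_t node_base, uint32_t strhash, uint32_t hmask)
{
  uint32_t idx = strhash & hmask;
  return node_base + ((uintptr_t)idx << 5) - ((uintptr_t)idx << 3);
}
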
- | load_got lj_strfmt_num - | move CARG1, L - | call_intern lj_strfmt_num // (lua_State *L, lua_Number *np) - |. move CARG2, BASE - | // Returns GCstr *. - | li CARG3, LJ_TSTR - | b ->fff_restv - |. move CARG1, CRET1 - | - |//-- Base library: iterators ------------------------------------------- - | - |.ffunc next - | lw CARG1, HI(BASE) - | lw TAB:CARG2, LO(BASE) - | beqz NARGS8:RC, ->fff_fallback - |. addu TMP2, BASE, NARGS8:RC - | li AT, LJ_TTAB - | sw TISNIL, HI(TMP2) // Set missing 2nd arg to nil. - | bne CARG1, AT, ->fff_fallback - |. lw PC, FRAME_PC(BASE) - | load_got lj_tab_next - | sw BASE, L->base // Add frame since C call can throw. - | sw BASE, L->top // Dummy frame length is ok. - | addiu CARG3, BASE, 8 - | sw PC, SAVE_PC - | call_intern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) - |. move CARG1, L - | // Returns 0 at end of traversal. - | beqz CRET1, ->fff_restv // End of traversal: return nil. - |. li CARG3, LJ_TNIL - | ldc1 f0, 8(BASE) // Copy key and value to results. - | addiu RA, BASE, -8 - | ldc1 f2, 16(BASE) - | li RD, (2+1)*8 - | sdc1 f0, 0(RA) - | b ->fff_res - |. sdc1 f2, 8(RA) - | - |.ffunc_1 pairs - | li AT, LJ_TTAB - | bne CARG3, AT, ->fff_fallback - |. lw PC, FRAME_PC(BASE) -#if LJ_52 - | lw TAB:TMP2, TAB:CARG1->metatable - | ldc1 f0, CFUNC:RB->upvalue[0] - | bnez TAB:TMP2, ->fff_fallback -#else - | ldc1 f0, CFUNC:RB->upvalue[0] -#endif - |. addiu RA, BASE, -8 - | sw TISNIL, 8+HI(BASE) - | li RD, (3+1)*8 - | b ->fff_res - |. sdc1 f0, 0(RA) - | - |.ffunc ipairs_aux - | sltiu AT, NARGS8:RC, 16 - | lw CARG3, HI(BASE) - | lw TAB:CARG1, LO(BASE) - | lw CARG4, 8+HI(BASE) - | bnez AT, ->fff_fallback - |. ldc1 FARG2, 8(BASE) - | addiu CARG3, CARG3, -LJ_TTAB - | sltiu AT, CARG4, LJ_TISNUM - | li TMP0, 1 - | movn AT, r0, CARG3 - | mtc1 TMP0, FARG1 - | beqz AT, ->fff_fallback - |. lw PC, FRAME_PC(BASE) - | trunc.w.d FRET1, FARG2 - | cvt.d.w FARG1, FARG1 - | lw TMP0, TAB:CARG1->asize - | lw TMP1, TAB:CARG1->array - | mfc1 TMP2, FRET1 - | addiu RA, BASE, -8 - | add.d FARG2, FARG2, FARG1 - | addiu TMP2, TMP2, 1 - | sltu AT, TMP2, TMP0 - | sll TMP3, TMP2, 3 - | addu TMP3, TMP1, TMP3 - | beqz AT, >2 // Not in array part? - |. sdc1 FARG2, 0(RA) - | lw TMP2, HI(TMP3) - | ldc1 f0, 0(TMP3) - |1: - | beq TMP2, TISNIL, ->fff_res // End of iteration, return 0 results. - |. li RD, (0+1)*8 - | li RD, (2+1)*8 - | b ->fff_res - |. sdc1 f0, 8(RA) - |2: // Check for empty hash part first. Otherwise call C function. - | lw TMP0, TAB:CARG1->hmask - | load_got lj_tab_getinth - | beqz TMP0, ->fff_res - |. li RD, (0+1)*8 - | call_intern lj_tab_getinth // (GCtab *t, int32_t key) - |. move CARG2, TMP2 - | // Returns cTValue * or NULL. - | beqz CRET1, ->fff_res - |. li RD, (0+1)*8 - | lw TMP2, HI(CRET1) - | b <1 - |. ldc1 f0, 0(CRET1) - | - |.ffunc_1 ipairs - | li AT, LJ_TTAB - | bne CARG3, AT, ->fff_fallback - |. lw PC, FRAME_PC(BASE) -#if LJ_52 - | lw TAB:TMP2, TAB:CARG1->metatable - | ldc1 f0, CFUNC:RB->upvalue[0] - | bnez TAB:TMP2, ->fff_fallback -#else - | ldc1 f0, CFUNC:RB->upvalue[0] -#endif - |. addiu RA, BASE, -8 - | sw r0, 8+HI(BASE) - | sw r0, 8+LO(BASE) - | li RD, (3+1)*8 - | b ->fff_res - |. sdc1 f0, 0(RA) - | - |//-- Base library: catch errors ---------------------------------------- - | - |.ffunc pcall - | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | beqz NARGS8:RC, ->fff_fallback - | move TMP2, BASE - | addiu BASE, BASE, 8 - | // Remember active hook before pcall. 
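
The pcall fast function above (and xpcall just below) folds the saved hook-active bit into the frame link it is about to compute: link = delta + FRAME_PCALL + hookbit, with delta 8 for pcall and 16 for xpcall. A hedged C sketch of that encoding (the constants are illustrative stand-ins for the real frame-type values):

#include <stdint.h>

/* Illustrative only: adding the hook-active bit turns a PCALL frame into a
** PCALLH frame, so the hook state can be restored when the frame returns.
*/
enum { FRAME_PCALL_ = 6, FRAME_PCALLH_ = 7 };

static uint32_t pcall_frame_link(uint32_t delta, int hook_active)
{
  /* mirrors: addiu PC, TMP3, delta+FRAME_PCALL */
  return delta + (uint32_t)FRAME_PCALL_ + (hook_active ? 1u : 0u);
}
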
- | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT - | andi TMP3, TMP3, 1 - | addiu PC, TMP3, 8+FRAME_PCALL - | b ->vm_call_dispatch - |. addiu NARGS8:RC, NARGS8:RC, -8 - | - |.ffunc xpcall - | sltiu AT, NARGS8:RC, 16 - | lw CARG4, 8+HI(BASE) - | bnez AT, ->fff_fallback - |. ldc1 FARG2, 8(BASE) - | ldc1 FARG1, 0(BASE) - | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH) - | li AT, LJ_TFUNC - | move TMP2, BASE - | bne CARG4, AT, ->fff_fallback // Traceback must be a function. - | addiu BASE, BASE, 16 - | // Remember active hook before pcall. - | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT - | sdc1 FARG2, 0(TMP2) // Swap function and traceback. - | andi TMP3, TMP3, 1 - | sdc1 FARG1, 8(TMP2) - | addiu PC, TMP3, 16+FRAME_PCALL - | b ->vm_call_dispatch - |. addiu NARGS8:RC, NARGS8:RC, -16 - | - |//-- Coroutine library -------------------------------------------------- - | - |.macro coroutine_resume_wrap, resume - |.if resume - |.ffunc_1 coroutine_resume - | li AT, LJ_TTHREAD - | bne CARG3, AT, ->fff_fallback - |.else - |.ffunc coroutine_wrap_aux - | lw L:CARG1, CFUNC:RB->upvalue[0].gcr - |.endif - | lbu TMP0, L:CARG1->status - | lw TMP1, L:CARG1->cframe - | lw CARG2, L:CARG1->top - | lw TMP2, L:CARG1->base - | addiu TMP3, TMP0, -LUA_YIELD - | bgtz TMP3, ->fff_fallback // st > LUA_YIELD? - |. xor TMP2, TMP2, CARG2 - | bnez TMP1, ->fff_fallback // cframe != 0? - |. or AT, TMP2, TMP0 - | lw TMP0, L:CARG1->maxstack - | beqz AT, ->fff_fallback // base == top && st == 0? - |. lw PC, FRAME_PC(BASE) - | addu TMP2, CARG2, NARGS8:RC - | sltu AT, TMP0, TMP2 - | bnez AT, ->fff_fallback // Stack overflow? - |. sw PC, SAVE_PC - | sw BASE, L->base - |1: - |.if resume - | addiu BASE, BASE, 8 // Keep resumed thread in stack for GC. - | addiu NARGS8:RC, NARGS8:RC, -8 - | addiu TMP2, TMP2, -8 - |.endif - | sw TMP2, L:CARG1->top - | addu TMP1, BASE, NARGS8:RC - | move CARG3, CARG2 - | sw BASE, L->top - |2: // Move args to coroutine. - | ldc1 f0, 0(BASE) - | sltu AT, BASE, TMP1 - | beqz AT, >3 - |. addiu BASE, BASE, 8 - | sdc1 f0, 0(CARG3) - | b <2 - |. addiu CARG3, CARG3, 8 - |3: - | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0) - |. move L:RA, L:CARG1 - | // Returns thread status. - |4: - | lw TMP2, L:RA->base - | sltiu AT, CRET1, LUA_YIELD+1 - | lw TMP3, L:RA->top - | li_vmstate INTERP - | lw BASE, L->base - | sw L, DISPATCH_GL(cur_L)(DISPATCH) - | st_vmstate - | beqz AT, >8 - |. subu RD, TMP3, TMP2 - | lw TMP0, L->maxstack - | beqz RD, >6 // No results? - |. addu TMP1, BASE, RD - | sltu AT, TMP0, TMP1 - | bnez AT, >9 // Need to grow stack? - |. addu TMP3, TMP2, RD - | sw TMP2, L:RA->top // Clear coroutine stack. - | move TMP1, BASE - |5: // Move results from coroutine. - | ldc1 f0, 0(TMP2) - | addiu TMP2, TMP2, 8 - | sltu AT, TMP2, TMP3 - | sdc1 f0, 0(TMP1) - | bnez AT, <5 - |. addiu TMP1, TMP1, 8 - |6: - | andi TMP0, PC, FRAME_TYPE - |.if resume - | li TMP1, LJ_TTRUE - | addiu RA, BASE, -8 - | sw TMP1, -8+HI(BASE) // Prepend true to results. - | addiu RD, RD, 16 - |.else - | move RA, BASE - | addiu RD, RD, 8 - |.endif - |7: - | sw PC, SAVE_PC - | beqz TMP0, ->BC_RET_Z - |. move MULTRES, RD - | b ->vm_return - |. nop - | - |8: // Coroutine returned with error (at co->top-1). - |.if resume - | addiu TMP3, TMP3, -8 - | li TMP1, LJ_TFALSE - | ldc1 f0, 0(TMP3) - | sw TMP3, L:RA->top // Remove error from coroutine stack. - | li RD, (2+1)*8 - | sw TMP1, -8+HI(BASE) // Prepend false to results. - | addiu RA, BASE, -8 - | sdc1 f0, 0(BASE) // Copy error message. - | b <7 - |. 
andi TMP0, PC, FRAME_TYPE - |.else - | load_got lj_ffh_coroutine_wrap_err - | move CARG2, L:RA - | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) - |. move CARG1, L - |.endif - | - |9: // Handle stack expansion on return from yield. - | load_got lj_state_growstack - | srl CARG2, RD, 3 - | call_intern lj_state_growstack // (lua_State *L, int n) - |. move CARG1, L - | b <4 - |. li CRET1, 0 - |.endmacro - | - | coroutine_resume_wrap 1 // coroutine.resume - | coroutine_resume_wrap 0 // coroutine.wrap - | - |.ffunc coroutine_yield - | lw TMP0, L->cframe - | addu TMP1, BASE, NARGS8:RC - | sw BASE, L->base - | andi TMP0, TMP0, CFRAME_RESUME - | sw TMP1, L->top - | beqz TMP0, ->fff_fallback - |. li CRET1, LUA_YIELD - | sw r0, L->cframe - | b ->vm_leave_unw - |. sb CRET1, L->status - | - |//-- Math library ------------------------------------------------------- - | - |.ffunc_n math_abs - |. abs.d FRET1, FARG1 - |->fff_resn: - | lw PC, FRAME_PC(BASE) - | addiu RA, BASE, -8 - | b ->fff_res1 - |. sdc1 FRET1, -8(BASE) - | - |->fff_restv: - | // CARG3/CARG1 = TValue result. - | lw PC, FRAME_PC(BASE) - | sw CARG3, -8+HI(BASE) - | addiu RA, BASE, -8 - | sw CARG1, -8+LO(BASE) - |->fff_res1: - | // RA = results, PC = return. - | li RD, (1+1)*8 - |->fff_res: - | // RA = results, RD = (nresults+1)*8, PC = return. - | andi TMP0, PC, FRAME_TYPE - | bnez TMP0, ->vm_return - |. move MULTRES, RD - | lw INS, -4(PC) - | decode_RB8a RB, INS - | decode_RB8b RB - |5: - | sltu AT, RD, RB - | bnez AT, >6 // More results expected? - |. decode_RA8a TMP0, INS - | decode_RA8b TMP0 - | ins_next1 - | // Adjust BASE. KBASE is assumed to be set for the calling frame. - | subu BASE, RA, TMP0 - | ins_next2 - | - |6: // Fill up results with nil. - | addu TMP1, RA, RD - | addiu RD, RD, 8 - | b <5 - |. sw TISNIL, -8+HI(TMP1) - | - |.macro math_extern, func - |->ff_math_ .. func: - | lw CARG3, HI(BASE) - | beqz NARGS8:RC, ->fff_fallback - |. load_got func - | sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |. nop - | call_extern - |. ldc1 FARG1, 0(BASE) - | b ->fff_resn - |. nop - |.endmacro - | - |.macro math_extern2, func - | .ffunc_nn math_ .. func - |. load_got func - | call_extern - |. nop - | b ->fff_resn - |. nop - |.endmacro - | - |.macro math_round, func - | .ffunc_n math_ .. func - |. nop - | bal ->vm_ .. func - |. nop - | b ->fff_resn - |. nop - |.endmacro - | - | math_round floor - | math_round ceil - | - |.ffunc math_log - | lw CARG3, HI(BASE) - | li AT, 8 - | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument. - |. load_got log - | sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |. nop - | call_extern - |. ldc1 FARG1, 0(BASE) - | b ->fff_resn - |. nop - | - | math_extern log10 - | math_extern exp - | math_extern sin - | math_extern cos - | math_extern tan - | math_extern asin - | math_extern acos - | math_extern atan - | math_extern sinh - | math_extern cosh - | math_extern tanh - | math_extern2 pow - | math_extern2 atan2 - | math_extern2 fmod - | - |.ffunc_n math_sqrt - |. sqrt.d FRET1, FARG1 - | b ->fff_resn - |. nop - | - |.ffunc_nn math_ldexp - | trunc.w.d FARG2, FARG2 - | load_got ldexp - | mfc1 CARG3, FARG2 - | call_extern - |. nop - | b ->fff_resn - |. nop - | - |.ffunc_n math_frexp - | load_got frexp - | lw PC, FRAME_PC(BASE) - | call_extern - |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) - | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH) - | addiu RA, BASE, -8 - | mtc1 TMP1, FARG2 - | sdc1 FRET1, 0(RA) - | cvt.d.w FARG2, FARG2 - | sdc1 FARG2, 8(RA) - | b ->fff_res - |. 
li RD, (2+1)*8 - | - |.ffunc_n math_modf - | load_got modf - | lw PC, FRAME_PC(BASE) - | call_extern - |. addiu CARG3, BASE, -8 - | addiu RA, BASE, -8 - | sdc1 FRET1, 0(BASE) - | b ->fff_res - |. li RD, (2+1)*8 - | - |.macro math_minmax, name, ismax - |->ff_ .. name: - | lw CARG3, HI(BASE) - | beqz NARGS8:RC, ->fff_fallback - |. ldc1 FRET1, 0(BASE) - | sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |. addu TMP2, BASE, NARGS8:RC - | addiu TMP1, BASE, 8 - | beq TMP1, TMP2, ->fff_resn - |1: - |. lw CARG3, HI(TMP1) - | ldc1 FARG1, 0(TMP1) - | addiu TMP1, TMP1, 8 - | sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |.if ismax - |. c.olt.d FARG1, FRET1 - |.else - |. c.olt.d FRET1, FARG1 - |.endif - | bne TMP1, TMP2, <1 - |. movf.d FRET1, FARG1 - | b ->fff_resn - |. nop - |.endmacro - | - | math_minmax math_min, 0 - | math_minmax math_max, 1 - | - |//-- String library ----------------------------------------------------- - | - |.ffunc string_byte // Only handle the 1-arg case here. - | lw CARG3, HI(BASE) - | lw STR:CARG1, LO(BASE) - | xori AT, NARGS8:RC, 8 - | addiu CARG3, CARG3, -LJ_TSTR - | or AT, AT, CARG3 - | bnez AT, ->fff_fallback // Need exactly 1 string argument. - |. nop - | lw TMP0, STR:CARG1->len - | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end). - | addiu RA, BASE, -8 - | sltu RD, r0, TMP0 - | mtc1 TMP1, f0 - | addiu RD, RD, 1 - | cvt.d.w f0, f0 - | lw PC, FRAME_PC(BASE) - | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8 - | b ->fff_res - |. sdc1 f0, 0(RA) - | - |.ffunc string_char // Only handle the 1-arg case here. - | ffgccheck - | lw CARG3, HI(BASE) - | ldc1 FARG1, 0(BASE) - | li AT, 8 - | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument. - |. sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |. li CARG3, 1 - | trunc.w.d FARG1, FARG1 - | addiu CARG2, sp, ARG5_OFS - | sltiu AT, TMP0, 256 - | mfc1 TMP0, FARG1 - | beqz AT, ->fff_fallback - |. sw TMP0, ARG5 - |->fff_newstr: - | load_got lj_str_new - | sw BASE, L->base - | sw PC, SAVE_PC - | call_intern lj_str_new // (lua_State *L, char *str, size_t l) - |. move CARG1, L - | // Returns GCstr *. - | lw BASE, L->base - |->fff_resstr: - | move CARG1, CRET1 - | b ->fff_restv - |. li CARG3, LJ_TSTR - | - |.ffunc string_sub - | ffgccheck - | addiu AT, NARGS8:RC, -16 - | lw CARG3, 16+HI(BASE) - | ldc1 f0, 16(BASE) - | lw TMP0, HI(BASE) - | lw STR:CARG1, LO(BASE) - | bltz AT, ->fff_fallback - | lw CARG2, 8+HI(BASE) - | ldc1 f2, 8(BASE) - | beqz AT, >1 - |. li CARG4, -1 - | trunc.w.d f0, f0 - | sltiu AT, CARG3, LJ_TISNUM - | beqz AT, ->fff_fallback - |. mfc1 CARG4, f0 - |1: - | sltiu AT, CARG2, LJ_TISNUM - | beqz AT, ->fff_fallback - |. li AT, LJ_TSTR - | trunc.w.d f2, f2 - | bne TMP0, AT, ->fff_fallback - |. lw CARG2, STR:CARG1->len - | mfc1 CARG3, f2 - | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end - | slt AT, CARG4, r0 - | addiu TMP0, CARG2, 1 - | addu TMP1, CARG4, TMP0 - | slt TMP3, CARG3, r0 - | movn CARG4, TMP1, AT // if (end < 0) end += len+1 - | addu TMP1, CARG3, TMP0 - | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1 - | li TMP2, 1 - | slt AT, CARG4, r0 - | slt TMP3, r0, CARG3 - | movn CARG4, r0, AT // if (end < 0) end = 0 - | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1 - | slt AT, CARG2, CARG4 - | movn CARG4, CARG2, AT // if (end > len) end = len - | addu CARG2, STR:CARG1, CARG3 - | subu CARG3, CARG4, CARG3 // len = end - start - | addiu CARG2, CARG2, sizeof(GCstr)-1 - | bgez CARG3, ->fff_newstr - |. 
addiu CARG3, CARG3, 1 // len++ - |->fff_emptystr: // Return empty string. - | addiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty) - | b ->fff_restv - |. li CARG3, LJ_TSTR - | - |.macro ffstring_op, name - | .ffunc string_ .. name - | ffgccheck - | lw CARG3, HI(BASE) - | lw STR:CARG2, LO(BASE) - | beqz NARGS8:RC, ->fff_fallback - |. li AT, LJ_TSTR - | bne CARG3, AT, ->fff_fallback - |. addiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf) - | load_got lj_buf_putstr_ .. name - | lw TMP0, SBUF:CARG1->b - | sw L, SBUF:CARG1->L - | sw BASE, L->base - | sw TMP0, SBUF:CARG1->p - | call_intern extern lj_buf_putstr_ .. name - |. sw PC, SAVE_PC - | load_got lj_buf_tostr - | call_intern lj_buf_tostr - |. move SBUF:CARG1, SBUF:CRET1 - | b ->fff_resstr - |. lw BASE, L->base - |.endmacro - | - |ffstring_op reverse - |ffstring_op lower - |ffstring_op upper - | - |//-- Bit library -------------------------------------------------------- - | - |.macro .ffunc_bit, name - | .ffunc_n bit_..name - |. add.d FARG1, FARG1, TOBIT - | mfc1 CRET1, FARG1 - |.endmacro - | - |.macro .ffunc_bit_op, name, ins - | .ffunc_bit name - | addiu TMP1, BASE, 8 - | addu TMP2, BASE, NARGS8:RC - |1: - | lw CARG4, HI(TMP1) - | beq TMP1, TMP2, ->fff_resi - |. ldc1 FARG1, 0(TMP1) - | sltiu AT, CARG4, LJ_TISNUM - | beqz AT, ->fff_fallback - | add.d FARG1, FARG1, TOBIT - | mfc1 CARG2, FARG1 - | ins CRET1, CRET1, CARG2 - | b <1 - |. addiu TMP1, TMP1, 8 - |.endmacro - | - |.ffunc_bit_op band, and - |.ffunc_bit_op bor, or - |.ffunc_bit_op bxor, xor - | - |.ffunc_bit bswap - | srl TMP0, CRET1, 24 - | srl TMP2, CRET1, 8 - | sll TMP1, CRET1, 24 - | andi TMP2, TMP2, 0xff00 - | or TMP0, TMP0, TMP1 - | andi CRET1, CRET1, 0xff00 - | or TMP0, TMP0, TMP2 - | sll CRET1, CRET1, 8 - | b ->fff_resi - |. or CRET1, TMP0, CRET1 - | - |.ffunc_bit bnot - | b ->fff_resi - |. not CRET1, CRET1 - | - |.macro .ffunc_bit_sh, name, ins, shmod - | .ffunc_nn bit_..name - |. add.d FARG1, FARG1, TOBIT - | add.d FARG2, FARG2, TOBIT - | mfc1 CARG1, FARG1 - | mfc1 CARG2, FARG2 - |.if shmod == 1 - | li AT, 32 - | subu TMP0, AT, CARG2 - | sllv CARG2, CARG1, CARG2 - | srlv CARG1, CARG1, TMP0 - |.elif shmod == 2 - | li AT, 32 - | subu TMP0, AT, CARG2 - | srlv CARG2, CARG1, CARG2 - | sllv CARG1, CARG1, TMP0 - |.endif - | b ->fff_resi - |. ins CRET1, CARG1, CARG2 - |.endmacro - | - |.ffunc_bit_sh lshift, sllv, 0 - |.ffunc_bit_sh rshift, srlv, 0 - |.ffunc_bit_sh arshift, srav, 0 - |// Can't use rotrv, since it's only in MIPS32R2. - |.ffunc_bit_sh rol, or, 1 - |.ffunc_bit_sh ror, or, 2 - | - |.ffunc_bit tobit - |->fff_resi: - | mtc1 CRET1, FRET1 - | b ->fff_resn - |. cvt.d.w FRET1, FRET1 - | - |//----------------------------------------------------------------------- - | - |->fff_fallback: // Call fast function fallback handler. - | // BASE = new base, RB = CFUNC, RC = nargs*8 - | lw TMP3, CFUNC:RB->f - | addu TMP1, BASE, NARGS8:RC - | lw PC, FRAME_PC(BASE) // Fallback may overwrite PC. - | addiu TMP0, TMP1, 8*LUA_MINSTACK - | lw TMP2, L->maxstack - | sw PC, SAVE_PC // Redundant (but a defined value). - | sltu AT, TMP2, TMP0 - | sw BASE, L->base - | sw TMP1, L->top - | bnez AT, >5 // Need to grow stack. - |. move CFUNCADDR, TMP3 - | jalr TMP3 // (lua_State *L) - |. move CARG1, L - | // Either throws an error, or recovers and returns -1, 0 or nresults+1. - | lw BASE, L->base - | sll RD, CRET1, 3 - | bgtz CRET1, ->fff_res // Returned nresults+1? - |. addiu RA, BASE, -8 - |1: // Returned 0 or -1: retry fast path. 
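
The bit.* fast functions above rely on the TOBIT constant (2^52 + 2^51): adding it to a double leaves the wrapped 32-bit integer value in the low word of the IEEE-754 representation, which the code then reads back with mfc1. A stand-alone C illustration of the same trick (hypothetical helper; the low/high word selection depends on endianness, which the LO/HI macros handle in the real code):

#include <stdint.h>

/* tobit_demo(n) sketches what bit.tobit(n) computes: n + 2^52 + 2^51 forces
** the integer part into the low 32 mantissa bits of the double.
*/
static int32_t tobit_demo(double n)
{
  union { double d; uint32_t w[2]; } u;
  u.d = n + 6755399441055744.0;  /* 2^52 + 2^51 */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return (int32_t)u.w[1];        /* low word is the second one on big-endian */
#else
  return (int32_t)u.w[0];
#endif
}
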
- | lw TMP0, L->top - | lw LFUNC:RB, FRAME_FUNC(BASE) - | bnez CRET1, ->vm_call_tail // Returned -1? - |. subu NARGS8:RC, TMP0, BASE - | ins_callt // Returned 0: retry fast path. - | - |// Reconstruct previous base for vmeta_call during tailcall. - |->vm_call_tail: - | andi TMP0, PC, FRAME_TYPE - | li AT, -4 - | bnez TMP0, >3 - |. and TMP1, PC, AT - | lbu TMP1, OFS_RA(PC) - | sll TMP1, TMP1, 3 - | addiu TMP1, TMP1, 8 - |3: - | b ->vm_call_dispatch // Resolve again for tailcall. - |. subu TMP2, BASE, TMP1 - | - |5: // Grow stack for fallback handler. - | load_got lj_state_growstack - | li CARG2, LUA_MINSTACK - | call_intern lj_state_growstack // (lua_State *L, int n) - |. move CARG1, L - | lw BASE, L->base - | b <1 - |. li CRET1, 0 // Force retry. - | - |->fff_gcstep: // Call GC step function. - | // BASE = new base, RC = nargs*8 - | move MULTRES, ra - | load_got lj_gc_step - | sw BASE, L->base - | addu TMP0, BASE, NARGS8:RC - | sw PC, SAVE_PC // Redundant (but a defined value). - | sw TMP0, L->top - | call_intern lj_gc_step // (lua_State *L) - |. move CARG1, L - | lw BASE, L->base - | move ra, MULTRES - | lw TMP0, L->top - | lw CFUNC:RB, FRAME_FUNC(BASE) - | jr ra - |. subu NARGS8:RC, TMP0, BASE - | - |//----------------------------------------------------------------------- - |//-- Special dispatch targets ------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_record: // Dispatch target for recording phase. - |.if JIT - | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent. - | bnez AT, >5 - | // Decrement the hookcount for consistency, but always do the call. - |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | andi AT, TMP3, HOOK_ACTIVE - | bnez AT, >1 - |. addiu TMP2, TMP2, -1 - | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT - | beqz AT, >1 - |. nop - | b >1 - |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH) - |.endif - | - |->vm_rethook: // Dispatch target for return hooks. - | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | andi AT, TMP3, HOOK_ACTIVE // Hook already active? - | beqz AT, >1 - |5: // Re-dispatch to static ins. - |. lw AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4. - | jr AT - |. nop - | - |->vm_inshook: // Dispatch target for instr/line hooks. - | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | andi AT, TMP3, HOOK_ACTIVE // Hook already active? - | bnez AT, <5 - |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT - | beqz AT, <5 - |. addiu TMP2, TMP2, -1 - | beqz TMP2, >1 - |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | andi AT, TMP3, LUA_MASKLINE - | beqz AT, <5 - |1: - |. load_got lj_dispatch_ins - | sw MULTRES, SAVE_MULTRES - | move CARG2, PC - | sw BASE, L->base - | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. - | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc) - |. move CARG1, L - |3: - | lw BASE, L->base - |4: // Re-dispatch to static ins. - | lw INS, -4(PC) - | decode_OP4a TMP1, INS - | decode_OP4b TMP1 - | addu TMP0, DISPATCH, TMP1 - | decode_RD8a RD, INS - | lw AT, GG_DISP2STATIC(TMP0) - | decode_RA8a RA, INS - | decode_RD8b RD - | jr AT - | decode_RA8b RA - | - |->cont_hook: // Continue from hook yield. - | addiu PC, PC, 4 - | b <4 - |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins. - | - |->vm_hotloop: // Hot loop counter underflow. 
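
As the comment at ->fff_fallback above notes, a fast-function fallback handler either throws, or returns -1, 0 or nresults+1, and the branches after the jalr turn that into one of three continuations. A hedged C sketch of that mapping (illustrative names only):

/* Mirrors the dispatch after the fallback call above:
** > 0  results are at BASE-8, return them (->fff_res);
** == 0 retry the fast path with the reloaded BASE/RC;
** < 0  tail call: re-dispatch via ->vm_call_tail.
*/
typedef enum { FFB_RETURN_RESULTS, FFB_RETRY_FASTPATH, FFB_TAILCALL } FFBAction;

static FFBAction fallback_action(int ret)
{
  if (ret > 0) return FFB_RETURN_RESULTS;
  if (ret == 0) return FFB_RETRY_FASTPATH;
  return FFB_TAILCALL;
}
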
- |.if JIT - | lw LFUNC:TMP1, FRAME_FUNC(BASE) - | addiu CARG1, DISPATCH, GG_DISP2J - | sw PC, SAVE_PC - | lw TMP1, LFUNC:TMP1->pc - | move CARG2, PC - | sw L, DISPATCH_J(L)(DISPATCH) - | lbu TMP1, PC2PROTO(framesize)(TMP1) - | load_got lj_trace_hot - | sw BASE, L->base - | sll TMP1, TMP1, 3 - | addu TMP1, BASE, TMP1 - | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc) - |. sw TMP1, L->top - | b <3 - |. nop - |.endif - | - |->vm_callhook: // Dispatch target for call hooks. - |.if JIT - | b >1 - |.endif - |. move CARG2, PC - | - |->vm_hotcall: // Hot call counter underflow. - |.if JIT - | ori CARG2, PC, 1 - |1: - |.endif - | load_got lj_dispatch_call - | addu TMP0, BASE, RC - | sw PC, SAVE_PC - | sw BASE, L->base - | subu RA, RA, BASE - | sw TMP0, L->top - | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc) - |. move CARG1, L - | // Returns ASMFunction. - | lw BASE, L->base - | lw TMP0, L->top - | sw r0, SAVE_PC // Invalidate for subsequent line hook. - | subu NARGS8:RC, TMP0, BASE - | addu RA, BASE, RA - | lw LFUNC:RB, FRAME_FUNC(BASE) - | jr CRET1 - |. lw INS, -4(PC) - | - |->cont_stitch: // Trace stitching. - |.if JIT - | // RA = resultptr, RB = meta base - | lw INS, -4(PC) - | lw TMP3, -24+LO(RB) // Save previous trace number. - | decode_RA8a RC, INS - | addiu AT, MULTRES, -8 - | decode_RA8b RC - | beqz AT, >2 - |. addu RC, BASE, RC // Call base. - |1: // Move results down. - | ldc1 f0, 0(RA) - | addiu AT, AT, -8 - | addiu RA, RA, 8 - | sdc1 f0, 0(RC) - | bnez AT, <1 - |. addiu RC, RC, 8 - |2: - | decode_RA8a RA, INS - | decode_RB8a RB, INS - | decode_RA8b RA - | decode_RB8b RB - | addu RA, RA, RB - | lw TMP1, DISPATCH_J(trace)(DISPATCH) - | addu RA, BASE, RA - |3: - | sltu AT, RC, RA - | bnez AT, >9 // More results wanted? - |. sll TMP2, TMP3, 2 - | - | addu TMP2, TMP1, TMP2 - | lw TRACE:TMP2, 0(TMP2) - | beqz TRACE:TMP2, ->cont_nop - |. nop - | lhu RD, TRACE:TMP2->link - | beq RD, TMP3, ->cont_nop // Blacklisted. - |. load_got lj_dispatch_stitch - | bnez RD, =>BC_JLOOP // Jump to stitched trace. - |. sll RD, RD, 3 - | - | // Stitch a new trace to the previous trace. - | sw TMP3, DISPATCH_J(exitno)(DISPATCH) - | sw L, DISPATCH_J(L)(DISPATCH) - | sw BASE, L->base - | addiu CARG1, DISPATCH, GG_DISP2J - | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) - |. move CARG2, PC - | b ->cont_nop - |. lw BASE, L->base - | - |9: - | sw TISNIL, HI(RC) - | b <3 - |. addiu RC, RC, 8 - |.endif - | - |->vm_profhook: // Dispatch target for profiler hook. -#if LJ_HASPROFILE - | load_got lj_dispatch_profile - | sw MULTRES, SAVE_MULTRES - | move CARG2, PC - | sw BASE, L->base - | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc) - |. move CARG1, L - | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. - | addiu PC, PC, -4 - | b ->cont_nop - |. 
lw BASE, L->base -#endif - | - |//----------------------------------------------------------------------- - |//-- Trace exit handler ------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro savex_, a, b - | sdc1 f..a, 16+a*8(sp) - | sw r..a, 16+32*8+a*4(sp) - | sw r..b, 16+32*8+b*4(sp) - |.endmacro - | - |->vm_exit_handler: - |.if JIT - | addiu sp, sp, -(16+32*8+32*4) - | savex_ 0, 1 - | savex_ 2, 3 - | savex_ 4, 5 - | savex_ 6, 7 - | savex_ 8, 9 - | savex_ 10, 11 - | savex_ 12, 13 - | savex_ 14, 15 - | savex_ 16, 17 - | savex_ 18, 19 - | savex_ 20, 21 - | savex_ 22, 23 - | savex_ 24, 25 - | savex_ 26, 27 - | sdc1 f28, 16+28*8(sp) - | sw r28, 16+32*8+28*4(sp) - | sdc1 f30, 16+30*8(sp) - | sw r30, 16+32*8+30*4(sp) - | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP. - | li_vmstate EXIT - | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp. - | addiu DISPATCH, JGL, -GG_DISP2G-32768 - | lw TMP1, 0(TMP2) // Load exit number. - | st_vmstate - | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP. - | lw L, DISPATCH_GL(cur_L)(DISPATCH) - | lw BASE, DISPATCH_GL(jit_base)(DISPATCH) - | load_got lj_trace_exit - | sw L, DISPATCH_J(L)(DISPATCH) - | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number. - | sw BASE, L->base - | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number. - | addiu CARG1, DISPATCH, GG_DISP2J - | sw r0, DISPATCH_GL(jit_base)(DISPATCH) - | call_intern lj_trace_exit // (jit_State *J, ExitState *ex) - |. addiu CARG2, sp, 16 - | // Returns MULTRES (unscaled) or negated error code. - | lw TMP1, L->cframe - | li AT, -4 - | lw BASE, L->base - | and sp, TMP1, AT - | lw PC, SAVE_PC // Get SAVE_PC. - | b >1 - |. sw L, SAVE_L // Set SAVE_L (on-trace resume/yield). - |.endif - |->vm_exit_interp: - |.if JIT - | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set. - | lw L, SAVE_L - | addiu DISPATCH, JGL, -GG_DISP2G-32768 - | sw BASE, L->base - |1: - | bltz CRET1, >9 // Check for error from exit. - |. lw LFUNC:RB, FRAME_FUNC(BASE) - | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | sll MULTRES, CRET1, 3 - | li TISNIL, LJ_TNIL - | sw MULTRES, SAVE_MULTRES - | mtc1 TMP3, TOBIT - | lw TMP1, LFUNC:RB->pc - | sw r0, DISPATCH_GL(jit_base)(DISPATCH) - | lw KBASE, PC2PROTO(k)(TMP1) - | cvt.d.s TOBIT, TOBIT - | // Modified copy of ins_next which handles function header dispatch, too. - | lw INS, 0(PC) - | addiu PC, PC, 4 - | // Assumes TISNIL == ~LJ_VMST_INTERP == -1 - | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH) - | decode_OP4a TMP1, INS - | decode_OP4b TMP1 - | sltiu TMP2, TMP1, BC_FUNCF*4 // Function header? - | addu TMP0, DISPATCH, TMP1 - | decode_RD8a RD, INS - | lw AT, 0(TMP0) - | decode_RA8a RA, INS - | beqz TMP2, >2 - |. decode_RA8b RA - | jr AT - |. decode_RD8b RD - |2: - | sltiu TMP2, TMP1, (BC_FUNCC+2)*4 // Fast function? - | bnez TMP2, >3 - |. lw TMP1, FRAME_PC(BASE) - | // Check frame below fast function. - | andi TMP0, TMP1, FRAME_TYPE - | bnez TMP0, >3 // Trace stitching continuation? - |. nop - | // Otherwise set KBASE for Lua function below fast function. - | lw TMP2, -4(TMP1) - | decode_RA8a TMP0, TMP2 - | decode_RA8b TMP0 - | subu TMP1, BASE, TMP0 - | lw LFUNC:TMP2, -8+FRAME_FUNC(TMP1) - | lw TMP1, LFUNC:TMP2->pc - | lw KBASE, PC2PROTO(k)(TMP1) - |3: - | addiu RC, MULTRES, -8 - | jr AT - |. addu RA, RA, BASE - | - |9: // Rethrow error from the right C frame. - | load_got lj_err_throw - | negu CARG2, CRET1 - | call_intern lj_err_throw // (lua_State *L, int errcode) - |. 
move CARG1, L - |.endif - | - |//----------------------------------------------------------------------- - |//-- Math helper functions ---------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1. - |.macro vm_round, func - | lui TMP0, 0x4330 // Hiword of 2^52 (double). - | mtc1 r0, f4 - | mtc1 TMP0, f5 - | abs.d FRET2, FARG1 // |x| - | mfc1 AT, f13 - | c.olt.d 0, FRET2, f4 - | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52 - | bc1f 0, >1 // Truncate only if |x| < 2^52. - |. sub.d FRET1, FRET1, f4 - | slt AT, AT, r0 - |.if "func" == "ceil" - | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0. - |.else - | lui TMP0, 0x3ff0 // Hiword of +1 (double). - |.endif - |.if "func" == "trunc" - | mtc1 TMP0, f5 - | c.olt.d 0, FRET2, FRET1 // |x| < result? - | sub.d FRET2, FRET1, f4 - | movt.d FRET1, FRET2, 0 // If yes, subtract +1. - | neg.d FRET2, FRET1 - | jr ra - |. movn.d FRET1, FRET2, AT // Merge sign bit back in. - |.else - | neg.d FRET2, FRET1 - | mtc1 TMP0, f5 - | movn.d FRET1, FRET2, AT // Merge sign bit back in. - |.if "func" == "ceil" - | c.olt.d 0, FRET1, FARG1 // x > result? - |.else - | c.olt.d 0, FARG1, FRET1 // x < result? - |.endif - | sub.d FRET2, FRET1, f4 // If yes, subtract +-1. - | jr ra - |. movt.d FRET1, FRET2, 0 - |.endif - |1: - | jr ra - |. mov.d FRET1, FARG1 - |.endmacro - | - |->vm_floor: - | vm_round floor - |->vm_ceil: - | vm_round ceil - |->vm_trunc: - |.if JIT - | vm_round trunc - |.endif - | - |//----------------------------------------------------------------------- - |//-- Miscellaneous functions -------------------------------------------- - |//----------------------------------------------------------------------- - | - |//----------------------------------------------------------------------- - |//-- FFI helper functions ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Handler for callback functions. Callback slot number in r1, g in r2. - |->vm_ffi_callback: - |.if FFI - |.type CTSTATE, CTState, PC - | saveregs - | lw CTSTATE, GL:r2->ctype_state - | addiu DISPATCH, r2, GG_G2DISP - | load_got lj_ccallback_enter - | sw r1, CTSTATE->cb.slot - | sw CARG1, CTSTATE->cb.gpr[0] - | sw CARG2, CTSTATE->cb.gpr[1] - | sdc1 FARG1, CTSTATE->cb.fpr[0] - | sw CARG3, CTSTATE->cb.gpr[2] - | sw CARG4, CTSTATE->cb.gpr[3] - | sdc1 FARG2, CTSTATE->cb.fpr[1] - | addiu TMP0, sp, CFRAME_SPACE+16 - | sw TMP0, CTSTATE->cb.stack - | sw r0, SAVE_PC // Any value outside of bytecode is ok. - | move CARG2, sp - | call_intern lj_ccallback_enter // (CTState *cts, void *cf) - |. move CARG1, CTSTATE - | // Returns lua_State *. - | lw BASE, L:CRET1->base - | lw RC, L:CRET1->top - | move L, CRET1 - | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | lw LFUNC:RB, FRAME_FUNC(BASE) - | mtc1 TMP3, TOBIT - | li_vmstate INTERP - | li TISNIL, LJ_TNIL - | subu RC, RC, BASE - | st_vmstate - | cvt.d.s TOBIT, TOBIT - | ins_callt - |.endif - | - |->cont_ffi_callback: // Return from FFI callback. - |.if FFI - | load_got lj_ccallback_leave - | lw CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH) - | sw BASE, L->base - | sw RB, L->top - | sw L, CTSTATE->L - | move CARG2, RA - | call_intern lj_ccallback_leave // (CTState *cts, TValue *o) - |. move CARG1, CTSTATE - | lw CRET1, CTSTATE->cb.gpr[0] - | ldc1 FRET1, CTSTATE->cb.fpr[0] - | lw CRET2, CTSTATE->cb.gpr[1] - | b ->vm_leave_unw - |. 
ldc1 FRET2, CTSTATE->cb.fpr[1] - |.endif - | - |->vm_ffi_call: // Call C function via FFI. - | // Caveat: needs special frame unwinding, see below. - |.if FFI - | .type CCSTATE, CCallState, CARG1 - | lw TMP1, CCSTATE->spadj - | lbu CARG2, CCSTATE->nsp - | move TMP2, sp - | subu sp, sp, TMP1 - | sw ra, -4(TMP2) - | sll CARG2, CARG2, 2 - | sw r16, -8(TMP2) - | sw CCSTATE, -12(TMP2) - | move r16, TMP2 - | addiu TMP1, CCSTATE, offsetof(CCallState, stack) - | addiu TMP2, sp, 16 - | beqz CARG2, >2 - |. addu TMP3, TMP1, CARG2 - |1: - | lw TMP0, 0(TMP1) - | addiu TMP1, TMP1, 4 - | sltu AT, TMP1, TMP3 - | sw TMP0, 0(TMP2) - | bnez AT, <1 - |. addiu TMP2, TMP2, 4 - |2: - | lw CFUNCADDR, CCSTATE->func - | lw CARG2, CCSTATE->gpr[1] - | lw CARG3, CCSTATE->gpr[2] - | lw CARG4, CCSTATE->gpr[3] - | ldc1 FARG1, CCSTATE->fpr[0] - | ldc1 FARG2, CCSTATE->fpr[1] - | jalr CFUNCADDR - |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1. - | lw CCSTATE:TMP1, -12(r16) - | lw TMP2, -8(r16) - | lw ra, -4(r16) - | sw CRET1, CCSTATE:TMP1->gpr[0] - | sw CRET2, CCSTATE:TMP1->gpr[1] - | sdc1 FRET1, CCSTATE:TMP1->fpr[0] - | sdc1 FRET2, CCSTATE:TMP1->fpr[1] - | move sp, r16 - | jr ra - |. move r16, TMP2 - |.endif - |// Note: vm_ffi_call must be the last function in this object file! - | - |//----------------------------------------------------------------------- -} - -/* Generate the code for a single instruction. */ -static void build_ins(BuildCtx *ctx, BCOp op, int defop) -{ - int vk = 0; - |=>defop: - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - /* Remember: all ops branch for a true comparison, fall through otherwise. */ - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: - | // RA = src1*8, RD = src2*8, JMP with RD = target - | addu CARG2, BASE, RA - | addu CARG3, BASE, RD - | lw TMP0, HI(CARG2) - | lw TMP1, HI(CARG3) - | ldc1 f0, 0(CARG2) - | ldc1 f2, 0(CARG3) - | sltiu TMP0, TMP0, LJ_TISNUM - | sltiu TMP1, TMP1, LJ_TISNUM - | lhu TMP2, OFS_RD(PC) - | and TMP0, TMP0, TMP1 - | addiu PC, PC, 4 - | beqz TMP0, ->vmeta_comp - |. lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535) - | decode_RD4b TMP2 - | addu TMP2, TMP2, TMP1 - if (op == BC_ISLT || op == BC_ISGE) { - | c.olt.d f0, f2 - } else { - | c.ole.d f0, f2 - } - if (op == BC_ISLT || op == BC_ISLE) { - | movf TMP2, r0 - } else { - | movt TMP2, r0 - } - | addu PC, PC, TMP2 - |1: - | ins_next - break; - - case BC_ISEQV: case BC_ISNEV: - vk = op == BC_ISEQV; - | // RA = src1*8, RD = src2*8, JMP with RD = target - | addu RA, BASE, RA - | addiu PC, PC, 4 - | lw TMP0, HI(RA) - | ldc1 f0, 0(RA) - | addu RD, BASE, RD - | lhu TMP2, -4+OFS_RD(PC) - | lw TMP1, HI(RD) - | ldc1 f2, 0(RD) - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | sltiu AT, TMP0, LJ_TISNUM - | sltiu CARG1, TMP1, LJ_TISNUM - | decode_RD4b TMP2 - | and AT, AT, CARG1 - | beqz AT, >5 - |. addu TMP2, TMP2, TMP3 - | c.eq.d f0, f2 - if (vk) { - | movf TMP2, r0 - } else { - | movt TMP2, r0 - } - |1: - | addu PC, PC, TMP2 - | ins_next - |5: // Either or both types are not numbers. - | lw CARG2, LO(RA) - | lw CARG3, LO(RD) - |.if FFI - | li TMP3, LJ_TCDATA - | beq TMP0, TMP3, ->vmeta_equal_cd - |.endif - |. sltiu AT, TMP0, LJ_TISPRI // Not a primitive? - |.if FFI - | beq TMP1, TMP3, ->vmeta_equal_cd - |.endif - |. xor TMP3, CARG2, CARG3 // Same tv? - | xor TMP1, TMP1, TMP0 // Same type? - | sltiu CARG1, TMP0, LJ_TISTABUD+1 // Table or userdata? - | movz TMP3, r0, AT // Ignore tv if primitive. - | movn CARG1, r0, TMP1 // Tab/ud and same type? 
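
The comparison and equality ops in build_ins above all decode their jump operand the same way: the 16-bit operand is scaled to a byte offset, the jump bias is subtracted, and a conditional move zeroes the offset when the branch should fall through. A small C sketch of that computation (hypothetical helper; 0x8000 stands in for BCBIAS_J):

#include <stdint.h>

#define BCBIAS_J_ 0x8000  /* stand-in for the real BCBIAS_J constant */

/* Mirrors the decode_RD4b / lui / addu / movf-movt sequence above. */
static const uint32_t *branch_target(const uint32_t *pc_next, uint32_t rd, int taken)
{
  int32_t delta = taken ? ((int32_t)rd - BCBIAS_J_) * 4 : 0;
  return (const uint32_t *)((const uint8_t *)pc_next + delta);
}
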
- | or AT, TMP1, TMP3 // Same type && (pri||same tv). - | movz CARG1, r0, AT - | beqz CARG1, <1 // Done if not tab/ud or not same type or same tv. - if (vk) { - |. movn TMP2, r0, AT - } else { - |. movz TMP2, r0, AT - } - | // Different tables or userdatas. Need to check __eq metamethod. - | // Field metatable must be at same offset for GCtab and GCudata! - | lw TAB:TMP1, TAB:CARG2->metatable - | beqz TAB:TMP1, <1 // No metatable? - |. nop - | lbu TMP1, TAB:TMP1->nomm - | andi TMP1, TMP1, 1<vmeta_equal // Handle __eq metamethod. - |. li CARG4, 1-vk // ne = 0 or 1. - break; - - case BC_ISEQS: case BC_ISNES: - vk = op == BC_ISEQS; - | // RA = src*8, RD = str_const*8 (~), JMP with RD = target - | addu RA, BASE, RA - | addiu PC, PC, 4 - | lw TMP0, HI(RA) - | srl RD, RD, 1 - | lw STR:TMP3, LO(RA) - | subu RD, KBASE, RD - | lhu TMP2, -4+OFS_RD(PC) - |.if FFI - | li AT, LJ_TCDATA - | beq TMP0, AT, ->vmeta_equal_cd - |.endif - |. lw STR:TMP1, -4(RD) // KBASE-4-str_const*4 - | addiu TMP0, TMP0, -LJ_TSTR - | decode_RD4b TMP2 - | xor TMP1, STR:TMP1, STR:TMP3 - | or TMP0, TMP0, TMP1 - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | addu TMP2, TMP2, TMP3 - if (vk) { - | movn TMP2, r0, TMP0 - } else { - | movz TMP2, r0, TMP0 - } - | addu PC, PC, TMP2 - | ins_next - break; - - case BC_ISEQN: case BC_ISNEN: - vk = op == BC_ISEQN; - | // RA = src*8, RD = num_const*8, JMP with RD = target - | addu RA, BASE, RA - | addiu PC, PC, 4 - | lw TMP0, HI(RA) - | ldc1 f0, 0(RA) - | addu RD, KBASE, RD - | lhu TMP2, -4+OFS_RD(PC) - | ldc1 f2, 0(RD) - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | sltiu AT, TMP0, LJ_TISNUM - | decode_RD4b TMP2 - |.if FFI - | beqz AT, >5 - |.else - | beqz AT, >1 - |.endif - |. addu TMP2, TMP2, TMP3 - | c.eq.d f0, f2 - if (vk) { - | movf TMP2, r0 - | addu PC, PC, TMP2 - |1: - } else { - | movt TMP2, r0 - |1: - | addu PC, PC, TMP2 - } - | ins_next - |.if FFI - |5: - | li AT, LJ_TCDATA - | beq TMP0, AT, ->vmeta_equal_cd - |. nop - | b <1 - |. nop - |.endif - break; - - case BC_ISEQP: case BC_ISNEP: - vk = op == BC_ISEQP; - | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target - | addu RA, BASE, RA - | srl TMP1, RD, 3 - | lw TMP0, HI(RA) - | lhu TMP2, OFS_RD(PC) - | not TMP1, TMP1 - | addiu PC, PC, 4 - |.if FFI - | li AT, LJ_TCDATA - | beq TMP0, AT, ->vmeta_equal_cd - |.endif - |. xor TMP0, TMP0, TMP1 - | decode_RD4b TMP2 - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | addu TMP2, TMP2, TMP3 - if (vk) { - | movn TMP2, r0, TMP0 - } else { - | movz TMP2, r0, TMP0 - } - | addu PC, PC, TMP2 - | ins_next - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: - | // RA = dst*8 or unused, RD = src*8, JMP with RD = target - | addu RD, BASE, RD - | lhu TMP2, OFS_RD(PC) - | lw TMP0, HI(RD) - | addiu PC, PC, 4 - if (op == BC_IST || op == BC_ISF) { - | sltiu TMP0, TMP0, LJ_TISTRUECOND - | decode_RD4b TMP2 - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | addu TMP2, TMP2, TMP3 - if (op == BC_IST) { - | movz TMP2, r0, TMP0 - } else { - | movn TMP2, r0, TMP0 - } - | addu PC, PC, TMP2 - } else { - | sltiu TMP0, TMP0, LJ_TISTRUECOND - | ldc1 f0, 0(RD) - if (op == BC_ISTC) { - | beqz TMP0, >1 - } else { - | bnez TMP0, >1 - } - |. 
addu RA, BASE, RA - | decode_RD4b TMP2 - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | addu TMP2, TMP2, TMP3 - | sdc1 f0, 0(RA) - | addu PC, PC, TMP2 - |1: - } - | ins_next - break; - - case BC_ISTYPE: - | // RA = src*8, RD = -type*8 - | addu TMP2, BASE, RA - | srl TMP1, RD, 3 - | lw TMP0, HI(TMP2) - | ins_next1 - | addu AT, TMP0, TMP1 - | bnez AT, ->vmeta_istype - |. ins_next2 - break; - case BC_ISNUM: - | // RA = src*8, RD = -(TISNUM-1)*8 - | addu TMP2, BASE, RA - | lw TMP0, HI(TMP2) - | ins_next1 - | sltiu AT, TMP0, LJ_TISNUM - | beqz AT, ->vmeta_istype - |. ins_next2 - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_MOV: - | // RA = dst*8, RD = src*8 - | addu RD, BASE, RD - | addu RA, BASE, RA - | ldc1 f0, 0(RD) - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - break; - case BC_NOT: - | // RA = dst*8, RD = src*8 - | addu RD, BASE, RD - | addu RA, BASE, RA - | lw TMP0, HI(RD) - | li TMP1, LJ_TFALSE - | sltiu TMP0, TMP0, LJ_TISTRUECOND - | addiu TMP1, TMP0, LJ_TTRUE - | ins_next1 - | sw TMP1, HI(RA) - | ins_next2 - break; - case BC_UNM: - | // RA = dst*8, RD = src*8 - | addu CARG3, BASE, RD - | addu RA, BASE, RA - | lw TMP0, HI(CARG3) - | ldc1 f0, 0(CARG3) - | sltiu AT, TMP0, LJ_TISNUM - | beqz AT, ->vmeta_unm - |. neg.d f0, f0 - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - break; - case BC_LEN: - | // RA = dst*8, RD = src*8 - | addu CARG2, BASE, RD - | addu RA, BASE, RA - | lw TMP0, HI(CARG2) - | lw CARG1, LO(CARG2) - | li AT, LJ_TSTR - | bne TMP0, AT, >2 - |. li AT, LJ_TTAB - | lw CRET1, STR:CARG1->len - |1: - | mtc1 CRET1, f0 - | cvt.d.w f0, f0 - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - |2: - | bne TMP0, AT, ->vmeta_len - |. nop -#if LJ_52 - | lw TAB:TMP2, TAB:CARG1->metatable - | bnez TAB:TMP2, >9 - |. nop - |3: -#endif - |->BC_LEN_Z: - | load_got lj_tab_len - | call_intern lj_tab_len // (GCtab *t) - |. nop - | // Returns uint32_t (but less than 2^31). - | b <1 - |. nop -#if LJ_52 - |9: - | lbu TMP0, TAB:TMP2->nomm - | andi TMP0, TMP0, 1<vmeta_len - |. nop -#endif - break; - - /* -- Binary ops -------------------------------------------------------- */ - - |.macro ins_arithpre - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 - ||switch (vk) { - ||case 0: - | addu CARG3, BASE, RB - | addu CARG4, KBASE, RC - | lw TMP1, HI(CARG3) - | ldc1 f20, 0(CARG3) - | ldc1 f22, 0(CARG4) - | sltiu AT, TMP1, LJ_TISNUM - || break; - ||case 1: - | addu CARG4, BASE, RB - | addu CARG3, KBASE, RC - | lw TMP1, HI(CARG4) - | ldc1 f22, 0(CARG4) - | ldc1 f20, 0(CARG3) - | sltiu AT, TMP1, LJ_TISNUM - || break; - ||default: - | addu CARG3, BASE, RB - | addu CARG4, BASE, RC - | lw TMP1, HI(CARG3) - | lw TMP2, HI(CARG4) - | ldc1 f20, 0(CARG3) - | ldc1 f22, 0(CARG4) - | sltiu AT, TMP1, LJ_TISNUM - | sltiu TMP0, TMP2, LJ_TISNUM - | and AT, AT, TMP0 - || break; - ||} - | beqz AT, ->vmeta_arith - |. addu RA, BASE, RA - |.endmacro - | - |.macro fpmod, a, b, c - |->BC_MODVN_Z: - | bal ->vm_floor // floor(b/c) - |. div.d FARG1, b, c - | mul.d a, FRET1, c - | sub.d a, b, a // b - floor(b/c)*c - |.endmacro - | - |.macro ins_arith, ins - | ins_arithpre - |.if "ins" == "fpmod_" - | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. - |. 
nop - |.else - | ins f0, f20, f22 - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - |.endif - |.endmacro - - case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: - | ins_arith add.d - break; - case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: - | ins_arith sub.d - break; - case BC_MULVN: case BC_MULNV: case BC_MULVV: - | ins_arith mul.d - break; - case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: - | ins_arith div.d - break; - case BC_MODVN: - | ins_arith fpmod - break; - case BC_MODNV: case BC_MODVV: - | ins_arith fpmod_ - break; - case BC_POW: - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | addu CARG3, BASE, RB - | addu CARG4, BASE, RC - | lw TMP1, HI(CARG3) - | lw TMP2, HI(CARG4) - | ldc1 FARG1, 0(CARG3) - | ldc1 FARG2, 0(CARG4) - | sltiu AT, TMP1, LJ_TISNUM - | sltiu TMP0, TMP2, LJ_TISNUM - | and AT, AT, TMP0 - | load_got pow - | beqz AT, ->vmeta_arith - |. addu RA, BASE, RA - | call_extern - |. nop - | ins_next1 - | sdc1 FRET1, 0(RA) - | ins_next2 - break; - - case BC_CAT: - | // RA = dst*8, RB = src_start*8, RC = src_end*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | subu CARG3, RC, RB - | sw BASE, L->base - | addu CARG2, BASE, RC - | move MULTRES, RB - |->BC_CAT_Z: - | load_got lj_meta_cat - | srl CARG3, CARG3, 3 - | sw PC, SAVE_PC - | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left) - |. move CARG1, L - | // Returns NULL (finished) or TValue * (metamethod). - | bnez CRET1, ->vmeta_binop - |. lw BASE, L->base - | addu RB, BASE, MULTRES - | ldc1 f0, 0(RB) - | addu RA, BASE, RA - | ins_next1 - | sdc1 f0, 0(RA) // Copy result from RB to RA. - | ins_next2 - break; - - /* -- Constant ops ------------------------------------------------------ */ - - case BC_KSTR: - | // RA = dst*8, RD = str_const*8 (~) - | srl TMP1, RD, 1 - | subu TMP1, KBASE, TMP1 - | ins_next1 - | lw TMP0, -4(TMP1) // KBASE-4-str_const*4 - | addu RA, BASE, RA - | li TMP2, LJ_TSTR - | sw TMP0, LO(RA) - | sw TMP2, HI(RA) - | ins_next2 - break; - case BC_KCDATA: - |.if FFI - | // RA = dst*8, RD = cdata_const*8 (~) - | srl TMP1, RD, 1 - | subu TMP1, KBASE, TMP1 - | ins_next1 - | lw TMP0, -4(TMP1) // KBASE-4-cdata_const*4 - | addu RA, BASE, RA - | li TMP2, LJ_TCDATA - | sw TMP0, LO(RA) - | sw TMP2, HI(RA) - | ins_next2 - |.endif - break; - case BC_KSHORT: - | // RA = dst*8, RD = int16_literal*8 - | sra RD, INS, 16 - | mtc1 RD, f0 - | addu RA, BASE, RA - | cvt.d.w f0, f0 - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - break; - case BC_KNUM: - | // RA = dst*8, RD = num_const*8 - | addu RD, KBASE, RD - | addu RA, BASE, RA - | ldc1 f0, 0(RD) - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - break; - case BC_KPRI: - | // RA = dst*8, RD = primitive_type*8 (~) - | srl TMP1, RD, 3 - | addu RA, BASE, RA - | not TMP0, TMP1 - | ins_next1 - | sw TMP0, HI(RA) - | ins_next2 - break; - case BC_KNIL: - | // RA = base*8, RD = end*8 - | addu RA, BASE, RA - | sw TISNIL, HI(RA) - | addiu RA, RA, 8 - | addu RD, BASE, RD - |1: - | sw TISNIL, HI(RA) - | slt AT, RA, RD - | bnez AT, <1 - |. 
addiu RA, RA, 8 - | ins_next_ - break; - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - | // RA = dst*8, RD = uvnum*8 - | lw LFUNC:RB, FRAME_FUNC(BASE) - | srl RD, RD, 1 - | addu RD, RD, LFUNC:RB - | lw UPVAL:RB, LFUNC:RD->uvptr - | ins_next1 - | lw TMP1, UPVAL:RB->v - | ldc1 f0, 0(TMP1) - | addu RA, BASE, RA - | sdc1 f0, 0(RA) - | ins_next2 - break; - case BC_USETV: - | // RA = uvnum*8, RD = src*8 - | lw LFUNC:RB, FRAME_FUNC(BASE) - | srl RA, RA, 1 - | addu RD, BASE, RD - | addu RA, RA, LFUNC:RB - | ldc1 f0, 0(RD) - | lw UPVAL:RB, LFUNC:RA->uvptr - | lbu TMP3, UPVAL:RB->marked - | lw CARG2, UPVAL:RB->v - | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv) - | lbu TMP0, UPVAL:RB->closed - | lw TMP2, HI(RD) - | sdc1 f0, 0(CARG2) - | li AT, LJ_GC_BLACK|1 - | or TMP3, TMP3, TMP0 - | beq TMP3, AT, >2 // Upvalue is closed and black? - |. addiu TMP2, TMP2, -(LJ_TNUMX+1) - |1: - | ins_next - | - |2: // Check if new value is collectable. - | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1) - | beqz AT, <1 // tvisgcv(v) - |. lw TMP1, LO(RD) - | lbu TMP3, GCOBJ:TMP1->gch.marked - | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v) - | beqz TMP3, <1 - |. load_got lj_gc_barrieruv - | // Crossed a write barrier. Move the barrier forward. - | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv) - |. addiu CARG1, DISPATCH, GG_DISP2G - | b <1 - |. nop - break; - case BC_USETS: - | // RA = uvnum*8, RD = str_const*8 (~) - | lw LFUNC:RB, FRAME_FUNC(BASE) - | srl RA, RA, 1 - | srl TMP1, RD, 1 - | addu RA, RA, LFUNC:RB - | subu TMP1, KBASE, TMP1 - | lw UPVAL:RB, LFUNC:RA->uvptr - | lw STR:TMP1, -4(TMP1) // KBASE-4-str_const*4 - | lbu TMP2, UPVAL:RB->marked - | lw CARG2, UPVAL:RB->v - | lbu TMP3, STR:TMP1->marked - | andi AT, TMP2, LJ_GC_BLACK // isblack(uv) - | lbu TMP2, UPVAL:RB->closed - | li TMP0, LJ_TSTR - | sw STR:TMP1, LO(CARG2) - | bnez AT, >2 - |. sw TMP0, HI(CARG2) - |1: - | ins_next - | - |2: // Check if string is white and ensure upvalue is closed. - | beqz TMP2, <1 - |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str) - | beqz AT, <1 - |. load_got lj_gc_barrieruv - | // Crossed a write barrier. Move the barrier forward. - | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv) - |. addiu CARG1, DISPATCH, GG_DISP2G - | b <1 - |. nop - break; - case BC_USETN: - | // RA = uvnum*8, RD = num_const*8 - | lw LFUNC:RB, FRAME_FUNC(BASE) - | srl RA, RA, 1 - | addu RD, KBASE, RD - | addu RA, RA, LFUNC:RB - | ldc1 f0, 0(RD) - | lw UPVAL:RB, LFUNC:RA->uvptr - | ins_next1 - | lw TMP1, UPVAL:RB->v - | sdc1 f0, 0(TMP1) - | ins_next2 - break; - case BC_USETP: - | // RA = uvnum*8, RD = primitive_type*8 (~) - | lw LFUNC:RB, FRAME_FUNC(BASE) - | srl RA, RA, 1 - | srl TMP0, RD, 3 - | addu RA, RA, LFUNC:RB - | not TMP0, TMP0 - | lw UPVAL:RB, LFUNC:RA->uvptr - | ins_next1 - | lw TMP1, UPVAL:RB->v - | sw TMP0, HI(TMP1) - | ins_next2 - break; - - case BC_UCLO: - | // RA = level*8, RD = target - | lw TMP2, L->openupval - | branch_RD // Do this first since RD is not saved. - | load_got lj_func_closeuv - | sw BASE, L->base - | beqz TMP2, >1 - |. move CARG1, L - | call_intern lj_func_closeuv // (lua_State *L, TValue *level) - |. 
addu CARG2, BASE, RA - | lw BASE, L->base - |1: - | ins_next - break; - - case BC_FNEW: - | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype) - | srl TMP1, RD, 1 - | load_got lj_func_newL_gc - | subu TMP1, KBASE, TMP1 - | lw CARG3, FRAME_FUNC(BASE) - | lw CARG2, -4(TMP1) // KBASE-4-tab_const*4 - | sw BASE, L->base - | sw PC, SAVE_PC - | // (lua_State *L, GCproto *pt, GCfuncL *parent) - | call_intern lj_func_newL_gc - |. move CARG1, L - | // Returns GCfuncL *. - | lw BASE, L->base - | li TMP0, LJ_TFUNC - | ins_next1 - | addu RA, BASE, RA - | sw TMP0, HI(RA) - | sw LFUNC:CRET1, LO(RA) - | ins_next2 - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_TNEW: - case BC_TDUP: - | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~) - | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH) - | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) - | sw BASE, L->base - | sw PC, SAVE_PC - | sltu AT, TMP0, TMP1 - | beqz AT, >5 - |1: - if (op == BC_TNEW) { - | load_got lj_tab_new - | srl CARG2, RD, 3 - | andi CARG2, CARG2, 0x7ff - | li TMP0, 0x801 - | addiu AT, CARG2, -0x7ff - | srl CARG3, RD, 14 - | movz CARG2, TMP0, AT - | // (lua_State *L, int32_t asize, uint32_t hbits) - | call_intern lj_tab_new - |. move CARG1, L - | // Returns Table *. - } else { - | load_got lj_tab_dup - | srl TMP1, RD, 1 - | subu TMP1, KBASE, TMP1 - | move CARG1, L - | call_intern lj_tab_dup // (lua_State *L, Table *kt) - |. lw CARG2, -4(TMP1) // KBASE-4-str_const*4 - | // Returns Table *. - } - | lw BASE, L->base - | ins_next1 - | addu RA, BASE, RA - | li TMP0, LJ_TTAB - | sw TAB:CRET1, LO(RA) - | sw TMP0, HI(RA) - | ins_next2 - |5: - | load_got lj_gc_step_fixtop - | move MULTRES, RD - | call_intern lj_gc_step_fixtop // (lua_State *L) - |. move CARG1, L - | b <1 - |. move RD, MULTRES - break; - - case BC_GGET: - | // RA = dst*8, RD = str_const*8 (~) - case BC_GSET: - | // RA = src*8, RD = str_const*8 (~) - | lw LFUNC:TMP2, FRAME_FUNC(BASE) - | srl TMP1, RD, 1 - | subu TMP1, KBASE, TMP1 - | lw TAB:RB, LFUNC:TMP2->env - | lw STR:RC, -4(TMP1) // KBASE-4-str_const*4 - if (op == BC_GGET) { - | b ->BC_TGETS_Z - } else { - | b ->BC_TSETS_Z - } - |. addu RA, BASE, RA - break; - - case BC_TGETV: - | // RA = dst*8, RB = table*8, RC = key*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | addu CARG2, BASE, RB - | addu CARG3, BASE, RC - | lw TMP1, HI(CARG2) - | lw TMP2, HI(CARG3) - | lw TAB:RB, LO(CARG2) - | li AT, LJ_TTAB - | ldc1 f0, 0(CARG3) - | bne TMP1, AT, ->vmeta_tgetv - |. addu RA, BASE, RA - | sltiu AT, TMP2, LJ_TISNUM - | beqz AT, >5 - |. li AT, LJ_TSTR - | - | // Convert number key to integer, check for integerness and range. - | cvt.w.d f2, f0 - | lw TMP0, TAB:RB->asize - | mfc1 TMP2, f2 - | cvt.d.w f4, f2 - | lw TMP1, TAB:RB->array - | c.eq.d f0, f4 - | sltu AT, TMP2, TMP0 - | movf AT, r0 - | sll TMP2, TMP2, 3 - | beqz AT, ->vmeta_tgetv // Integer key and in array part? - |. addu TMP2, TMP1, TMP2 - | lw TMP0, HI(TMP2) - | beq TMP0, TISNIL, >2 - |. ldc1 f0, 0(TMP2) - |1: - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - | - |2: // Check for __index if table value is nil. - | lw TAB:TMP2, TAB:RB->metatable - | beqz TAB:TMP2, <1 // No metatable: done. - |. nop - | lbu TMP0, TAB:TMP2->nomm - | andi TMP0, TMP0, 1<vmeta_tgetv - |. nop - | - |5: - | bne TMP2, AT, ->vmeta_tgetv - |. lw STR:RC, LO(CARG3) - | b ->BC_TGETS_Z // String key? - |. 
nop - break; - case BC_TGETS: - | // RA = dst*8, RB = table*8, RC = str_const*4 (~) - | decode_RB8a RB, INS - | decode_RB8b RB - | addu CARG2, BASE, RB - | decode_RC4a RC, INS - | lw TMP0, HI(CARG2) - | decode_RC4b RC - | li AT, LJ_TTAB - | lw TAB:RB, LO(CARG2) - | subu CARG3, KBASE, RC - | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4 - | bne TMP0, AT, ->vmeta_tgets1 - |. addu RA, BASE, RA - |->BC_TGETS_Z: - | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8 - | lw TMP0, TAB:RB->hmask - | lw TMP1, STR:RC->hash - | lw NODE:TMP2, TAB:RB->node - | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask - | sll TMP0, TMP1, 5 - | sll TMP1, TMP1, 3 - | subu TMP1, TMP0, TMP1 - | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) - |1: - | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2) - | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) - | lw NODE:TMP1, NODE:TMP2->next - | lw CARG2, offsetof(Node, val)+HI(NODE:TMP2) - | addiu CARG1, CARG1, -LJ_TSTR - | xor TMP0, TMP0, STR:RC - | or AT, CARG1, TMP0 - | bnez AT, >4 - |. lw TAB:TMP3, TAB:RB->metatable - | beq CARG2, TISNIL, >5 // Key found, but nil value? - |. lw CARG1, offsetof(Node, val)+LO(NODE:TMP2) - |3: - | ins_next1 - | sw CARG2, HI(RA) - | sw CARG1, LO(RA) - | ins_next2 - | - |4: // Follow hash chain. - | bnez NODE:TMP1, <1 - |. move NODE:TMP2, NODE:TMP1 - | // End of hash chain: key not found, nil result. - | - |5: // Check for __index if table value is nil. - | beqz TAB:TMP3, <3 // No metatable: done. - |. li CARG2, LJ_TNIL - | lbu TMP0, TAB:TMP3->nomm - | andi TMP0, TMP0, 1<vmeta_tgets - |. nop - break; - case BC_TGETB: - | // RA = dst*8, RB = table*8, RC = index*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | addu CARG2, BASE, RB - | decode_RDtoRC8 RC, RD - | lw CARG1, HI(CARG2) - | li AT, LJ_TTAB - | lw TAB:RB, LO(CARG2) - | addu RA, BASE, RA - | bne CARG1, AT, ->vmeta_tgetb - |. srl TMP0, RC, 3 - | lw TMP1, TAB:RB->asize - | lw TMP2, TAB:RB->array - | sltu AT, TMP0, TMP1 - | beqz AT, ->vmeta_tgetb - |. addu RC, TMP2, RC - | lw TMP1, HI(RC) - | beq TMP1, TISNIL, >5 - |. ldc1 f0, 0(RC) - |1: - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - | - |5: // Check for __index if table value is nil. - | lw TAB:TMP2, TAB:RB->metatable - | beqz TAB:TMP2, <1 // No metatable: done. - |. nop - | lbu TMP1, TAB:TMP2->nomm - | andi TMP1, TMP1, 1<vmeta_tgetb // Caveat: preserve TMP0! - |. nop - break; - case BC_TGETR: - | // RA = dst*8, RB = table*8, RC = key*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | addu CARG2, BASE, RB - | addu CARG3, BASE, RC - | lw TAB:CARG1, LO(CARG2) - | ldc1 f0, 0(CARG3) - | trunc.w.d f2, f0 - | lw TMP0, TAB:CARG1->asize - | mfc1 CARG2, f2 - | lw TMP1, TAB:CARG1->array - | sltu AT, CARG2, TMP0 - | sll TMP2, CARG2, 3 - | beqz AT, ->vmeta_tgetr // In array part? - |. addu TMP2, TMP1, TMP2 - | ldc1 f0, 0(TMP2) - |->BC_TGETR_Z: - | addu RA, BASE, RA - | ins_next1 - | sdc1 f0, 0(RA) - | ins_next2 - break; - - case BC_TSETV: - | // RA = src*8, RB = table*8, RC = key*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | addu CARG2, BASE, RB - | addu CARG3, BASE, RC - | lw TMP1, HI(CARG2) - | lw TMP2, HI(CARG3) - | lw TAB:RB, LO(CARG2) - | li AT, LJ_TTAB - | ldc1 f0, 0(CARG3) - | bne TMP1, AT, ->vmeta_tsetv - |. addu RA, BASE, RA - | sltiu AT, TMP2, LJ_TISNUM - | beqz AT, >5 - |. li AT, LJ_TSTR - | - | // Convert number key to integer, check for integerness and range. 
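
The check introduced by the comment directly above (and the identical one in BC_TGETV earlier) only takes the array fast path when the number key converts to an integer exactly and lies below the array size, which is what the cvt.w.d/cvt.d.w/c.eq.d/sltu sequence tests. A plain C rendering of that test (hypothetical helper, not LuaJIT's actual C code):

#include <stdint.h>

/* key_in_array() returns 1 and sets *idx when the double key round-trips
** through int32 unchanged and indexes the array part (idx < asize).
*/
static int key_in_array(double key, uint32_t asize, uint32_t *idx)
{
  int32_t k;
  if (key < -2147483648.0 || key > 2147483647.0) return 0;  /* out of int32 range */
  k = (int32_t)key;                    /* cvt.w.d */
  if ((double)k != key) return 0;      /* c.eq.d failed: not an integer-valued key */
  if ((uint32_t)k >= asize) return 0;  /* sltu: outside the array part */
  *idx = (uint32_t)k;
  return 1;
}
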
- | cvt.w.d f2, f0 - | lw TMP0, TAB:RB->asize - | mfc1 TMP2, f2 - | cvt.d.w f4, f2 - | lw TMP1, TAB:RB->array - | c.eq.d f0, f4 - | sltu AT, TMP2, TMP0 - | movf AT, r0 - | sll TMP2, TMP2, 3 - | beqz AT, ->vmeta_tsetv // Integer key and in array part? - |. addu TMP1, TMP1, TMP2 - | lbu TMP3, TAB:RB->marked - | lw TMP0, HI(TMP1) - | beq TMP0, TISNIL, >3 - |. ldc1 f0, 0(RA) - |1: - | andi AT, TMP3, LJ_GC_BLACK // isblack(table) - | bnez AT, >7 - |. sdc1 f0, 0(TMP1) - |2: - | ins_next - | - |3: // Check for __newindex if previous value is nil. - | lw TAB:TMP2, TAB:RB->metatable - | beqz TAB:TMP2, <1 // No metatable: done. - |. nop - | lbu TMP2, TAB:TMP2->nomm - | andi TMP2, TMP2, 1<vmeta_tsetv - |. nop - | - |5: - | bne TMP2, AT, ->vmeta_tsetv - |. lw STR:RC, LO(CARG3) - | b ->BC_TSETS_Z // String key? - |. nop - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, TMP3, TMP0, <2 - break; - case BC_TSETS: - | // RA = src*8, RB = table*8, RC = str_const*8 (~) - | decode_RB8a RB, INS - | decode_RB8b RB - | addu CARG2, BASE, RB - | decode_RC4a RC, INS - | lw TMP0, HI(CARG2) - | decode_RC4b RC - | li AT, LJ_TTAB - | subu CARG3, KBASE, RC - | lw TAB:RB, LO(CARG2) - | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4 - | bne TMP0, AT, ->vmeta_tsets1 - |. addu RA, BASE, RA - |->BC_TSETS_Z: - | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8 - | lw TMP0, TAB:RB->hmask - | lw TMP1, STR:RC->hash - | lw NODE:TMP2, TAB:RB->node - | sb r0, TAB:RB->nomm // Clear metamethod cache. - | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask - | sll TMP0, TMP1, 5 - | sll TMP1, TMP1, 3 - | subu TMP1, TMP0, TMP1 - | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) - | ldc1 f20, 0(RA) - |1: - | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2) - | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) - | li AT, LJ_TSTR - | lw NODE:TMP1, NODE:TMP2->next - | bne CARG1, AT, >5 - |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2) - | bne TMP0, STR:RC, >5 - |. lbu TMP3, TAB:RB->marked - | beq CARG2, TISNIL, >4 // Key found, but nil value? - |. lw TAB:TMP0, TAB:RB->metatable - |2: - | andi AT, TMP3, LJ_GC_BLACK // isblack(table) - | bnez AT, >7 - |. sdc1 f20, NODE:TMP2->val - |3: - | ins_next - | - |4: // Check for __newindex if previous value is nil. - | beqz TAB:TMP0, <2 // No metatable: done. - |. nop - | lbu TMP0, TAB:TMP0->nomm - | andi TMP0, TMP0, 1<vmeta_tsets - |. nop - | - |5: // Follow hash chain. - | bnez NODE:TMP1, <1 - |. move NODE:TMP2, NODE:TMP1 - | // End of hash chain: key not found, add a new one - | - | // But check for __newindex first. - | lw TAB:TMP2, TAB:RB->metatable - | beqz TAB:TMP2, >6 // No metatable: continue. - |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) - | lbu TMP0, TAB:TMP2->nomm - | andi TMP0, TMP0, 1<vmeta_tsets // 'no __newindex' flag NOT set: check. - |. li AT, LJ_TSTR - |6: - | load_got lj_tab_newkey - | sw STR:RC, LO(CARG3) - | sw AT, HI(CARG3) - | sw BASE, L->base - | move CARG2, TAB:RB - | sw PC, SAVE_PC - | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k - |. move CARG1, L - | // Returns TValue *. - | lw BASE, L->base - | b <3 // No 2nd write barrier needed. - |. sdc1 f20, 0(CRET1) - | - |7: // Possible table write barrier for the value. Skip valiswhite check. 
- | barrierback TAB:RB, TMP3, TMP0, <3 - break; - case BC_TSETB: - | // RA = src*8, RB = table*8, RC = index*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | addu CARG2, BASE, RB - | decode_RDtoRC8 RC, RD - | lw CARG1, HI(CARG2) - | li AT, LJ_TTAB - | lw TAB:RB, LO(CARG2) - | addu RA, BASE, RA - | bne CARG1, AT, ->vmeta_tsetb - |. srl TMP0, RC, 3 - | lw TMP1, TAB:RB->asize - | lw TMP2, TAB:RB->array - | sltu AT, TMP0, TMP1 - | beqz AT, ->vmeta_tsetb - |. addu RC, TMP2, RC - | lw TMP1, HI(RC) - | lbu TMP3, TAB:RB->marked - | beq TMP1, TISNIL, >5 - |. ldc1 f0, 0(RA) - |1: - | andi AT, TMP3, LJ_GC_BLACK // isblack(table) - | bnez AT, >7 - |. sdc1 f0, 0(RC) - |2: - | ins_next - | - |5: // Check for __newindex if previous value is nil. - | lw TAB:TMP2, TAB:RB->metatable - | beqz TAB:TMP2, <1 // No metatable: done. - |. nop - | lbu TMP1, TAB:TMP2->nomm - | andi TMP1, TMP1, 1<vmeta_tsetb // Caveat: preserve TMP0! - |. nop - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, TMP3, TMP0, <2 - break; - case BC_TSETR: - | // RA = dst*8, RB = table*8, RC = key*8 - | decode_RB8a RB, INS - | decode_RB8b RB - | decode_RDtoRC8 RC, RD - | addu CARG1, BASE, RB - | addu CARG3, BASE, RC - | lw TAB:CARG2, LO(CARG1) - | ldc1 f0, 0(CARG3) - | trunc.w.d f2, f0 - | lbu TMP3, TAB:CARG2->marked - | lw TMP0, TAB:CARG2->asize - | mfc1 CARG3, f2 - | lw TMP1, TAB:CARG2->array - | andi AT, TMP3, LJ_GC_BLACK // isblack(table) - | bnez AT, >7 - |. addu RA, BASE, RA - |2: - | sltu AT, CARG3, TMP0 - | sll TMP2, CARG3, 3 - | beqz AT, ->vmeta_tsetr // In array part? - |. ldc1 f20, 0(RA) - | addu CRET1, TMP1, TMP2 - |->BC_TSETR_Z: - | ins_next1 - | sdc1 f20, 0(CRET1) - | ins_next2 - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, TMP3, TMP0, <2 - break; - - - case BC_TSETM: - | // RA = base*8 (table at base-1), RD = num_const*8 (start index) - | addu RA, BASE, RA - |1: - | addu TMP3, KBASE, RD - | lw TAB:CARG2, -8+LO(RA) // Guaranteed to be a table. - | addiu TMP0, MULTRES, -8 - | lw TMP3, LO(TMP3) // Integer constant is in lo-word. - | beqz TMP0, >4 // Nothing to copy? - |. srl CARG3, TMP0, 3 - | addu CARG3, CARG3, TMP3 - | lw TMP2, TAB:CARG2->asize - | sll TMP1, TMP3, 3 - | lbu TMP3, TAB:CARG2->marked - | lw CARG1, TAB:CARG2->array - | sltu AT, TMP2, CARG3 - | bnez AT, >5 - |. addu TMP2, RA, TMP0 - | addu TMP1, TMP1, CARG1 - | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table) - |3: // Copy result slots to table. - | ldc1 f0, 0(RA) - | addiu RA, RA, 8 - | sltu AT, RA, TMP2 - | sdc1 f0, 0(TMP1) - | bnez AT, <3 - |. addiu TMP1, TMP1, 8 - | bnez TMP0, >7 - |. nop - |4: - | ins_next - | - |5: // Need to resize array part. - | load_got lj_tab_reasize - | sw BASE, L->base - | sw PC, SAVE_PC - | move BASE, RD - | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) - |. move CARG1, L - | // Must not reallocate the stack. - | move RD, BASE - | b <1 - |. lw BASE, L->base // Reload BASE for lack of a saved register. - | - |7: // Possible table write barrier for any value. Skip valiswhite check. - | barrierback TAB:CARG2, TMP3, TMP0, <4 - break; - - /* -- Calls and vararg handling ----------------------------------------- */ - - case BC_CALLM: - | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8 - | decode_RDtoRC8 NARGS8:RC, RD - | b ->BC_CALL_Z - |. 
addu NARGS8:RC, NARGS8:RC, MULTRES - break; - case BC_CALL: - | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8 - | decode_RDtoRC8 NARGS8:RC, RD - |->BC_CALL_Z: - | move TMP2, BASE - | addu BASE, BASE, RA - | li AT, LJ_TFUNC - | lw TMP0, HI(BASE) - | lw LFUNC:RB, LO(BASE) - | addiu BASE, BASE, 8 - | bne TMP0, AT, ->vmeta_call - |. addiu NARGS8:RC, NARGS8:RC, -8 - | ins_call - break; - - case BC_CALLMT: - | // RA = base*8, (RB = 0,) RC = extra_nargs*8 - | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD. - | // Fall through. Assumes BC_CALLT follows. - break; - case BC_CALLT: - | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 - | addu RA, BASE, RA - | li AT, LJ_TFUNC - | lw TMP0, HI(RA) - | lw LFUNC:RB, LO(RA) - | move NARGS8:RC, RD - | lw TMP1, FRAME_PC(BASE) - | addiu RA, RA, 8 - | bne TMP0, AT, ->vmeta_callt - |. addiu NARGS8:RC, NARGS8:RC, -8 - |->BC_CALLT_Z: - | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'. - | lbu TMP3, LFUNC:RB->ffid - | bnez TMP0, >7 - |. xori TMP2, TMP1, FRAME_VARG - |1: - | sw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC. - | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function? - | move TMP2, BASE - | beqz NARGS8:RC, >3 - |. move TMP3, NARGS8:RC - |2: - | ldc1 f0, 0(RA) - | addiu RA, RA, 8 - | addiu TMP3, TMP3, -8 - | sdc1 f0, 0(TMP2) - | bnez TMP3, <2 - |. addiu TMP2, TMP2, 8 - |3: - | or TMP0, TMP0, AT - | beqz TMP0, >5 - |. nop - |4: - | ins_callt - | - |5: // Tailcall to a fast function with a Lua frame below. - | lw INS, -4(TMP1) - | decode_RA8a RA, INS - | decode_RA8b RA - | subu TMP1, BASE, RA - | lw LFUNC:TMP1, -8+FRAME_FUNC(TMP1) - | lw TMP1, LFUNC:TMP1->pc - | b <4 - |. lw KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE. - | - |7: // Tailcall from a vararg function. - | andi AT, TMP2, FRAME_TYPEP - | bnez AT, <1 // Vararg frame below? - |. subu TMP2, BASE, TMP2 // Relocate BASE down. - | move BASE, TMP2 - | lw TMP1, FRAME_PC(TMP2) - | b <1 - |. andi TMP0, TMP1, FRAME_TYPE - break; - - case BC_ITERC: - | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8)) - | move TMP2, BASE - | addu BASE, BASE, RA - | li AT, LJ_TFUNC - | lw TMP1, -24+HI(BASE) - | lw LFUNC:RB, -24+LO(BASE) - | ldc1 f2, -8(BASE) - | ldc1 f0, -16(BASE) - | sw TMP1, HI(BASE) // Copy callable. - | sw LFUNC:RB, LO(BASE) - | sdc1 f2, 16(BASE) // Copy control var. - | sdc1 f0, 8(BASE) // Copy state. - | addiu BASE, BASE, 8 - | bne TMP1, AT, ->vmeta_call - |. li NARGS8:RC, 16 // Iterators get 2 arguments. - | ins_call - break; - - case BC_ITERN: - | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8) - |.if JIT - | // NYI: add hotloop, record BC_ITERN. - |.endif - | addu RA, BASE, RA - | lw TAB:RB, -16+LO(RA) - | lw RC, -8+LO(RA) // Get index from control var. - | lw TMP0, TAB:RB->asize - | lw TMP1, TAB:RB->array - | addiu PC, PC, 4 - |1: // Traverse array part. - | sltu AT, RC, TMP0 - | beqz AT, >5 // Index points after array part? - |. sll TMP3, RC, 3 - | addu TMP3, TMP1, TMP3 - | lw TMP2, HI(TMP3) - | ldc1 f0, 0(TMP3) - | mtc1 RC, f2 - | lhu RD, -4+OFS_RD(PC) - | beq TMP2, TISNIL, <1 // Skip holes in array part. - |. addiu RC, RC, 1 - | cvt.d.w f2, f2 - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | sdc1 f0, 8(RA) - | decode_RD4b RD - | addu RD, RD, TMP3 - | sw RC, -8+LO(RA) // Update control var. - | addu PC, PC, RD - | sdc1 f2, 0(RA) - |3: - | ins_next - | - |5: // Traverse hash part. - | lw TMP1, TAB:RB->hmask - | subu RC, RC, TMP0 - | lw TMP2, TAB:RB->node - |6: - | sltu AT, TMP1, RC // End of iteration? 
Branch to ITERL+1. - | bnez AT, <3 - |. sll TMP3, RC, 5 - | sll RB, RC, 3 - | subu TMP3, TMP3, RB - | addu NODE:TMP3, TMP3, TMP2 - | lw RB, HI(NODE:TMP3) - | ldc1 f0, 0(NODE:TMP3) - | lhu RD, -4+OFS_RD(PC) - | beq RB, TISNIL, <6 // Skip holes in hash part. - |. addiu RC, RC, 1 - | ldc1 f2, NODE:TMP3->key - | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) - | sdc1 f0, 8(RA) - | addu RC, RC, TMP0 - | decode_RD4b RD - | addu RD, RD, TMP3 - | sdc1 f2, 0(RA) - | addu PC, PC, RD - | b <3 - |. sw RC, -8+LO(RA) // Update control var. - break; - - case BC_ISNEXT: - | // RA = base*8, RD = target (points to ITERN) - | addu RA, BASE, RA - | lw TMP0, -24+HI(RA) - | lw CFUNC:TMP1, -24+LO(RA) - | lw TMP2, -16+HI(RA) - | lw TMP3, -8+HI(RA) - | li AT, LJ_TFUNC - | bne TMP0, AT, >5 - |. addiu TMP2, TMP2, -LJ_TTAB - | lbu TMP1, CFUNC:TMP1->ffid - | addiu TMP3, TMP3, -LJ_TNIL - | srl TMP0, RD, 1 - | or TMP2, TMP2, TMP3 - | addiu TMP1, TMP1, -FF_next_N - | addu TMP0, PC, TMP0 - | or TMP1, TMP1, TMP2 - | bnez TMP1, >5 - |. lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535) - | addu PC, TMP0, TMP2 - | lui TMP1, 0xfffe - | ori TMP1, TMP1, 0x7fff - | sw r0, -8+LO(RA) // Initialize control var. - | sw TMP1, -8+HI(RA) - |1: - | ins_next - |5: // Despecialize bytecode if any of the checks fail. - | li TMP3, BC_JMP - | li TMP1, BC_ITERC - | sb TMP3, -4+OFS_OP(PC) - | addu PC, TMP0, TMP2 - | b <1 - |. sb TMP1, OFS_OP(PC) - break; - - case BC_VARG: - | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 - | lw TMP0, FRAME_PC(BASE) - | decode_RDtoRC8 RC, RD - | decode_RB8a RB, INS - | addu RC, BASE, RC - | decode_RB8b RB - | addu RA, BASE, RA - | addiu RC, RC, FRAME_VARG - | addu TMP2, RA, RB - | addiu TMP3, BASE, -8 // TMP3 = vtop - | subu RC, RC, TMP0 // RC = vbase - | // Note: RC may now be even _above_ BASE if nargs was < numparams. - | beqz RB, >5 // Copy all varargs? - |. subu TMP1, TMP3, RC - | addiu TMP2, TMP2, -16 - |1: // Copy vararg slots to destination slots. - | lw CARG1, HI(RC) - | sltu AT, RC, TMP3 - | lw CARG2, LO(RC) - | addiu RC, RC, 8 - | movz CARG1, TISNIL, AT - | sw CARG1, HI(RA) - | sw CARG2, LO(RA) - | sltu AT, RA, TMP2 - | bnez AT, <1 - |. addiu RA, RA, 8 - |3: - | ins_next - | - |5: // Copy all varargs. - | lw TMP0, L->maxstack - | blez TMP1, <3 // No vararg slots? - |. li MULTRES, 8 // MULTRES = (0+1)*8 - | addu TMP2, RA, TMP1 - | sltu AT, TMP0, TMP2 - | bnez AT, >7 - |. addiu MULTRES, TMP1, 8 - |6: - | ldc1 f0, 0(RC) - | addiu RC, RC, 8 - | sdc1 f0, 0(RA) - | sltu AT, RC, TMP3 - | bnez AT, <6 // More vararg slots? - |. addiu RA, RA, 8 - | b <3 - |. nop - | - |7: // Grow stack for varargs. - | load_got lj_state_growstack - | sw RA, L->top - | subu RA, RA, BASE - | sw BASE, L->base - | subu BASE, RC, BASE // Need delta, because BASE may change. - | sw PC, SAVE_PC - | srl CARG2, TMP1, 3 - | call_intern lj_state_growstack // (lua_State *L, int n) - |. move CARG1, L - | move RC, BASE - | lw BASE, L->base - | addu RA, BASE, RA - | addu RC, BASE, RC - | b <6 - |. addiu TMP3, BASE, -8 - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - | // RA = results*8, RD = extra_nresults*8 - | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8. - | // Fall through. Assumes BC_RET follows. - break; - - case BC_RET: - | // RA = results*8, RD = (nresults+1)*8 - | lw PC, FRAME_PC(BASE) - | addu RA, BASE, RA - | move MULTRES, RD - |1: - | andi TMP0, PC, FRAME_TYPE - | bnez TMP0, ->BC_RETV_Z - |. 
xori TMP1, PC, FRAME_VARG - | - |->BC_RET_Z: - | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return - | lw INS, -4(PC) - | addiu TMP2, BASE, -8 - | addiu RC, RD, -8 - | decode_RA8a TMP0, INS - | decode_RB8a RB, INS - | decode_RA8b TMP0 - | decode_RB8b RB - | addu TMP3, TMP2, RB - | beqz RC, >3 - |. subu BASE, TMP2, TMP0 - |2: - | ldc1 f0, 0(RA) - | addiu RA, RA, 8 - | addiu RC, RC, -8 - | sdc1 f0, 0(TMP2) - | bnez RC, <2 - |. addiu TMP2, TMP2, 8 - |3: - | addiu TMP3, TMP3, -8 - |5: - | sltu AT, TMP2, TMP3 - | bnez AT, >6 - |. lw LFUNC:TMP1, FRAME_FUNC(BASE) - | ins_next1 - | lw TMP1, LFUNC:TMP1->pc - | lw KBASE, PC2PROTO(k)(TMP1) - | ins_next2 - | - |6: // Fill up results with nil. - | sw TISNIL, HI(TMP2) - | b <5 - |. addiu TMP2, TMP2, 8 - | - |->BC_RETV_Z: // Non-standard return case. - | andi TMP2, TMP1, FRAME_TYPEP - | bnez TMP2, ->vm_return - |. nop - | // Return from vararg function: relocate BASE down. - | subu BASE, BASE, TMP1 - | b <1 - |. lw PC, FRAME_PC(BASE) - break; - - case BC_RET0: case BC_RET1: - | // RA = results*8, RD = (nresults+1)*8 - | lw PC, FRAME_PC(BASE) - | addu RA, BASE, RA - | move MULTRES, RD - | andi TMP0, PC, FRAME_TYPE - | bnez TMP0, ->BC_RETV_Z - |. xori TMP1, PC, FRAME_VARG - | - | lw INS, -4(PC) - | addiu TMP2, BASE, -8 - if (op == BC_RET1) { - | ldc1 f0, 0(RA) - } - | decode_RB8a RB, INS - | decode_RA8a RA, INS - | decode_RB8b RB - | decode_RA8b RA - if (op == BC_RET1) { - | sdc1 f0, 0(TMP2) - } - | subu BASE, TMP2, RA - |5: - | sltu AT, RD, RB - | bnez AT, >6 - |. lw LFUNC:TMP1, FRAME_FUNC(BASE) - | ins_next1 - | lw TMP1, LFUNC:TMP1->pc - | lw KBASE, PC2PROTO(k)(TMP1) - | ins_next2 - | - |6: // Fill up results with nil. - | addiu TMP2, TMP2, 8 - | addiu RD, RD, 8 - | b <5 - if (op == BC_RET1) { - |. sw TISNIL, HI(TMP2) - } else { - |. sw TISNIL, -8+HI(TMP2) - } - break; - - /* -- Loops and branches ------------------------------------------------ */ - - case BC_FORL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IFORL follows. - break; - - case BC_JFORI: - case BC_JFORL: -#if !LJ_HASJIT - break; -#endif - case BC_FORI: - case BC_IFORL: - | // RA = base*8, RD = target (after end of loop or start of loop) - vk = (op == BC_IFORL || op == BC_JFORL); - | addu RA, BASE, RA - if (vk) { - | ldc1 f0, FORL_IDX*8(RA) - | ldc1 f4, FORL_STEP*8(RA) - | ldc1 f2, FORL_STOP*8(RA) - | lw TMP3, FORL_STEP*8+HI(RA) - | add.d f0, f0, f4 - | sdc1 f0, FORL_IDX*8(RA) - } else { - | lw TMP1, FORL_IDX*8+HI(RA) - | lw TMP3, FORL_STEP*8+HI(RA) - | lw TMP2, FORL_STOP*8+HI(RA) - | sltiu TMP1, TMP1, LJ_TISNUM - | sltiu TMP0, TMP3, LJ_TISNUM - | sltiu TMP2, TMP2, LJ_TISNUM - | and TMP1, TMP1, TMP0 - | and TMP1, TMP1, TMP2 - | ldc1 f0, FORL_IDX*8(RA) - | beqz TMP1, ->vmeta_for - |. ldc1 f2, FORL_STOP*8(RA) - } - if (op != BC_JFORL) { - | srl RD, RD, 1 - | lui TMP0, (-(BCBIAS_J*4 >> 16) & 65535) - } - | c.le.d 0, f0, f2 - | c.le.d 1, f2, f0 - | sdc1 f0, FORL_EXT*8(RA) - if (op == BC_JFORI) { - | li TMP1, 1 - | li TMP2, 1 - | addu TMP0, RD, TMP0 - | slt TMP3, TMP3, r0 - | movf TMP1, r0, 0 - | addu PC, PC, TMP0 - | movf TMP2, r0, 1 - | lhu RD, -4+OFS_RD(PC) - | movn TMP1, TMP2, TMP3 - | bnez TMP1, =>BC_JLOOP - |. decode_RD8b RD - } else if (op == BC_JFORL) { - | li TMP1, 1 - | li TMP2, 1 - | slt TMP3, TMP3, r0 - | movf TMP1, r0, 0 - | movf TMP2, r0, 1 - | movn TMP1, TMP2, TMP3 - | bnez TMP1, =>BC_JLOOP - |. 
nop - } else { - | addu TMP1, RD, TMP0 - | slt TMP3, TMP3, r0 - | move TMP2, TMP1 - if (op == BC_FORI) { - | movt TMP1, r0, 0 - | movt TMP2, r0, 1 - } else { - | movf TMP1, r0, 0 - | movf TMP2, r0, 1 - } - | movn TMP1, TMP2, TMP3 - | addu PC, PC, TMP1 - } - | ins_next - break; - - case BC_ITERL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IITERL follows. - break; - - case BC_JITERL: -#if !LJ_HASJIT - break; -#endif - case BC_IITERL: - | // RA = base*8, RD = target - | addu RA, BASE, RA - | lw TMP1, HI(RA) - | beq TMP1, TISNIL, >1 // Stop if iterator returned nil. - |. lw TMP2, LO(RA) - if (op == BC_JITERL) { - | sw TMP1, -8+HI(RA) - | b =>BC_JLOOP - |. sw TMP2, -8+LO(RA) - } else { - | branch_RD // Otherwise save control var + branch. - | sw TMP1, -8+HI(RA) - | sw TMP2, -8+LO(RA) - } - |1: - | ins_next - break; - - case BC_LOOP: - | // RA = base*8, RD = target (loop extent) - | // Note: RA/RD is only used by trace recorder to determine scope/extent - | // This opcode does NOT jump, it's only purpose is to detect a hot loop. - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_ILOOP follows. - break; - - case BC_ILOOP: - | // RA = base*8, RD = target (loop extent) - | ins_next - break; - - case BC_JLOOP: - |.if JIT - | // RA = base*8 (ignored), RD = traceno*8 - | lw TMP1, DISPATCH_J(trace)(DISPATCH) - | srl RD, RD, 1 - | li AT, 0 - | addu TMP1, TMP1, RD - | // Traces on MIPS don't store the trace number, so use 0. - | sw AT, DISPATCH_GL(vmstate)(DISPATCH) - | lw TRACE:TMP2, 0(TMP1) - | sw BASE, DISPATCH_GL(jit_base)(DISPATCH) - | lw TMP2, TRACE:TMP2->mcode - | sw L, DISPATCH_GL(tmpbuf.L)(DISPATCH) - | jr TMP2 - |. addiu JGL, DISPATCH, GG_DISP2G+32768 - |.endif - break; - - case BC_JMP: - | // RA = base*8 (only used by trace recorder), RD = target - | branch_RD - | ins_next - break; - - /* -- Function headers -------------------------------------------------- */ - - case BC_FUNCF: - |.if JIT - | hotcall - |.endif - case BC_FUNCV: /* NYI: compiled vararg functions. */ - | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. - break; - - case BC_JFUNCF: -#if !LJ_HASJIT - break; -#endif - case BC_IFUNCF: - | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 - | lw TMP2, L->maxstack - | lbu TMP1, -4+PC2PROTO(numparams)(PC) - | lw KBASE, -4+PC2PROTO(k)(PC) - | sltu AT, TMP2, RA - | bnez AT, ->vm_growstack_l - |. sll TMP1, TMP1, 3 - if (op != BC_JFUNCF) { - | ins_next1 - } - |2: - | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters. - | bnez AT, >3 - |. addu AT, BASE, NARGS8:RC - if (op == BC_JFUNCF) { - | decode_RD8a RD, INS - | b =>BC_JLOOP - |. decode_RD8b RD - } else { - | ins_next2 - } - | - |3: // Clear missing parameters. - | sw TISNIL, HI(AT) - | b <2 - |. addiu NARGS8:RC, NARGS8:RC, 8 - break; - - case BC_JFUNCV: -#if !LJ_HASJIT - break; -#endif - | NYI // NYI: compiled vararg functions - break; /* NYI: compiled vararg functions. */ - - case BC_IFUNCV: - | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 - | addu TMP1, BASE, RC - | lw TMP2, L->maxstack - | addu TMP0, RA, RC - | sw LFUNC:RB, LO(TMP1) // Store copy of LFUNC. - | addiu TMP3, RC, 8+FRAME_VARG - | sltu AT, TMP0, TMP2 - | lw KBASE, -4+PC2PROTO(k)(PC) - | beqz AT, ->vm_growstack_l - |. sw TMP3, HI(TMP1) // Store delta + FRAME_VARG. - | lbu TMP2, -4+PC2PROTO(numparams)(PC) - | move RA, BASE - | move RC, TMP1 - | ins_next1 - | beqz TMP2, >3 - |. 
addiu BASE, TMP1, 8 - |1: - | lw TMP0, HI(RA) - | lw TMP3, LO(RA) - | sltu AT, RA, RC // Less args than parameters? - | move CARG1, TMP0 - | movz TMP0, TISNIL, AT // Clear missing parameters. - | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC). - | sw TMP3, 8+LO(TMP1) - | addiu TMP2, TMP2, -1 - | sw TMP0, 8+HI(TMP1) - | addiu TMP1, TMP1, 8 - | sw CARG1, HI(RA) - | bnez TMP2, <1 - |. addiu RA, RA, 8 - |3: - | ins_next2 - break; - - case BC_FUNCC: - case BC_FUNCCW: - | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8 - if (op == BC_FUNCC) { - | lw CFUNCADDR, CFUNC:RB->f - } else { - | lw CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH) - } - | addu TMP1, RA, NARGS8:RC - | lw TMP2, L->maxstack - | addu RC, BASE, NARGS8:RC - | sw BASE, L->base - | sltu AT, TMP2, TMP1 - | sw RC, L->top - | li_vmstate C - if (op == BC_FUNCCW) { - | lw CARG2, CFUNC:RB->f - } - | bnez AT, ->vm_growstack_c // Need to grow stack. - |. move CARG1, L - | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f]) - |. st_vmstate - | // Returns nresults. - | lw BASE, L->base - | sll RD, CRET1, 3 - | lw TMP1, L->top - | li_vmstate INTERP - | lw PC, FRAME_PC(BASE) // Fetch PC of caller. - | subu RA, TMP1, RD // RA = L->top - nresults*8 - | sw L, DISPATCH_GL(cur_L)(DISPATCH) - | b ->vm_returnc - |. st_vmstate - break; - - /* ---------------------------------------------------------------------- */ - - default: - fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); - exit(2); - break; - } -} - -static int build_backend(BuildCtx *ctx) -{ - int op; - - dasm_growpc(Dst, BC__MAX); - - build_subroutines(ctx); - - |.code_op - for (op = 0; op < BC__MAX; op++) - build_ins(ctx, (BCOp)op, op); - - return BC__MAX; -} - -/* Emit pseudo frame-info for all assembler functions. 
*/ -static void emit_asm_debug(BuildCtx *ctx) -{ - int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); - int i; - switch (ctx->mode) { - case BUILD_elfasm: - fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); - fprintf(ctx->fp, - ".Lframe0:\n" - "\t.4byte .LECIE0-.LSCIE0\n" - ".LSCIE0:\n" - "\t.4byte 0xffffffff\n" - "\t.byte 0x1\n" - "\t.string \"\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 31\n" - "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n" - "\t.align 2\n" - ".LECIE0:\n\n"); - fprintf(ctx->fp, - ".LSFDE0:\n" - "\t.4byte .LEFDE0-.LASFDE0\n" - ".LASFDE0:\n" - "\t.4byte .Lframe0\n" - "\t.4byte .Lbegin\n" - "\t.4byte %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" - "\t.byte 0x9f\n\t.sleb128 1\n" - "\t.byte 0x9e\n\t.sleb128 2\n", - fcofs, CFRAME_SIZE); - for (i = 23; i >= 16; i--) - fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i); - for (i = 30; i >= 20; i -= 2) - fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i); - fprintf(ctx->fp, - "\t.align 2\n" - ".LEFDE0:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".LSFDE1:\n" - "\t.4byte .LEFDE1-.LASFDE1\n" - ".LASFDE1:\n" - "\t.4byte .Lframe0\n" - "\t.4byte lj_vm_ffi_call\n" - "\t.4byte %d\n" - "\t.byte 0x9f\n\t.uleb128 1\n" - "\t.byte 0x90\n\t.uleb128 2\n" - "\t.byte 0xd\n\t.uleb128 0x10\n" - "\t.align 2\n" - ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); -#endif - fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); - fprintf(ctx->fp, - "\t.globl lj_err_unwind_dwarf\n" - ".Lframe1:\n" - "\t.4byte .LECIE1-.LSCIE1\n" - ".LSCIE1:\n" - "\t.4byte 0\n" - "\t.byte 0x1\n" - "\t.string \"zPR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 31\n" - "\t.uleb128 6\n" /* augmentation length */ - "\t.byte 0\n" - "\t.4byte lj_err_unwind_dwarf\n" - "\t.byte 0\n" - "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n" - "\t.align 2\n" - ".LECIE1:\n\n"); - fprintf(ctx->fp, - ".LSFDE2:\n" - "\t.4byte .LEFDE2-.LASFDE2\n" - ".LASFDE2:\n" - "\t.4byte .LASFDE2-.Lframe1\n" - "\t.4byte .Lbegin\n" - "\t.4byte %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 %d\n" - "\t.byte 0x9f\n\t.sleb128 1\n" - "\t.byte 0x9e\n\t.sleb128 2\n", - fcofs, CFRAME_SIZE); - for (i = 23; i >= 16; i--) - fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i); - for (i = 30; i >= 20; i -= 2) - fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i); - fprintf(ctx->fp, - "\t.align 2\n" - ".LEFDE2:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".Lframe2:\n" - "\t.4byte .LECIE2-.LSCIE2\n" - ".LSCIE2:\n" - "\t.4byte 0\n" - "\t.byte 0x1\n" - "\t.string \"zR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 31\n" - "\t.uleb128 1\n" /* augmentation length */ - "\t.byte 0\n" - "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n" - "\t.align 2\n" - ".LECIE2:\n\n"); - fprintf(ctx->fp, - ".LSFDE3:\n" - "\t.4byte .LEFDE3-.LASFDE3\n" - ".LASFDE3:\n" - "\t.4byte .LASFDE3-.Lframe2\n" - "\t.4byte lj_vm_ffi_call\n" - "\t.4byte %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0x9f\n\t.uleb128 1\n" - "\t.byte 0x90\n\t.uleb128 2\n" - "\t.byte 0xd\n\t.uleb128 0x10\n" - "\t.align 2\n" - ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); -#endif - break; - default: - break; - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_ppc.dasc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_ppc.dasc deleted file mode 100644 index 9299c554e2b..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_ppc.dasc +++ /dev/null @@ -1,5252 +0,0 @@ -|// Low-level VM code for PowerPC 32 bit 
or 32on64 bit mode. -|// Bytecode interpreter, fast functions and helper functions. -|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -| -|.arch ppc -|.section code_op, code_sub -| -|.actionlist build_actionlist -|.globals GLOB_ -|.globalnames globnames -|.externnames extnames -| -|// Note: The ragged indentation of the instructions is intentional. -|// The starting columns indicate data dependencies. -| -|//----------------------------------------------------------------------- -| -|// DynASM defines used by the PPC port: -|// -|// P64 64 bit pointers (only for GPR64 testing). -|// Note: see vm_ppc64.dasc for a full PPC64 _LP64 port. -|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3). -|// Affects reg saves, stack layout, carry/overflow/dot flags etc. -|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360). -|// TOC Need table of contents (64 bit or 32 bit variant, e.g. PS3). -|// Function pointers are really a struct: code, TOC, env (optional). -|// TOCENV Function pointers have an environment pointer, too (not on PS3). -|// PPE Power Processor Element of Cell (PS3) or Xenon (Xbox 360). -|// Must avoid (slow) micro-coded instructions. -| -|.if P64 -|.define TOC, 1 -|.define TOCENV, 1 -|.macro lpx, a, b, c; ldx a, b, c; .endmacro -|.macro lp, a, b; ld a, b; .endmacro -|.macro stp, a, b; std a, b; .endmacro -|.define decode_OPP, decode_OP8 -|.if FFI -|// Missing: Calling conventions, 64 bit regs, TOC. -|.error lib_ffi not yet implemented for PPC64 -|.endif -|.else -|.macro lpx, a, b, c; lwzx a, b, c; .endmacro -|.macro lp, a, b; lwz a, b; .endmacro -|.macro stp, a, b; stw a, b; .endmacro -|.define decode_OPP, decode_OP4 -|.endif -| -|// Convenience macros for TOC handling. -|.if TOC -|// Linker needs a TOC patch area for every external call relocation. -|.macro blex, target; bl extern target@plt; nop; .endmacro -|.macro .toc, a, b; a, b; .endmacro -|.if P64 -|.define TOC_OFS, 8 -|.define ENV_OFS, 16 -|.else -|.define TOC_OFS, 4 -|.define ENV_OFS, 8 -|.endif -|.else // No TOC. -|.macro blex, target; bl extern target@plt; .endmacro -|.macro .toc, a, b; .endmacro -|.endif -|.macro .tocenv, a, b; .if TOCENV; a, b; .endif; .endmacro -| -|.macro .gpr64, a, b; .if GPR64; a, b; .endif; .endmacro -| -|.macro andix., y, a, i -|.if PPE -| rlwinm y, a, 0, 31-lj_fls(i), 31-lj_ffs(i) -| cmpwi y, 0 -|.else -| andi. y, a, i -|.endif -|.endmacro -| -|.macro clrso, reg -|.if PPE -| li reg, 0 -| mtxer reg -|.else -| mcrxr cr0 -|.endif -|.endmacro -| -|.macro checkov, reg, noov -|.if PPE -| mfxer reg -| add reg, reg, reg -| cmpwi reg, 0 -| li reg, 0 -| mtxer reg -| bgey noov -|.else -| mcrxr cr0 -| bley noov -|.endif -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Fixed register assignments for the interpreter. -|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA) -| -|// The following must be C callee-save (but BASE is often refetched). -|.define BASE, r14 // Base of current Lua stack frame. -|.define KBASE, r15 // Constants of current Lua function. -|.define PC, r16 // Next PC. -|.define DISPATCH, r17 // Opcode dispatch table. -|.define LREG, r18 // Register holding lua_State (also in SAVE_L). -|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8. -|.define JGL, r31 // On-trace: global_State + 32768. -| -|// Constants for type-comparisons, stores and conversions. C callee-save. -|.define TISNUM, r22 -|.define TISNIL, r23 -|.define ZERO, r24 -|.define TOBIT, f30 // 2^52 + 2^51. 
-|.define TONUM, f31 // 2^52 + 2^51 + 2^31. -| -|// The following temporaries are not saved across C calls, except for RA. -|.define RA, r20 // Callee-save. -|.define RB, r10 -|.define RC, r11 -|.define RD, r12 -|.define INS, r7 // Overlaps CARG5. -| -|.define TMP0, r0 -|.define TMP1, r8 -|.define TMP2, r9 -|.define TMP3, r6 // Overlaps CARG4. -| -|// Saved temporaries. -|.define SAVE0, r21 -| -|// Calling conventions. -|.define CARG1, r3 -|.define CARG2, r4 -|.define CARG3, r5 -|.define CARG4, r6 // Overlaps TMP3. -|.define CARG5, r7 // Overlaps INS. -| -|.define FARG1, f1 -|.define FARG2, f2 -| -|.define CRET1, r3 -|.define CRET2, r4 -| -|.define TOCREG, r2 // TOC register (only used by C code). -|.define ENVREG, r11 // Environment pointer (nested C functions). -| -|// Stack layout while in interpreter. Must match with lj_frame.h. -|.if GPR64 -|.if FRAME32 -| -|// 456(sp) // \ 32/64 bit C frame info -|.define TONUM_LO, 452(sp) // | -|.define TONUM_HI, 448(sp) // | -|.define TMPD_LO, 444(sp) // | -|.define TMPD_HI, 440(sp) // | -|.define SAVE_CR, 432(sp) // | 64 bit CR save. -|.define SAVE_ERRF, 424(sp) // > Parameter save area. -|.define SAVE_NRES, 420(sp) // | -|.define SAVE_L, 416(sp) // | -|.define SAVE_PC, 412(sp) // | -|.define SAVE_MULTRES, 408(sp) // | -|.define SAVE_CFRAME, 400(sp) // / 64 bit C frame chain. -|// 392(sp) // Reserved. -|.define CFRAME_SPACE, 384 // Delta for sp. -|// Back chain for sp: 384(sp) <-- sp entering interpreter -|.define SAVE_LR, 376(sp) // 32 bit LR stored in hi-part. -|.define SAVE_GPR_, 232 // .. 232+18*8: 64 bit GPR saves. -|.define SAVE_FPR_, 88 // .. 88+18*8: 64 bit FPR saves. -|// 80(sp) // Needed for 16 byte stack frame alignment. -|// 16(sp) // Callee parameter save area (ABI mandated). -|// 8(sp) // Reserved -|// Back chain for sp: 0(sp) <-- sp while in interpreter -|// 32 bit sp stored in hi-part of 0(sp). -| -|.define TMPD_BLO, 447(sp) -|.define TMPD, TMPD_HI -|.define TONUM_D, TONUM_HI -| -|.else -| -|// 508(sp) // \ 32 bit C frame info. -|.define SAVE_ERRF, 472(sp) // | -|.define SAVE_NRES, 468(sp) // | -|.define SAVE_L, 464(sp) // > Parameter save area. -|.define SAVE_PC, 460(sp) // | -|.define SAVE_MULTRES, 456(sp) // | -|.define SAVE_CFRAME, 448(sp) // / 64 bit C frame chain. -|.define SAVE_LR, 416(sp) -|.define CFRAME_SPACE, 400 // Delta for sp. -|// Back chain for sp: 400(sp) <-- sp entering interpreter -|.define SAVE_FPR_, 256 // .. 256+18*8: 64 bit FPR saves. -|.define SAVE_GPR_, 112 // .. 112+18*8: 64 bit GPR saves. -|// 48(sp) // Callee parameter save area (ABI mandated). -|.define SAVE_TOC, 40(sp) // TOC save area. -|.define TMPD_LO, 36(sp) // \ Link editor temp (ABI mandated). -|.define TMPD_HI, 32(sp) // / -|.define TONUM_LO, 28(sp) // \ Compiler temp (ABI mandated). -|.define TONUM_HI, 24(sp) // / -|// Next frame lr: 16(sp) -|.define SAVE_CR, 8(sp) // 64 bit CR save. -|// Back chain for sp: 0(sp) <-- sp while in interpreter -| -|.define TMPD_BLO, 39(sp) -|.define TMPD, TMPD_HI -|.define TONUM_D, TONUM_HI -| -|.endif -|.else -| -|.define SAVE_LR, 276(sp) -|.define CFRAME_SPACE, 272 // Delta for sp. -|// Back chain for sp: 272(sp) <-- sp entering interpreter -|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves. -|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves. -|.define SAVE_CR, 52(sp) // 32 bit CR save. -|.define SAVE_ERRF, 48(sp) // 32 bit C frame info. 
-|.define SAVE_NRES, 44(sp) -|.define SAVE_CFRAME, 40(sp) -|.define SAVE_L, 36(sp) -|.define SAVE_PC, 32(sp) -|.define SAVE_MULTRES, 28(sp) -|.define UNUSED1, 24(sp) -|.define TMPD_LO, 20(sp) -|.define TMPD_HI, 16(sp) -|.define TONUM_LO, 12(sp) -|.define TONUM_HI, 8(sp) -|// Next frame lr: 4(sp) -|// Back chain for sp: 0(sp) <-- sp while in interpreter -| -|.define TMPD_BLO, 23(sp) -|.define TMPD, TMPD_HI -|.define TONUM_D, TONUM_HI -| -|.endif -| -|.macro save_, reg -|.if GPR64 -| std r..reg, SAVE_GPR_+(reg-14)*8(sp) -|.else -| stw r..reg, SAVE_GPR_+(reg-14)*4(sp) -|.endif -| stfd f..reg, SAVE_FPR_+(reg-14)*8(sp) -|.endmacro -|.macro rest_, reg -|.if GPR64 -| ld r..reg, SAVE_GPR_+(reg-14)*8(sp) -|.else -| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp) -|.endif -| lfd f..reg, SAVE_FPR_+(reg-14)*8(sp) -|.endmacro -| -|.macro saveregs -|.if GPR64 and not FRAME32 -| stdu sp, -CFRAME_SPACE(sp) -|.else -| stwu sp, -CFRAME_SPACE(sp) -|.endif -| save_ 14; save_ 15; save_ 16 -| mflr r0 -| save_ 17; save_ 18; save_ 19; save_ 20; save_ 21; save_ 22 -|.if GPR64 and not FRAME32 -| std r0, SAVE_LR -|.else -| stw r0, SAVE_LR -|.endif -| save_ 23; save_ 24; save_ 25 -| mfcr r0 -| save_ 26; save_ 27; save_ 28; save_ 29; save_ 30; save_ 31 -|.if GPR64 -| std r0, SAVE_CR -|.else -| stw r0, SAVE_CR -|.endif -| .toc std TOCREG, SAVE_TOC -|.endmacro -| -|.macro restoreregs -|.if GPR64 and not FRAME32 -| ld r0, SAVE_LR -|.else -| lwz r0, SAVE_LR -|.endif -|.if GPR64 -| ld r12, SAVE_CR -|.else -| lwz r12, SAVE_CR -|.endif -| rest_ 14; rest_ 15; rest_ 16; rest_ 17; rest_ 18; rest_ 19 -| mtlr r0; -|.if PPE; mtocrf 0x20, r12; .else; mtcrf 0x38, r12; .endif -| rest_ 20; rest_ 21; rest_ 22; rest_ 23; rest_ 24; rest_ 25 -|.if PPE; mtocrf 0x10, r12; .endif -| rest_ 26; rest_ 27; rest_ 28; rest_ 29; rest_ 30; rest_ 31 -|.if PPE; mtocrf 0x08, r12; .endif -| addi sp, sp, CFRAME_SPACE -|.endmacro -| -|// Type definitions. Some of these are only used for documentation. -|.type L, lua_State, LREG -|.type GL, global_State -|.type TVALUE, TValue -|.type GCOBJ, GCobj -|.type STR, GCstr -|.type TAB, GCtab -|.type LFUNC, GCfuncL -|.type CFUNC, GCfuncC -|.type PROTO, GCproto -|.type UPVAL, GCupval -|.type NODE, Node -|.type NARGS8, int -|.type TRACE, GCtrace -|.type SBUF, SBuf -| -|//----------------------------------------------------------------------- -| -|// Trap for not-yet-implemented parts. -|.macro NYI; tw 4, sp, sp; .endmacro -| -|// int/FP conversions. -|.macro tonum_i, freg, reg -| xoris reg, reg, 0x8000 -| stw reg, TONUM_LO -| lfd freg, TONUM_D -| fsub freg, freg, TONUM -|.endmacro -| -|.macro tonum_u, freg, reg -| stw reg, TONUM_LO -| lfd freg, TONUM_D -| fsub freg, freg, TOBIT -|.endmacro -| -|.macro toint, reg, freg, tmpfreg -| fctiwz tmpfreg, freg -| stfd tmpfreg, TMPD -| lwz reg, TMPD_LO -|.endmacro -| -|.macro toint, reg, freg -| toint reg, freg, freg -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Access to frame relative to BASE. -|.define FRAME_PC, -8 -|.define FRAME_FUNC, -4 -| -|// Instruction decode. 
-|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro -|.macro decode_OP8, dst, ins; rlwinm dst, ins, 3, 21, 28; .endmacro -|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro -|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro -|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro -|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro -| -|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro -|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro -| -|// Instruction fetch. -|.macro ins_NEXT1 -| lwz INS, 0(PC) -| addi PC, PC, 4 -|.endmacro -|// Instruction decode+dispatch. Note: optimized for e300! -|.macro ins_NEXT2 -| decode_OPP TMP1, INS -| lpx TMP0, DISPATCH, TMP1 -| mtctr TMP0 -| decode_RB8 RB, INS -| decode_RD8 RD, INS -| decode_RA8 RA, INS -| decode_RC8 RC, INS -| bctr -|.endmacro -|.macro ins_NEXT -| ins_NEXT1 -| ins_NEXT2 -|.endmacro -| -|// Instruction footer. -|.if 1 -| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. -| .define ins_next, ins_NEXT -| .define ins_next_, ins_NEXT -| .define ins_next1, ins_NEXT1 -| .define ins_next2, ins_NEXT2 -|.else -| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. -| // Affects only certain kinds of benchmarks (and only with -j off). -| .macro ins_next -| b ->ins_next -| .endmacro -| .macro ins_next1 -| .endmacro -| .macro ins_next2 -| b ->ins_next -| .endmacro -| .macro ins_next_ -| ->ins_next: -| ins_NEXT -| .endmacro -|.endif -| -|// Call decode and dispatch. -|.macro ins_callt -| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC -| lwz PC, LFUNC:RB->pc -| lwz INS, 0(PC) -| addi PC, PC, 4 -| decode_OPP TMP1, INS -| decode_RA8 RA, INS -| lpx TMP0, DISPATCH, TMP1 -| add RA, RA, BASE -| mtctr TMP0 -| bctr -|.endmacro -| -|.macro ins_call -| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC -| stw PC, FRAME_PC(BASE) -| ins_callt -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Macros to test operand types. -|.macro checknum, reg; cmplw reg, TISNUM; .endmacro -|.macro checknum, cr, reg; cmplw cr, reg, TISNUM; .endmacro -|.macro checkstr, reg; cmpwi reg, LJ_TSTR; .endmacro -|.macro checktab, reg; cmpwi reg, LJ_TTAB; .endmacro -|.macro checkfunc, reg; cmpwi reg, LJ_TFUNC; .endmacro -|.macro checknil, reg; cmpwi reg, LJ_TNIL; .endmacro -| -|.macro branch_RD -| srwi TMP0, RD, 1 -| addis PC, PC, -(BCBIAS_J*4 >> 16) -| add PC, PC, TMP0 -|.endmacro -| -|// Assumes DISPATCH is relative to GL. -#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) -#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) -| -#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) -| -|.macro hotcheck, delta, target -| rlwinm TMP1, PC, 31, 25, 30 -| addi TMP1, TMP1, GG_DISP2HOT -| lhzx TMP2, DISPATCH, TMP1 -| addic. TMP2, TMP2, -delta -| sthx TMP2, DISPATCH, TMP1 -| blt target -|.endmacro -| -|.macro hotloop -| hotcheck HOTCOUNT_LOOP, ->vm_hotloop -|.endmacro -| -|.macro hotcall -| hotcheck HOTCOUNT_CALL, ->vm_hotcall -|.endmacro -| -|// Set current VM state. Uses TMP0. -|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro -|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro -| -|// Move table write barrier back. Overwrites mark and tmp. 
-|.macro barrierback, tab, mark, tmp -| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH) -| // Assumes LJ_GC_BLACK is 0x04. -| rlwinm mark, mark, 0, 30, 28 // black2gray(tab) -| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH) -| stb mark, tab->marked -| stw tmp, tab->gclist -|.endmacro -| -|//----------------------------------------------------------------------- - -/* Generate subroutines used by opcodes and other parts of the VM. */ -/* The .code_sub section should be last to help static branch prediction. */ -static void build_subroutines(BuildCtx *ctx) -{ - |.code_sub - | - |//----------------------------------------------------------------------- - |//-- Return handling ---------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_returnp: - | // See vm_return. Also: TMP2 = previous base. - | andix. TMP0, PC, FRAME_P - | li TMP1, LJ_TTRUE - | beq ->cont_dispatch - | - | // Return from pcall or xpcall fast func. - | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame. - | mr BASE, TMP2 // Restore caller base. - | // Prepending may overwrite the pcall frame, so do it at the end. - | stwu TMP1, FRAME_PC(RA) // Prepend true to results. - | - |->vm_returnc: - | addi RD, RD, 8 // RD = (nresults+1)*8. - | andix. TMP0, PC, FRAME_TYPE - | cmpwi cr1, RD, 0 - | li CRET1, LUA_YIELD - | beq cr1, ->vm_unwind_c_eh - | mr MULTRES, RD - | beq ->BC_RET_Z // Handle regular return to Lua. - | - |->vm_return: - | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return - | // TMP0 = PC & FRAME_TYPE - | cmpwi TMP0, FRAME_C - | rlwinm TMP2, PC, 0, 0, 28 - | li_vmstate C - | sub TMP2, BASE, TMP2 // TMP2 = previous base. - | bney ->vm_returnp - | - | addic. TMP1, RD, -8 - | stp TMP2, L->base - | lwz TMP2, SAVE_NRES - | subi BASE, BASE, 8 - | st_vmstate - | slwi TMP2, TMP2, 3 - | beq >2 - |1: - | addic. TMP1, TMP1, -8 - | lfd f0, 0(RA) - | addi RA, RA, 8 - | stfd f0, 0(BASE) - | addi BASE, BASE, 8 - | bney <1 - | - |2: - | cmpw TMP2, RD // More/less results wanted? - | bne >6 - |3: - | stp BASE, L->top // Store new top. - | - |->vm_leave_cp: - | lp TMP0, SAVE_CFRAME // Restore previous C frame. - | li CRET1, 0 // Ok return status for vm_pcall. - | stp TMP0, L->cframe - | - |->vm_leave_unw: - | restoreregs - | blr - | - |6: - | ble >7 // Less results wanted? - | // More results wanted. Check stack size and fill up results with nil. - | lwz TMP1, L->maxstack - | cmplw BASE, TMP1 - | bge >8 - | stw TISNIL, 0(BASE) - | addi RD, RD, 8 - | addi BASE, BASE, 8 - | b <2 - | - |7: // Less results wanted. - | subfic TMP3, TMP2, 0 // LUA_MULTRET+1 case? - | sub TMP0, RD, TMP2 - | subfe TMP1, TMP1, TMP1 // TMP1 = TMP2 == 0 ? 0 : -1 - | and TMP0, TMP0, TMP1 - | sub BASE, BASE, TMP0 // Either keep top or shrink it. - | b <3 - | - |8: // Corner case: need to grow stack for filling up results. - | // This can happen if: - | // - A C function grows the stack (a lot). - | // - The GC shrinks the stack in between. - | // - A return back from a lua_call() with (high) nresults adjustment. - | stp BASE, L->top // Save current top held in BASE (yes). - | mr SAVE0, RD - | srwi CARG2, TMP2, 3 - | mr CARG1, L - | bl extern lj_state_growstack // (lua_State *L, int n) - | lwz TMP2, SAVE_NRES - | mr RD, SAVE0 - | slwi TMP2, TMP2, 3 - | lp BASE, L->top // Need the (realloced) L->top in BASE. - | b <2 - | - |->vm_unwind_c: // Unwind C stack, return from vm_pcall. 
- | // (void *cframe, int errcode) - | mr sp, CARG1 - | mr CRET1, CARG2 - |->vm_unwind_c_eh: // Landing pad for external unwinder. - | lwz L, SAVE_L - | .toc ld TOCREG, SAVE_TOC - | li TMP0, ~LJ_VMST_C - | lwz GL:TMP1, L->glref - | stw TMP0, GL:TMP1->vmstate - | b ->vm_leave_unw - | - |->vm_unwind_ff: // Unwind C stack, return from ff pcall. - | // (void *cframe) - |.if GPR64 - | rldicr sp, CARG1, 0, 61 - |.else - | rlwinm sp, CARG1, 0, 0, 29 - |.endif - |->vm_unwind_ff_eh: // Landing pad for external unwinder. - | lwz L, SAVE_L - | .toc ld TOCREG, SAVE_TOC - | li TISNUM, LJ_TISNUM // Setup type comparison constants. - | lp BASE, L->base - | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | lwz DISPATCH, L->glref // Setup pointer to dispatch table. - | li ZERO, 0 - | stw TMP3, TMPD - | li TMP1, LJ_TFALSE - | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). - | li TISNIL, LJ_TNIL - | li_vmstate INTERP - | lfs TOBIT, TMPD - | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame. - | la RA, -8(BASE) // Results start at BASE-8. - | stw TMP3, TMPD - | addi DISPATCH, DISPATCH, GG_G2DISP - | stw TMP1, 0(RA) // Prepend false to error message. - | li RD, 16 // 2 results: false + error message. - | st_vmstate - | lfs TONUM, TMPD - | b ->vm_returnc - | - |//----------------------------------------------------------------------- - |//-- Grow stack for calls ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_growstack_c: // Grow stack for C function. - | li CARG2, LUA_MINSTACK - | b >2 - | - |->vm_growstack_l: // Grow stack for Lua function. - | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC - | add RC, BASE, RC - | sub RA, RA, BASE - | stp BASE, L->base - | addi PC, PC, 4 // Must point after first instruction. - | stp RC, L->top - | srwi CARG2, RA, 3 - |2: - | // L->base = new base, L->top = top - | stw PC, SAVE_PC - | mr CARG1, L - | bl extern lj_state_growstack // (lua_State *L, int n) - | lp BASE, L->base - | lp RC, L->top - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | sub RC, RC, BASE - | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC - | ins_callt // Just retry the call. - | - |//----------------------------------------------------------------------- - |//-- Entry points into the assembler VM --------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_resume: // Setup C frame and resume thread. - | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) - | saveregs - | mr L, CARG1 - | lwz DISPATCH, L->glref // Setup pointer to dispatch table. - | mr BASE, CARG2 - | lbz TMP1, L->status - | stw L, SAVE_L - | li PC, FRAME_CP - | addi TMP0, sp, CFRAME_RESUME - | addi DISPATCH, DISPATCH, GG_G2DISP - | stw CARG3, SAVE_NRES - | cmplwi TMP1, 0 - | stw CARG3, SAVE_ERRF - | stp CARG3, SAVE_CFRAME - | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. - | stp TMP0, L->cframe - | beq >3 - | - | // Resume after yield (like a return). - | stw L, DISPATCH_GL(cur_L)(DISPATCH) - | mr RA, BASE - | lp BASE, L->base - | li TISNUM, LJ_TISNUM // Setup type comparison constants. - | lp TMP1, L->top - | lwz PC, FRAME_PC(BASE) - | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | stb CARG3, L->status - | stw TMP3, TMPD - | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 
- | lfs TOBIT, TMPD - | sub RD, TMP1, BASE - | stw TMP3, TMPD - | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) - | addi RD, RD, 8 - | stw TMP0, TONUM_HI - | li_vmstate INTERP - | li ZERO, 0 - | st_vmstate - | andix. TMP0, PC, FRAME_TYPE - | mr MULTRES, RD - | lfs TONUM, TMPD - | li TISNIL, LJ_TNIL - | beq ->BC_RET_Z - | b ->vm_return - | - |->vm_pcall: // Setup protected C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) - | saveregs - | li PC, FRAME_CP - | stw CARG4, SAVE_ERRF - | b >1 - | - |->vm_call: // Setup C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1) - | saveregs - | li PC, FRAME_C - | - |1: // Entry point for vm_pcall above (PC = ftype). - | lp TMP1, L:CARG1->cframe - | mr L, CARG1 - | stw CARG3, SAVE_NRES - | lwz DISPATCH, L->glref // Setup pointer to dispatch table. - | stw CARG1, SAVE_L - | mr BASE, CARG2 - | addi DISPATCH, DISPATCH, GG_G2DISP - | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. - | stp TMP1, SAVE_CFRAME - | stp sp, L->cframe // Add our C frame to cframe chain. - | - |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). - | stw L, DISPATCH_GL(cur_L)(DISPATCH) - | lp TMP2, L->base // TMP2 = old base (used in vmeta_call). - | li TISNUM, LJ_TISNUM // Setup type comparison constants. - | lp TMP1, L->top - | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | add PC, PC, BASE - | stw TMP3, TMPD - | li ZERO, 0 - | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). - | lfs TOBIT, TMPD - | sub PC, PC, TMP2 // PC = frame delta + frame type - | stw TMP3, TMPD - | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) - | sub NARGS8:RC, TMP1, BASE - | stw TMP0, TONUM_HI - | li_vmstate INTERP - | lfs TONUM, TMPD - | li TISNIL, LJ_TNIL - | st_vmstate - | - |->vm_call_dispatch: - | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC - | lwz TMP0, FRAME_PC(BASE) - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | checkfunc TMP0; bne ->vmeta_call - | - |->vm_call_dispatch_f: - | ins_call - | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC - | - |->vm_cpcall: // Setup protected C frame, call C. - | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) - | saveregs - | mr L, CARG1 - | lwz TMP0, L:CARG1->stack - | stw CARG1, SAVE_L - | lp TMP1, L->top - | lwz DISPATCH, L->glref // Setup pointer to dispatch table. - | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. - | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). - | lp TMP1, L->cframe - | addi DISPATCH, DISPATCH, GG_G2DISP - | .toc lp CARG4, 0(CARG4) - | li TMP2, 0 - | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. - | stw TMP2, SAVE_ERRF // No error function. - | stp TMP1, SAVE_CFRAME - | stp sp, L->cframe // Add our C frame to cframe chain. - | stw L, DISPATCH_GL(cur_L)(DISPATCH) - | mtctr CARG4 - | bctrl // (lua_State *L, lua_CFunction func, void *ud) - |.if PPE - | mr BASE, CRET1 - | cmpwi CRET1, 0 - |.else - | mr. BASE, CRET1 - |.endif - | li PC, FRAME_CP - | bne <3 // Else continue with the call. - | b ->vm_leave_cp // No base? Just remove C frame. - | - |//----------------------------------------------------------------------- - |//-- Metamethod handling ------------------------------------------------ - |//----------------------------------------------------------------------- - | - |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the - |// stack, so BASE doesn't need to be reloaded across these calls. 
- | - |//-- Continuation dispatch ---------------------------------------------- - | - |->cont_dispatch: - | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8 - | lwz TMP0, -12(BASE) // Continuation. - | mr RB, BASE - | mr BASE, TMP2 // Restore caller BASE. - | lwz LFUNC:TMP1, FRAME_FUNC(TMP2) - |.if FFI - | cmplwi TMP0, 1 - |.endif - | lwz PC, -16(RB) // Restore PC from [cont|PC]. - | subi TMP2, RD, 8 - | lwz TMP1, LFUNC:TMP1->pc - | stwx TISNIL, RA, TMP2 // Ensure one valid arg. - |.if FFI - | ble >1 - |.endif - | lwz KBASE, PC2PROTO(k)(TMP1) - | // BASE = base, RA = resultptr, RB = meta base - | mtctr TMP0 - | bctr // Jump to continuation. - | - |.if FFI - |1: - | beq ->cont_ffi_callback // cont = 1: return from FFI callback. - | // cont = 0: tailcall from C function. - | subi TMP1, RB, 16 - | sub RC, TMP1, BASE - | b ->vm_call_tail - |.endif - | - |->cont_cat: // RA = resultptr, RB = meta base - | lwz INS, -4(PC) - | subi CARG2, RB, 16 - | decode_RB8 SAVE0, INS - | lfd f0, 0(RA) - | add TMP1, BASE, SAVE0 - | stp BASE, L->base - | cmplw TMP1, CARG2 - | sub CARG3, CARG2, TMP1 - | decode_RA8 RA, INS - | stfd f0, 0(CARG2) - | bney ->BC_CAT_Z - | stfdx f0, BASE, RA - | b ->cont_nop - | - |//-- Table indexing metamethods ----------------------------------------- - | - |->vmeta_tgets1: - | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) - | li TMP0, LJ_TSTR - | decode_RB8 RB, INS - | stw STR:RC, 4(CARG3) - | add CARG2, BASE, RB - | stw TMP0, 0(CARG3) - | b >1 - | - |->vmeta_tgets: - | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) - | li TMP0, LJ_TTAB - | stw TAB:RB, 4(CARG2) - | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH) - | stw TMP0, 0(CARG2) - | li TMP1, LJ_TSTR - | stw STR:RC, 4(CARG3) - | stw TMP1, 0(CARG3) - | b >1 - | - |->vmeta_tgetb: // TMP0 = index - |.if not DUALNUM - | tonum_u f0, TMP0 - |.endif - | decode_RB8 RB, INS - | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) - | add CARG2, BASE, RB - |.if DUALNUM - | stw TISNUM, 0(CARG3) - | stw TMP0, 4(CARG3) - |.else - | stfd f0, 0(CARG3) - |.endif - | b >1 - | - |->vmeta_tgetv: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | add CARG2, BASE, RB - | add CARG3, BASE, RC - |1: - | stp BASE, L->base - | mr CARG1, L - | stw PC, SAVE_PC - | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) - | // Returns TValue * (finished) or NULL (metamethod). - | cmplwi CRET1, 0 - | beq >3 - | lfd f0, 0(CRET1) - | ins_next1 - | stfdx f0, BASE, RA - | ins_next2 - | - |3: // Call __index metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k - | subfic TMP1, BASE, FRAME_CONT - | lp BASE, L->top - | stw PC, -16(BASE) // [cont|PC] - | add PC, TMP1, BASE - | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. - | li NARGS8:RC, 16 // 2 args for func(t, k). - | b ->vm_call_dispatch_f - | - |->vmeta_tgetr: - | bl extern lj_tab_getinth // (GCtab *t, int32_t key) - | // Returns cTValue * or NULL. 
- | cmplwi CRET1, 0 - | beq >1 - | lfd f14, 0(CRET1) - | b ->BC_TGETR_Z - |1: - | stwx TISNIL, BASE, RA - | b ->cont_nop - | - |//----------------------------------------------------------------------- - | - |->vmeta_tsets1: - | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) - | li TMP0, LJ_TSTR - | decode_RB8 RB, INS - | stw STR:RC, 4(CARG3) - | add CARG2, BASE, RB - | stw TMP0, 0(CARG3) - | b >1 - | - |->vmeta_tsets: - | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) - | li TMP0, LJ_TTAB - | stw TAB:RB, 4(CARG2) - | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH) - | stw TMP0, 0(CARG2) - | li TMP1, LJ_TSTR - | stw STR:RC, 4(CARG3) - | stw TMP1, 0(CARG3) - | b >1 - | - |->vmeta_tsetb: // TMP0 = index - |.if not DUALNUM - | tonum_u f0, TMP0 - |.endif - | decode_RB8 RB, INS - | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) - | add CARG2, BASE, RB - |.if DUALNUM - | stw TISNUM, 0(CARG3) - | stw TMP0, 4(CARG3) - |.else - | stfd f0, 0(CARG3) - |.endif - | b >1 - | - |->vmeta_tsetv: - | decode_RB8 RB, INS - | decode_RC8 RC, INS - | add CARG2, BASE, RB - | add CARG3, BASE, RC - |1: - | stp BASE, L->base - | mr CARG1, L - | stw PC, SAVE_PC - | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) - | // Returns TValue * (finished) or NULL (metamethod). - | cmplwi CRET1, 0 - | lfdx f0, BASE, RA - | beq >3 - | // NOBARRIER: lj_meta_tset ensures the table is not black. - | ins_next1 - | stfd f0, 0(CRET1) - | ins_next2 - | - |3: // Call __newindex metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) - | subfic TMP1, BASE, FRAME_CONT - | lp BASE, L->top - | stw PC, -16(BASE) // [cont|PC] - | add PC, TMP1, BASE - | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. - | li NARGS8:RC, 24 // 3 args for func(t, k, v) - | stfd f0, 16(BASE) // Copy value to third argument. - | b ->vm_call_dispatch_f - | - |->vmeta_tsetr: - | stp BASE, L->base - | stw PC, SAVE_PC - | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) - | // Returns TValue *. - | stfd f14, 0(CRET1) - | b ->cont_nop - | - |//-- Comparison metamethods --------------------------------------------- - | - |->vmeta_comp: - | mr CARG1, L - | subi PC, PC, 4 - |.if DUALNUM - | mr CARG2, RA - |.else - | add CARG2, BASE, RA - |.endif - | stw PC, SAVE_PC - |.if DUALNUM - | mr CARG3, RD - |.else - | add CARG3, BASE, RD - |.endif - | stp BASE, L->base - | decode_OP1 CARG4, INS - | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) - | // Returns 0/1 or TValue * (metamethod). - |3: - | cmplwi CRET1, 1 - | bgt ->vmeta_binop - | subfic CRET1, CRET1, 0 - |4: - | lwz INS, 0(PC) - | addi PC, PC, 4 - | decode_RD4 TMP2, INS - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - | and TMP2, TMP2, CRET1 - | add PC, PC, TMP2 - |->cont_nop: - | ins_next - | - |->cont_ra: // RA = resultptr - | lwz INS, -4(PC) - | lfd f0, 0(RA) - | decode_RA8 TMP1, INS - | stfdx f0, BASE, TMP1 - | b ->cont_nop - | - |->cont_condt: // RA = resultptr - | lwz TMP0, 0(RA) - | .gpr64 extsw TMP0, TMP0 - | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is true. - | subfe CRET1, CRET1, CRET1 - | not CRET1, CRET1 - | b <4 - | - |->cont_condf: // RA = resultptr - | lwz TMP0, 0(RA) - | .gpr64 extsw TMP0, TMP0 - | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is false. - | subfe CRET1, CRET1, CRET1 - | b <4 - | - |->vmeta_equal: - | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. 
- | subi PC, PC, 4 - | stp BASE, L->base - | mr CARG1, L - | stw PC, SAVE_PC - | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) - | // Returns 0/1 or TValue * (metamethod). - | b <3 - | - |->vmeta_equal_cd: - |.if FFI - | mr CARG2, INS - | subi PC, PC, 4 - | stp BASE, L->base - | mr CARG1, L - | stw PC, SAVE_PC - | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) - | // Returns 0/1 or TValue * (metamethod). - | b <3 - |.endif - | - |->vmeta_istype: - | subi PC, PC, 4 - | stp BASE, L->base - | srwi CARG2, RA, 3 - | mr CARG1, L - | srwi CARG3, RD, 3 - | stw PC, SAVE_PC - | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) - | b ->cont_nop - | - |//-- Arithmetic metamethods --------------------------------------------- - | - |->vmeta_arith_nv: - | add CARG3, KBASE, RC - | add CARG4, BASE, RB - | b >1 - |->vmeta_arith_nv2: - |.if DUALNUM - | mr CARG3, RC - | mr CARG4, RB - | b >1 - |.endif - | - |->vmeta_unm: - | mr CARG3, RD - | mr CARG4, RD - | b >1 - | - |->vmeta_arith_vn: - | add CARG3, BASE, RB - | add CARG4, KBASE, RC - | b >1 - | - |->vmeta_arith_vv: - | add CARG3, BASE, RB - | add CARG4, BASE, RC - |.if DUALNUM - | b >1 - |.endif - |->vmeta_arith_vn2: - |->vmeta_arith_vv2: - |.if DUALNUM - | mr CARG3, RB - | mr CARG4, RC - |.endif - |1: - | add CARG2, BASE, RA - | stp BASE, L->base - | mr CARG1, L - | stw PC, SAVE_PC - | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS. - | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) - | // Returns NULL (finished) or TValue * (metamethod). - | cmplwi CRET1, 0 - | beq ->cont_nop - | - | // Call metamethod for binary op. - |->vmeta_binop: - | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 - | sub TMP1, CRET1, BASE - | stw PC, -16(CRET1) // [cont|PC] - | mr TMP2, BASE - | addi PC, TMP1, FRAME_CONT - | mr BASE, CRET1 - | li NARGS8:RC, 16 // 2 args for func(o1, o2). - | b ->vm_call_dispatch - | - |->vmeta_len: -#if LJ_52 - | mr SAVE0, CARG1 -#endif - | mr CARG2, RD - | stp BASE, L->base - | mr CARG1, L - | stw PC, SAVE_PC - | bl extern lj_meta_len // (lua_State *L, TValue *o) - | // Returns NULL (retry) or TValue * (metamethod base). -#if LJ_52 - | cmplwi CRET1, 0 - | bne ->vmeta_binop // Binop call for compatibility. - | mr CARG1, SAVE0 - | b ->BC_LEN_Z -#else - | b ->vmeta_binop // Binop call for compatibility. -#endif - | - |//-- Call metamethod ---------------------------------------------------- - | - |->vmeta_call: // Resolve and call __call metamethod. - | // TMP2 = old base, BASE = new base, RC = nargs*8 - | mr CARG1, L - | stp TMP2, L->base // This is the callers base! - | subi CARG2, BASE, 8 - | stw PC, SAVE_PC - | add CARG3, BASE, RC - | mr SAVE0, NARGS8:RC - | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. - | addi NARGS8:RC, SAVE0, 8 // Got one more argument now. - | ins_call - | - |->vmeta_callt: // Resolve __call for BC_CALLT. - | // BASE = old base, RA = new base, RC = nargs*8 - | mr CARG1, L - | stp BASE, L->base - | subi CARG2, RA, 8 - | stw PC, SAVE_PC - | add CARG3, RA, RC - | mr SAVE0, NARGS8:RC - | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | lwz TMP1, FRAME_PC(BASE) - | addi NARGS8:RC, SAVE0, 8 // Got one more argument now. - | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here. 
- | b ->BC_CALLT_Z - | - |//-- Argument coercion for 'for' statement ------------------------------ - | - |->vmeta_for: - | mr CARG1, L - | stp BASE, L->base - | mr CARG2, RA - | stw PC, SAVE_PC - | mr SAVE0, INS - | bl extern lj_meta_for // (lua_State *L, TValue *base) - |.if JIT - | decode_OP1 TMP0, SAVE0 - |.endif - | decode_RA8 RA, SAVE0 - |.if JIT - | cmpwi TMP0, BC_JFORI - |.endif - | decode_RD8 RD, SAVE0 - |.if JIT - | beqy =>BC_JFORI - |.endif - | b =>BC_FORI - | - |//----------------------------------------------------------------------- - |//-- Fast functions ----------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro .ffunc, name - |->ff_ .. name: - |.endmacro - | - |.macro .ffunc_1, name - |->ff_ .. name: - | cmplwi NARGS8:RC, 8 - | lwz CARG3, 0(BASE) - | lwz CARG1, 4(BASE) - | blt ->fff_fallback - |.endmacro - | - |.macro .ffunc_2, name - |->ff_ .. name: - | cmplwi NARGS8:RC, 16 - | lwz CARG3, 0(BASE) - | lwz CARG4, 8(BASE) - | lwz CARG1, 4(BASE) - | lwz CARG2, 12(BASE) - | blt ->fff_fallback - |.endmacro - | - |.macro .ffunc_n, name - |->ff_ .. name: - | cmplwi NARGS8:RC, 8 - | lwz CARG3, 0(BASE) - | lfd FARG1, 0(BASE) - | blt ->fff_fallback - | checknum CARG3; bge ->fff_fallback - |.endmacro - | - |.macro .ffunc_nn, name - |->ff_ .. name: - | cmplwi NARGS8:RC, 16 - | lwz CARG3, 0(BASE) - | lfd FARG1, 0(BASE) - | lwz CARG4, 8(BASE) - | lfd FARG2, 8(BASE) - | blt ->fff_fallback - | checknum CARG3; bge ->fff_fallback - | checknum CARG4; bge ->fff_fallback - |.endmacro - | - |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1. - |.macro ffgccheck - | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH) - | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) - | cmplw TMP0, TMP1 - | bgel ->fff_gcstep - |.endmacro - | - |//-- Base library: checks ----------------------------------------------- - | - |.ffunc_1 assert - | li TMP1, LJ_TFALSE - | la RA, -8(BASE) - | cmplw cr1, CARG3, TMP1 - | lwz PC, FRAME_PC(BASE) - | bge cr1, ->fff_fallback - | stw CARG3, 0(RA) - | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8. - | stw CARG1, 4(RA) - | beq ->fff_res // Done if exactly 1 argument. - | li TMP1, 8 - | subi RC, RC, 8 - |1: - | cmplw TMP1, RC - | lfdx f0, BASE, TMP1 - | stfdx f0, RA, TMP1 - | addi TMP1, TMP1, 8 - | bney <1 - | b ->fff_res - | - |.ffunc type - | cmplwi NARGS8:RC, 8 - | lwz CARG1, 0(BASE) - | blt ->fff_fallback - | .gpr64 extsw CARG1, CARG1 - | subfc TMP0, TISNUM, CARG1 - | subfe TMP2, CARG1, CARG1 - | orc TMP1, TMP2, TMP0 - | addi TMP1, TMP1, ~LJ_TISNUM+1 - | slwi TMP1, TMP1, 3 - | la TMP2, CFUNC:RB->upvalue - | lfdx FARG1, TMP2, TMP1 - | b ->fff_resn - | - |//-- Base library: getters and setters --------------------------------- - | - |.ffunc_1 getmetatable - | checktab CARG3; bne >6 - |1: // Field metatable must be at same offset for GCtab and GCudata! - | lwz TAB:CARG1, TAB:CARG1->metatable - |2: - | li CARG3, LJ_TNIL - | cmplwi TAB:CARG1, 0 - | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH) - | beq ->fff_restv - | lwz TMP0, TAB:CARG1->hmask - | li CARG3, LJ_TTAB // Use metatable as default result. - | lwz TMP1, STR:RC->hash - | lwz NODE:TMP2, TAB:CARG1->node - | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask - | slwi TMP0, TMP1, 5 - | slwi TMP1, TMP1, 3 - | sub TMP1, TMP0, TMP1 - | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) - |3: // Rearranged logic, because we expect _not_ to find the key. 
- | lwz CARG4, NODE:TMP2->key - | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2) - | lwz CARG2, NODE:TMP2->val - | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2) - | checkstr CARG4; bne >4 - | cmpw TMP0, STR:RC; beq >5 - |4: - | lwz NODE:TMP2, NODE:TMP2->next - | cmplwi NODE:TMP2, 0 - | beq ->fff_restv // Not found, keep default result. - | b <3 - |5: - | checknil CARG2 - | beq ->fff_restv // Ditto for nil value. - | mr CARG3, CARG2 // Return value of mt.__metatable. - | mr CARG1, TMP1 - | b ->fff_restv - | - |6: - | cmpwi CARG3, LJ_TUDATA; beq <1 - | .gpr64 extsw CARG3, CARG3 - | subfc TMP0, TISNUM, CARG3 - | subfe TMP2, CARG3, CARG3 - | orc TMP1, TMP2, TMP0 - | addi TMP1, TMP1, ~LJ_TISNUM+1 - | slwi TMP1, TMP1, 2 - | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH) - | lwzx TAB:CARG1, TMP2, TMP1 - | b <2 - | - |.ffunc_2 setmetatable - | // Fast path: no mt for table yet and not clearing the mt. - | checktab CARG3; bne ->fff_fallback - | lwz TAB:TMP1, TAB:CARG1->metatable - | checktab CARG4; bne ->fff_fallback - | cmplwi TAB:TMP1, 0 - | lbz TMP3, TAB:CARG1->marked - | bne ->fff_fallback - | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) - | stw TAB:CARG2, TAB:CARG1->metatable - | beq ->fff_restv - | barrierback TAB:CARG1, TMP3, TMP0 - | b ->fff_restv - | - |.ffunc rawget - | cmplwi NARGS8:RC, 16 - | lwz CARG4, 0(BASE) - | lwz TAB:CARG2, 4(BASE) - | blt ->fff_fallback - | checktab CARG4; bne ->fff_fallback - | la CARG3, 8(BASE) - | mr CARG1, L - | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) - | // Returns cTValue *. - | lfd FARG1, 0(CRET1) - | b ->fff_resn - | - |//-- Base library: conversions ------------------------------------------ - | - |.ffunc tonumber - | // Only handles the number case inline (without a base argument). - | cmplwi NARGS8:RC, 8 - | lwz CARG1, 0(BASE) - | lfd FARG1, 0(BASE) - | bne ->fff_fallback // Exactly one argument. - | checknum CARG1; bgt ->fff_fallback - | b ->fff_resn - | - |.ffunc_1 tostring - | // Only handles the string or number case inline. - | checkstr CARG3 - | // A __tostring method in the string base metatable is ignored. - | beq ->fff_restv // String key? - | // Handle numbers inline, unless a number base metatable is present. - | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH) - | checknum CARG3 - | cmplwi cr1, TMP0, 0 - | stp BASE, L->base // Add frame since C call can throw. - | crorc 4*cr0+eq, 4*cr0+gt, 4*cr1+eq - | stw PC, SAVE_PC // Redundant (but a defined value). - | beq ->fff_fallback - | ffgccheck - | mr CARG1, L - | mr CARG2, BASE - |.if DUALNUM - | bl extern lj_strfmt_number // (lua_State *L, cTValue *o) - |.else - | bl extern lj_strfmt_num // (lua_State *L, lua_Number *np) - |.endif - | // Returns GCstr *. - | li CARG3, LJ_TSTR - | b ->fff_restv - | - |//-- Base library: iterators ------------------------------------------- - | - |.ffunc next - | cmplwi NARGS8:RC, 8 - | lwz CARG1, 0(BASE) - | lwz TAB:CARG2, 4(BASE) - | blt ->fff_fallback - | stwx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil. - | checktab CARG1 - | lwz PC, FRAME_PC(BASE) - | bne ->fff_fallback - | stp BASE, L->base // Add frame since C call can throw. - | mr CARG1, L - | stp BASE, L->top // Dummy frame length is ok. - | la CARG3, 8(BASE) - | stw PC, SAVE_PC - | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) - | // Returns 0 at end of traversal. - | cmplwi CRET1, 0 - | li CARG3, LJ_TNIL - | beq ->fff_restv // End of traversal: return nil. - | lfd f0, 8(BASE) // Copy key and value to results. 
- | la RA, -8(BASE) - | lfd f1, 16(BASE) - | stfd f0, 0(RA) - | li RD, (2+1)*8 - | stfd f1, 8(RA) - | b ->fff_res - | - |.ffunc_1 pairs - | checktab CARG3 - | lwz PC, FRAME_PC(BASE) - | bne ->fff_fallback -#if LJ_52 - | lwz TAB:TMP2, TAB:CARG1->metatable - | lfd f0, CFUNC:RB->upvalue[0] - | cmplwi TAB:TMP2, 0 - | la RA, -8(BASE) - | bne ->fff_fallback -#else - | lfd f0, CFUNC:RB->upvalue[0] - | la RA, -8(BASE) -#endif - | stw TISNIL, 8(BASE) - | li RD, (3+1)*8 - | stfd f0, 0(RA) - | b ->fff_res - | - |.ffunc ipairs_aux - | cmplwi NARGS8:RC, 16 - | lwz CARG3, 0(BASE) - | lwz TAB:CARG1, 4(BASE) - | lwz CARG4, 8(BASE) - |.if DUALNUM - | lwz TMP2, 12(BASE) - |.else - | lfd FARG2, 8(BASE) - |.endif - | blt ->fff_fallback - | checktab CARG3 - | checknum cr1, CARG4 - | lwz PC, FRAME_PC(BASE) - |.if DUALNUM - | bne ->fff_fallback - | bne cr1, ->fff_fallback - |.else - | lus TMP0, 0x3ff0 - | stw ZERO, TMPD_LO - | bne ->fff_fallback - | stw TMP0, TMPD_HI - | bge cr1, ->fff_fallback - | lfd FARG1, TMPD - | toint TMP2, FARG2, f0 - |.endif - | lwz TMP0, TAB:CARG1->asize - | lwz TMP1, TAB:CARG1->array - |.if not DUALNUM - | fadd FARG2, FARG2, FARG1 - |.endif - | addi TMP2, TMP2, 1 - | la RA, -8(BASE) - | cmplw TMP0, TMP2 - |.if DUALNUM - | stw TISNUM, 0(RA) - | slwi TMP3, TMP2, 3 - | stw TMP2, 4(RA) - |.else - | slwi TMP3, TMP2, 3 - | stfd FARG2, 0(RA) - |.endif - | ble >2 // Not in array part? - | lwzx TMP2, TMP1, TMP3 - | lfdx f0, TMP1, TMP3 - |1: - | checknil TMP2 - | li RD, (0+1)*8 - | beq ->fff_res // End of iteration, return 0 results. - | li RD, (2+1)*8 - | stfd f0, 8(RA) - | b ->fff_res - |2: // Check for empty hash part first. Otherwise call C function. - | lwz TMP0, TAB:CARG1->hmask - | cmplwi TMP0, 0 - | li RD, (0+1)*8 - | beq ->fff_res - | mr CARG2, TMP2 - | bl extern lj_tab_getinth // (GCtab *t, int32_t key) - | // Returns cTValue * or NULL. - | cmplwi CRET1, 0 - | li RD, (0+1)*8 - | beq ->fff_res - | lwz TMP2, 0(CRET1) - | lfd f0, 0(CRET1) - | b <1 - | - |.ffunc_1 ipairs - | checktab CARG3 - | lwz PC, FRAME_PC(BASE) - | bne ->fff_fallback -#if LJ_52 - | lwz TAB:TMP2, TAB:CARG1->metatable - | lfd f0, CFUNC:RB->upvalue[0] - | cmplwi TAB:TMP2, 0 - | la RA, -8(BASE) - | bne ->fff_fallback -#else - | lfd f0, CFUNC:RB->upvalue[0] - | la RA, -8(BASE) -#endif - |.if DUALNUM - | stw TISNUM, 8(BASE) - |.else - | stw ZERO, 8(BASE) - |.endif - | stw ZERO, 12(BASE) - | li RD, (3+1)*8 - | stfd f0, 0(RA) - | b ->fff_res - | - |//-- Base library: catch errors ---------------------------------------- - | - |.ffunc pcall - | cmplwi NARGS8:RC, 8 - | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | blt ->fff_fallback - | mr TMP2, BASE - | la BASE, 8(BASE) - | // Remember active hook before pcall. - | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31 - | subi NARGS8:RC, NARGS8:RC, 8 - | addi PC, TMP3, 8+FRAME_PCALL - | b ->vm_call_dispatch - | - |.ffunc xpcall - | cmplwi NARGS8:RC, 16 - | lwz CARG4, 8(BASE) - | lfd FARG2, 8(BASE) - | lfd FARG1, 0(BASE) - | blt ->fff_fallback - | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH) - | mr TMP2, BASE - | checkfunc CARG4; bne ->fff_fallback // Traceback must be a function. - | la BASE, 16(BASE) - | // Remember active hook before pcall. - | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31 - | stfd FARG2, 0(TMP2) // Swap function and traceback. 
- | subi NARGS8:RC, NARGS8:RC, 16 - | stfd FARG1, 8(TMP2) - | addi PC, TMP1, 16+FRAME_PCALL - | b ->vm_call_dispatch - | - |//-- Coroutine library -------------------------------------------------- - | - |.macro coroutine_resume_wrap, resume - |.if resume - |.ffunc_1 coroutine_resume - | cmpwi CARG3, LJ_TTHREAD; bne ->fff_fallback - |.else - |.ffunc coroutine_wrap_aux - | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr - |.endif - | lbz TMP0, L:CARG1->status - | lp TMP1, L:CARG1->cframe - | lp CARG2, L:CARG1->top - | cmplwi cr0, TMP0, LUA_YIELD - | lp TMP2, L:CARG1->base - | cmplwi cr1, TMP1, 0 - | lwz TMP0, L:CARG1->maxstack - | cmplw cr7, CARG2, TMP2 - | lwz PC, FRAME_PC(BASE) - | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0 - | add TMP2, CARG2, NARGS8:RC - | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD - | cmplw cr1, TMP2, TMP0 - | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt - | stw PC, SAVE_PC - | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov - | stp BASE, L->base - | blt cr6, ->fff_fallback - |1: - |.if resume - | addi BASE, BASE, 8 // Keep resumed thread in stack for GC. - | subi NARGS8:RC, NARGS8:RC, 8 - | subi TMP2, TMP2, 8 - |.endif - | stp TMP2, L:CARG1->top - | li TMP1, 0 - | stp BASE, L->top - |2: // Move args to coroutine. - | cmpw TMP1, NARGS8:RC - | lfdx f0, BASE, TMP1 - | beq >3 - | stfdx f0, CARG2, TMP1 - | addi TMP1, TMP1, 8 - | b <2 - |3: - | li CARG3, 0 - | mr L:SAVE0, L:CARG1 - | li CARG4, 0 - | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) - | // Returns thread status. - |4: - | lp TMP2, L:SAVE0->base - | cmplwi CRET1, LUA_YIELD - | lp TMP3, L:SAVE0->top - | li_vmstate INTERP - | lp BASE, L->base - | stw L, DISPATCH_GL(cur_L)(DISPATCH) - | st_vmstate - | bgt >8 - | sub RD, TMP3, TMP2 - | lwz TMP0, L->maxstack - | cmplwi RD, 0 - | add TMP1, BASE, RD - | beq >6 // No results? - | cmplw TMP1, TMP0 - | li TMP1, 0 - | bgt >9 // Need to grow stack? - | - | subi TMP3, RD, 8 - | stp TMP2, L:SAVE0->top // Clear coroutine stack. - |5: // Move results from coroutine. - | cmplw TMP1, TMP3 - | lfdx f0, TMP2, TMP1 - | stfdx f0, BASE, TMP1 - | addi TMP1, TMP1, 8 - | bne <5 - |6: - | andix. TMP0, PC, FRAME_TYPE - |.if resume - | li TMP1, LJ_TTRUE - | la RA, -8(BASE) - | stw TMP1, -8(BASE) // Prepend true to results. - | addi RD, RD, 16 - |.else - | mr RA, BASE - | addi RD, RD, 8 - |.endif - |7: - | stw PC, SAVE_PC - | mr MULTRES, RD - | beq ->BC_RET_Z - | b ->vm_return - | - |8: // Coroutine returned with error (at co->top-1). - |.if resume - | andix. TMP0, PC, FRAME_TYPE - | la TMP3, -8(TMP3) - | li TMP1, LJ_TFALSE - | lfd f0, 0(TMP3) - | stp TMP3, L:SAVE0->top // Remove error from coroutine stack. - | li RD, (2+1)*8 - | stw TMP1, -8(BASE) // Prepend false to results. - | la RA, -8(BASE) - | stfd f0, 0(BASE) // Copy error message. - | b <7 - |.else - | mr CARG1, L - | mr CARG2, L:SAVE0 - | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) - |.endif - | - |9: // Handle stack expansion on return from yield. - | mr CARG1, L - | srwi CARG2, RD, 3 - | bl extern lj_state_growstack // (lua_State *L, int n) - | li CRET1, 0 - | b <4 - |.endmacro - | - | coroutine_resume_wrap 1 // coroutine.resume - | coroutine_resume_wrap 0 // coroutine.wrap - | - |.ffunc coroutine_yield - | lp TMP0, L->cframe - | add TMP1, BASE, NARGS8:RC - | stp BASE, L->base - | andix. 
TMP0, TMP0, CFRAME_RESUME - | stp TMP1, L->top - | li CRET1, LUA_YIELD - | beq ->fff_fallback - | stp ZERO, L->cframe - | stb CRET1, L->status - | b ->vm_leave_unw - | - |//-- Math library ------------------------------------------------------- - | - |.ffunc_1 math_abs - | checknum CARG3 - |.if DUALNUM - | bne >2 - | srawi TMP1, CARG1, 31 - | xor TMP2, TMP1, CARG1 - |.if GPR64 - | lus TMP0, 0x8000 - | sub CARG1, TMP2, TMP1 - | cmplw CARG1, TMP0 - | beq >1 - |.else - | sub. CARG1, TMP2, TMP1 - | blt >1 - |.endif - |->fff_resi: - | lwz PC, FRAME_PC(BASE) - | la RA, -8(BASE) - | stw TISNUM, -8(BASE) - | stw CRET1, -4(BASE) - | b ->fff_res1 - |1: - | lus CARG3, 0x41e0 // 2^31. - | li CARG1, 0 - | b ->fff_restv - |2: - |.endif - | bge ->fff_fallback - | rlwinm CARG3, CARG3, 0, 1, 31 - | // Fallthrough. - | - |->fff_restv: - | // CARG3/CARG1 = TValue result. - | lwz PC, FRAME_PC(BASE) - | stw CARG3, -8(BASE) - | la RA, -8(BASE) - | stw CARG1, -4(BASE) - |->fff_res1: - | // RA = results, PC = return. - | li RD, (1+1)*8 - |->fff_res: - | // RA = results, RD = (nresults+1)*8, PC = return. - | andix. TMP0, PC, FRAME_TYPE - | mr MULTRES, RD - | bney ->vm_return - | lwz INS, -4(PC) - | decode_RB8 RB, INS - |5: - | cmplw RB, RD // More results expected? - | decode_RA8 TMP0, INS - | bgt >6 - | ins_next1 - | // Adjust BASE. KBASE is assumed to be set for the calling frame. - | sub BASE, RA, TMP0 - | ins_next2 - | - |6: // Fill up results with nil. - | subi TMP1, RD, 8 - | addi RD, RD, 8 - | stwx TISNIL, RA, TMP1 - | b <5 - | - |.macro math_extern, func - | .ffunc_n math_ .. func - | blex func - | b ->fff_resn - |.endmacro - | - |.macro math_extern2, func - | .ffunc_nn math_ .. func - | blex func - | b ->fff_resn - |.endmacro - | - |.macro math_round, func - | .ffunc_1 math_ .. func - | checknum CARG3; beqy ->fff_restv - | rlwinm TMP2, CARG3, 12, 21, 31 - | bge ->fff_fallback - | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023 - | cmplwi cr1, TMP2, 31 // 0 <= exp < 31? - | subfic TMP0, TMP2, 31 - | blt >3 - | slwi TMP1, CARG3, 11 - | srwi TMP3, CARG1, 21 - | oris TMP1, TMP1, 0x8000 - | addi TMP2, TMP2, 1 - | or TMP1, TMP1, TMP3 - | slwi CARG2, CARG1, 11 - | bge cr1, >4 - | slw TMP3, TMP1, TMP2 - | srw RD, TMP1, TMP0 - | or TMP3, TMP3, CARG2 - | srawi TMP2, CARG3, 31 - |.if "func" == "floor" - | and TMP1, TMP3, TMP2 - | addic TMP0, TMP1, -1 - | subfe TMP1, TMP0, TMP1 - | add CARG1, RD, TMP1 - | xor CARG1, CARG1, TMP2 - | sub CARG1, CARG1, TMP2 - | b ->fff_resi - |.else - | andc TMP1, TMP3, TMP2 - | addic TMP0, TMP1, -1 - | subfe TMP1, TMP0, TMP1 - | add CARG1, RD, TMP1 - | cmpw CARG1, RD - | xor CARG1, CARG1, TMP2 - | sub CARG1, CARG1, TMP2 - | bge ->fff_resi - | // Overflow to 2^31. - | lus CARG3, 0x41e0 // 2^31. - | li CARG1, 0 - | b ->fff_restv - |.endif - |3: // |x| < 1 - | slwi TMP2, CARG3, 1 - | srawi TMP1, CARG3, 31 - | or TMP2, CARG1, TMP2 // ztest = (hi+hi) | lo - |.if "func" == "floor" - | and TMP1, TMP2, TMP1 // (ztest & sign) == 0 ? 0 : -1 - | subfic TMP2, TMP1, 0 - | subfe CARG1, CARG1, CARG1 - |.else - | andc TMP1, TMP2, TMP1 // (ztest & ~sign) == 0 ? 0 : 1 - | addic TMP2, TMP1, -1 - | subfe CARG1, TMP2, TMP1 - |.endif - | b ->fff_resi - |4: // exp >= 31. Check for -(2^31). - | xoris TMP1, TMP1, 0x8000 - | srawi TMP2, CARG3, 31 - |.if "func" == "floor" - | or TMP1, TMP1, CARG2 - |.endif - |.if PPE - | orc TMP1, TMP1, TMP2 - | cmpwi TMP1, 0 - |.else - | orc. TMP1, TMP1, TMP2 - |.endif - | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq - | lus CARG1, 0x8000 // -(2^31). 
- | beqy ->fff_resi - |5: - | lfd FARG1, 0(BASE) - | blex func - | b ->fff_resn - |.endmacro - | - |.if DUALNUM - | math_round floor - | math_round ceil - |.else - | // NYI: use internal implementation. - | math_extern floor - | math_extern ceil - |.endif - | - |.if SQRT - |.ffunc_n math_sqrt - | fsqrt FARG1, FARG1 - | b ->fff_resn - |.else - | math_extern sqrt - |.endif - | - |.ffunc math_log - | cmplwi NARGS8:RC, 8 - | lwz CARG3, 0(BASE) - | lfd FARG1, 0(BASE) - | bne ->fff_fallback // Need exactly 1 argument. - | checknum CARG3; bge ->fff_fallback - | blex log - | b ->fff_resn - | - | math_extern log10 - | math_extern exp - | math_extern sin - | math_extern cos - | math_extern tan - | math_extern asin - | math_extern acos - | math_extern atan - | math_extern sinh - | math_extern cosh - | math_extern tanh - | math_extern2 pow - | math_extern2 atan2 - | math_extern2 fmod - | - |.if DUALNUM - |.ffunc math_ldexp - | cmplwi NARGS8:RC, 16 - | lwz CARG3, 0(BASE) - | lfd FARG1, 0(BASE) - | lwz CARG4, 8(BASE) - |.if GPR64 - | lwz CARG2, 12(BASE) - |.else - | lwz CARG1, 12(BASE) - |.endif - | blt ->fff_fallback - | checknum CARG3; bge ->fff_fallback - | checknum CARG4; bne ->fff_fallback - |.else - |.ffunc_nn math_ldexp - |.if GPR64 - | toint CARG2, FARG2 - |.else - | toint CARG1, FARG2 - |.endif - |.endif - | blex ldexp - | b ->fff_resn - | - |.ffunc_n math_frexp - |.if GPR64 - | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) - |.else - | la CARG1, DISPATCH_GL(tmptv)(DISPATCH) - |.endif - | lwz PC, FRAME_PC(BASE) - | blex frexp - | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH) - | la RA, -8(BASE) - |.if not DUALNUM - | tonum_i FARG2, TMP1 - |.endif - | stfd FARG1, 0(RA) - | li RD, (2+1)*8 - |.if DUALNUM - | stw TISNUM, 8(RA) - | stw TMP1, 12(RA) - |.else - | stfd FARG2, 8(RA) - |.endif - | b ->fff_res - | - |.ffunc_n math_modf - |.if GPR64 - | la CARG2, -8(BASE) - |.else - | la CARG1, -8(BASE) - |.endif - | lwz PC, FRAME_PC(BASE) - | blex modf - | la RA, -8(BASE) - | stfd FARG1, 0(BASE) - | li RD, (2+1)*8 - | b ->fff_res - | - |.macro math_minmax, name, ismax - |.if DUALNUM - | .ffunc_1 name - | checknum CARG3 - | addi TMP1, BASE, 8 - | add TMP2, BASE, NARGS8:RC - | bne >4 - |1: // Handle integers. - | lwz CARG4, 0(TMP1) - | cmplw cr1, TMP1, TMP2 - | lwz CARG2, 4(TMP1) - | bge cr1, ->fff_resi - | checknum CARG4 - | xoris TMP0, CARG1, 0x8000 - | xoris TMP3, CARG2, 0x8000 - | bne >3 - | subfc TMP3, TMP3, TMP0 - | subfe TMP0, TMP0, TMP0 - |.if ismax - | andc TMP3, TMP3, TMP0 - |.else - | and TMP3, TMP3, TMP0 - |.endif - | add CARG1, TMP3, CARG2 - |.if GPR64 - | rldicl CARG1, CARG1, 0, 32 - |.endif - | addi TMP1, TMP1, 8 - | b <1 - |3: - | bge ->fff_fallback - | // Convert intermediate result to number and continue below. - | tonum_i FARG1, CARG1 - | lfd FARG2, 0(TMP1) - | b >6 - |4: - | lfd FARG1, 0(BASE) - | bge ->fff_fallback - |5: // Handle numbers. - | lwz CARG4, 0(TMP1) - | cmplw cr1, TMP1, TMP2 - | lfd FARG2, 0(TMP1) - | bge cr1, ->fff_resn - | checknum CARG4; bge >7 - |6: - | fsub f0, FARG1, FARG2 - | addi TMP1, TMP1, 8 - |.if ismax - | fsel FARG1, f0, FARG1, FARG2 - |.else - | fsel FARG1, f0, FARG2, FARG1 - |.endif - | b <5 - |7: // Convert integer to number and continue above. 
- | lwz CARG2, 4(TMP1) - | bne ->fff_fallback - | tonum_i FARG2, CARG2 - | b <6 - |.else - | .ffunc_n name - | li TMP1, 8 - |1: - | lwzx CARG2, BASE, TMP1 - | lfdx FARG2, BASE, TMP1 - | cmplw cr1, TMP1, NARGS8:RC - | checknum CARG2 - | bge cr1, ->fff_resn - | bge ->fff_fallback - | fsub f0, FARG1, FARG2 - | addi TMP1, TMP1, 8 - |.if ismax - | fsel FARG1, f0, FARG1, FARG2 - |.else - | fsel FARG1, f0, FARG2, FARG1 - |.endif - | b <1 - |.endif - |.endmacro - | - | math_minmax math_min, 0 - | math_minmax math_max, 1 - | - |//-- String library ----------------------------------------------------- - | - |.ffunc string_byte // Only handle the 1-arg case here. - | cmplwi NARGS8:RC, 8 - | lwz CARG3, 0(BASE) - | lwz STR:CARG1, 4(BASE) - | bne ->fff_fallback // Need exactly 1 argument. - | checkstr CARG3 - | bne ->fff_fallback - | lwz TMP0, STR:CARG1->len - |.if DUALNUM - | lbz CARG1, STR:CARG1[1] // Access is always ok (NUL at end). - | li RD, (0+1)*8 - | lwz PC, FRAME_PC(BASE) - | cmplwi TMP0, 0 - | la RA, -8(BASE) - | beqy ->fff_res - | b ->fff_resi - |.else - | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end). - | addic TMP3, TMP0, -1 // RD = ((str->len != 0)+1)*8 - | subfe RD, TMP3, TMP0 - | stw TMP1, TONUM_LO // Inlined tonum_u f0, TMP1. - | addi RD, RD, 1 - | lfd f0, TONUM_D - | la RA, -8(BASE) - | lwz PC, FRAME_PC(BASE) - | fsub f0, f0, TOBIT - | slwi RD, RD, 3 - | stfd f0, 0(RA) - | b ->fff_res - |.endif - | - |.ffunc string_char // Only handle the 1-arg case here. - | ffgccheck - | cmplwi NARGS8:RC, 8 - | lwz CARG3, 0(BASE) - |.if DUALNUM - | lwz TMP0, 4(BASE) - | bne ->fff_fallback // Exactly 1 argument. - | checknum CARG3; bne ->fff_fallback - | la CARG2, 7(BASE) - |.else - | lfd FARG1, 0(BASE) - | bne ->fff_fallback // Exactly 1 argument. - | checknum CARG3; bge ->fff_fallback - | toint TMP0, FARG1 - | la CARG2, TMPD_BLO - |.endif - | li CARG3, 1 - | cmplwi TMP0, 255; bgt ->fff_fallback - |->fff_newstr: - | mr CARG1, L - | stp BASE, L->base - | stw PC, SAVE_PC - | bl extern lj_str_new // (lua_State *L, char *str, size_t l) - |->fff_resstr: - | // Returns GCstr *. - | lp BASE, L->base - | li CARG3, LJ_TSTR - | b ->fff_restv - | - |.ffunc string_sub - | ffgccheck - | cmplwi NARGS8:RC, 16 - | lwz CARG3, 16(BASE) - |.if not DUALNUM - | lfd f0, 16(BASE) - |.endif - | lwz TMP0, 0(BASE) - | lwz STR:CARG1, 4(BASE) - | blt ->fff_fallback - | lwz CARG2, 8(BASE) - |.if DUALNUM - | lwz TMP1, 12(BASE) - |.else - | lfd f1, 8(BASE) - |.endif - | li TMP2, -1 - | beq >1 - |.if DUALNUM - | checknum CARG3 - | lwz TMP2, 20(BASE) - | bne ->fff_fallback - |1: - | checknum CARG2; bne ->fff_fallback - |.else - | checknum CARG3; bge ->fff_fallback - | toint TMP2, f0 - |1: - | checknum CARG2; bge ->fff_fallback - |.endif - | checkstr TMP0; bne ->fff_fallback - |.if not DUALNUM - | toint TMP1, f1 - |.endif - | lwz TMP0, STR:CARG1->len - | cmplw TMP0, TMP2 // len < end? (unsigned compare) - | addi TMP3, TMP2, 1 - | blt >5 - |2: - | cmpwi TMP1, 0 // start <= 0? - | add TMP3, TMP1, TMP0 - | ble >7 - |3: - | sub CARG3, TMP2, TMP1 - | addi CARG2, STR:CARG1, #STR-1 - | srawi TMP0, CARG3, 31 - | addi CARG3, CARG3, 1 - | add CARG2, CARG2, TMP1 - | andc CARG3, CARG3, TMP0 - |.if GPR64 - | rldicl CARG2, CARG2, 0, 32 - | rldicl CARG3, CARG3, 0, 32 - |.endif - | b ->fff_newstr - | - |5: // Negative end or overflow. - | cmpw TMP0, TMP2 // len >= end? (signed compare) - | add TMP2, TMP0, TMP3 // Negative end: end = end+len+1. - | bge <2 - | mr TMP2, TMP0 // Overflow: end = len. 
- | b <2 - | - |7: // Negative start or underflow. - | .gpr64 extsw TMP1, TMP1 - | addic CARG3, TMP1, -1 - | subfe CARG3, CARG3, CARG3 - | srawi CARG2, TMP3, 31 // Note: modifies carry. - | andc TMP3, TMP3, CARG3 - | andc TMP1, TMP3, CARG2 - | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0) - | b <3 - | - |.macro ffstring_op, name - | .ffunc string_ .. name - | ffgccheck - | cmplwi NARGS8:RC, 8 - | lwz CARG3, 0(BASE) - | lwz STR:CARG2, 4(BASE) - | blt ->fff_fallback - | checkstr CARG3 - | la SBUF:CARG1, DISPATCH_GL(tmpbuf)(DISPATCH) - | bne ->fff_fallback - | lwz TMP0, SBUF:CARG1->b - | stw L, SBUF:CARG1->L - | stp BASE, L->base - | stw PC, SAVE_PC - | stw TMP0, SBUF:CARG1->p - | bl extern lj_buf_putstr_ .. name - | bl extern lj_buf_tostr - | b ->fff_resstr - |.endmacro - | - |ffstring_op reverse - |ffstring_op lower - |ffstring_op upper - | - |//-- Bit library -------------------------------------------------------- - | - |.macro .ffunc_bit, name - |.if DUALNUM - | .ffunc_1 bit_..name - | checknum CARG3; bnel ->fff_tobit_fb - |.else - | .ffunc_n bit_..name - | fadd FARG1, FARG1, TOBIT - | stfd FARG1, TMPD - | lwz CARG1, TMPD_LO - |.endif - |.endmacro - | - |.macro .ffunc_bit_op, name, ins - | .ffunc_bit name - | addi TMP1, BASE, 8 - | add TMP2, BASE, NARGS8:RC - |1: - | lwz CARG4, 0(TMP1) - | cmplw cr1, TMP1, TMP2 - |.if DUALNUM - | lwz CARG2, 4(TMP1) - |.else - | lfd FARG1, 0(TMP1) - |.endif - | bgey cr1, ->fff_resi - | checknum CARG4 - |.if DUALNUM - | bnel ->fff_bitop_fb - |.else - | fadd FARG1, FARG1, TOBIT - | bge ->fff_fallback - | stfd FARG1, TMPD - | lwz CARG2, TMPD_LO - |.endif - | ins CARG1, CARG1, CARG2 - | addi TMP1, TMP1, 8 - | b <1 - |.endmacro - | - |.ffunc_bit_op band, and - |.ffunc_bit_op bor, or - |.ffunc_bit_op bxor, xor - | - |.ffunc_bit bswap - | rotlwi TMP0, CARG1, 8 - | rlwimi TMP0, CARG1, 24, 0, 7 - | rlwimi TMP0, CARG1, 24, 16, 23 - | mr CRET1, TMP0 - | b ->fff_resi - | - |.ffunc_bit bnot - | not CRET1, CARG1 - | b ->fff_resi - | - |.macro .ffunc_bit_sh, name, ins, shmod - |.if DUALNUM - | .ffunc_2 bit_..name - | checknum CARG3; bnel ->fff_tobit_fb - | // Note: no inline conversion from number for 2nd argument! - | checknum CARG4; bne ->fff_fallback - |.else - | .ffunc_nn bit_..name - | fadd FARG1, FARG1, TOBIT - | fadd FARG2, FARG2, TOBIT - | stfd FARG1, TMPD - | lwz CARG1, TMPD_LO - | stfd FARG2, TMPD - | lwz CARG2, TMPD_LO - |.endif - |.if shmod == 1 - | rlwinm CARG2, CARG2, 0, 27, 31 - |.elif shmod == 2 - | neg CARG2, CARG2 - |.endif - | ins CRET1, CARG1, CARG2 - | b ->fff_resi - |.endmacro - | - |.ffunc_bit_sh lshift, slw, 1 - |.ffunc_bit_sh rshift, srw, 1 - |.ffunc_bit_sh arshift, sraw, 1 - |.ffunc_bit_sh rol, rotlw, 0 - |.ffunc_bit_sh ror, rotlw, 2 - | - |.ffunc_bit tobit - |.if DUALNUM - | b ->fff_resi - |.else - |->fff_resi: - | tonum_i FARG1, CRET1 - |.endif - |->fff_resn: - | lwz PC, FRAME_PC(BASE) - | la RA, -8(BASE) - | stfd FARG1, -8(BASE) - | b ->fff_res1 - | - |// Fallback FP number to bit conversion. - |->fff_tobit_fb: - |.if DUALNUM - | lfd FARG1, 0(BASE) - | bgt ->fff_fallback - | fadd FARG1, FARG1, TOBIT - | stfd FARG1, TMPD - | lwz CARG1, TMPD_LO - | blr - |.endif - |->fff_bitop_fb: - |.if DUALNUM - | lfd FARG1, 0(TMP1) - | bgt ->fff_fallback - | fadd FARG1, FARG1, TOBIT - | stfd FARG1, TMPD - | lwz CARG2, TMPD_LO - | blr - |.endif - | - |//----------------------------------------------------------------------- - | - |->fff_fallback: // Call fast function fallback handler. 
- | // BASE = new base, RB = CFUNC, RC = nargs*8 - | lp TMP3, CFUNC:RB->f - | add TMP1, BASE, NARGS8:RC - | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC. - | addi TMP0, TMP1, 8*LUA_MINSTACK - | lwz TMP2, L->maxstack - | stw PC, SAVE_PC // Redundant (but a defined value). - | .toc lp TMP3, 0(TMP3) - | cmplw TMP0, TMP2 - | stp BASE, L->base - | stp TMP1, L->top - | mr CARG1, L - | bgt >5 // Need to grow stack. - | mtctr TMP3 - | bctrl // (lua_State *L) - | // Either throws an error, or recovers and returns -1, 0 or nresults+1. - | lp BASE, L->base - | cmpwi CRET1, 0 - | slwi RD, CRET1, 3 - | la RA, -8(BASE) - | bgt ->fff_res // Returned nresults+1? - |1: // Returned 0 or -1: retry fast path. - | lp TMP0, L->top - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | sub NARGS8:RC, TMP0, BASE - | bne ->vm_call_tail // Returned -1? - | ins_callt // Returned 0: retry fast path. - | - |// Reconstruct previous base for vmeta_call during tailcall. - |->vm_call_tail: - | andix. TMP0, PC, FRAME_TYPE - | rlwinm TMP1, PC, 0, 0, 28 - | bne >3 - | lwz INS, -4(PC) - | decode_RA8 TMP1, INS - | addi TMP1, TMP1, 8 - |3: - | sub TMP2, BASE, TMP1 - | b ->vm_call_dispatch // Resolve again for tailcall. - | - |5: // Grow stack for fallback handler. - | li CARG2, LUA_MINSTACK - | bl extern lj_state_growstack // (lua_State *L, int n) - | lp BASE, L->base - | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry. - | b <1 - | - |->fff_gcstep: // Call GC step function. - | // BASE = new base, RC = nargs*8 - | mflr SAVE0 - | stp BASE, L->base - | add TMP0, BASE, NARGS8:RC - | stw PC, SAVE_PC // Redundant (but a defined value). - | stp TMP0, L->top - | mr CARG1, L - | bl extern lj_gc_step // (lua_State *L) - | lp BASE, L->base - | mtlr SAVE0 - | lp TMP0, L->top - | sub NARGS8:RC, TMP0, BASE - | lwz CFUNC:RB, FRAME_FUNC(BASE) - | blr - | - |//----------------------------------------------------------------------- - |//-- Special dispatch targets ------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_record: // Dispatch target for recording phase. - |.if JIT - | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | andix. TMP0, TMP3, HOOK_VMEVENT // No recording while in vmevent. - | bne >5 - | // Decrement the hookcount for consistency, but always do the call. - | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | andix. TMP0, TMP3, HOOK_ACTIVE - | bne >1 - | subi TMP2, TMP2, 1 - | andi. TMP0, TMP3, LUA_MASKLINE|LUA_MASKCOUNT - | beqy >1 - | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | b >1 - |.endif - | - |->vm_rethook: // Dispatch target for return hooks. - | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active? - | beq >1 - |5: // Re-dispatch to static ins. - | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OPP TMP1, INS. - | lpx TMP0, DISPATCH, TMP1 - | mtctr TMP0 - | bctr - | - |->vm_inshook: // Dispatch target for instr/line hooks. - | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) - | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active? - | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0 - | bne <5 - | - | cmpwi cr1, TMP0, 0 - | addic. TMP2, TMP2, -1 - | beq cr1, <5 - | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH) - | beq >1 - | bge cr1, <5 - |1: - | mr CARG1, L - | stw MULTRES, SAVE_MULTRES - | mr CARG2, PC - | stp BASE, L->base - | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. 
- | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) - |3: - | lp BASE, L->base - |4: // Re-dispatch to static ins. - | lwz INS, -4(PC) - | decode_OPP TMP1, INS - | decode_RB8 RB, INS - | addi TMP1, TMP1, GG_DISP2STATIC - | decode_RD8 RD, INS - | lpx TMP0, DISPATCH, TMP1 - | decode_RA8 RA, INS - | decode_RC8 RC, INS - | mtctr TMP0 - | bctr - | - |->cont_hook: // Continue from hook yield. - | addi PC, PC, 4 - | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins. - | b <4 - | - |->vm_hotloop: // Hot loop counter underflow. - |.if JIT - | lwz LFUNC:TMP1, FRAME_FUNC(BASE) - | addi CARG1, DISPATCH, GG_DISP2J - | stw PC, SAVE_PC - | lwz TMP1, LFUNC:TMP1->pc - | mr CARG2, PC - | stw L, DISPATCH_J(L)(DISPATCH) - | lbz TMP1, PC2PROTO(framesize)(TMP1) - | stp BASE, L->base - | slwi TMP1, TMP1, 3 - | add TMP1, BASE, TMP1 - | stp TMP1, L->top - | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc) - | b <3 - |.endif - | - |->vm_callhook: // Dispatch target for call hooks. - | mr CARG2, PC - |.if JIT - | b >1 - |.endif - | - |->vm_hotcall: // Hot call counter underflow. - |.if JIT - | ori CARG2, PC, 1 - |1: - |.endif - | add TMP0, BASE, RC - | stw PC, SAVE_PC - | mr CARG1, L - | stp BASE, L->base - | sub RA, RA, BASE - | stp TMP0, L->top - | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) - | // Returns ASMFunction. - | lp BASE, L->base - | lp TMP0, L->top - | stw ZERO, SAVE_PC // Invalidate for subsequent line hook. - | sub NARGS8:RC, TMP0, BASE - | add RA, BASE, RA - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | lwz INS, -4(PC) - | mtctr CRET1 - | bctr - | - |->cont_stitch: // Trace stitching. - |.if JIT - | // RA = resultptr, RB = meta base - | lwz INS, -4(PC) - | lwz TMP3, -20(RB) // Save previous trace number. - | addic. TMP1, MULTRES, -8 - | decode_RA8 RC, INS // Call base. - | beq >2 - |1: // Move results down. - | lfd f0, 0(RA) - | addic. TMP1, TMP1, -8 - | addi RA, RA, 8 - | stfdx f0, BASE, RC - | addi RC, RC, 8 - | bne <1 - |2: - | decode_RA8 RA, INS - | decode_RB8 RB, INS - | add RA, RA, RB - | lwz TMP1, DISPATCH_J(trace)(DISPATCH) - |3: - | cmplw RA, RC - | bgt >9 // More results wanted? - | - | slwi TMP2, TMP3, 2 - | lwzx TRACE:TMP2, TMP1, TMP2 - | cmpwi TRACE:TMP2, 0 - | beq ->cont_nop - | lhz RD, TRACE:TMP2->link - | cmpw RD, TMP3 - | cmpwi cr1, RD, 0 - | beq ->cont_nop // Blacklisted. - | slwi RD, RD, 3 - | bne cr1, =>BC_JLOOP // Jump to stitched trace. - | - | // Stitch a new trace to the previous trace. - | stw TMP3, DISPATCH_J(exitno)(DISPATCH) - | stp L, DISPATCH_J(L)(DISPATCH) - | stp BASE, L->base - | addi CARG1, DISPATCH, GG_DISP2J - | mr CARG2, PC - | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) - | lp BASE, L->base - | b ->cont_nop - | - |9: - | stwx TISNIL, BASE, RC - | addi RC, RC, 8 - | b <3 - |.endif - | - |->vm_profhook: // Dispatch target for profiler hook. -#if LJ_HASPROFILE - | mr CARG1, L - | stw MULTRES, SAVE_MULTRES - | mr CARG2, PC - | stp BASE, L->base - | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) - | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. 
- | lp BASE, L->base - | subi PC, PC, 4 - | b ->cont_nop -#endif - | - |//----------------------------------------------------------------------- - |//-- Trace exit handler ------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro savex_, a, b, c, d - | stfd f..a, 16+a*8(sp) - | stfd f..b, 16+b*8(sp) - | stfd f..c, 16+c*8(sp) - | stfd f..d, 16+d*8(sp) - |.endmacro - | - |->vm_exit_handler: - |.if JIT - | addi sp, sp, -(16+32*8+32*4) - | stmw r2, 16+32*8+2*4(sp) - | addi DISPATCH, JGL, -GG_DISP2G-32768 - | li CARG2, ~LJ_VMST_EXIT - | lwz CARG1, 16+32*8+32*4(sp) // Get stack chain. - | stw CARG2, DISPATCH_GL(vmstate)(DISPATCH) - | savex_ 0,1,2,3 - | stw CARG1, 0(sp) // Store extended stack chain. - | clrso TMP1 - | savex_ 4,5,6,7 - | addi CARG2, sp, 16+32*8+32*4 // Recompute original value of sp. - | savex_ 8,9,10,11 - | stw CARG2, 16+32*8+1*4(sp) // Store sp in RID_SP. - | savex_ 12,13,14,15 - | mflr CARG3 - | li TMP1, 0 - | savex_ 16,17,18,19 - | stw TMP1, 16+32*8+0*4(sp) // Clear RID_TMP. - | savex_ 20,21,22,23 - | lhz CARG4, 2(CARG3) // Load trace number. - | savex_ 24,25,26,27 - | lwz L, DISPATCH_GL(cur_L)(DISPATCH) - | savex_ 28,29,30,31 - | sub CARG3, TMP0, CARG3 // Compute exit number. - | lp BASE, DISPATCH_GL(jit_base)(DISPATCH) - | srwi CARG3, CARG3, 2 - | stp L, DISPATCH_J(L)(DISPATCH) - | subi CARG3, CARG3, 2 - | stp BASE, L->base - | stw CARG4, DISPATCH_J(parent)(DISPATCH) - | stw TMP1, DISPATCH_GL(jit_base)(DISPATCH) - | addi CARG1, DISPATCH, GG_DISP2J - | stw CARG3, DISPATCH_J(exitno)(DISPATCH) - | addi CARG2, sp, 16 - | bl extern lj_trace_exit // (jit_State *J, ExitState *ex) - | // Returns MULTRES (unscaled) or negated error code. - | lp TMP1, L->cframe - | lwz TMP2, 0(sp) - | lp BASE, L->base - |.if GPR64 - | rldicr sp, TMP1, 0, 61 - |.else - | rlwinm sp, TMP1, 0, 0, 29 - |.endif - | lwz PC, SAVE_PC // Get SAVE_PC. - | stw TMP2, 0(sp) - | stw L, SAVE_L // Set SAVE_L (on-trace resume/yield). - | b >1 - |.endif - |->vm_exit_interp: - |.if JIT - | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set. - | lwz L, SAVE_L - | addi DISPATCH, JGL, -GG_DISP2G-32768 - | stp BASE, L->base - |1: - | cmpwi CARG1, 0 - | blt >9 // Check for error from exit. - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | slwi MULTRES, CARG1, 3 - | li TMP2, 0 - | stw MULTRES, SAVE_MULTRES - | lwz TMP1, LFUNC:RB->pc - | stw TMP2, DISPATCH_GL(jit_base)(DISPATCH) - | lwz KBASE, PC2PROTO(k)(TMP1) - | // Setup type comparison constants. - | li TISNUM, LJ_TISNUM - | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). - | stw TMP3, TMPD - | li ZERO, 0 - | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). - | lfs TOBIT, TMPD - | stw TMP3, TMPD - | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) - | li TISNIL, LJ_TNIL - | stw TMP0, TONUM_HI - | lfs TONUM, TMPD - | // Modified copy of ins_next which handles function header dispatch, too. - | lwz INS, 0(PC) - | addi PC, PC, 4 - | // Assumes TISNIL == ~LJ_VMST_INTERP == -1. - | stw TISNIL, DISPATCH_GL(vmstate)(DISPATCH) - | decode_OPP TMP1, INS - | decode_RA8 RA, INS - | lpx TMP0, DISPATCH, TMP1 - | mtctr TMP0 - | cmplwi TMP1, BC_FUNCF*4 // Function header? - | bge >2 - | decode_RB8 RB, INS - | decode_RD8 RD, INS - | decode_RC8 RC, INS - | bctr - |2: - | cmplwi TMP1, (BC_FUNCC+2)*4 // Fast function? - | blt >3 - | // Check frame below fast function. - | lwz TMP1, FRAME_PC(BASE) - | andix. TMP0, TMP1, FRAME_TYPE - | bney >3 // Trace stitching continuation? 
- | // Otherwise set KBASE for Lua function below fast function. - | lwz TMP2, -4(TMP1) - | decode_RA8 TMP0, TMP2 - | sub TMP1, BASE, TMP0 - | lwz LFUNC:TMP2, -12(TMP1) - | lwz TMP1, LFUNC:TMP2->pc - | lwz KBASE, PC2PROTO(k)(TMP1) - |3: - | subi RC, MULTRES, 8 - | add RA, RA, BASE - | bctr - | - |9: // Rethrow error from the right C frame. - | neg CARG2, CARG1 - | mr CARG1, L - | bl extern lj_err_throw // (lua_State *L, int errcode) - |.endif - | - |//----------------------------------------------------------------------- - |//-- Math helper functions ---------------------------------------------- - |//----------------------------------------------------------------------- - | - |// NYI: Use internal implementations of floor, ceil, trunc. - | - |->vm_modi: - | divwo. TMP0, CARG1, CARG2 - | bso >1 - |.if GPR64 - | xor CARG3, CARG1, CARG2 - | cmpwi CARG3, 0 - |.else - | xor. CARG3, CARG1, CARG2 - |.endif - | mullw TMP0, TMP0, CARG2 - | sub CARG1, CARG1, TMP0 - | bgelr - | cmpwi CARG1, 0; beqlr - | add CARG1, CARG1, CARG2 - | blr - |1: - | cmpwi CARG2, 0 - | li CARG1, 0 - | beqlr - | clrso TMP0 // Clear SO for -2147483648 % -1 and return 0. - | blr - | - |//----------------------------------------------------------------------- - |//-- Miscellaneous functions -------------------------------------------- - |//----------------------------------------------------------------------- - | - |// void lj_vm_cachesync(void *start, void *end) - |// Flush D-Cache and invalidate I-Cache. Assumes 32 byte cache line size. - |// This is a good lower bound, except for very ancient PPC models. - |->vm_cachesync: - |.if JIT or FFI - | // Compute start of first cache line and number of cache lines. - | rlwinm CARG1, CARG1, 0, 0, 26 - | sub CARG2, CARG2, CARG1 - | addi CARG2, CARG2, 31 - | rlwinm. CARG2, CARG2, 27, 5, 31 - | beqlr - | mtctr CARG2 - | mr CARG3, CARG1 - |1: // Flush D-Cache. - | dcbst r0, CARG1 - | addi CARG1, CARG1, 32 - | bdnz <1 - | sync - | mtctr CARG2 - |1: // Invalidate I-Cache. - | icbi r0, CARG3 - | addi CARG3, CARG3, 32 - | bdnz <1 - | isync - | blr - |.endif - | - |//----------------------------------------------------------------------- - |//-- FFI helper functions ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Handler for callback functions. Callback slot number in r11, g in r12. - |->vm_ffi_callback: - |.if FFI - |.type CTSTATE, CTState, PC - | saveregs - | lwz CTSTATE, GL:r12->ctype_state - | addi DISPATCH, r12, GG_G2DISP - | stw r11, CTSTATE->cb.slot - | stw r3, CTSTATE->cb.gpr[0] - | stfd f1, CTSTATE->cb.fpr[0] - | stw r4, CTSTATE->cb.gpr[1] - | stfd f2, CTSTATE->cb.fpr[1] - | stw r5, CTSTATE->cb.gpr[2] - | stfd f3, CTSTATE->cb.fpr[2] - | stw r6, CTSTATE->cb.gpr[3] - | stfd f4, CTSTATE->cb.fpr[3] - | stw r7, CTSTATE->cb.gpr[4] - | stfd f5, CTSTATE->cb.fpr[4] - | stw r8, CTSTATE->cb.gpr[5] - | stfd f6, CTSTATE->cb.fpr[5] - | stw r9, CTSTATE->cb.gpr[6] - | stfd f7, CTSTATE->cb.fpr[6] - | stw r10, CTSTATE->cb.gpr[7] - | stfd f8, CTSTATE->cb.fpr[7] - | addi TMP0, sp, CFRAME_SPACE+8 - | stw TMP0, CTSTATE->cb.stack - | mr CARG1, CTSTATE - | stw CTSTATE, SAVE_PC // Any value outside of bytecode is ok. - | mr CARG2, sp - | bl extern lj_ccallback_enter // (CTState *cts, void *cf) - | // Returns lua_State *. - | lp BASE, L:CRET1->base - | li TISNUM, LJ_TISNUM // Setup type comparison constants. - | lp RC, L:CRET1->top - | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 
- | li ZERO, 0 - | mr L, CRET1 - | stw TMP3, TMPD - | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). - | stw TMP0, TONUM_HI - | li TISNIL, LJ_TNIL - | li_vmstate INTERP - | lfs TOBIT, TMPD - | stw TMP3, TMPD - | sub RC, RC, BASE - | st_vmstate - | lfs TONUM, TMPD - | ins_callt - |.endif - | - |->cont_ffi_callback: // Return from FFI callback. - |.if FFI - | lwz CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH) - | stp BASE, L->base - | stp RB, L->top - | stp L, CTSTATE->L - | mr CARG1, CTSTATE - | mr CARG2, RA - | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) - | lwz CRET1, CTSTATE->cb.gpr[0] - | lfd FARG1, CTSTATE->cb.fpr[0] - | lwz CRET2, CTSTATE->cb.gpr[1] - | b ->vm_leave_unw - |.endif - | - |->vm_ffi_call: // Call C function via FFI. - | // Caveat: needs special frame unwinding, see below. - |.if FFI - | .type CCSTATE, CCallState, CARG1 - | lwz TMP1, CCSTATE->spadj - | mflr TMP0 - | lbz CARG2, CCSTATE->nsp - | lbz CARG3, CCSTATE->nfpr - | neg TMP1, TMP1 - | stw TMP0, 4(sp) - | cmpwi cr1, CARG3, 0 - | mr TMP2, sp - | addic. CARG2, CARG2, -1 - | stwux sp, sp, TMP1 - | crnot 4*cr1+eq, 4*cr1+eq // For vararg calls. - | stw r14, -4(TMP2) - | stw CCSTATE, -8(TMP2) - | mr r14, TMP2 - | la TMP1, CCSTATE->stack - | slwi CARG2, CARG2, 2 - | blty >2 - | la TMP2, 8(sp) - |1: - | lwzx TMP0, TMP1, CARG2 - | stwx TMP0, TMP2, CARG2 - | addic. CARG2, CARG2, -4 - | bge <1 - |2: - | bney cr1, >3 - | lfd f1, CCSTATE->fpr[0] - | lfd f2, CCSTATE->fpr[1] - | lfd f3, CCSTATE->fpr[2] - | lfd f4, CCSTATE->fpr[3] - | lfd f5, CCSTATE->fpr[4] - | lfd f6, CCSTATE->fpr[5] - | lfd f7, CCSTATE->fpr[6] - | lfd f8, CCSTATE->fpr[7] - |3: - | lp TMP0, CCSTATE->func - | lwz CARG2, CCSTATE->gpr[1] - | lwz CARG3, CCSTATE->gpr[2] - | lwz CARG4, CCSTATE->gpr[3] - | lwz CARG5, CCSTATE->gpr[4] - | mtctr TMP0 - | lwz r8, CCSTATE->gpr[5] - | lwz r9, CCSTATE->gpr[6] - | lwz r10, CCSTATE->gpr[7] - | lwz CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1. - | bctrl - | lwz CCSTATE:TMP1, -8(r14) - | lwz TMP2, -4(r14) - | lwz TMP0, 4(r14) - | stw CARG1, CCSTATE:TMP1->gpr[0] - | stfd FARG1, CCSTATE:TMP1->fpr[0] - | stw CARG2, CCSTATE:TMP1->gpr[1] - | mtlr TMP0 - | stw CARG3, CCSTATE:TMP1->gpr[2] - | mr sp, r14 - | stw CARG4, CCSTATE:TMP1->gpr[3] - | mr r14, TMP2 - | blr - |.endif - |// Note: vm_ffi_call must be the last function in this object file! - | - |//----------------------------------------------------------------------- -} - -/* Generate the code for a single instruction. */ -static void build_ins(BuildCtx *ctx, BCOp op, int defop) -{ - int vk = 0; - |=>defop: - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - /* Remember: all ops branch for a true comparison, fall through otherwise. */ - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: - | // RA = src1*8, RD = src2*8, JMP with RD = target - |.if DUALNUM - | lwzux TMP0, RA, BASE - | addi PC, PC, 4 - | lwz CARG2, 4(RA) - | lwzux TMP1, RD, BASE - | lwz TMP2, -4(PC) - | checknum cr0, TMP0 - | lwz CARG3, 4(RD) - | decode_RD4 TMP2, TMP2 - | checknum cr1, TMP1 - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - | bne cr0, >7 - | bne cr1, >8 - | cmpw CARG2, CARG3 - if (op == BC_ISLT) { - | bge >2 - } else if (op == BC_ISGE) { - | blt >2 - } else if (op == BC_ISLE) { - | bgt >2 - } else { - | ble >2 - } - |1: - | add PC, PC, TMP2 - |2: - | ins_next - | - |7: // RA is not an integer. 
- | bgt cr0, ->vmeta_comp - | // RA is a number. - | lfd f0, 0(RA) - | bgt cr1, ->vmeta_comp - | blt cr1, >4 - | // RA is a number, RD is an integer. - | tonum_i f1, CARG3 - | b >5 - | - |8: // RA is an integer, RD is not an integer. - | bgt cr1, ->vmeta_comp - | // RA is an integer, RD is a number. - | tonum_i f0, CARG2 - |4: - | lfd f1, 0(RD) - |5: - | fcmpu cr0, f0, f1 - if (op == BC_ISLT) { - | bge <2 - } else if (op == BC_ISGE) { - | blt <2 - } else if (op == BC_ISLE) { - | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq - | bge <2 - } else { - | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq - | blt <2 - } - | b <1 - |.else - | lwzx TMP0, BASE, RA - | addi PC, PC, 4 - | lfdx f0, BASE, RA - | lwzx TMP1, BASE, RD - | checknum cr0, TMP0 - | lwz TMP2, -4(PC) - | lfdx f1, BASE, RD - | checknum cr1, TMP1 - | decode_RD4 TMP2, TMP2 - | bge cr0, ->vmeta_comp - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - | bge cr1, ->vmeta_comp - | fcmpu cr0, f0, f1 - if (op == BC_ISLT) { - | bge >1 - } else if (op == BC_ISGE) { - | blt >1 - } else if (op == BC_ISLE) { - | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq - | bge >1 - } else { - | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq - | blt >1 - } - | add PC, PC, TMP2 - |1: - | ins_next - |.endif - break; - - case BC_ISEQV: case BC_ISNEV: - vk = op == BC_ISEQV; - | // RA = src1*8, RD = src2*8, JMP with RD = target - |.if DUALNUM - | lwzux TMP0, RA, BASE - | addi PC, PC, 4 - | lwz CARG2, 4(RA) - | lwzux TMP1, RD, BASE - | checknum cr0, TMP0 - | lwz TMP2, -4(PC) - | checknum cr1, TMP1 - | decode_RD4 TMP2, TMP2 - | lwz CARG3, 4(RD) - | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - if (vk) { - | ble cr7, ->BC_ISEQN_Z - } else { - | ble cr7, ->BC_ISNEN_Z - } - |.else - | lwzux TMP0, RA, BASE - | lwz TMP2, 0(PC) - | lfd f0, 0(RA) - | addi PC, PC, 4 - | lwzux TMP1, RD, BASE - | checknum cr0, TMP0 - | decode_RD4 TMP2, TMP2 - | lfd f1, 0(RD) - | checknum cr1, TMP1 - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - | bge cr0, >5 - | bge cr1, >5 - | fcmpu cr0, f0, f1 - if (vk) { - | bne >1 - | add PC, PC, TMP2 - } else { - | beq >1 - | add PC, PC, TMP2 - } - |1: - | ins_next - |.endif - |5: // Either or both types are not numbers. - |.if not DUALNUM - | lwz CARG2, 4(RA) - | lwz CARG3, 4(RD) - |.endif - |.if FFI - | cmpwi cr7, TMP0, LJ_TCDATA - | cmpwi cr5, TMP1, LJ_TCDATA - |.endif - | not TMP3, TMP0 - | cmplw TMP0, TMP1 - | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive? - |.if FFI - | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq - |.endif - | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata? - |.if FFI - | beq cr7, ->vmeta_equal_cd - |.endif - | cmplw cr5, CARG2, CARG3 - | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive. - | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type. - | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv. - | mr SAVE0, PC - | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2. - | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2. - if (vk) { - | bne cr0, >6 - | add PC, PC, TMP2 - |6: - } else { - | beq cr0, >6 - | add PC, PC, TMP2 - |6: - } - |.if DUALNUM - | bge cr0, >2 // Done if 1 or 2. - |1: - | ins_next - |2: - |.else - | blt cr0, <1 // Done if 1 or 2. - |.endif - | blt cr6, <1 // Done if not tab/ud. - | - | // Different tables or userdatas. Need to check __eq metamethod. - | // Field metatable must be at same offset for GCtab and GCudata! - | lwz TAB:TMP2, TAB:CARG2->metatable - | li CARG4, 1-vk // ne = 0 or 1. - | cmplwi TAB:TMP2, 0 - | beq <1 // No metatable? - | lbz TMP2, TAB:TMP2->nomm - | andix. 
TMP2, TMP2, 1<<MM_eq
- | bne <1 // Or 'no __eq' flag set?
- | mr PC, SAVE0 // Restore old PC.
- | b ->vmeta_equal // Handle __eq metamethod.
- break;
-
- case BC_ISEQS: case BC_ISNES:
- vk = op == BC_ISEQS;
- | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
- | lwzux TMP0, RA, BASE
- | srwi RD, RD, 1
- | lwz STR:TMP3, 4(RA)
- | lwz TMP2, 0(PC)
- | subfic RD, RD, -4
- | addi PC, PC, 4
- |.if FFI
- | cmpwi TMP0, LJ_TCDATA
- |.endif
- | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
- | .gpr64 extsw TMP0, TMP0
- | subfic TMP0, TMP0, LJ_TSTR
- |.if FFI
- | beq ->vmeta_equal_cd
- |.endif
- | sub TMP1, STR:TMP1, STR:TMP3
- | or TMP0, TMP0, TMP1
- | decode_RD4 TMP2, TMP2
- | subfic TMP0, TMP0, 0
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | subfe TMP1, TMP1, TMP1
- if (vk) {
- | andc TMP2, TMP2, TMP1
- } else {
- | and TMP2, TMP2, TMP1
- }
- | add PC, PC, TMP2
- | ins_next
- break;
-
- case BC_ISEQN: case BC_ISNEN:
- vk = op == BC_ISEQN;
- | // RA = src*8, RD = num_const*8, JMP with RD = target
- |.if DUALNUM
- | lwzux TMP0, RA, BASE
- | addi PC, PC, 4
- | lwz CARG2, 4(RA)
- | lwzux TMP1, RD, KBASE
- | checknum cr0, TMP0
- | lwz TMP2, -4(PC)
- | checknum cr1, TMP1
- | decode_RD4 TMP2, TMP2
- | lwz CARG3, 4(RD)
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- if (vk) {
- |->BC_ISEQN_Z:
- } else {
- |->BC_ISNEN_Z:
- }
- | bne cr0, >7
- | bne cr1, >8
- | cmpw CARG2, CARG3
- |4:
- |.else
- if (vk) {
- |->BC_ISEQN_Z: // Dummy label.
- } else {
- |->BC_ISNEN_Z: // Dummy label.
- }
- | lwzx TMP0, BASE, RA
- | addi PC, PC, 4
- | lfdx f0, BASE, RA
- | lwz TMP2, -4(PC)
- | lfdx f1, KBASE, RD
- | decode_RD4 TMP2, TMP2
- | checknum TMP0
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | bge >3
- | fcmpu cr0, f0, f1
- |.endif
- if (vk) {
- | bne >1
- | add PC, PC, TMP2
- |1:
- |.if not FFI
- |3:
- |.endif
- } else {
- | beq >2
- |1:
- |.if not FFI
- |3:
- |.endif
- | add PC, PC, TMP2
- |2:
- }
- | ins_next
- |.if FFI
- |3:
- | cmpwi TMP0, LJ_TCDATA
- | beq ->vmeta_equal_cd
- | b <1
- |.endif
- |.if DUALNUM
- |7: // RA is not an integer.
- | bge cr0, <3
- | // RA is a number.
- | lfd f0, 0(RA)
- | blt cr1, >1
- | // RA is a number, RD is an integer.
- | tonum_i f1, CARG3
- | b >2
- |
- |8: // RA is an integer, RD is a number.
- | tonum_i f0, CARG2 - |1: - | lfd f1, 0(RD) - |2: - | fcmpu cr0, f0, f1 - | b <4 - |.endif - break; - - case BC_ISEQP: case BC_ISNEP: - vk = op == BC_ISEQP; - | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target - | lwzx TMP0, BASE, RA - | srwi TMP1, RD, 3 - | lwz TMP2, 0(PC) - | not TMP1, TMP1 - | addi PC, PC, 4 - |.if FFI - | cmpwi TMP0, LJ_TCDATA - |.endif - | sub TMP0, TMP0, TMP1 - |.if FFI - | beq ->vmeta_equal_cd - |.endif - | decode_RD4 TMP2, TMP2 - | .gpr64 extsw TMP0, TMP0 - | addic TMP0, TMP0, -1 - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - | subfe TMP1, TMP1, TMP1 - if (vk) { - | and TMP2, TMP2, TMP1 - } else { - | andc TMP2, TMP2, TMP1 - } - | add PC, PC, TMP2 - | ins_next - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: - | // RA = dst*8 or unused, RD = src*8, JMP with RD = target - | lwzx TMP0, BASE, RD - | lwz INS, 0(PC) - | addi PC, PC, 4 - if (op == BC_IST || op == BC_ISF) { - | .gpr64 extsw TMP0, TMP0 - | subfic TMP0, TMP0, LJ_TTRUE - | decode_RD4 TMP2, INS - | subfe TMP1, TMP1, TMP1 - | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) - if (op == BC_IST) { - | andc TMP2, TMP2, TMP1 - } else { - | and TMP2, TMP2, TMP1 - } - | add PC, PC, TMP2 - } else { - | li TMP1, LJ_TFALSE - | lfdx f0, BASE, RD - | cmplw TMP0, TMP1 - if (op == BC_ISTC) { - | bge >1 - } else { - | blt >1 - } - | addis PC, PC, -(BCBIAS_J*4 >> 16) - | decode_RD4 TMP2, INS - | stfdx f0, BASE, RA - | add PC, PC, TMP2 - |1: - } - | ins_next - break; - - case BC_ISTYPE: - | // RA = src*8, RD = -type*8 - | lwzx TMP0, BASE, RA - | srwi TMP1, RD, 3 - | ins_next1 - |.if not PPE and not GPR64 - | add. TMP0, TMP0, TMP1 - |.else - | neg TMP1, TMP1 - | cmpw TMP0, TMP1 - |.endif - | bne ->vmeta_istype - | ins_next2 - break; - case BC_ISNUM: - | // RA = src*8, RD = -(TISNUM-1)*8 - | lwzx TMP0, BASE, RA - | ins_next1 - | checknum TMP0 - | bge ->vmeta_istype - | ins_next2 - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_MOV: - | // RA = dst*8, RD = src*8 - | ins_next1 - | lfdx f0, BASE, RD - | stfdx f0, BASE, RA - | ins_next2 - break; - case BC_NOT: - | // RA = dst*8, RD = src*8 - | ins_next1 - | lwzx TMP0, BASE, RD - | .gpr64 extsw TMP0, TMP0 - | subfic TMP1, TMP0, LJ_TTRUE - | adde TMP0, TMP0, TMP1 - | stwx TMP0, BASE, RA - | ins_next2 - break; - case BC_UNM: - | // RA = dst*8, RD = src*8 - | lwzux TMP1, RD, BASE - | lwz TMP0, 4(RD) - | checknum TMP1 - |.if DUALNUM - | bne >5 - |.if GPR64 - | lus TMP2, 0x8000 - | neg TMP0, TMP0 - | cmplw TMP0, TMP2 - | beq >4 - |.else - | nego. TMP0, TMP0 - | bso >4 - |1: - |.endif - | ins_next1 - | stwux TISNUM, RA, BASE - | stw TMP0, 4(RA) - |3: - | ins_next2 - |4: - |.if not GPR64 - | // Potential overflow. - | checkov TMP1, <1 // Ignore unrelated overflow. - |.endif - | lus TMP1, 0x41e0 // 2^31. - | li TMP0, 0 - | b >7 - |.endif - |5: - | bge ->vmeta_unm - | xoris TMP1, TMP1, 0x8000 - |7: - | ins_next1 - | stwux TMP1, RA, BASE - | stw TMP0, 4(RA) - |.if DUALNUM - | b <3 - |.else - | ins_next2 - |.endif - break; - case BC_LEN: - | // RA = dst*8, RD = src*8 - | lwzux TMP0, RD, BASE - | lwz CARG1, 4(RD) - | checkstr TMP0; bne >2 - | lwz CRET1, STR:CARG1->len - |1: - |.if DUALNUM - | ins_next1 - | stwux TISNUM, RA, BASE - | stw CRET1, 4(RA) - |.else - | tonum_u f0, CRET1 // Result is a non-negative integer. 
- | ins_next1
- | stfdx f0, BASE, RA
- |.endif
- | ins_next2
- |2:
- | checktab TMP0; bne ->vmeta_len
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | cmplwi TAB:TMP2, 0
- | bne >9
- |3:
-#endif
- |->BC_LEN_Z:
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | b <1
-#if LJ_52
- |9:
- | lbz TMP0, TAB:TMP2->nomm
- | andix. TMP0, TMP0, 1<<MM_len
- | bne <3 // 'no __len' flag set: done.
- | b ->vmeta_len
-#endif
- break;
-
- /* -- Binary ops -------------------------------------------------------- */
-
- |.macro ins_arithpre
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- ||switch (vk) {
- ||case 0:
- | lwzx TMP1, BASE, RB
- | .if DUALNUM
- | lwzx TMP2, KBASE, RC
- | .endif
- | lfdx f14, BASE, RB
- | lfdx f15, KBASE, RC
- | .if DUALNUM
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_vn
- | .else
- | checknum TMP1; bge ->vmeta_arith_vn
- | .endif
- || break;
- ||case 1:
- | lwzx TMP1, BASE, RB
- | .if DUALNUM
- | lwzx TMP2, KBASE, RC
- | .endif
- | lfdx f15, BASE, RB
- | lfdx f14, KBASE, RC
- | .if DUALNUM
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_nv
- | .else
- | checknum TMP1; bge ->vmeta_arith_nv
- | .endif
- || break;
- ||default:
- | lwzx TMP1, BASE, RB
- | lwzx TMP2, BASE, RC
- | lfdx f14, BASE, RB
- | lfdx f15, BASE, RC
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_vv
- || break;
- ||}
- |.endmacro
- |
- |.macro ins_arithfallback, ins
- ||switch (vk) {
- ||case 0:
- | ins ->vmeta_arith_vn2
- || break;
- ||case 1:
- | ins ->vmeta_arith_nv2
- || break;
- ||default:
- | ins ->vmeta_arith_vv2
- || break;
- ||}
- |.endmacro
- |
- |.macro intmod, a, b, c
- | bl ->vm_modi
- |.endmacro
- |
- |.macro fpmod, a, b, c
- |->BC_MODVN_Z:
- | fdiv FARG1, b, c
- | // NYI: Use internal implementation of floor.
- | blex floor // floor(b/c)
- | fmul a, FARG1, c
- | fsub a, b, a // b - floor(b/c)*c
- |.endmacro
- |
- |.macro ins_arithfp, fpins
- | ins_arithpre
- |.if "fpins" == "fpmod_"
- | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
- |.else
- | fpins f0, f14, f15
- | ins_next1
- | stfdx f0, BASE, RA
- | ins_next2
- |.endif
- |.endmacro
- |
- |.macro ins_arithdn, intins, fpins
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- ||switch (vk) {
- ||case 0:
- | lwzux TMP1, RB, BASE
- | lwzux TMP2, RC, KBASE
- | lwz CARG1, 4(RB)
- | checknum cr0, TMP1
- | lwz CARG2, 4(RC)
- || break;
- ||case 1:
- | lwzux TMP1, RB, BASE
- | lwzux TMP2, RC, KBASE
- | lwz CARG2, 4(RB)
- | checknum cr0, TMP1
- | lwz CARG1, 4(RC)
- || break;
- ||default:
- | lwzux TMP1, RB, BASE
- | lwzux TMP2, RC, BASE
- | lwz CARG1, 4(RB)
- | checknum cr0, TMP1
- | lwz CARG2, 4(RC)
- || break;
- ||}
- | checknum cr1, TMP2
- | bne >5
- | bne cr1, >5
- | intins CARG1, CARG1, CARG2
- | bso >4
- |1:
- | ins_next1
- | stwux TISNUM, RA, BASE
- | stw CARG1, 4(RA)
- |2:
- | ins_next2
- |4: // Overflow.
- | checkov TMP0, <1 // Ignore unrelated overflow.
- | ins_arithfallback b
- |5: // FP variant.
- ||if (vk == 1) {
- | lfd f15, 0(RB)
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | lfd f14, 0(RC)
- ||} else {
- | lfd f14, 0(RB)
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | lfd f15, 0(RC)
- ||}
- | ins_arithfallback bge
- |.if "fpins" == "fpmod_"
- | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
- |.else - | fpins f0, f14, f15 - | ins_next1 - | stfdx f0, BASE, RA - | b <2 - |.endif - |.endmacro - | - |.macro ins_arith, intins, fpins - |.if DUALNUM - | ins_arithdn intins, fpins - |.else - | ins_arithfp fpins - |.endif - |.endmacro - - case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: - |.if GPR64 - |.macro addo32., y, a, b - | // Need to check overflow for (a<<32) + (b<<32). - | rldicr TMP0, a, 32, 31 - | rldicr TMP3, b, 32, 31 - | addo. TMP0, TMP0, TMP3 - | add y, a, b - |.endmacro - | ins_arith addo32., fadd - |.else - | ins_arith addo., fadd - |.endif - break; - case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: - |.if GPR64 - |.macro subo32., y, a, b - | // Need to check overflow for (a<<32) - (b<<32). - | rldicr TMP0, a, 32, 31 - | rldicr TMP3, b, 32, 31 - | subo. TMP0, TMP0, TMP3 - | sub y, a, b - |.endmacro - | ins_arith subo32., fsub - |.else - | ins_arith subo., fsub - |.endif - break; - case BC_MULVN: case BC_MULNV: case BC_MULVV: - | ins_arith mullwo., fmul - break; - case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: - | ins_arithfp fdiv - break; - case BC_MODVN: - | ins_arith intmod, fpmod - break; - case BC_MODNV: case BC_MODVV: - | ins_arith intmod, fpmod_ - break; - case BC_POW: - | // NYI: (partial) integer arithmetic. - | lwzx TMP1, BASE, RB - | lfdx FARG1, BASE, RB - | lwzx TMP2, BASE, RC - | lfdx FARG2, BASE, RC - | checknum cr0, TMP1 - | checknum cr1, TMP2 - | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt - | bge ->vmeta_arith_vv - | blex pow - | ins_next1 - | stfdx FARG1, BASE, RA - | ins_next2 - break; - - case BC_CAT: - | // RA = dst*8, RB = src_start*8, RC = src_end*8 - | sub CARG3, RC, RB - | stp BASE, L->base - | add CARG2, BASE, RC - | mr SAVE0, RB - |->BC_CAT_Z: - | stw PC, SAVE_PC - | mr CARG1, L - | srwi CARG3, CARG3, 3 - | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) - | // Returns NULL (finished) or TValue * (metamethod). - | cmplwi CRET1, 0 - | lp BASE, L->base - | bne ->vmeta_binop - | ins_next1 - | lfdx f0, BASE, SAVE0 // Copy result from RB to RA. - | stfdx f0, BASE, RA - | ins_next2 - break; - - /* -- Constant ops ------------------------------------------------------ */ - - case BC_KSTR: - | // RA = dst*8, RD = str_const*8 (~) - | srwi TMP1, RD, 1 - | subfic TMP1, TMP1, -4 - | ins_next1 - | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4 - | li TMP2, LJ_TSTR - | stwux TMP2, RA, BASE - | stw TMP0, 4(RA) - | ins_next2 - break; - case BC_KCDATA: - |.if FFI - | // RA = dst*8, RD = cdata_const*8 (~) - | srwi TMP1, RD, 1 - | subfic TMP1, TMP1, -4 - | ins_next1 - | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4 - | li TMP2, LJ_TCDATA - | stwux TMP2, RA, BASE - | stw TMP0, 4(RA) - | ins_next2 - |.endif - break; - case BC_KSHORT: - | // RA = dst*8, RD = int16_literal*8 - |.if DUALNUM - | slwi RD, RD, 13 - | srawi RD, RD, 16 - | ins_next1 - | stwux TISNUM, RA, BASE - | stw RD, 4(RA) - | ins_next2 - |.else - | // The soft-float approach is faster. - | slwi RD, RD, 13 - | srawi TMP1, RD, 31 - | xor TMP2, TMP1, RD - | sub TMP2, TMP2, TMP1 // TMP2 = abs(x) - | cntlzw TMP3, TMP2 - | subfic TMP1, TMP3, 0x40d // TMP1 = exponent-1 - | slw TMP2, TMP2, TMP3 // TMP2 = left aligned mantissa - | subfic TMP3, RD, 0 - | slwi TMP1, TMP1, 20 - | rlwimi RD, TMP2, 21, 1, 31 // hi = sign(x) | (mantissa>>11) - | subfe TMP0, TMP0, TMP0 - | add RD, RD, TMP1 // hi = hi + exponent-1 - | and RD, RD, TMP0 // hi = x == 0 ? 
0 : hi - | ins_next1 - | stwux RD, RA, BASE - | stw ZERO, 4(RA) - | ins_next2 - |.endif - break; - case BC_KNUM: - | // RA = dst*8, RD = num_const*8 - | ins_next1 - | lfdx f0, KBASE, RD - | stfdx f0, BASE, RA - | ins_next2 - break; - case BC_KPRI: - | // RA = dst*8, RD = primitive_type*8 (~) - | srwi TMP1, RD, 3 - | not TMP0, TMP1 - | ins_next1 - | stwx TMP0, BASE, RA - | ins_next2 - break; - case BC_KNIL: - | // RA = base*8, RD = end*8 - | stwx TISNIL, BASE, RA - | addi RA, RA, 8 - |1: - | stwx TISNIL, BASE, RA - | cmpw RA, RD - | addi RA, RA, 8 - | blt <1 - | ins_next_ - break; - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - | // RA = dst*8, RD = uvnum*8 - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | srwi RD, RD, 1 - | addi RD, RD, offsetof(GCfuncL, uvptr) - | lwzx UPVAL:RB, LFUNC:RB, RD - | ins_next1 - | lwz TMP1, UPVAL:RB->v - | lfd f0, 0(TMP1) - | stfdx f0, BASE, RA - | ins_next2 - break; - case BC_USETV: - | // RA = uvnum*8, RD = src*8 - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | srwi RA, RA, 1 - | addi RA, RA, offsetof(GCfuncL, uvptr) - | lfdux f0, RD, BASE - | lwzx UPVAL:RB, LFUNC:RB, RA - | lbz TMP3, UPVAL:RB->marked - | lwz CARG2, UPVAL:RB->v - | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) - | lbz TMP0, UPVAL:RB->closed - | lwz TMP2, 0(RD) - | stfd f0, 0(CARG2) - | cmplwi cr1, TMP0, 0 - | lwz TMP1, 4(RD) - | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq - | subi TMP2, TMP2, (LJ_TNUMX+1) - | bne >2 // Upvalue is closed and black? - |1: - | ins_next - | - |2: // Check if new value is collectable. - | cmplwi TMP2, LJ_TISGCV - (LJ_TNUMX+1) - | bge <1 // tvisgcv(v) - | lbz TMP3, GCOBJ:TMP1->gch.marked - | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(v) - | la CARG1, GG_DISP2G(DISPATCH) - | // Crossed a write barrier. Move the barrier forward. - | beq <1 - | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | b <1 - break; - case BC_USETS: - | // RA = uvnum*8, RD = str_const*8 (~) - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | srwi TMP1, RD, 1 - | srwi RA, RA, 1 - | subfic TMP1, TMP1, -4 - | addi RA, RA, offsetof(GCfuncL, uvptr) - | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4 - | lwzx UPVAL:RB, LFUNC:RB, RA - | lbz TMP3, UPVAL:RB->marked - | lwz CARG2, UPVAL:RB->v - | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) - | lbz TMP3, STR:TMP1->marked - | lbz TMP2, UPVAL:RB->closed - | li TMP0, LJ_TSTR - | stw STR:TMP1, 4(CARG2) - | stw TMP0, 0(CARG2) - | bne >2 - |1: - | ins_next - | - |2: // Check if string is white and ensure upvalue is closed. - | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(str) - | cmplwi cr1, TMP2, 0 - | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq - | la CARG1, GG_DISP2G(DISPATCH) - | // Crossed a write barrier. Move the barrier forward. 
- | beq <1 - | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | b <1 - break; - case BC_USETN: - | // RA = uvnum*8, RD = num_const*8 - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | srwi RA, RA, 1 - | addi RA, RA, offsetof(GCfuncL, uvptr) - | lfdx f0, KBASE, RD - | lwzx UPVAL:RB, LFUNC:RB, RA - | ins_next1 - | lwz TMP1, UPVAL:RB->v - | stfd f0, 0(TMP1) - | ins_next2 - break; - case BC_USETP: - | // RA = uvnum*8, RD = primitive_type*8 (~) - | lwz LFUNC:RB, FRAME_FUNC(BASE) - | srwi RA, RA, 1 - | srwi TMP0, RD, 3 - | addi RA, RA, offsetof(GCfuncL, uvptr) - | not TMP0, TMP0 - | lwzx UPVAL:RB, LFUNC:RB, RA - | ins_next1 - | lwz TMP1, UPVAL:RB->v - | stw TMP0, 0(TMP1) - | ins_next2 - break; - - case BC_UCLO: - | // RA = level*8, RD = target - | lwz TMP1, L->openupval - | branch_RD // Do this first since RD is not saved. - | stp BASE, L->base - | cmplwi TMP1, 0 - | mr CARG1, L - | beq >1 - | add CARG2, BASE, RA - | bl extern lj_func_closeuv // (lua_State *L, TValue *level) - | lp BASE, L->base - |1: - | ins_next - break; - - case BC_FNEW: - | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype) - | srwi TMP1, RD, 1 - | stp BASE, L->base - | subfic TMP1, TMP1, -4 - | stw PC, SAVE_PC - | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4 - | mr CARG1, L - | lwz CARG3, FRAME_FUNC(BASE) - | // (lua_State *L, GCproto *pt, GCfuncL *parent) - | bl extern lj_func_newL_gc - | // Returns GCfuncL *. - | lp BASE, L->base - | li TMP0, LJ_TFUNC - | stwux TMP0, RA, BASE - | stw LFUNC:CRET1, 4(RA) - | ins_next - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_TNEW: - case BC_TDUP: - | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~) - | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH) - | mr CARG1, L - | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) - | stp BASE, L->base - | cmplw TMP0, TMP1 - | stw PC, SAVE_PC - | bge >5 - |1: - if (op == BC_TNEW) { - | rlwinm CARG2, RD, 29, 21, 31 - | rlwinm CARG3, RD, 18, 27, 31 - | cmpwi CARG2, 0x7ff; beq >3 - |2: - | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) - | // Returns Table *. - } else { - | srwi TMP1, RD, 1 - | subfic TMP1, TMP1, -4 - | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4 - | bl extern lj_tab_dup // (lua_State *L, Table *kt) - | // Returns Table *. - } - | lp BASE, L->base - | li TMP0, LJ_TTAB - | stwux TMP0, RA, BASE - | stw TAB:CRET1, 4(RA) - | ins_next - if (op == BC_TNEW) { - |3: - | li CARG2, 0x801 - | b <2 - } - |5: - | mr SAVE0, RD - | bl extern lj_gc_step_fixtop // (lua_State *L) - | mr RD, SAVE0 - | mr CARG1, L - | b <1 - break; - - case BC_GGET: - | // RA = dst*8, RD = str_const*8 (~) - case BC_GSET: - | // RA = src*8, RD = str_const*8 (~) - | lwz LFUNC:TMP2, FRAME_FUNC(BASE) - | srwi TMP1, RD, 1 - | lwz TAB:RB, LFUNC:TMP2->env - | subfic TMP1, TMP1, -4 - | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 - if (op == BC_GGET) { - | b ->BC_TGETS_Z - } else { - | b ->BC_TSETS_Z - } - break; - - case BC_TGETV: - | // RA = dst*8, RB = table*8, RC = key*8 - | lwzux CARG1, RB, BASE - | lwzux CARG2, RC, BASE - | lwz TAB:RB, 4(RB) - |.if DUALNUM - | lwz RC, 4(RC) - |.else - | lfd f0, 0(RC) - |.endif - | checktab CARG1 - | checknum cr1, CARG2 - | bne ->vmeta_tgetv - |.if DUALNUM - | lwz TMP0, TAB:RB->asize - | bne cr1, >5 - | lwz TMP1, TAB:RB->array - | cmplw TMP0, RC - | slwi TMP2, RC, 3 - |.else - | bge cr1, >5 - | // Convert number key to integer, check for integerness and range. 
- | fctiwz f1, f0
- | fadd f2, f0, TOBIT
- | stfd f1, TMPD
- | lwz TMP0, TAB:RB->asize
- | fsub f2, f2, TOBIT
- | lwz TMP2, TMPD_LO
- | lwz TMP1, TAB:RB->array
- | fcmpu cr1, f0, f2
- | cmplw cr0, TMP0, TMP2
- | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
- | slwi TMP2, TMP2, 3
- |.endif
- | ble ->vmeta_tgetv // Integer key and in array part?
- | lwzx TMP0, TMP1, TMP2
- | lfdx f14, TMP1, TMP2
- | checknil TMP0; beq >2
- |1:
- | ins_next1
- | stfdx f14, BASE, RA
- | ins_next2
- |
- |2: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP0, TAB:TMP2->nomm
- | andix. TMP0, TMP0, 1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetv
- |
- |5:
- | checkstr CARG2; bne ->vmeta_tgetv
- |.if not DUALNUM
- | lwz STR:RC, 4(RC)
- |.endif
- | b ->BC_TGETS_Z // String key?
- break;
- case BC_TGETS:
- | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
- | lwzux CARG1, RB, BASE
- | srwi TMP1, RC, 1
- | lwz TAB:RB, 4(RB)
- | subfic TMP1, TMP1, -4
- | checktab CARG1
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- | bne ->vmeta_tgets1
- |->BC_TGETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
- | lwz TMP0, TAB:RB->hmask
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | lwz CARG1, NODE:TMP2->key
- | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
- | lwz CARG2, NODE:TMP2->val
- | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
- | checkstr CARG1; bne >4
- | cmpw TMP0, STR:RC; bne >4
- | checknil CARG2; beq >5 // Key found, but nil value?
- |3:
- | stwux CARG2, RA, BASE
- | stw TMP1, 4(RA)
- | ins_next
- |
- |4: // Follow hash chain.
- | lwz NODE:TMP2, NODE:TMP2->next
- | cmplwi NODE:TMP2, 0
- | bne <1
- | // End of hash chain: key not found, nil result.
- | li CARG2, LJ_TNIL
- |
- |5: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <3 // No metatable: done.
- | lbz TMP0, TAB:TMP2->nomm
- | andix. TMP0, TMP0, 1<<MM_index
- | bne <3 // 'no __index' flag set: done.
- | b ->vmeta_tgets
- break;
- case BC_TGETB:
- | // RA = dst*8, RB = table*8, RC = index*8
- | lwzux CARG1, RB, BASE
- | srwi TMP0, RC, 3
- | lwz TAB:RB, 4(RB)
- | checktab CARG1; bne ->vmeta_tgetb
- | lwz TMP1, TAB:RB->asize
- | lwz TMP2, TAB:RB->array
- | cmplw TMP0, TMP1; bge ->vmeta_tgetb
- | lwzx TMP1, TMP2, RC
- | lfdx f0, TMP2, RC
- | checknil TMP1; beq >5
- |1:
- | ins_next1
- | stfdx f0, BASE, RA
- | ins_next2
- |
- |5: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP2, TAB:TMP2->nomm
- | andix. TMP2, TMP2, 1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetb // Caveat: preserve TMP0!
- break;
- case BC_TGETR:
- | // RA = dst*8, RB = table*8, RC = key*8
- | add RB, BASE, RB
- | lwz TAB:CARG1, 4(RB)
- |.if DUALNUM
- | add RC, BASE, RC
- | lwz TMP0, TAB:CARG1->asize
- | lwz CARG2, 4(RC)
- | lwz TMP1, TAB:CARG1->array
- |.else
- | lfdx f0, BASE, RC
- | lwz TMP0, TAB:CARG1->asize
- | toint CARG2, f0
- | lwz TMP1, TAB:CARG1->array
- |.endif
- | cmplw TMP0, CARG2
- | slwi TMP2, CARG2, 3
- | ble ->vmeta_tgetr // In array part?
- | lfdx f14, TMP1, TMP2
- |->BC_TGETR_Z:
- | ins_next1
- | stfdx f14, BASE, RA
- | ins_next2
- break;
-
- case BC_TSETV:
- | // RA = src*8, RB = table*8, RC = key*8
- | lwzux CARG1, RB, BASE
- | lwzux CARG2, RC, BASE
- | lwz TAB:RB, 4(RB)
- |.if DUALNUM
- | lwz RC, 4(RC)
- |.else
- | lfd f0, 0(RC)
- |.endif
- | checktab CARG1
- | checknum cr1, CARG2
- | bne ->vmeta_tsetv
- |.if DUALNUM
- | lwz TMP0, TAB:RB->asize
- | bne cr1, >5
- | lwz TMP1, TAB:RB->array
- | cmplw TMP0, RC
- | slwi TMP0, RC, 3
- |.else
- | bge cr1, >5
- | // Convert number key to integer, check for integerness and range.
- | fctiwz f1, f0
- | fadd f2, f0, TOBIT
- | stfd f1, TMPD
- | lwz TMP0, TAB:RB->asize
- | fsub f2, f2, TOBIT
- | lwz TMP2, TMPD_LO
- | lwz TMP1, TAB:RB->array
- | fcmpu cr1, f0, f2
- | cmplw cr0, TMP0, TMP2
- | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
- | slwi TMP0, TMP2, 3
- |.endif
- | ble ->vmeta_tsetv // Integer key and in array part?
- | lwzx TMP2, TMP1, TMP0
- | lbz TMP3, TAB:RB->marked
- | lfdx f14, BASE, RA
- | checknil TMP2; beq >3
- |1:
- | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
- | stfdx f14, TMP1, TMP0
- | bne >7
- |2:
- | ins_next
- |
- |3: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP2, TAB:TMP2->nomm
- | andix. TMP2, TMP2, 1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | b ->vmeta_tsetv
- |
- |5:
- | checkstr CARG2; bne ->vmeta_tsetv
- |.if not DUALNUM
- | lwz STR:RC, 4(RC)
- |.endif
- | b ->BC_TSETS_Z // String key?
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <2
- break;
- case BC_TSETS:
- | // RA = src*8, RB = table*8, RC = str_const*8 (~)
- | lwzux CARG1, RB, BASE
- | srwi TMP1, RC, 1
- | lwz TAB:RB, 4(RB)
- | subfic TMP1, TMP1, -4
- | checktab CARG1
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- | bne ->vmeta_tsets1
- |->BC_TSETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
- | lwz TMP0, TAB:RB->hmask
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | lfdx f14, BASE, RA
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | lbz TMP3, TAB:RB->marked
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | lwz CARG1, NODE:TMP2->key
- | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
- | lwz CARG2, NODE:TMP2->val
- | lwz NODE:TMP1, NODE:TMP2->next
- | checkstr CARG1; bne >5
- | cmpw TMP0, STR:RC; bne >5
- | checknil CARG2; beq >4 // Key found, but nil value?
- |2:
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | stfd f14, NODE:TMP2->val
- | bne >7
- |3:
- | ins_next
- |
- |4: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP1, TAB:RB->metatable
- | cmplwi TAB:TMP1, 0
- | beq <2 // No metatable: done.
- | lbz TMP0, TAB:TMP1->nomm
- | andix. TMP0, TMP0, 1<<MM_newindex
- | bne <2 // 'no __newindex' flag set: done.
- | b ->vmeta_tsets
- |
- |5: // Follow hash chain.
- | cmplwi NODE:TMP1, 0
- | mr NODE:TMP2, NODE:TMP1
- | bne <1
- | // End of hash chain: key not found, add a new one.
- |
- | // But check for __newindex first.
- | lwz TAB:TMP1, TAB:RB->metatable
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | stw PC, SAVE_PC
- | mr CARG1, L
- | cmplwi TAB:TMP1, 0
- | stp BASE, L->base
- | beq >6 // No metatable: continue.
- | lbz TMP0, TAB:TMP1->nomm
- | andix. TMP0, TMP0, 1<<MM_newindex
- | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
- |6:
- | li TMP0, LJ_TSTR
- | stw STR:RC, 4(CARG3)
- | mr CARG2, TAB:RB
- | stw TMP0, 0(CARG3)
- | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
- | // Returns TValue *.
- | lp BASE, L->base
- | stfd f14, 0(CRET1)
- | b <3 // No 2nd write barrier needed.
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <3
- break;
- case BC_TSETB:
- | // RA = src*8, RB = table*8, RC = index*8
- | lwzux CARG1, RB, BASE
- | srwi TMP0, RC, 3
- | lwz TAB:RB, 4(RB)
- | checktab CARG1; bne ->vmeta_tsetb
- | lwz TMP1, TAB:RB->asize
- | lwz TMP2, TAB:RB->array
- | lbz TMP3, TAB:RB->marked
- | cmplw TMP0, TMP1
- | lfdx f14, BASE, RA
- | bge ->vmeta_tsetb
- | lwzx TMP1, TMP2, RC
- | checknil TMP1; beq >5
- |1:
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | stfdx f14, TMP2, RC
- | bne >7
- |2:
- | ins_next
- |
- |5: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP1, TAB:RB->metatable
- | cmplwi TAB:TMP1, 0
- | beq <1 // No metatable: done.
- | lbz TMP1, TAB:TMP1->nomm
- | andix. TMP1, TMP1, 1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | b ->vmeta_tsetb // Caveat: preserve TMP0!
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <2
- break;
- case BC_TSETR:
- | // RA = dst*8, RB = table*8, RC = key*8
- | add RB, BASE, RB
- | lwz TAB:CARG2, 4(RB)
- |.if DUALNUM
- | add RC, BASE, RC
- | lbz TMP3, TAB:CARG2->marked
- | lwz TMP0, TAB:CARG2->asize
- | lwz CARG3, 4(RC)
- | lwz TMP1, TAB:CARG2->array
- |.else
- | lfdx f0, BASE, RC
- | lbz TMP3, TAB:CARG2->marked
- | lwz TMP0, TAB:CARG2->asize
- | toint CARG3, f0
- | lwz TMP1, TAB:CARG2->array
- |.endif
- | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
- | bne >7
- |2:
- | cmplw TMP0, CARG3
- | slwi TMP2, CARG3, 3
- | lfdx f14, BASE, RA
- | ble ->vmeta_tsetr // In array part?
- | ins_next1
- | stfdx f14, TMP1, TMP2
- | ins_next2
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:CARG2, TMP3, TMP2
- | b <2
- break;
-
-
- case BC_TSETM:
- | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
- | add RA, BASE, RA
- |1:
- | add TMP3, KBASE, RD
- | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
- | addic. TMP0, MULTRES, -8
- | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
- | srwi CARG3, TMP0, 3
- | beq >4 // Nothing to copy?
- | add CARG3, CARG3, TMP3
- | lwz TMP2, TAB:CARG2->asize
- | slwi TMP1, TMP3, 3
- | lbz TMP3, TAB:CARG2->marked
- | cmplw CARG3, TMP2
- | add TMP2, RA, TMP0
- | lwz TMP0, TAB:CARG2->array
- | bgt >5
- | add TMP1, TMP1, TMP0
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- |3: // Copy result slots to table.
- | lfd f0, 0(RA)
- | addi RA, RA, 8
- | cmpw cr1, RA, TMP2
- | stfd f0, 0(TMP1)
- | addi TMP1, TMP1, 8
- | blt cr1, <3
- | bne >7
- |4:
- | ins_next
- |
- |5: // Need to resize array part.
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | mr SAVE0, RD
- | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
- | // Must not reallocate the stack.
- | mr RD, SAVE0
- | b <1
- |
- |7: // Possible table write barrier for any value. Skip valiswhite check.
- | barrierback TAB:CARG2, TMP3, TMP0
- | b <4
- break;
-
- /* -- Calls and vararg handling ----------------------------------------- */
-
- case BC_CALLM:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
- | add NARGS8:RC, NARGS8:RC, MULTRES
- | // Fall through. Assumes BC_CALL follows.
- break; - case BC_CALL: - | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8 - | mr TMP2, BASE - | lwzux TMP0, BASE, RA - | lwz LFUNC:RB, 4(BASE) - | subi NARGS8:RC, NARGS8:RC, 8 - | addi BASE, BASE, 8 - | checkfunc TMP0; bne ->vmeta_call - | ins_call - break; - - case BC_CALLMT: - | // RA = base*8, (RB = 0,) RC = extra_nargs*8 - | add NARGS8:RC, NARGS8:RC, MULTRES - | // Fall through. Assumes BC_CALLT follows. - break; - case BC_CALLT: - | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 - | lwzux TMP0, RA, BASE - | lwz LFUNC:RB, 4(RA) - | subi NARGS8:RC, NARGS8:RC, 8 - | lwz TMP1, FRAME_PC(BASE) - | checkfunc TMP0 - | addi RA, RA, 8 - | bne ->vmeta_callt - |->BC_CALLT_Z: - | andix. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand. - | lbz TMP3, LFUNC:RB->ffid - | xori TMP2, TMP1, FRAME_VARG - | cmplwi cr1, NARGS8:RC, 0 - | bne >7 - |1: - | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC. - | li TMP2, 0 - | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function? - | beq cr1, >3 - |2: - | addi TMP3, TMP2, 8 - | lfdx f0, RA, TMP2 - | cmplw cr1, TMP3, NARGS8:RC - | stfdx f0, BASE, TMP2 - | mr TMP2, TMP3 - | bne cr1, <2 - |3: - | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt - | beq >5 - |4: - | ins_callt - | - |5: // Tailcall to a fast function with a Lua frame below. - | lwz INS, -4(TMP1) - | decode_RA8 RA, INS - | sub TMP1, BASE, RA - | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1) - | lwz TMP1, LFUNC:TMP1->pc - | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE. - | b <4 - | - |7: // Tailcall from a vararg function. - | andix. TMP0, TMP2, FRAME_TYPEP - | bne <1 // Vararg frame below? - | sub BASE, BASE, TMP2 // Relocate BASE down. - | lwz TMP1, FRAME_PC(BASE) - | andix. TMP0, TMP1, FRAME_TYPE - | b <1 - break; - - case BC_ITERC: - | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8)) - | mr TMP2, BASE - | add BASE, BASE, RA - | lwz TMP1, -24(BASE) - | lwz LFUNC:RB, -20(BASE) - | lfd f1, -8(BASE) - | lfd f0, -16(BASE) - | stw TMP1, 0(BASE) // Copy callable. - | stw LFUNC:RB, 4(BASE) - | checkfunc TMP1 - | stfd f1, 16(BASE) // Copy control var. - | li NARGS8:RC, 16 // Iterators get 2 arguments. - | stfdu f0, 8(BASE) // Copy state. - | bne ->vmeta_call - | ins_call - break; - - case BC_ITERN: - | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8) - |.if JIT - | // NYI: add hotloop, record BC_ITERN. - |.endif - | add RA, BASE, RA - | lwz TAB:RB, -12(RA) - | lwz RC, -4(RA) // Get index from control var. - | lwz TMP0, TAB:RB->asize - | lwz TMP1, TAB:RB->array - | addi PC, PC, 4 - |1: // Traverse array part. - | cmplw RC, TMP0 - | slwi TMP3, RC, 3 - | bge >5 // Index points after array part? - | lwzx TMP2, TMP1, TMP3 - | lfdx f0, TMP1, TMP3 - | checknil TMP2 - | lwz INS, -4(PC) - | beq >4 - |.if DUALNUM - | stw RC, 4(RA) - | stw TISNUM, 0(RA) - |.else - | tonum_u f1, RC - |.endif - | addi RC, RC, 1 - | addis TMP3, PC, -(BCBIAS_J*4 >> 16) - | stfd f0, 8(RA) - | decode_RD4 TMP1, INS - | stw RC, -4(RA) // Update control var. - | add PC, TMP1, TMP3 - |.if not DUALNUM - | stfd f1, 0(RA) - |.endif - |3: - | ins_next - | - |4: // Skip holes in array part. - | addi RC, RC, 1 - | b <1 - | - |5: // Traverse hash part. - | lwz TMP1, TAB:RB->hmask - | sub RC, RC, TMP0 - | lwz TMP2, TAB:RB->node - |6: - | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1. 
- | slwi TMP3, RC, 5 - | bgty <3 - | slwi RB, RC, 3 - | sub TMP3, TMP3, RB - | lwzx RB, TMP2, TMP3 - | lfdx f0, TMP2, TMP3 - | add NODE:TMP3, TMP2, TMP3 - | checknil RB - | lwz INS, -4(PC) - | beq >7 - | lfd f1, NODE:TMP3->key - | addis TMP2, PC, -(BCBIAS_J*4 >> 16) - | stfd f0, 8(RA) - | add RC, RC, TMP0 - | decode_RD4 TMP1, INS - | stfd f1, 0(RA) - | addi RC, RC, 1 - | add PC, TMP1, TMP2 - | stw RC, -4(RA) // Update control var. - | b <3 - | - |7: // Skip holes in hash part. - | addi RC, RC, 1 - | b <6 - break; - - case BC_ISNEXT: - | // RA = base*8, RD = target (points to ITERN) - | add RA, BASE, RA - | lwz TMP0, -24(RA) - | lwz CFUNC:TMP1, -20(RA) - | lwz TMP2, -16(RA) - | lwz TMP3, -8(RA) - | cmpwi cr0, TMP2, LJ_TTAB - | cmpwi cr1, TMP0, LJ_TFUNC - | cmpwi cr6, TMP3, LJ_TNIL - | bne cr1, >5 - | lbz TMP1, CFUNC:TMP1->ffid - | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq - | cmpwi cr7, TMP1, FF_next_N - | srwi TMP0, RD, 1 - | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq - | add TMP3, PC, TMP0 - | bne cr0, >5 - | lus TMP1, 0xfffe - | ori TMP1, TMP1, 0x7fff - | stw ZERO, -4(RA) // Initialize control var. - | stw TMP1, -8(RA) - | addis PC, TMP3, -(BCBIAS_J*4 >> 16) - |1: - | ins_next - |5: // Despecialize bytecode if any of the checks fail. - | li TMP0, BC_JMP - | li TMP1, BC_ITERC - | stb TMP0, -1(PC) - | addis PC, TMP3, -(BCBIAS_J*4 >> 16) - | stb TMP1, 3(PC) - | b <1 - break; - - case BC_VARG: - | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 - | lwz TMP0, FRAME_PC(BASE) - | add RC, BASE, RC - | add RA, BASE, RA - | addi RC, RC, FRAME_VARG - | add TMP2, RA, RB - | subi TMP3, BASE, 8 // TMP3 = vtop - | sub RC, RC, TMP0 // RC = vbase - | // Note: RC may now be even _above_ BASE if nargs was < numparams. - | cmplwi cr1, RB, 0 - |.if PPE - | sub TMP1, TMP3, RC - | cmpwi TMP1, 0 - |.else - | sub. TMP1, TMP3, RC - |.endif - | beq cr1, >5 // Copy all varargs? - | subi TMP2, TMP2, 16 - | ble >2 // No vararg slots? - |1: // Copy vararg slots to destination slots. - | lfd f0, 0(RC) - | addi RC, RC, 8 - | stfd f0, 0(RA) - | cmplw RA, TMP2 - | cmplw cr1, RC, TMP3 - | bge >3 // All destination slots filled? - | addi RA, RA, 8 - | blt cr1, <1 // More vararg slots? - |2: // Fill up remainder with nil. - | stw TISNIL, 0(RA) - | cmplw RA, TMP2 - | addi RA, RA, 8 - | blt <2 - |3: - | ins_next - | - |5: // Copy all varargs. - | lwz TMP0, L->maxstack - | li MULTRES, 8 // MULTRES = (0+1)*8 - | bley <3 // No vararg slots? - | add TMP2, RA, TMP1 - | cmplw TMP2, TMP0 - | addi MULTRES, TMP1, 8 - | bgt >7 - |6: - | lfd f0, 0(RC) - | addi RC, RC, 8 - | stfd f0, 0(RA) - | cmplw RC, TMP3 - | addi RA, RA, 8 - | blt <6 // More vararg slots? - | b <3 - | - |7: // Grow stack for varargs. - | mr CARG1, L - | stp RA, L->top - | sub SAVE0, RC, BASE // Need delta, because BASE may change. - | stp BASE, L->base - | sub RA, RA, BASE - | stw PC, SAVE_PC - | srwi CARG2, TMP1, 3 - | bl extern lj_state_growstack // (lua_State *L, int n) - | lp BASE, L->base - | add RA, BASE, RA - | add RC, BASE, SAVE0 - | subi TMP3, BASE, 8 - | b <6 - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - | // RA = results*8, RD = extra_nresults*8 - | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8. - | // Fall through. Assumes BC_RET follows. - break; - - case BC_RET: - | // RA = results*8, RD = (nresults+1)*8 - | lwz PC, FRAME_PC(BASE) - | add RA, BASE, RA - | mr MULTRES, RD - |1: - | andix. 
TMP0, PC, FRAME_TYPE - | xori TMP1, PC, FRAME_VARG - | bne ->BC_RETV_Z - | - |->BC_RET_Z: - | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return - | lwz INS, -4(PC) - | cmpwi RD, 8 - | subi TMP2, BASE, 8 - | subi RC, RD, 8 - | decode_RB8 RB, INS - | beq >3 - | li TMP1, 0 - |2: - | addi TMP3, TMP1, 8 - | lfdx f0, RA, TMP1 - | cmpw TMP3, RC - | stfdx f0, TMP2, TMP1 - | beq >3 - | addi TMP1, TMP3, 8 - | lfdx f1, RA, TMP3 - | cmpw TMP1, RC - | stfdx f1, TMP2, TMP3 - | bne <2 - |3: - |5: - | cmplw RB, RD - | decode_RA8 RA, INS - | bgt >6 - | sub BASE, TMP2, RA - | lwz LFUNC:TMP1, FRAME_FUNC(BASE) - | ins_next1 - | lwz TMP1, LFUNC:TMP1->pc - | lwz KBASE, PC2PROTO(k)(TMP1) - | ins_next2 - | - |6: // Fill up results with nil. - | subi TMP1, RD, 8 - | addi RD, RD, 8 - | stwx TISNIL, TMP2, TMP1 - | b <5 - | - |->BC_RETV_Z: // Non-standard return case. - | andix. TMP2, TMP1, FRAME_TYPEP - | bne ->vm_return - | // Return from vararg function: relocate BASE down. - | sub BASE, BASE, TMP1 - | lwz PC, FRAME_PC(BASE) - | b <1 - break; - - case BC_RET0: case BC_RET1: - | // RA = results*8, RD = (nresults+1)*8 - | lwz PC, FRAME_PC(BASE) - | add RA, BASE, RA - | mr MULTRES, RD - | andix. TMP0, PC, FRAME_TYPE - | xori TMP1, PC, FRAME_VARG - | bney ->BC_RETV_Z - | - | lwz INS, -4(PC) - | subi TMP2, BASE, 8 - | decode_RB8 RB, INS - if (op == BC_RET1) { - | lfd f0, 0(RA) - | stfd f0, 0(TMP2) - } - |5: - | cmplw RB, RD - | decode_RA8 RA, INS - | bgt >6 - | sub BASE, TMP2, RA - | lwz LFUNC:TMP1, FRAME_FUNC(BASE) - | ins_next1 - | lwz TMP1, LFUNC:TMP1->pc - | lwz KBASE, PC2PROTO(k)(TMP1) - | ins_next2 - | - |6: // Fill up results with nil. - | subi TMP1, RD, 8 - | addi RD, RD, 8 - | stwx TISNIL, TMP2, TMP1 - | b <5 - break; - - /* -- Loops and branches ------------------------------------------------ */ - - case BC_FORL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IFORL follows. - break; - - case BC_JFORI: - case BC_JFORL: -#if !LJ_HASJIT - break; -#endif - case BC_FORI: - case BC_IFORL: - | // RA = base*8, RD = target (after end of loop or start of loop) - vk = (op == BC_IFORL || op == BC_JFORL); - |.if DUALNUM - | // Integer loop. - | lwzux TMP1, RA, BASE - | lwz CARG1, FORL_IDX*8+4(RA) - | cmplw cr0, TMP1, TISNUM - if (vk) { - | lwz CARG3, FORL_STEP*8+4(RA) - | bne >9 - |.if GPR64 - | // Need to check overflow for (a<<32) + (b<<32). - | rldicr TMP0, CARG1, 32, 31 - | rldicr TMP2, CARG3, 32, 31 - | add CARG1, CARG1, CARG3 - | addo. TMP0, TMP0, TMP2 - |.else - | addo. CARG1, CARG1, CARG3 - |.endif - | cmpwi cr6, CARG3, 0 - | lwz CARG2, FORL_STOP*8+4(RA) - | bso >6 - |4: - | stw CARG1, FORL_IDX*8+4(RA) - } else { - | lwz TMP3, FORL_STEP*8(RA) - | lwz CARG3, FORL_STEP*8+4(RA) - | lwz TMP2, FORL_STOP*8(RA) - | lwz CARG2, FORL_STOP*8+4(RA) - | cmplw cr7, TMP3, TISNUM - | cmplw cr1, TMP2, TISNUM - | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq - | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq - | cmpwi cr6, CARG3, 0 - | bne >9 - } - | blt cr6, >5 - | cmpw CARG1, CARG2 - |1: - | stw TISNUM, FORL_EXT*8(RA) - if (op != BC_JFORL) { - | srwi RD, RD, 1 - } - | stw CARG1, FORL_EXT*8+4(RA) - if (op != BC_JFORL) { - | add RD, PC, RD - } - if (op == BC_FORI) { - | bgt >3 // See FP loop below. - } else if (op == BC_JFORI) { - | addis PC, RD, -(BCBIAS_J*4 >> 16) - | bley >7 - } else if (op == BC_IFORL) { - | bgt >2 - | addis PC, RD, -(BCBIAS_J*4 >> 16) - } else { - | bley =>BC_JLOOP - } - |2: - | ins_next - |5: // Invert check for negative step. - | cmpw CARG2, CARG1 - | b <1 - if (vk) { - |6: // Potential overflow. 
- | checkov TMP0, <4 // Ignore unrelated overflow. - | b <2 - } - |.endif - if (vk) { - |.if DUALNUM - |9: // FP loop. - | lfd f1, FORL_IDX*8(RA) - |.else - | lfdux f1, RA, BASE - |.endif - | lfd f3, FORL_STEP*8(RA) - | lfd f2, FORL_STOP*8(RA) - | lwz TMP3, FORL_STEP*8(RA) - | fadd f1, f1, f3 - | stfd f1, FORL_IDX*8(RA) - } else { - |.if DUALNUM - |9: // FP loop. - |.else - | lwzux TMP1, RA, BASE - | lwz TMP3, FORL_STEP*8(RA) - | lwz TMP2, FORL_STOP*8(RA) - | cmplw cr0, TMP1, TISNUM - | cmplw cr7, TMP3, TISNUM - | cmplw cr1, TMP2, TISNUM - |.endif - | lfd f1, FORL_IDX*8(RA) - | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt - | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt - | lfd f2, FORL_STOP*8(RA) - | bge ->vmeta_for - } - | cmpwi cr6, TMP3, 0 - if (op != BC_JFORL) { - | srwi RD, RD, 1 - } - | stfd f1, FORL_EXT*8(RA) - if (op != BC_JFORL) { - | add RD, PC, RD - } - | fcmpu cr0, f1, f2 - if (op == BC_JFORI) { - | addis PC, RD, -(BCBIAS_J*4 >> 16) - } - | blt cr6, >5 - if (op == BC_FORI) { - | bgt >3 - } else if (op == BC_IFORL) { - |.if DUALNUM - | bgty <2 - |.else - | bgt >2 - |.endif - |1: - | addis PC, RD, -(BCBIAS_J*4 >> 16) - } else if (op == BC_JFORI) { - | bley >7 - } else { - | bley =>BC_JLOOP - } - |.if DUALNUM - | b <2 - |.else - |2: - | ins_next - |.endif - |5: // Negative step. - if (op == BC_FORI) { - | bge <2 - |3: // Used by integer loop, too. - | addis PC, RD, -(BCBIAS_J*4 >> 16) - } else if (op == BC_IFORL) { - | bgey <1 - } else if (op == BC_JFORI) { - | bgey >7 - } else { - | bgey =>BC_JLOOP - } - | b <2 - if (op == BC_JFORI) { - |7: - | lwz INS, -4(PC) - | decode_RD8 RD, INS - | b =>BC_JLOOP - } - break; - - case BC_ITERL: - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_IITERL follows. - break; - - case BC_JITERL: -#if !LJ_HASJIT - break; -#endif - case BC_IITERL: - | // RA = base*8, RD = target - | lwzux TMP1, RA, BASE - | lwz TMP2, 4(RA) - | checknil TMP1; beq >1 // Stop if iterator returned nil. - if (op == BC_JITERL) { - | stw TMP1, -8(RA) - | stw TMP2, -4(RA) - | b =>BC_JLOOP - } else { - | branch_RD // Otherwise save control var + branch. - | stw TMP1, -8(RA) - | stw TMP2, -4(RA) - } - |1: - | ins_next - break; - - case BC_LOOP: - | // RA = base*8, RD = target (loop extent) - | // Note: RA/RD is only used by trace recorder to determine scope/extent - | // This opcode does NOT jump, it's only purpose is to detect a hot loop. - |.if JIT - | hotloop - |.endif - | // Fall through. Assumes BC_ILOOP follows. - break; - - case BC_ILOOP: - | // RA = base*8, RD = target (loop extent) - | ins_next - break; - - case BC_JLOOP: - |.if JIT - | // RA = base*8 (ignored), RD = traceno*8 - | lwz TMP1, DISPATCH_J(trace)(DISPATCH) - | srwi RD, RD, 1 - | // Traces on PPC don't store the trace number, so use 0. - | stw ZERO, DISPATCH_GL(vmstate)(DISPATCH) - | lwzx TRACE:TMP2, TMP1, RD - | clrso TMP1 - | lp TMP2, TRACE:TMP2->mcode - | stw BASE, DISPATCH_GL(jit_base)(DISPATCH) - | mtctr TMP2 - | addi JGL, DISPATCH, GG_DISP2G+32768 - | stw L, DISPATCH_GL(tmpbuf.L)(DISPATCH) - | bctr - |.endif - break; - - case BC_JMP: - | // RA = base*8 (only used by trace recorder), RD = target - | branch_RD - | ins_next - break; - - /* -- Function headers -------------------------------------------------- */ - - case BC_FUNCF: - |.if JIT - | hotcall - |.endif - case BC_FUNCV: /* NYI: compiled vararg functions. */ - | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. 
- break; - - case BC_JFUNCF: -#if !LJ_HASJIT - break; -#endif - case BC_IFUNCF: - | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 - | lwz TMP2, L->maxstack - | lbz TMP1, -4+PC2PROTO(numparams)(PC) - | lwz KBASE, -4+PC2PROTO(k)(PC) - | cmplw RA, TMP2 - | slwi TMP1, TMP1, 3 - | bgt ->vm_growstack_l - if (op != BC_JFUNCF) { - | ins_next1 - } - |2: - | cmplw NARGS8:RC, TMP1 // Check for missing parameters. - | blt >3 - if (op == BC_JFUNCF) { - | decode_RD8 RD, INS - | b =>BC_JLOOP - } else { - | ins_next2 - } - | - |3: // Clear missing parameters. - | stwx TISNIL, BASE, NARGS8:RC - | addi NARGS8:RC, NARGS8:RC, 8 - | b <2 - break; - - case BC_JFUNCV: -#if !LJ_HASJIT - break; -#endif - | NYI // NYI: compiled vararg functions - break; /* NYI: compiled vararg functions. */ - - case BC_IFUNCV: - | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 - | lwz TMP2, L->maxstack - | add TMP1, BASE, RC - | add TMP0, RA, RC - | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC. - | addi TMP3, RC, 8+FRAME_VARG - | lwz KBASE, -4+PC2PROTO(k)(PC) - | cmplw TMP0, TMP2 - | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG. - | bge ->vm_growstack_l - | lbz TMP2, -4+PC2PROTO(numparams)(PC) - | mr RA, BASE - | mr RC, TMP1 - | ins_next1 - | cmpwi TMP2, 0 - | addi BASE, TMP1, 8 - | beq >3 - |1: - | cmplw RA, RC // Less args than parameters? - | lwz TMP0, 0(RA) - | lwz TMP3, 4(RA) - | bge >4 - | stw TISNIL, 0(RA) // Clear old fixarg slot (help the GC). - | addi RA, RA, 8 - |2: - | addic. TMP2, TMP2, -1 - | stw TMP0, 8(TMP1) - | stw TMP3, 12(TMP1) - | addi TMP1, TMP1, 8 - | bne <1 - |3: - | ins_next2 - | - |4: // Clear missing parameters. - | li TMP0, LJ_TNIL - | b <2 - break; - - case BC_FUNCC: - case BC_FUNCCW: - | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8 - if (op == BC_FUNCC) { - | lp RD, CFUNC:RB->f - } else { - | lp RD, DISPATCH_GL(wrapf)(DISPATCH) - } - | add TMP1, RA, NARGS8:RC - | lwz TMP2, L->maxstack - | .toc lp TMP3, 0(RD) - | add RC, BASE, NARGS8:RC - | stp BASE, L->base - | cmplw TMP1, TMP2 - | stp RC, L->top - | li_vmstate C - |.if TOC - | mtctr TMP3 - |.else - | mtctr RD - |.endif - if (op == BC_FUNCCW) { - | lp CARG2, CFUNC:RB->f - } - | mr CARG1, L - | bgt ->vm_growstack_c // Need to grow stack. - | .toc lp TOCREG, TOC_OFS(RD) - | .tocenv lp ENVREG, ENV_OFS(RD) - | st_vmstate - | bctrl // (lua_State *L [, lua_CFunction f]) - | // Returns nresults. - | lp BASE, L->base - | .toc ld TOCREG, SAVE_TOC - | slwi RD, CRET1, 3 - | lp TMP1, L->top - | li_vmstate INTERP - | lwz PC, FRAME_PC(BASE) // Fetch PC of caller. - | stw L, DISPATCH_GL(cur_L)(DISPATCH) - | sub RA, TMP1, RD // RA = L->top - nresults*8 - | st_vmstate - | b ->vm_returnc - break; - - /* ---------------------------------------------------------------------- */ - - default: - fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); - exit(2); - break; - } -} - -static int build_backend(BuildCtx *ctx) -{ - int op; - - dasm_growpc(Dst, BC__MAX); - - build_subroutines(ctx); - - |.code_op - for (op = 0; op < BC__MAX; op++) - build_ins(ctx, (BCOp)op, op); - - return BC__MAX; -} - -/* Emit pseudo frame-info for all assembler functions. 
*/ -static void emit_asm_debug(BuildCtx *ctx) -{ - int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); - int i; - switch (ctx->mode) { - case BUILD_elfasm: - fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); - fprintf(ctx->fp, - ".Lframe0:\n" - "\t.long .LECIE0-.LSCIE0\n" - ".LSCIE0:\n" - "\t.long 0xffffffff\n" - "\t.byte 0x1\n" - "\t.string \"\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 65\n" - "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" - "\t.align 2\n" - ".LECIE0:\n\n"); - fprintf(ctx->fp, - ".LSFDE0:\n" - "\t.long .LEFDE0-.LASFDE0\n" - ".LASFDE0:\n" - "\t.long .Lframe0\n" - "\t.long .Lbegin\n" - "\t.long %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" - "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" - "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n", - fcofs, CFRAME_SIZE); - for (i = 14; i <= 31; i++) - fprintf(ctx->fp, - "\t.byte %d\n\t.uleb128 %d\n" - "\t.byte %d\n\t.uleb128 %d\n", - 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i)); - fprintf(ctx->fp, - "\t.align 2\n" - ".LEFDE0:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".LSFDE1:\n" - "\t.long .LEFDE1-.LASFDE1\n" - ".LASFDE1:\n" - "\t.long .Lframe0\n" -#if LJ_TARGET_PS3 - "\t.long .lj_vm_ffi_call\n" -#else - "\t.long lj_vm_ffi_call\n" -#endif - "\t.long %d\n" - "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" - "\t.byte 0x8e\n\t.uleb128 2\n" - "\t.byte 0xd\n\t.uleb128 0xe\n" - "\t.align 2\n" - ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); -#endif -#if !LJ_NO_UNWIND - fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); - fprintf(ctx->fp, - ".Lframe1:\n" - "\t.long .LECIE1-.LSCIE1\n" - ".LSCIE1:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zPR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 65\n" - "\t.uleb128 6\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.long lj_err_unwind_dwarf-.\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" - "\t.align 2\n" - ".LECIE1:\n\n"); - fprintf(ctx->fp, - ".LSFDE2:\n" - "\t.long .LEFDE2-.LASFDE2\n" - ".LASFDE2:\n" - "\t.long .LASFDE2-.Lframe1\n" - "\t.long .Lbegin-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 %d\n" - "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" - "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n", - fcofs, CFRAME_SIZE); - for (i = 14; i <= 31; i++) - fprintf(ctx->fp, - "\t.byte %d\n\t.uleb128 %d\n" - "\t.byte %d\n\t.uleb128 %d\n", - 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i)); - fprintf(ctx->fp, - "\t.align 2\n" - ".LEFDE2:\n\n"); -#if LJ_HASFFI - fprintf(ctx->fp, - ".Lframe2:\n" - "\t.long .LECIE2-.LSCIE2\n" - ".LSCIE2:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -4\n" - "\t.byte 65\n" - "\t.uleb128 1\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" - "\t.align 2\n" - ".LECIE2:\n\n"); - fprintf(ctx->fp, - ".LSFDE3:\n" - "\t.long .LEFDE3-.LASFDE3\n" - ".LASFDE3:\n" - "\t.long .LASFDE3-.Lframe2\n" - "\t.long lj_vm_ffi_call-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" - "\t.byte 0x8e\n\t.uleb128 2\n" - "\t.byte 0xd\n\t.uleb128 0xe\n" - "\t.align 2\n" - ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); -#endif -#endif - break; - default: - break; - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x64.dasc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x64.dasc deleted file mode 100644 index e7e990ae27f..00000000000 --- 
a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x64.dasc +++ /dev/null @@ -1,4902 +0,0 @@ -|// Low-level VM code for x64 CPUs in LJ_GC64 mode. -|// Bytecode interpreter, fast functions and helper functions. -|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h -| -|.arch x64 -|.section code_op, code_sub -| -|.actionlist build_actionlist -|.globals GLOB_ -|.globalnames globnames -|.externnames extnames -| -|//----------------------------------------------------------------------- -| -|.if WIN -|.define X64WIN, 1 // Windows/x64 calling conventions. -|.endif -| -|// Fixed register assignments for the interpreter. -|// This is very fragile and has many dependencies. Caveat emptor. -|.define BASE, rdx // Not C callee-save, refetched anyway. -|.if X64WIN -|.define KBASE, rdi // Must be C callee-save. -|.define PC, rsi // Must be C callee-save. -|.define DISPATCH, rbx // Must be C callee-save. -|.define KBASEd, edi -|.define PCd, esi -|.define DISPATCHd, ebx -|.else -|.define KBASE, r15 // Must be C callee-save. -|.define PC, rbx // Must be C callee-save. -|.define DISPATCH, r14 // Must be C callee-save. -|.define KBASEd, r15d -|.define PCd, ebx -|.define DISPATCHd, r14d -|.endif -| -|.define RA, rcx -|.define RAd, ecx -|.define RAH, ch -|.define RAL, cl -|.define RB, rbp // Must be rbp (C callee-save). -|.define RBd, ebp -|.define RC, rax // Must be rax. -|.define RCd, eax -|.define RCW, ax -|.define RCH, ah -|.define RCL, al -|.define OP, RBd -|.define RD, RC -|.define RDd, RCd -|.define RDW, RCW -|.define RDL, RCL -|.define TMPR, r10 -|.define TMPRd, r10d -|.define ITYPE, r11 -|.define ITYPEd, r11d -| -|.if X64WIN -|.define CARG1, rcx // x64/WIN64 C call arguments. -|.define CARG2, rdx -|.define CARG3, r8 -|.define CARG4, r9 -|.define CARG1d, ecx -|.define CARG2d, edx -|.define CARG3d, r8d -|.define CARG4d, r9d -|.else -|.define CARG1, rdi // x64/POSIX C call arguments. -|.define CARG2, rsi -|.define CARG3, rdx -|.define CARG4, rcx -|.define CARG5, r8 -|.define CARG6, r9 -|.define CARG1d, edi -|.define CARG2d, esi -|.define CARG3d, edx -|.define CARG4d, ecx -|.define CARG5d, r8d -|.define CARG6d, r9d -|.endif -| -|// Type definitions. Some of these are only used for documentation. -|.type L, lua_State -|.type GL, global_State -|.type TVALUE, TValue -|.type GCOBJ, GCobj -|.type STR, GCstr -|.type TAB, GCtab -|.type LFUNC, GCfuncL -|.type CFUNC, GCfuncC -|.type PROTO, GCproto -|.type UPVAL, GCupval -|.type NODE, Node -|.type NARGS, int -|.type TRACE, GCtrace -|.type SBUF, SBuf -| -|// Stack layout while in interpreter. Must match with lj_frame.h. -|//----------------------------------------------------------------------- -|.if X64WIN // x64/Windows stack layout -| -|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). -|.macro saveregs_ -| push rdi; push rsi; push rbx -| sub rsp, CFRAME_SPACE -|.endmacro -|.macro saveregs -| push rbp; saveregs_ -|.endmacro -|.macro restoreregs -| add rsp, CFRAME_SPACE -| pop rbx; pop rsi; pop rdi; pop rbp -|.endmacro -| -|.define SAVE_CFRAME, aword [rsp+aword*13] -|.define SAVE_PC, aword [rsp+aword*12] -|.define SAVE_L, aword [rsp+aword*11] -|.define SAVE_ERRF, dword [rsp+dword*21] -|.define SAVE_NRES, dword [rsp+dword*20] -|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter -|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. 
-|.define SAVE_R4, aword [rsp+aword*8] -|.define SAVE_R3, aword [rsp+aword*7] -|.define SAVE_R2, aword [rsp+aword*6] -|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. -|.define ARG5, aword [rsp+aword*4] -|.define CSAVE_4, aword [rsp+aword*3] -|.define CSAVE_3, aword [rsp+aword*2] -|.define CSAVE_2, aword [rsp+aword*1] -|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter. -|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee -| -|.define ARG5d, dword [rsp+dword*8] -|.define TMP1, ARG5 // TMP1 overlaps ARG5 -|.define TMP1d, ARG5d -|.define TMP1hi, dword [rsp+dword*9] -|.define MULTRES, TMP1d // MULTRES overlaps TMP1d. -| -|//----------------------------------------------------------------------- -|.else // x64/POSIX stack layout -| -|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). -|.macro saveregs_ -| push rbx; push r15; push r14 -|.if NO_UNWIND -| push r13; push r12 -|.endif -| sub rsp, CFRAME_SPACE -|.endmacro -|.macro saveregs -| push rbp; saveregs_ -|.endmacro -|.macro restoreregs -| add rsp, CFRAME_SPACE -|.if NO_UNWIND -| pop r12; pop r13 -|.endif -| pop r14; pop r15; pop rbx; pop rbp -|.endmacro -| -|//----- 16 byte aligned, -|.if NO_UNWIND -|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter. -|.define SAVE_R4, aword [rsp+aword*10] -|.define SAVE_R3, aword [rsp+aword*9] -|.define SAVE_R2, aword [rsp+aword*8] -|.define SAVE_R1, aword [rsp+aword*7] -|.define SAVE_RU2, aword [rsp+aword*6] -|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves. -|.else -|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. -|.define SAVE_R4, aword [rsp+aword*8] -|.define SAVE_R3, aword [rsp+aword*7] -|.define SAVE_R2, aword [rsp+aword*6] -|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. -|.endif -|.define SAVE_CFRAME, aword [rsp+aword*4] -|.define SAVE_PC, aword [rsp+aword*3] -|.define SAVE_L, aword [rsp+aword*2] -|.define SAVE_ERRF, dword [rsp+dword*3] -|.define SAVE_NRES, dword [rsp+dword*2] -|.define TMP1, aword [rsp] //<-- rsp while in interpreter. -|//----- 16 byte aligned -| -|.define TMP1d, dword [rsp] -|.define TMP1hi, dword [rsp+dword*1] -|.define MULTRES, TMP1d // MULTRES overlaps TMP1d. -| -|.endif -| -|//----------------------------------------------------------------------- -| -|// Instruction headers. -|.macro ins_A; .endmacro -|.macro ins_AD; .endmacro -|.macro ins_AJ; .endmacro -|.macro ins_ABC; movzx RBd, RCH; movzx RCd, RCL; .endmacro -|.macro ins_AB_; movzx RBd, RCH; .endmacro -|.macro ins_A_C; movzx RCd, RCL; .endmacro -|.macro ins_AND; not RD; .endmacro -| -|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster). -|.macro ins_NEXT -| mov RCd, [PC] -| movzx RAd, RCH -| movzx OP, RCL -| add PC, 4 -| shr RCd, 16 -| jmp aword [DISPATCH+OP*8] -|.endmacro -| -|// Instruction footer. -|.if 1 -| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. -| .define ins_next, ins_NEXT -| .define ins_next_, ins_NEXT -|.else -| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. -| // Affects only certain kinds of benchmarks (and only with -j off). -| // Around 10%-30% slower on Core2, a lot more slower on P4. -| .macro ins_next -| jmp ->ins_next -| .endmacro -| .macro ins_next_ -| ->ins_next: -| ins_NEXT -| .endmacro -|.endif -| -|// Call decode and dispatch. 
-|.macro ins_callt -| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-8] = PC -| mov PC, LFUNC:RB->pc -| mov RAd, [PC] -| movzx OP, RAL -| movzx RAd, RAH -| add PC, 4 -| jmp aword [DISPATCH+OP*8] -|.endmacro -| -|.macro ins_call -| // BASE = new base, RB = LFUNC, RD = nargs+1 -| mov [BASE-8], PC -| ins_callt -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Macros to clear or set tags. -|.macro cleartp, reg; shl reg, 17; shr reg, 17; .endmacro -|.macro settp, reg, tp -| mov64 ITYPE, ((int64_t)tp<<47) -| or reg, ITYPE -|.endmacro -|.macro settp, dst, reg, tp -| mov64 dst, ((int64_t)tp<<47) -| or dst, reg -|.endmacro -|.macro setint, reg -| settp reg, LJ_TISNUM -|.endmacro -|.macro setint, dst, reg -| settp dst, reg, LJ_TISNUM -|.endmacro -| -|// Macros to test operand types. -|.macro checktp_nc, reg, tp, target -| mov ITYPE, reg -| sar ITYPE, 47 -| cmp ITYPEd, tp -| jne target -|.endmacro -|.macro checktp, reg, tp, target -| mov ITYPE, reg -| cleartp reg -| sar ITYPE, 47 -| cmp ITYPEd, tp -| jne target -|.endmacro -|.macro checktptp, src, tp, target -| mov ITYPE, src -| sar ITYPE, 47 -| cmp ITYPEd, tp -| jne target -|.endmacro -|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro -|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro -|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro -| -|.macro checknumx, reg, target, jump -| mov ITYPE, reg -| sar ITYPE, 47 -| cmp ITYPEd, LJ_TISNUM -| jump target -|.endmacro -|.macro checkint, reg, target; checknumx reg, target, jne; .endmacro -|.macro checkinttp, src, target; checknumx src, target, jne; .endmacro -|.macro checknum, reg, target; checknumx reg, target, jae; .endmacro -|.macro checknumtp, src, target; checknumx src, target, jae; .endmacro -|.macro checknumber, src, target; checknumx src, target, ja; .endmacro -| -|.macro mov_false, reg; mov64 reg, (int64_t)~((uint64_t)1<<47); .endmacro -|.macro mov_true, reg; mov64 reg, (int64_t)~((uint64_t)2<<47); .endmacro -| -|// These operands must be used with movzx. -|.define PC_OP, byte [PC-4] -|.define PC_RA, byte [PC-3] -|.define PC_RB, byte [PC-1] -|.define PC_RC, byte [PC-2] -|.define PC_RD, word [PC-2] -| -|.macro branchPC, reg -| lea PC, [PC+reg*4-BCBIAS_J*4] -|.endmacro -| -|// Assumes DISPATCH is relative to GL. -#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) -#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) -| -#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) -| -|// Decrement hashed hotcount and trigger trace recorder if zero. -|.macro hotloop, reg -| mov reg, PCd -| shr reg, 1 -| and reg, HOTCOUNT_PCMASK -| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP -| jb ->vm_hotloop -|.endmacro -| -|.macro hotcall, reg -| mov reg, PCd -| shr reg, 1 -| and reg, HOTCOUNT_PCMASK -| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL -| jb ->vm_hotcall -|.endmacro -| -|// Set current VM state. -|.macro set_vmstate, st -| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st -|.endmacro -| -|.macro fpop1; fstp st1; .endmacro -| -|// Synthesize SSE FP constants. -|.macro sseconst_abs, reg, tmp // Synthesize abs mask. -| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp -|.endmacro -| -|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const. -| mov64 tmp, U64x(val,00000000); movd reg, tmp -|.endmacro -| -|.macro sseconst_sign, reg, tmp // Synthesize sign mask. 
-| sseconst_hi reg, tmp, 80000000 -|.endmacro -|.macro sseconst_1, reg, tmp // Synthesize 1.0. -| sseconst_hi reg, tmp, 3ff00000 -|.endmacro -|.macro sseconst_m1, reg, tmp // Synthesize -1.0. -| sseconst_hi reg, tmp, bff00000 -|.endmacro -|.macro sseconst_2p52, reg, tmp // Synthesize 2^52. -| sseconst_hi reg, tmp, 43300000 -|.endmacro -|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51. -| sseconst_hi reg, tmp, 43380000 -|.endmacro -| -|// Move table write barrier back. Overwrites reg. -|.macro barrierback, tab, reg -| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab) -| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)] -| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab -| mov tab->gclist, reg -|.endmacro -| -|//----------------------------------------------------------------------- - -/* Generate subroutines used by opcodes and other parts of the VM. */ -/* The .code_sub section should be last to help static branch prediction. */ -static void build_subroutines(BuildCtx *ctx) -{ - |.code_sub - | - |//----------------------------------------------------------------------- - |//-- Return handling ---------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_returnp: - | test PCd, FRAME_P - | jz ->cont_dispatch - | - | // Return from pcall or xpcall fast func. - | and PC, -8 - | sub BASE, PC // Restore caller base. - | lea RA, [RA+PC-8] // Rebase RA and prepend one result. - | mov PC, [BASE-8] // Fetch PC of previous frame. - | // Prepending may overwrite the pcall frame, so do it at the end. - | mov_true ITYPE - | mov aword [BASE+RA], ITYPE // Prepend true to results. - | - |->vm_returnc: - | add RDd, 1 // RD = nresults+1 - | jz ->vm_unwind_yield - | mov MULTRES, RDd - | test PC, FRAME_TYPE - | jz ->BC_RET_Z // Handle regular return to Lua. - | - |->vm_return: - | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return - | xor PC, FRAME_C - | test PCd, FRAME_TYPE - | jnz ->vm_returnp - | - | // Return to C. - | set_vmstate C - | and PC, -8 - | sub PC, BASE - | neg PC // Previous base = BASE - delta. - | - | sub RDd, 1 - | jz >2 - |1: // Move results down. - | mov RB, [BASE+RA] - | mov [BASE-16], RB - | add BASE, 8 - | sub RDd, 1 - | jnz <1 - |2: - | mov L:RB, SAVE_L - | mov L:RB->base, PC - |3: - | mov RDd, MULTRES - | mov RAd, SAVE_NRES // RA = wanted nresults+1 - |4: - | cmp RAd, RDd - | jne >6 // More/less results wanted? - |5: - | sub BASE, 16 - | mov L:RB->top, BASE - | - |->vm_leave_cp: - | mov RA, SAVE_CFRAME // Restore previous C frame. - | mov L:RB->cframe, RA - | xor eax, eax // Ok return status for vm_pcall. - | - |->vm_leave_unw: - | restoreregs - | ret - | - |6: - | jb >7 // Less results wanted? - | // More results wanted. Check stack size and fill up results with nil. - | cmp BASE, L:RB->maxstack - | ja >8 - | mov aword [BASE-16], LJ_TNIL - | add BASE, 8 - | add RDd, 1 - | jmp <4 - | - |7: // Less results wanted. - | test RAd, RAd - | jz <5 // But check for LUA_MULTRET+1. - | sub RA, RD // Negative result! - | lea BASE, [BASE+RA*8] // Correct top. - | jmp <5 - | - |8: // Corner case: need to grow stack for filling up results. - | // This can happen if: - | // - A C function grows the stack (a lot). - | // - The GC shrinks the stack in between. - | // - A return back from a lua_call() with (high) nresults adjustment. - | mov L:RB->top, BASE // Save current top held in BASE (yes). - | mov MULTRES, RDd // Need to fill only remainder with nil. 
- | mov CARG2d, RAd - | mov CARG1, L:RB - | call extern lj_state_growstack // (lua_State *L, int n) - | mov BASE, L:RB->top // Need the (realloced) L->top in BASE. - | jmp <3 - | - |->vm_unwind_yield: - | mov al, LUA_YIELD - | jmp ->vm_unwind_c_eh - | - |->vm_unwind_c: // Unwind C stack, return from vm_pcall. - | // (void *cframe, int errcode) - | mov eax, CARG2d // Error return status for vm_pcall. - | mov rsp, CARG1 - |->vm_unwind_c_eh: // Landing pad for external unwinder. - | mov L:RB, SAVE_L - | mov GL:RB, L:RB->glref - | mov dword GL:RB->vmstate, ~LJ_VMST_C - | jmp ->vm_leave_unw - | - |->vm_unwind_rethrow: - |.if not X64WIN - | mov CARG1, SAVE_L - | mov CARG2d, eax - | restoreregs - | jmp extern lj_err_throw // (lua_State *L, int errcode) - |.endif - | - |->vm_unwind_ff: // Unwind C stack, return from ff pcall. - | // (void *cframe) - | and CARG1, CFRAME_RAWMASK - | mov rsp, CARG1 - |->vm_unwind_ff_eh: // Landing pad for external unwinder. - | mov L:RB, SAVE_L - | mov RDd, 1+1 // Really 1+2 results, incr. later. - | mov BASE, L:RB->base - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | add DISPATCH, GG_G2DISP - | mov PC, [BASE-8] // Fetch PC of previous frame. - | mov_false RA - | mov RB, [BASE] - | mov [BASE-16], RA // Prepend false to error message. - | mov [BASE-8], RB - | mov RA, -16 // Results start at BASE+RA = BASE-16. - | set_vmstate INTERP - | jmp ->vm_returnc // Increments RD/MULTRES and returns. - | - |//----------------------------------------------------------------------- - |//-- Grow stack for calls ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_growstack_c: // Grow stack for C function. - | mov CARG2d, LUA_MINSTACK - | jmp >2 - | - |->vm_growstack_v: // Grow stack for vararg Lua function. - | sub RD, 8 - | jmp >1 - | - |->vm_growstack_f: // Grow stack for fixarg Lua function. - | // BASE = new base, RD = nargs+1, RB = L, PC = first PC - | lea RD, [BASE+NARGS:RD*8-8] - |1: - | movzx RAd, byte [PC-4+PC2PROTO(framesize)] - | add PC, 4 // Must point after first instruction. - | mov L:RB->base, BASE - | mov L:RB->top, RD - | mov SAVE_PC, PC - | mov CARG2, RA - |2: - | // RB = L, L->base = new base, L->top = top - | mov CARG1, L:RB - | call extern lj_state_growstack // (lua_State *L, int n) - | mov BASE, L:RB->base - | mov RD, L:RB->top - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | sub RD, BASE - | shr RDd, 3 - | add NARGS:RDd, 1 - | // BASE = new base, RB = LFUNC, RD = nargs+1 - | ins_callt // Just retry the call. - | - |//----------------------------------------------------------------------- - |//-- Entry points into the assembler VM --------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_resume: // Setup C frame and resume thread. - | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) - | saveregs - | mov L:RB, CARG1 // Caveat: CARG1 may be RA. - | mov SAVE_L, CARG1 - | mov RA, CARG2 - | mov PCd, FRAME_CP - | xor RDd, RDd - | lea KBASE, [esp+CFRAME_RESUME] - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | add DISPATCH, GG_G2DISP - | mov SAVE_PC, RD // Any value outside of bytecode is ok. - | mov SAVE_CFRAME, RD - | mov SAVE_NRES, RDd - | mov SAVE_ERRF, RDd - | mov L:RB->cframe, KBASE - | cmp byte L:RB->status, RDL - | je >2 // Initial resume (like a call). - | - | // Resume after yield (like a return). 
- | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | mov byte L:RB->status, RDL - | mov BASE, L:RB->base - | mov RD, L:RB->top - | sub RD, RA - | shr RDd, 3 - | add RDd, 1 // RD = nresults+1 - | sub RA, BASE // RA = resultofs - | mov PC, [BASE-8] - | mov MULTRES, RDd - | test PCd, FRAME_TYPE - | jz ->BC_RET_Z - | jmp ->vm_return - | - |->vm_pcall: // Setup protected C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) - | saveregs - | mov PCd, FRAME_CP - | mov SAVE_ERRF, CARG4d - | jmp >1 - | - |->vm_call: // Setup C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1) - | saveregs - | mov PCd, FRAME_C - | - |1: // Entry point for vm_pcall above (PC = ftype). - | mov SAVE_NRES, CARG3d - | mov L:RB, CARG1 // Caveat: CARG1 may be RA. - | mov SAVE_L, CARG1 - | mov RA, CARG2 - | - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. - | mov SAVE_CFRAME, KBASE - | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. - | add DISPATCH, GG_G2DISP - | mov L:RB->cframe, rsp - | - |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype). - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | mov BASE, L:RB->base // BASE = old base (used in vmeta_call). - | add PC, RA - | sub PC, BASE // PC = frame delta + frame type - | - | mov RD, L:RB->top - | sub RD, RA - | shr NARGS:RDd, 3 - | add NARGS:RDd, 1 // RD = nargs+1 - | - |->vm_call_dispatch: - | mov LFUNC:RB, [RA-16] - | checkfunc LFUNC:RB, ->vmeta_call // Ensure KBASE defined and != BASE. - | - |->vm_call_dispatch_f: - | mov BASE, RA - | ins_call - | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC - | - |->vm_cpcall: // Setup protected C frame, call C. - | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) - | saveregs - | mov L:RB, CARG1 // Caveat: CARG1 may be RA. - | mov SAVE_L, CARG1 - | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. - | - | mov KBASE, L:RB->stack // Compute -savestack(L, L->top). - | sub KBASE, L:RB->top - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | mov SAVE_ERRF, 0 // No error function. - | mov SAVE_NRES, KBASEd // Neg. delta means cframe w/o frame. - | add DISPATCH, GG_G2DISP - | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe). - | - | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. - | mov SAVE_CFRAME, KBASE - | mov L:RB->cframe, rsp - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | - | call CARG4 // (lua_State *L, lua_CFunction func, void *ud) - | // TValue * (new base) or NULL returned in eax (RC). - | test RC, RC - | jz ->vm_leave_cp // No base? Just remove C frame. - | mov RA, RC - | mov PCd, FRAME_CP - | jmp <2 // Else continue with the call. - | - |//----------------------------------------------------------------------- - |//-- Metamethod handling ------------------------------------------------ - |//----------------------------------------------------------------------- - | - |//-- Continuation dispatch ---------------------------------------------- - | - |->cont_dispatch: - | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES) - | add RA, BASE - | and PC, -8 - | mov RB, BASE - | sub BASE, PC // Restore caller BASE. - | mov aword [RA+RD*8-8], LJ_TNIL // Ensure one valid arg. - | mov RC, RA // ... in [RC] - | mov PC, [RB-24] // Restore PC from [cont|PC]. - | mov RA, qword [RB-32] // May be negative on WIN64 with debug. 
- |.if FFI - | cmp RA, 1 - | jbe >1 - |.endif - | mov LFUNC:KBASE, [BASE-16] - | cleartp LFUNC:KBASE - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | // BASE = base, RC = result, RB = meta base - | jmp RA // Jump to continuation. - | - |.if FFI - |1: - | je ->cont_ffi_callback // cont = 1: return from FFI callback. - | // cont = 0: Tail call from C function. - | sub RB, BASE - | shr RBd, 3 - | lea RDd, [RBd-3] - | jmp ->vm_call_tail - |.endif - | - |->cont_cat: // BASE = base, RC = result, RB = mbase - | movzx RAd, PC_RB - | sub RB, 32 - | lea RA, [BASE+RA*8] - | sub RA, RB - | je ->cont_ra - | neg RA - | shr RAd, 3 - |.if X64WIN - | mov CARG3d, RAd - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE - | mov RC, [RC] - | mov [RB], RC - | mov CARG2, RB - |.else - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE - | mov CARG3d, RAd - | mov RA, [RC] - | mov [RB], RA - | mov CARG2, RB - |.endif - | jmp ->BC_CAT_Z - | - |//-- Table indexing metamethods ----------------------------------------- - | - |->vmeta_tgets: - | settp STR:RC, LJ_TSTR // STR:RC = GCstr * - | mov TMP1, STR:RC - | lea RC, TMP1 - | cmp PC_OP, BC_GGET - | jne >1 - | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab * - | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. - | mov [RB], TAB:RA - | jmp >2 - | - |->vmeta_tgetb: - | movzx RCd, PC_RC - |.if DUALNUM - | setint RC - | mov TMP1, RC - |.else - | cvtsi2sd xmm0, RCd - | movsd TMP1, xmm0 - |.endif - | lea RC, TMP1 - | jmp >1 - | - |->vmeta_tgetv: - | movzx RCd, PC_RC // Reload TValue *k from RC. - | lea RC, [BASE+RC*8] - |1: - | movzx RBd, PC_RB // Reload TValue *t from RB. - | lea RB, [BASE+RB*8] - |2: - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE. - | mov CARG2, RB - | mov CARG3, RC - | mov L:RB, L:CARG1 - | mov SAVE_PC, PC - | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) - | // TValue * (finished) or NULL (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jz >3 - |->cont_ra: // BASE = base, RC = result - | movzx RAd, PC_RA - | mov RB, [RC] - | mov [BASE+RA*8], RB - | ins_next - | - |3: // Call __index metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k - | mov RA, L:RB->top - | mov [RA-24], PC // [cont|PC] - | lea PC, [RA+FRAME_CONT] - | sub PC, BASE - | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here. - | mov NARGS:RDd, 2+1 // 2 args for func(t, k). - | cleartp LFUNC:RB - | jmp ->vm_call_dispatch_f - | - |->vmeta_tgetr: - | mov CARG1, TAB:RB - | mov RB, BASE // Save BASE. - | mov CARG2d, RCd // Caveat: CARG2 == BASE - | call extern lj_tab_getinth // (GCtab *t, int32_t key) - | // cTValue * or NULL returned in eax (RC). - | movzx RAd, PC_RA - | mov BASE, RB // Restore BASE. - | test RC, RC - | jnz ->BC_TGETR_Z - | mov ITYPE, LJ_TNIL - | jmp ->BC_TGETR2_Z - | - |//----------------------------------------------------------------------- - | - |->vmeta_tsets: - | settp STR:RC, LJ_TSTR // STR:RC = GCstr * - | mov TMP1, STR:RC - | lea RC, TMP1 - | cmp PC_OP, BC_GSET - | jne >1 - | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab * - | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. - | mov [RB], TAB:RA - | jmp >2 - | - |->vmeta_tsetb: - | movzx RCd, PC_RC - |.if DUALNUM - | setint RC - | mov TMP1, RC - |.else - | cvtsi2sd xmm0, RCd - | movsd TMP1, xmm0 - |.endif - | lea RC, TMP1 - | jmp >1 - | - |->vmeta_tsetv: - | movzx RCd, PC_RC // Reload TValue *k from RC. 
- | lea RC, [BASE+RC*8] - |1: - | movzx RBd, PC_RB // Reload TValue *t from RB. - | lea RB, [BASE+RB*8] - |2: - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE. - | mov CARG2, RB - | mov CARG3, RC - | mov L:RB, L:CARG1 - | mov SAVE_PC, PC - | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) - | // TValue * (finished) or NULL (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jz >3 - | // NOBARRIER: lj_meta_tset ensures the table is not black. - | movzx RAd, PC_RA - | mov RB, [BASE+RA*8] - | mov [RC], RB - |->cont_nop: // BASE = base, (RC = result) - | ins_next - | - |3: // Call __newindex metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) - | mov RA, L:RB->top - | mov [RA-24], PC // [cont|PC] - | movzx RCd, PC_RA - | // Copy value to third argument. - | mov RB, [BASE+RC*8] - | mov [RA+16], RB - | lea PC, [RA+FRAME_CONT] - | sub PC, BASE - | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here. - | mov NARGS:RDd, 3+1 // 3 args for func(t, k, v). - | cleartp LFUNC:RB - | jmp ->vm_call_dispatch_f - | - |->vmeta_tsetr: - |.if X64WIN - | mov L:CARG1, SAVE_L - | mov CARG3d, RCd - | mov L:CARG1->base, BASE - | xchg CARG2, TAB:RB // Caveat: CARG2 == BASE. - |.else - | mov L:CARG1, SAVE_L - | mov CARG2, TAB:RB - | mov L:CARG1->base, BASE - | mov RB, BASE // Save BASE. - | mov CARG3d, RCd // Caveat: CARG3 == BASE. - |.endif - | mov SAVE_PC, PC - | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) - | // TValue * returned in eax (RC). - | movzx RAd, PC_RA - | mov BASE, RB // Restore BASE. - | jmp ->BC_TSETR_Z - | - |//-- Comparison metamethods --------------------------------------------- - | - |->vmeta_comp: - | movzx RDd, PC_RD - | movzx RAd, PC_RA - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2/CARG3 == BASE. - |.if X64WIN - | lea CARG3, [BASE+RD*8] - | lea CARG2, [BASE+RA*8] - |.else - | lea CARG2, [BASE+RA*8] - | lea CARG3, [BASE+RD*8] - |.endif - | mov CARG1, L:RB // Caveat: CARG1/CARG4 == RA. - | movzx CARG4d, PC_OP - | mov SAVE_PC, PC - | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) - | // 0/1 or TValue * (metamethod) returned in eax (RC). - |3: - | mov BASE, L:RB->base - | cmp RC, 1 - | ja ->vmeta_binop - |4: - | lea PC, [PC+4] - | jb >6 - |5: - | movzx RDd, PC_RD - | branchPC RD - |6: - | ins_next - | - |->cont_condt: // BASE = base, RC = result - | add PC, 4 - | mov ITYPE, [RC] - | sar ITYPE, 47 - | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is true. - | jb <5 - | jmp <6 - | - |->cont_condf: // BASE = base, RC = result - | mov ITYPE, [RC] - | sar ITYPE, 47 - | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is false. - | jmp <4 - | - |->vmeta_equal: - | cleartp TAB:RD - | sub PC, 4 - |.if X64WIN - | mov CARG3, RD - | mov CARG4d, RBd - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2 == BASE. - | mov CARG2, RA - | mov CARG1, L:RB // Caveat: CARG1 == RA. - |.else - | mov CARG2, RA - | mov CARG4d, RBd // Caveat: CARG4 == RA. - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG3 == BASE. - | mov CARG3, RD - | mov CARG1, L:RB - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) - | // 0/1 or TValue * (metamethod) returned in eax (RC). 
- | jmp <3 - | - |->vmeta_equal_cd: - |.if FFI - | sub PC, 4 - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov CARG1, L:RB - | mov CARG2d, dword [PC-4] - | mov SAVE_PC, PC - | call extern lj_meta_equal_cd // (lua_State *L, BCIns ins) - | // 0/1 or TValue * (metamethod) returned in eax (RC). - | jmp <3 - |.endif - | - |->vmeta_istype: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE. - | mov CARG2d, RAd - | mov CARG3d, RDd - | mov L:CARG1, L:RB - | mov SAVE_PC, PC - | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) - | mov BASE, L:RB->base - | jmp <6 - | - |//-- Arithmetic metamethods --------------------------------------------- - | - |->vmeta_arith_vno: - |.if DUALNUM - | movzx RBd, PC_RB - | movzx RCd, PC_RC - |.endif - |->vmeta_arith_vn: - | lea RC, [KBASE+RC*8] - | jmp >1 - | - |->vmeta_arith_nvo: - |.if DUALNUM - | movzx RBd, PC_RB - | movzx RCd, PC_RC - |.endif - |->vmeta_arith_nv: - | lea TMPR, [KBASE+RC*8] - | lea RC, [BASE+RB*8] - | mov RB, TMPR - | jmp >2 - | - |->vmeta_unm: - | lea RC, [BASE+RD*8] - | mov RB, RC - | jmp >2 - | - |->vmeta_arith_vvo: - |.if DUALNUM - | movzx RBd, PC_RB - | movzx RCd, PC_RC - |.endif - |->vmeta_arith_vv: - | lea RC, [BASE+RC*8] - |1: - | lea RB, [BASE+RB*8] - |2: - | lea RA, [BASE+RA*8] - |.if X64WIN - | mov CARG3, RB - | mov CARG4, RC - | movzx RCd, PC_OP - | mov ARG5d, RCd - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2 == BASE. - | mov CARG2, RA - | mov CARG1, L:RB // Caveat: CARG1 == RA. - |.else - | movzx CARG5d, PC_OP - | mov CARG2, RA - | mov CARG4, RC // Caveat: CARG4 == RA. - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE // Caveat: CARG3 == BASE. - | mov CARG3, RB - | mov L:RB, L:CARG1 - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) - | // NULL (finished) or TValue * (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jz ->cont_nop - | - | // Call metamethod for binary op. - |->vmeta_binop: - | // BASE = base, RC = new base, stack = cont/func/o1/o2 - | mov RA, RC - | sub RC, BASE - | mov [RA-24], PC // [cont|PC] - | lea PC, [RC+FRAME_CONT] - | mov NARGS:RDd, 2+1 // 2 args for func(o1, o2). - | jmp ->vm_call_dispatch - | - |->vmeta_len: - | movzx RDd, PC_RD - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | lea CARG2, [BASE+RD*8] // Caveat: CARG2 == BASE - | mov L:CARG1, L:RB - | mov SAVE_PC, PC - | call extern lj_meta_len // (lua_State *L, TValue *o) - | // NULL (retry) or TValue * (metamethod) returned in eax (RC). - | mov BASE, L:RB->base -#if LJ_52 - | test RC, RC - | jne ->vmeta_binop // Binop call for compatibility. - | movzx RDd, PC_RD - | mov TAB:CARG1, [BASE+RD*8] - | cleartp TAB:CARG1 - | jmp ->BC_LEN_Z -#else - | jmp ->vmeta_binop // Binop call for compatibility. -#endif - | - |//-- Call metamethod ---------------------------------------------------- - | - |->vmeta_call_ra: - | lea RA, [BASE+RA*8+16] - |->vmeta_call: // Resolve and call __call metamethod. - | // BASE = old base, RA = new base, RC = nargs+1, PC = return - | mov TMP1d, NARGS:RDd // Save RA, RC for us. - | mov RB, RA - |.if X64WIN - | mov L:TMPR, SAVE_L - | mov L:TMPR->base, BASE // Caveat: CARG2 is BASE. - | lea CARG2, [RA-16] - | lea CARG3, [RA+NARGS:RD*8-8] - | mov CARG1, L:TMPR // Caveat: CARG1 is RA. - |.else - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE // Caveat: CARG3 is BASE. 
- | lea CARG2, [RA-16] - | lea CARG3, [RA+NARGS:RD*8-8] - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | mov RA, RB - | mov L:RB, SAVE_L - | mov BASE, L:RB->base - | mov NARGS:RDd, TMP1d - | mov LFUNC:RB, [RA-16] - | cleartp LFUNC:RB - | add NARGS:RDd, 1 - | // This is fragile. L->base must not move, KBASE must always be defined. - | cmp KBASE, BASE // Continue with CALLT if flag set. - | je ->BC_CALLT_Z - | mov BASE, RA - | ins_call // Otherwise call resolved metamethod. - | - |//-- Argument coercion for 'for' statement ------------------------------ - | - |->vmeta_for: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov CARG2, RA // Caveat: CARG2 == BASE - | mov L:CARG1, L:RB // Caveat: CARG1 == RA - | mov SAVE_PC, PC - | call extern lj_meta_for // (lua_State *L, TValue *base) - | mov BASE, L:RB->base - | mov RCd, [PC-4] - | movzx RAd, RCH - | movzx OP, RCL - | shr RCd, 16 - | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI. - | - |//----------------------------------------------------------------------- - |//-- Fast functions ----------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro .ffunc, name - |->ff_ .. name: - |.endmacro - | - |.macro .ffunc_1, name - |->ff_ .. name: - | cmp NARGS:RDd, 1+1; jb ->fff_fallback - |.endmacro - | - |.macro .ffunc_2, name - |->ff_ .. name: - | cmp NARGS:RDd, 2+1; jb ->fff_fallback - |.endmacro - | - |.macro .ffunc_n, name, op - | .ffunc_1 name - | checknumtp [BASE], ->fff_fallback - | op xmm0, qword [BASE] - |.endmacro - | - |.macro .ffunc_n, name - | .ffunc_n name, movsd - |.endmacro - | - |.macro .ffunc_nn, name - | .ffunc_2 name - | checknumtp [BASE], ->fff_fallback - | checknumtp [BASE+8], ->fff_fallback - | movsd xmm0, qword [BASE] - | movsd xmm1, qword [BASE+8] - |.endmacro - | - |// Inlined GC threshold check. Caveat: uses label 1. - |.macro ffgccheck - | mov RB, [DISPATCH+DISPATCH_GL(gc.total)] - | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)] - | jb >1 - | call ->fff_gcstep - |1: - |.endmacro - | - |//-- Base library: checks ----------------------------------------------- - | - |.ffunc_1 assert - | mov ITYPE, [BASE] - | mov RB, ITYPE - | sar ITYPE, 47 - | cmp ITYPEd, LJ_TISTRUECOND; jae ->fff_fallback - | mov PC, [BASE-8] - | mov MULTRES, RDd - | mov RB, [BASE] - | mov [BASE-16], RB - | sub RDd, 2 - | jz >2 - | mov RA, BASE - |1: - | add RA, 8 - | mov RB, [RA] - | mov [RA-16], RB - | sub RDd, 1 - | jnz <1 - |2: - | mov RDd, MULTRES - | jmp ->fff_res_ - | - |.ffunc_1 type - | mov RC, [BASE] - | sar RC, 47 - | mov RBd, LJ_TISNUM - | cmp RCd, RBd - | cmovb RCd, RBd - | not RCd - |2: - | mov CFUNC:RB, [BASE-16] - | cleartp CFUNC:RB - | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))] - | mov PC, [BASE-8] - | settp STR:RC, LJ_TSTR - | mov [BASE-16], STR:RC - | jmp ->fff_res1 - | - |//-- Base library: getters and setters --------------------------------- - | - |.ffunc_1 getmetatable - | mov TAB:RB, [BASE] - | mov PC, [BASE-8] - | checktab TAB:RB, >6 - |1: // Field metatable must be at same offset for GCtab and GCudata! - | mov TAB:RB, TAB:RB->metatable - |2: - | test TAB:RB, TAB:RB - | mov aword [BASE-16], LJ_TNIL - | jz ->fff_res1 - | settp TAB:RC, TAB:RB, LJ_TTAB - | mov [BASE-16], TAB:RC // Store metatable as default result. 
- | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+8*(GCROOT_MMNAME+MM_metatable)] - | mov RAd, TAB:RB->hmask - | and RAd, STR:RC->hash - | settp STR:RC, LJ_TSTR - | imul RAd, #NODE - | add NODE:RA, TAB:RB->node - |3: // Rearranged logic, because we expect _not_ to find the key. - | cmp NODE:RA->key, STR:RC - | je >5 - |4: - | mov NODE:RA, NODE:RA->next - | test NODE:RA, NODE:RA - | jnz <3 - | jmp ->fff_res1 // Not found, keep default result. - |5: - | mov RB, NODE:RA->val - | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value. - | mov [BASE-16], RB // Return value of mt.__metatable. - | jmp ->fff_res1 - | - |6: - | cmp ITYPEd, LJ_TUDATA; je <1 - | cmp ITYPEd, LJ_TISNUM; ja >7 - | mov ITYPEd, LJ_TISNUM - |7: - | not ITYPEd - | mov TAB:RB, [DISPATCH+ITYPE*8+DISPATCH_GL(gcroot[GCROOT_BASEMT])] - | jmp <2 - | - |.ffunc_2 setmetatable - | mov TAB:RB, [BASE] - | mov TAB:TMPR, TAB:RB - | checktab TAB:RB, ->fff_fallback - | // Fast path: no mt for table yet and not clearing the mt. - | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback - | mov TAB:RA, [BASE+8] - | checktab TAB:RA, ->fff_fallback - | mov TAB:RB->metatable, TAB:RA - | mov PC, [BASE-8] - | mov [BASE-16], TAB:TMPR // Return original table. - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jz >1 - | // Possible write barrier. Table is black, but skip iswhite(mt) check. - | barrierback TAB:RB, RC - |1: - | jmp ->fff_res1 - | - |.ffunc_2 rawget - |.if X64WIN - | mov TAB:RA, [BASE] - | checktab TAB:RA, ->fff_fallback - | mov RB, BASE // Save BASE. - | lea CARG3, [BASE+8] - | mov CARG2, TAB:RA // Caveat: CARG2 == BASE. - | mov CARG1, SAVE_L - |.else - | mov TAB:CARG2, [BASE] - | checktab TAB:CARG2, ->fff_fallback - | mov RB, BASE // Save BASE. - | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE. - | mov CARG1, SAVE_L - |.endif - | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) - | // cTValue * returned in eax (RD). - | mov BASE, RB // Restore BASE. - | // Copy table slot. - | mov RB, [RD] - | mov PC, [BASE-8] - | mov [BASE-16], RB - | jmp ->fff_res1 - | - |//-- Base library: conversions ------------------------------------------ - | - |.ffunc tonumber - | // Only handles the number case inline (without a base argument). - | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument. - | mov RB, [BASE] - | checknumber RB, ->fff_fallback - | mov PC, [BASE-8] - | mov [BASE-16], RB - | jmp ->fff_res1 - | - |.ffunc_1 tostring - | // Only handles the string or number case inline. - | mov PC, [BASE-8] - | mov STR:RB, [BASE] - | checktp_nc STR:RB, LJ_TSTR, >3 - | // A __tostring method in the string base metatable is ignored. - |2: - | mov [BASE-16], STR:RB - | jmp ->fff_res1 - |3: // Handle numbers inline, unless a number base metatable is present. - | cmp ITYPEd, LJ_TISNUM; ja ->fff_fallback_1 - | cmp aword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0 - | jne ->fff_fallback - | ffgccheck // Caveat: uses label 1. - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Add frame since C call can throw. - | mov SAVE_PC, PC // Redundant (but a defined value). - |.if not X64WIN - | mov CARG2, BASE // Otherwise: CARG2 == BASE - |.endif - | mov L:CARG1, L:RB - |.if DUALNUM - | call extern lj_strfmt_number // (lua_State *L, cTValue *o) - |.else - | call extern lj_strfmt_num // (lua_State *L, lua_Number *np) - |.endif - | // GCstr returned in eax (RD). 
- | mov BASE, L:RB->base - | settp STR:RB, RD, LJ_TSTR - | jmp <2 - | - |//-- Base library: iterators ------------------------------------------- - | - |.ffunc_1 next - | je >2 // Missing 2nd arg? - |1: - |.if X64WIN - | mov RA, [BASE] - | checktab RA, ->fff_fallback - |.else - | mov CARG2, [BASE] - | checktab CARG2, ->fff_fallback - |.endif - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Add frame since C call can throw. - | mov L:RB->top, BASE // Dummy frame length is ok. - | mov PC, [BASE-8] - |.if X64WIN - | lea CARG3, [BASE+8] - | mov CARG2, RA // Caveat: CARG2 == BASE. - | mov CARG1, L:RB - |.else - | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE. - | mov CARG1, L:RB - |.endif - | mov SAVE_PC, PC // Needed for ITERN fallback. - | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) - | // Flag returned in eax (RD). - | mov BASE, L:RB->base - | test RDd, RDd; jz >3 // End of traversal? - | // Copy key and value to results. - | mov RB, [BASE+8] - | mov RD, [BASE+16] - | mov [BASE-16], RB - | mov [BASE-8], RD - |->fff_res2: - | mov RDd, 1+2 - | jmp ->fff_res - |2: // Set missing 2nd arg to nil. - | mov aword [BASE+8], LJ_TNIL - | jmp <1 - |3: // End of traversal: return nil. - | mov aword [BASE-16], LJ_TNIL - | jmp ->fff_res1 - | - |.ffunc_1 pairs - | mov TAB:RB, [BASE] - | mov TMPR, TAB:RB - | checktab TAB:RB, ->fff_fallback -#if LJ_52 - | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback -#endif - | mov CFUNC:RD, [BASE-16] - | cleartp CFUNC:RD - | mov CFUNC:RD, CFUNC:RD->upvalue[0] - | settp CFUNC:RD, LJ_TFUNC - | mov PC, [BASE-8] - | mov [BASE-16], CFUNC:RD - | mov [BASE-8], TMPR - | mov aword [BASE], LJ_TNIL - | mov RDd, 1+3 - | jmp ->fff_res - | - |.ffunc_2 ipairs_aux - | mov TAB:RB, [BASE] - | checktab TAB:RB, ->fff_fallback - |.if DUALNUM - | mov RA, [BASE+8] - | checkint RA, ->fff_fallback - |.else - | checknumtp [BASE+8], ->fff_fallback - | movsd xmm0, qword [BASE+8] - |.endif - | mov PC, [BASE-8] - |.if DUALNUM - | add RAd, 1 - | setint ITYPE, RA - | mov [BASE-16], ITYPE - |.else - | sseconst_1 xmm1, TMPR - | addsd xmm0, xmm1 - | cvttsd2si RAd, xmm0 - | movsd qword [BASE-16], xmm0 - |.endif - | cmp RAd, TAB:RB->asize; jae >2 // Not in array part? - | mov RD, TAB:RB->array - | lea RD, [RD+RA*8] - |1: - | cmp aword [RD], LJ_TNIL; je ->fff_res0 - | // Copy array slot. - | mov RB, [RD] - | mov [BASE-8], RB - | jmp ->fff_res2 - |2: // Check for empty hash part first. Otherwise call C function. - | cmp dword TAB:RB->hmask, 0; je ->fff_res0 - |.if X64WIN - | mov TMPR, BASE - | mov CARG2d, RAd - | mov CARG1, TAB:RB - | mov RB, TMPR - |.else - | mov CARG1, TAB:RB - | mov RB, BASE // Save BASE. - | mov CARG2d, RAd // Caveat: CARG2 == BASE - |.endif - | call extern lj_tab_getinth // (GCtab *t, int32_t key) - | // cTValue * or NULL returned in eax (RD). 
- | mov BASE, RB - | test RD, RD - | jnz <1 - |->fff_res0: - | mov RDd, 1+0 - | jmp ->fff_res - | - |.ffunc_1 ipairs - | mov TAB:RB, [BASE] - | mov TMPR, TAB:RB - | checktab TAB:RB, ->fff_fallback -#if LJ_52 - | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback -#endif - | mov CFUNC:RD, [BASE-16] - | cleartp CFUNC:RD - | mov CFUNC:RD, CFUNC:RD->upvalue[0] - | settp CFUNC:RD, LJ_TFUNC - | mov PC, [BASE-8] - | mov [BASE-16], CFUNC:RD - | mov [BASE-8], TMPR - |.if DUALNUM - | mov64 RD, ((int64_t)LJ_TISNUM<<47) - | mov [BASE], RD - |.else - | mov qword [BASE], 0 - |.endif - | mov RDd, 1+3 - | jmp ->fff_res - | - |//-- Base library: catch errors ---------------------------------------- - | - |.ffunc_1 pcall - | lea RA, [BASE+16] - | sub NARGS:RDd, 1 - | mov PCd, 16+FRAME_PCALL - |1: - | movzx RBd, byte [DISPATCH+DISPATCH_GL(hookmask)] - | shr RB, HOOK_ACTIVE_SHIFT - | and RB, 1 - | add PC, RB // Remember active hook before pcall. - | // Note: this does a (harmless) copy of the function to the PC slot, too. - | mov KBASE, RD - |2: - | mov RB, [RA+KBASE*8-24] - | mov [RA+KBASE*8-16], RB - | sub KBASE, 1 - | ja <2 - | jmp ->vm_call_dispatch - | - |.ffunc_2 xpcall - | mov LFUNC:RA, [BASE+8] - | checktp_nc LFUNC:RA, LJ_TFUNC, ->fff_fallback - | mov LFUNC:RB, [BASE] // Swap function and traceback. - | mov [BASE], LFUNC:RA - | mov [BASE+8], LFUNC:RB - | lea RA, [BASE+24] - | sub NARGS:RDd, 2 - | mov PCd, 24+FRAME_PCALL - | jmp <1 - | - |//-- Coroutine library -------------------------------------------------- - | - |.macro coroutine_resume_wrap, resume - |.if resume - |.ffunc_1 coroutine_resume - | mov L:RB, [BASE] - | cleartp L:RB - |.else - |.ffunc coroutine_wrap_aux - | mov CFUNC:RB, [BASE-16] - | cleartp CFUNC:RB - | mov L:RB, CFUNC:RB->upvalue[0].gcr - | cleartp L:RB - |.endif - | mov PC, [BASE-8] - | mov SAVE_PC, PC - | mov TMP1, L:RB - |.if resume - | checktptp [BASE], LJ_TTHREAD, ->fff_fallback - |.endif - | cmp aword L:RB->cframe, 0; jne ->fff_fallback - | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback - | mov RA, L:RB->top - | je >1 // Status != LUA_YIELD (i.e. 0)? - | cmp RA, L:RB->base // Check for presence of initial func. - | je ->fff_fallback - | mov PC, [RA-8] // Move initial function up. - | mov [RA], PC - | add RA, 8 - |1: - |.if resume - | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread). - |.else - | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1). - |.endif - | cmp PC, L:RB->maxstack; ja ->fff_fallback - | mov L:RB->top, PC - | - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - |.if resume - | add BASE, 8 // Keep resumed thread in stack for GC. - |.endif - | mov L:RB->top, BASE - |.if resume - | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move. - |.else - | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move. - |.endif - | sub RB, PC // Relative to PC. - | - | cmp PC, RA - | je >3 - |2: // Move args to coroutine. - | mov RC, [PC+RB] - | mov [PC-8], RC - | sub PC, 8 - | cmp PC, RA - | jne <2 - |3: - | mov CARG2, RA - | mov CARG1, TMP1 - | call ->vm_resume // (lua_State *L, TValue *base, 0, 0) - | - | mov L:RB, SAVE_L - | mov L:PC, TMP1 - | mov BASE, L:RB->base - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | - | cmp eax, LUA_YIELD - | ja >8 - |4: - | mov RA, L:PC->base - | mov KBASE, L:PC->top - | mov L:PC->top, RA // Clear coroutine stack. - | mov PC, KBASE - | sub PC, RA - | je >6 // No results? - | lea RD, [BASE+PC] - | shr PCd, 3 - | cmp RD, L:RB->maxstack - | ja >9 // Need to grow stack? 
- | - | mov RB, BASE - | sub RB, RA - |5: // Move results from coroutine. - | mov RD, [RA] - | mov [RA+RB], RD - | add RA, 8 - | cmp RA, KBASE - | jne <5 - |6: - |.if resume - | lea RDd, [PCd+2] // nresults+1 = 1 + true + results. - | mov_true ITYPE // Prepend true to results. - | mov [BASE-8], ITYPE - |.else - | lea RDd, [PCd+1] // nresults+1 = 1 + results. - |.endif - |7: - | mov PC, SAVE_PC - | mov MULTRES, RDd - |.if resume - | mov RA, -8 - |.else - | xor RAd, RAd - |.endif - | test PCd, FRAME_TYPE - | jz ->BC_RET_Z - | jmp ->vm_return - | - |8: // Coroutine returned with error (at co->top-1). - |.if resume - | mov_false ITYPE // Prepend false to results. - | mov [BASE-8], ITYPE - | mov RA, L:PC->top - | sub RA, 8 - | mov L:PC->top, RA // Clear error from coroutine stack. - | // Copy error message. - | mov RD, [RA] - | mov [BASE], RD - | mov RDd, 1+2 // nresults+1 = 1 + false + error. - | jmp <7 - |.else - | mov CARG2, L:PC - | mov CARG1, L:RB - | call extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) - | // Error function does not return. - |.endif - | - |9: // Handle stack expansion on return from yield. - | mov L:RA, TMP1 - | mov L:RA->top, KBASE // Undo coroutine stack clearing. - | mov CARG2, PC - | mov CARG1, L:RB - | call extern lj_state_growstack // (lua_State *L, int n) - | mov L:PC, TMP1 - | mov BASE, L:RB->base - | jmp <4 // Retry the stack move. - |.endmacro - | - | coroutine_resume_wrap 1 // coroutine.resume - | coroutine_resume_wrap 0 // coroutine.wrap - | - |.ffunc coroutine_yield - | mov L:RB, SAVE_L - | test aword L:RB->cframe, CFRAME_RESUME - | jz ->fff_fallback - | mov L:RB->base, BASE - | lea RD, [BASE+NARGS:RD*8-8] - | mov L:RB->top, RD - | xor RDd, RDd - | mov aword L:RB->cframe, RD - | mov al, LUA_YIELD - | mov byte L:RB->status, al - | jmp ->vm_leave_unw - | - |//-- Math library ------------------------------------------------------- - | - | .ffunc_1 math_abs - | mov RB, [BASE] - |.if DUALNUM - | checkint RB, >3 - | cmp RBd, 0; jns ->fff_resi - | neg RBd; js >2 - |->fff_resbit: - |->fff_resi: - | setint RB - |->fff_resRB: - | mov PC, [BASE-8] - | mov [BASE-16], RB - | jmp ->fff_res1 - |2: - | mov64 RB, U64x(41e00000,00000000) // 2^31. - | jmp ->fff_resRB - |3: - | ja ->fff_fallback - |.else - | checknum RB, ->fff_fallback - |.endif - | shl RB, 1 - | shr RB, 1 - | mov PC, [BASE-8] - | mov [BASE-16], RB - | jmp ->fff_res1 - | - |.ffunc_n math_sqrt, sqrtsd - |->fff_resxmm0: - | mov PC, [BASE-8] - | movsd qword [BASE-16], xmm0 - | // fallthrough - | - |->fff_res1: - | mov RDd, 1+1 - |->fff_res: - | mov MULTRES, RDd - |->fff_res_: - | test PCd, FRAME_TYPE - | jnz >7 - |5: - | cmp PC_RB, RDL // More results expected? - | ja >6 - | // Adjust BASE. KBASE is assumed to be set for the calling frame. - | movzx RAd, PC_RA - | neg RA - | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8 - | ins_next - | - |6: // Fill up results with nil. - | mov aword [BASE+RD*8-24], LJ_TNIL - | add RD, 1 - | jmp <5 - | - |7: // Non-standard return case. - | mov RA, -16 // Results start at BASE+RA = BASE-16. - | jmp ->vm_return - | - |.macro math_round, func - | .ffunc math_ .. func - |.if DUALNUM - | mov RB, [BASE] - | checknumx RB, ->fff_resRB, je - | ja ->fff_fallback - |.else - | checknumtp [BASE], ->fff_fallback - |.endif - | movsd xmm0, qword [BASE] - | call ->vm_ .. func .. 
_sse - |.if DUALNUM - | cvttsd2si RBd, xmm0 - | cmp RBd, 0x80000000 - | jne ->fff_resi - | cvtsi2sd xmm1, RBd - | ucomisd xmm0, xmm1 - | jp ->fff_resxmm0 - | je ->fff_resi - |.endif - | jmp ->fff_resxmm0 - |.endmacro - | - | math_round floor - | math_round ceil - | - |.ffunc math_log - | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument. - | checknumtp [BASE], ->fff_fallback - | movsd xmm0, qword [BASE] - | mov RB, BASE - | call extern log - | mov BASE, RB - | jmp ->fff_resxmm0 - | - |.macro math_extern, func - | .ffunc_n math_ .. func - | mov RB, BASE - | call extern func - | mov BASE, RB - | jmp ->fff_resxmm0 - |.endmacro - | - |.macro math_extern2, func - | .ffunc_nn math_ .. func - | mov RB, BASE - | call extern func - | mov BASE, RB - | jmp ->fff_resxmm0 - |.endmacro - | - | math_extern log10 - | math_extern exp - | math_extern sin - | math_extern cos - | math_extern tan - | math_extern asin - | math_extern acos - | math_extern atan - | math_extern sinh - | math_extern cosh - | math_extern tanh - | math_extern2 pow - | math_extern2 atan2 - | math_extern2 fmod - | - |.ffunc_2 math_ldexp - | checknumtp [BASE], ->fff_fallback - | checknumtp [BASE+8], ->fff_fallback - | fld qword [BASE+8] - | fld qword [BASE] - | fscale - | fpop1 - | mov PC, [BASE-8] - | fstp qword [BASE-16] - | jmp ->fff_res1 - | - |.ffunc_n math_frexp - | lea CARG1, TMP1 - | mov RB, BASE - | call extern frexp - | mov BASE, RB - | mov RBd, TMP1d - | mov PC, [BASE-8] - | movsd qword [BASE-16], xmm0 - |.if DUALNUM - | setint RB - | mov [BASE-8], RB - |.else - | cvtsi2sd xmm1, RBd - | movsd qword [BASE-8], xmm1 - |.endif - | mov RDd, 1+2 - | jmp ->fff_res - | - |.ffunc_n math_modf - | lea CARG1, [BASE-16] - | mov PC, [BASE-8] - | mov RB, BASE - | call extern modf - | mov BASE, RB - | mov PC, [BASE-8] - | movsd qword [BASE-8], xmm0 - | mov RDd, 1+2 - | jmp ->fff_res - | - |.macro math_minmax, name, cmovop, sseop - | .ffunc name - | mov RAd, 2 - |.if DUALNUM - | mov RB, [BASE] - | checkint RB, >4 - |1: // Handle integers. - | cmp RAd, RDd; jae ->fff_resRB - | mov TMPR, [BASE+RA*8-8] - | checkint TMPR, >3 - | cmp RBd, TMPRd - | cmovop RB, TMPR - | add RAd, 1 - | jmp <1 - |3: - | ja ->fff_fallback - | // Convert intermediate result to number and continue below. - | cvtsi2sd xmm0, RBd - | jmp >6 - |4: - | ja ->fff_fallback - |.else - | checknumtp [BASE], ->fff_fallback - |.endif - | - | movsd xmm0, qword [BASE] - |5: // Handle numbers or integers. - | cmp RAd, RDd; jae ->fff_resxmm0 - |.if DUALNUM - | mov RB, [BASE+RA*8-8] - | checknumx RB, >6, jb - | ja ->fff_fallback - | cvtsi2sd xmm1, RBd - | jmp >7 - |.else - | checknumtp [BASE+RA*8-8], ->fff_fallback - |.endif - |6: - | movsd xmm1, qword [BASE+RA*8-8] - |7: - | sseop xmm0, xmm1 - | add RAd, 1 - | jmp <5 - |.endmacro - | - | math_minmax math_min, cmovg, minsd - | math_minmax math_max, cmovl, maxsd - | - |//-- String library ----------------------------------------------------- - | - |.ffunc string_byte // Only handle the 1-arg case here. - | cmp NARGS:RDd, 1+1; jne ->fff_fallback - | mov STR:RB, [BASE] - | checkstr STR:RB, ->fff_fallback - | mov PC, [BASE-8] - | cmp dword STR:RB->len, 1 - | jb ->fff_res0 // Return no results for empty string. - | movzx RBd, byte STR:RB[1] - |.if DUALNUM - | jmp ->fff_resi - |.else - | cvtsi2sd xmm0, RBd; jmp ->fff_resxmm0 - |.endif - | - |.ffunc string_char // Only handle the 1-arg case here. - | ffgccheck - | cmp NARGS:RDd, 1+1; jne ->fff_fallback // *Exactly* 1 arg. 
- |.if DUALNUM - | mov RB, [BASE] - | checkint RB, ->fff_fallback - |.else - | checknumtp [BASE], ->fff_fallback - | cvttsd2si RBd, qword [BASE] - |.endif - | cmp RBd, 255; ja ->fff_fallback - | mov TMP1d, RBd - | mov TMPRd, 1 - | lea RD, TMP1 // Points to stack. Little-endian. - |->fff_newstr: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov CARG3d, TMPRd // Zero-extended to size_t. - | mov CARG2, RD - | mov CARG1, L:RB - | mov SAVE_PC, PC - | call extern lj_str_new // (lua_State *L, char *str, size_t l) - |->fff_resstr: - | // GCstr * returned in eax (RD). - | mov BASE, L:RB->base - | mov PC, [BASE-8] - | settp STR:RD, LJ_TSTR - | mov [BASE-16], STR:RD - | jmp ->fff_res1 - | - |.ffunc string_sub - | ffgccheck - | mov TMPRd, -1 - | cmp NARGS:RDd, 1+2; jb ->fff_fallback - | jna >1 - |.if DUALNUM - | mov TMPR, [BASE+16] - | checkint TMPR, ->fff_fallback - |.else - | checknumtp [BASE+16], ->fff_fallback - | cvttsd2si TMPRd, qword [BASE+16] - |.endif - |1: - | mov STR:RB, [BASE] - | checkstr STR:RB, ->fff_fallback - |.if DUALNUM - | mov ITYPE, [BASE+8] - | mov RAd, ITYPEd // Must clear hiword for lea below. - | sar ITYPE, 47 - | cmp ITYPEd, LJ_TISNUM - | jne ->fff_fallback - |.else - | checknumtp [BASE+8], ->fff_fallback - | cvttsd2si RAd, qword [BASE+8] - |.endif - | mov RCd, STR:RB->len - | cmp RCd, TMPRd // len < end? (unsigned compare) - | jb >5 - |2: - | test RAd, RAd // start <= 0? - | jle >7 - |3: - | sub TMPRd, RAd // start > end? - | jl ->fff_emptystr - | lea RD, [STR:RB+RAd+#STR-1] - | add TMPRd, 1 - |4: - | jmp ->fff_newstr - | - |5: // Negative end or overflow. - | jl >6 - | lea TMPRd, [TMPRd+RCd+1] // end = end+(len+1) - | jmp <2 - |6: // Overflow. - | mov TMPRd, RCd // end = len - | jmp <2 - | - |7: // Negative start or underflow. - | je >8 - | add RAd, RCd // start = start+(len+1) - | add RAd, 1 - | jg <3 // start > 0? - |8: // Underflow. - | mov RAd, 1 // start = 1 - | jmp <3 - | - |->fff_emptystr: // Range underflow. - | xor TMPRd, TMPRd // Zero length. Any ptr in RD is ok. - | jmp <4 - | - |.macro ffstring_op, name - | .ffunc_1 string_ .. name - | ffgccheck - |.if X64WIN - | mov STR:TMPR, [BASE] - | checkstr STR:TMPR, ->fff_fallback - |.else - | mov STR:CARG2, [BASE] - | checkstr STR:CARG2, ->fff_fallback - |.endif - | mov L:RB, SAVE_L - | lea SBUF:CARG1, [DISPATCH+DISPATCH_GL(tmpbuf)] - | mov L:RB->base, BASE - |.if X64WIN - | mov STR:CARG2, STR:TMPR // Caveat: CARG2 == BASE - |.endif - | mov RC, SBUF:CARG1->b - | mov SBUF:CARG1->L, L:RB - | mov SBUF:CARG1->p, RC - | mov SAVE_PC, PC - | call extern lj_buf_putstr_ .. name - | mov CARG1, rax - | call extern lj_buf_tostr - | jmp ->fff_resstr - |.endmacro - | - |ffstring_op reverse - |ffstring_op lower - |ffstring_op upper - | - |//-- Bit library -------------------------------------------------------- - | - |.macro .ffunc_bit, name, kind, fdef - | fdef name - |.if kind == 2 - | sseconst_tobit xmm1, RB - |.endif - |.if DUALNUM - | mov RB, [BASE] - | checkint RB, >1 - |.if kind > 0 - | jmp >2 - |.else - | jmp ->fff_resbit - |.endif - |1: - | ja ->fff_fallback - | movd xmm0, RB - |.else - | checknumtp [BASE], ->fff_fallback - | movsd xmm0, qword [BASE] - |.endif - |.if kind < 2 - | sseconst_tobit xmm1, RB - |.endif - | addsd xmm0, xmm1 - | movd RBd, xmm0 - |2: - |.endmacro - | - |.macro .ffunc_bit, name, kind - | .ffunc_bit name, kind, .ffunc_1 - |.endmacro - | - |.ffunc_bit bit_tobit, 0 - | jmp ->fff_resbit - | - |.macro .ffunc_bit_op, name, ins - | .ffunc_bit name, 2 - | mov TMPRd, NARGS:RDd // Save for fallback. 
- | lea RD, [BASE+NARGS:RD*8-16] - |1: - | cmp RD, BASE - | jbe ->fff_resbit - |.if DUALNUM - | mov RA, [RD] - | checkint RA, >2 - | ins RBd, RAd - | sub RD, 8 - | jmp <1 - |2: - | ja ->fff_fallback_bit_op - | movd xmm0, RA - |.else - | checknumtp [RD], ->fff_fallback_bit_op - | movsd xmm0, qword [RD] - |.endif - | addsd xmm0, xmm1 - | movd RAd, xmm0 - | ins RBd, RAd - | sub RD, 8 - | jmp <1 - |.endmacro - | - |.ffunc_bit_op bit_band, and - |.ffunc_bit_op bit_bor, or - |.ffunc_bit_op bit_bxor, xor - | - |.ffunc_bit bit_bswap, 1 - | bswap RBd - | jmp ->fff_resbit - | - |.ffunc_bit bit_bnot, 1 - | not RBd - |.if DUALNUM - | jmp ->fff_resbit - |.else - |->fff_resbit: - | cvtsi2sd xmm0, RBd - | jmp ->fff_resxmm0 - |.endif - | - |->fff_fallback_bit_op: - | mov NARGS:RDd, TMPRd // Restore for fallback - | jmp ->fff_fallback - | - |.macro .ffunc_bit_sh, name, ins - |.if DUALNUM - | .ffunc_bit name, 1, .ffunc_2 - | // Note: no inline conversion from number for 2nd argument! - | mov RA, [BASE+8] - | checkint RA, ->fff_fallback - |.else - | .ffunc_nn name - | sseconst_tobit xmm2, RB - | addsd xmm0, xmm2 - | addsd xmm1, xmm2 - | movd RBd, xmm0 - | movd RAd, xmm1 - |.endif - | ins RBd, cl // Assumes RA is ecx. - | jmp ->fff_resbit - |.endmacro - | - |.ffunc_bit_sh bit_lshift, shl - |.ffunc_bit_sh bit_rshift, shr - |.ffunc_bit_sh bit_arshift, sar - |.ffunc_bit_sh bit_rol, rol - |.ffunc_bit_sh bit_ror, ror - | - |//----------------------------------------------------------------------- - | - |->fff_fallback_2: - | mov NARGS:RDd, 1+2 // Other args are ignored, anyway. - | jmp ->fff_fallback - |->fff_fallback_1: - | mov NARGS:RDd, 1+1 // Other args are ignored, anyway. - |->fff_fallback: // Call fast function fallback handler. - | // BASE = new base, RD = nargs+1 - | mov L:RB, SAVE_L - | mov PC, [BASE-8] // Fallback may overwrite PC. - | mov SAVE_PC, PC // Redundant (but a defined value). - | mov L:RB->base, BASE - | lea RD, [BASE+NARGS:RD*8-8] - | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler. - | mov L:RB->top, RD - | mov CFUNC:RD, [BASE-16] - | cleartp CFUNC:RD - | cmp RA, L:RB->maxstack - | ja >5 // Need to grow stack. - | mov CARG1, L:RB - | call aword CFUNC:RD->f // (lua_State *L) - | mov BASE, L:RB->base - | // Either throws an error, or recovers and returns -1, 0 or nresults+1. - | test RDd, RDd; jg ->fff_res // Returned nresults+1? - |1: - | mov RA, L:RB->top - | sub RA, BASE - | shr RAd, 3 - | test RDd, RDd - | lea NARGS:RDd, [RAd+1] - | mov LFUNC:RB, [BASE-16] - | jne ->vm_call_tail // Returned -1? - | cleartp LFUNC:RB - | ins_callt // Returned 0: retry fast path. - | - |// Reconstruct previous base for vmeta_call during tailcall. - |->vm_call_tail: - | mov RA, BASE - | test PCd, FRAME_TYPE - | jnz >3 - | movzx RBd, PC_RA - | neg RB - | lea BASE, [BASE+RB*8-16] // base = base - (RB+2)*8 - | jmp ->vm_call_dispatch // Resolve again for tailcall. - |3: - | mov RB, PC - | and RB, -8 - | sub BASE, RB - | jmp ->vm_call_dispatch // Resolve again for tailcall. - | - |5: // Grow stack for fallback handler. - | mov CARG2d, LUA_MINSTACK - | mov CARG1, L:RB - | call extern lj_state_growstack // (lua_State *L, int n) - | mov BASE, L:RB->base - | xor RDd, RDd // Simulate a return 0. - | jmp <1 // Dumb retry (goes through ff first). - | - |->fff_gcstep: // Call GC step function. - | // BASE = new base, RD = nargs+1 - | pop RB // Must keep stack at same level. - | mov TMP1, RB // Save return address - | mov L:RB, SAVE_L - | mov SAVE_PC, PC // Redundant (but a defined value). 
- | mov L:RB->base, BASE - | lea RD, [BASE+NARGS:RD*8-8] - | mov CARG1, L:RB - | mov L:RB->top, RD - | call extern lj_gc_step // (lua_State *L) - | mov BASE, L:RB->base - | mov RD, L:RB->top - | sub RD, BASE - | shr RDd, 3 - | add NARGS:RDd, 1 - | mov RB, TMP1 - | push RB // Restore return address. - | ret - | - |//----------------------------------------------------------------------- - |//-- Special dispatch targets ------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_record: // Dispatch target for recording phase. - |.if JIT - | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)] - | test RDL, HOOK_VMEVENT // No recording while in vmevent. - | jnz >5 - | // Decrement the hookcount for consistency, but always do the call. - | test RDL, HOOK_ACTIVE - | jnz >1 - | test RDL, LUA_MASKLINE|LUA_MASKCOUNT - | jz >1 - | dec dword [DISPATCH+DISPATCH_GL(hookcount)] - | jmp >1 - |.endif - | - |->vm_rethook: // Dispatch target for return hooks. - | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)] - | test RDL, HOOK_ACTIVE // Hook already active? - | jnz >5 - | jmp >1 - | - |->vm_inshook: // Dispatch target for instr/line hooks. - | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)] - | test RDL, HOOK_ACTIVE // Hook already active? - | jnz >5 - | - | test RDL, LUA_MASKLINE|LUA_MASKCOUNT - | jz >5 - | dec dword [DISPATCH+DISPATCH_GL(hookcount)] - | jz >1 - | test RDL, LUA_MASKLINE - | jz >5 - |1: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov CARG2, PC // Caveat: CARG2 == BASE - | mov CARG1, L:RB - | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. - | call extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) - |3: - | mov BASE, L:RB->base - |4: - | movzx RAd, PC_RA - |5: - | movzx OP, PC_OP - | movzx RDd, PC_RD - | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins. - | - |->cont_hook: // Continue from hook yield. - | add PC, 4 - | mov RA, [RB-40] - | mov MULTRES, RAd // Restore MULTRES for *M ins. - | jmp <4 - | - |->vm_hotloop: // Hot loop counter underflow. - |.if JIT - | mov LFUNC:RB, [BASE-16] // Same as curr_topL(L). - | cleartp LFUNC:RB - | mov RB, LFUNC:RB->pc - | movzx RDd, byte [RB+PC2PROTO(framesize)] - | lea RD, [BASE+RD*8] - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov L:RB->top, RD - | mov CARG2, PC - | lea CARG1, [DISPATCH+GG_DISP2J] - | mov aword [DISPATCH+DISPATCH_J(L)], L:RB - | mov SAVE_PC, PC - | call extern lj_trace_hot // (jit_State *J, const BCIns *pc) - | jmp <3 - |.endif - | - |->vm_callhook: // Dispatch target for call hooks. - | mov SAVE_PC, PC - |.if JIT - | jmp >1 - |.endif - | - |->vm_hotcall: // Hot call counter underflow. - |.if JIT - | mov SAVE_PC, PC - | or PC, 1 // Marker for hot call. - |1: - |.endif - | lea RD, [BASE+NARGS:RD*8-8] - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov L:RB->top, RD - | mov CARG2, PC - | mov CARG1, L:RB - | call extern lj_dispatch_call // (lua_State *L, const BCIns *pc) - | // ASMFunction returned in eax/rax (RD). - | mov SAVE_PC, 0 // Invalidate for subsequent line hook. - |.if JIT - | and PC, -2 - |.endif - | mov BASE, L:RB->base - | mov RA, RD - | mov RD, L:RB->top - | sub RD, BASE - | mov RB, RA - | movzx RAd, PC_RA - | shr RDd, 3 - | add NARGS:RDd, 1 - | jmp RB - | - |->cont_stitch: // Trace stitching. - |.if JIT - | // BASE = base, RC = result, RB = mbase - | mov ITYPEd, [RB-24] // Save previous trace number. - | mov TMPRd, MULTRES - | movzx RAd, PC_RA - | lea RA, [BASE+RA*8] // Call base. 
- | sub TMPRd, 1 - | jz >2 - |1: // Move results down. - | mov RB, [RC] - | mov [RA], RB - | add RC, 8 - | add RA, 8 - | sub TMPRd, 1 - | jnz <1 - |2: - | movzx RCd, PC_RA - | movzx RBd, PC_RB - | add RC, RB - | lea RC, [BASE+RC*8-8] - |3: - | cmp RC, RA - | ja >9 // More results wanted? - | - | mov RA, [DISPATCH+DISPATCH_J(trace)] - | mov TRACE:RD, [RA+ITYPE*8] - | test TRACE:RD, TRACE:RD - | jz ->cont_nop - | movzx RDd, word TRACE:RD->link - | cmp RDd, RBd - | je ->cont_nop // Blacklisted. - | test RDd, RDd - | jne =>BC_JLOOP // Jump to stitched trace. - | - | // Stitch a new trace to the previous trace. - | mov [DISPATCH+DISPATCH_J(exitno)], RB - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov CARG2, PC - | lea CARG1, [DISPATCH+GG_DISP2J] - | mov aword [DISPATCH+DISPATCH_J(L)], L:RB - | call extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) - | mov BASE, L:RB->base - | jmp ->cont_nop - | - |9: // Fill up results with nil. - | mov aword [RA], LJ_TNIL - | add RA, 8 - | jmp <3 - |.endif - | - |->vm_profhook: // Dispatch target for profiler hook. -#if LJ_HASPROFILE - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov CARG2, PC // Caveat: CARG2 == BASE - | mov CARG1, L:RB - | call extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) - | mov BASE, L:RB->base - | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. - | sub PC, 4 - | jmp ->cont_nop -#endif - | - |//----------------------------------------------------------------------- - |//-- Trace exit handler ------------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Called from an exit stub with the exit number on the stack. - |// The 16 bit exit number is stored with two (sign-extended) push imm8. - |->vm_exit_handler: - |.if JIT - | push r13; push r12 - | push r11; push r10; push r9; push r8 - | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp - | push rbx; push rdx; push rcx; push rax - | movzx RCd, byte [rbp-8] // Reconstruct exit number. - | mov RCH, byte [rbp-16] - | mov [rbp-8], r15; mov [rbp-16], r14 - | // Caveat: DISPATCH is rbx. - | mov DISPATCH, [ebp] - | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number. - | set_vmstate EXIT - | mov [DISPATCH+DISPATCH_J(exitno)], RC - | mov [DISPATCH+DISPATCH_J(parent)], RA - |.if X64WIN - | sub rsp, 16*8+4*8 // Room for SSE regs + save area. - |.else - | sub rsp, 16*8 // Room for SSE regs. - |.endif - | add rbp, -128 - | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14 - | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12 - | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10 - | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8 - | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6 - | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4 - | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2 - | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0 - | // Caveat: RB is rbp. - | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)] - | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)] - | mov aword [DISPATCH+DISPATCH_J(L)], L:RB - | mov L:RB->base, BASE - |.if X64WIN - | lea CARG2, [rsp+4*8] - |.else - | mov CARG2, rsp - |.endif - | lea CARG1, [DISPATCH+GG_DISP2J] - | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 - | call extern lj_trace_exit // (jit_State *J, ExitState *ex) - | // MULTRES or negated error code returned in eax (RD). 
- | mov RA, L:RB->cframe - | and RA, CFRAME_RAWMASK - | mov [RA+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield). - | mov BASE, L:RB->base - | mov PC, [RA+CFRAME_OFS_PC] // Get SAVE_PC. - | jmp >1 - |.endif - |->vm_exit_interp: - | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set. - |.if JIT - | // Restore additional callee-save registers only used in compiled code. - |.if X64WIN - | lea RA, [rsp+10*16+4*8] - |1: - | movdqa xmm15, [RA-10*16] - | movdqa xmm14, [RA-9*16] - | movdqa xmm13, [RA-8*16] - | movdqa xmm12, [RA-7*16] - | movdqa xmm11, [RA-6*16] - | movdqa xmm10, [RA-5*16] - | movdqa xmm9, [RA-4*16] - | movdqa xmm8, [RA-3*16] - | movdqa xmm7, [RA-2*16] - | mov rsp, RA // Reposition stack to C frame. - | movdqa xmm6, [RA-1*16] - | mov r15, CSAVE_1 - | mov r14, CSAVE_2 - | mov r13, CSAVE_3 - | mov r12, CSAVE_4 - |.else - | lea RA, [rsp+16] - |1: - | mov r13, [RA-8] - | mov r12, [RA] - | mov rsp, RA // Reposition stack to C frame. - |.endif - | test RDd, RDd; js >9 // Check for error from exit. - | mov L:RB, SAVE_L - | mov MULTRES, RDd - | mov LFUNC:KBASE, [BASE-16] - | cleartp LFUNC:KBASE - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | mov L:RB->base, BASE - | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 - | set_vmstate INTERP - | // Modified copy of ins_next which handles function header dispatch, too. - | mov RCd, [PC] - | movzx RAd, RCH - | movzx OP, RCL - | add PC, 4 - | shr RCd, 16 - | cmp OP, BC_FUNCF // Function header? - | jb >3 - | cmp OP, BC_FUNCC+2 // Fast function? - | jae >4 - |2: - | mov RCd, MULTRES // RC/RD holds nres+1. - |3: - | jmp aword [DISPATCH+OP*8] - | - |4: // Check frame below fast function. - | mov RC, [BASE-8] - | test RCd, FRAME_TYPE - | jnz <2 // Trace stitching continuation? - | // Otherwise set KBASE for Lua function below fast function. - | movzx RCd, byte [RC-3] - | neg RC - | mov LFUNC:KBASE, [BASE+RC*8-24] - | cleartp LFUNC:KBASE - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | jmp <2 - | - |9: // Rethrow error from the right C frame. - | neg RD - | mov CARG1, L:RB - | mov CARG2, RD - | call extern lj_err_throw // (lua_State *L, int errcode) - |.endif - | - |//----------------------------------------------------------------------- - |//-- Math helper functions ---------------------------------------------- - |//----------------------------------------------------------------------- - | - |// FP value rounding. Called by math.floor/math.ceil fast functions - |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified. - |.macro vm_round, name, mode, cond - |->name: - |->name .. _sse: - | sseconst_abs xmm2, RD - | sseconst_2p52 xmm3, RD - | movaps xmm1, xmm0 - | andpd xmm1, xmm2 // |x| - | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|. - | jbe >1 - | andnpd xmm2, xmm0 // Isolate sign bit. - |.if mode == 2 // trunc(x)? - | movaps xmm0, xmm1 - | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 - | subsd xmm1, xmm3 - | sseconst_1 xmm3, RD - | cmpsd xmm0, xmm1, 1 // |x| < result? - | andpd xmm0, xmm3 - | subsd xmm1, xmm0 // If yes, subtract -1. - | orpd xmm1, xmm2 // Merge sign bit back in. - |.else - | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 - | subsd xmm1, xmm3 - | orpd xmm1, xmm2 // Merge sign bit back in. - | .if mode == 1 // ceil(x)? - | sseconst_m1 xmm2, RD // Must subtract -1 to preserve -0. - | cmpsd xmm0, xmm1, 6 // x > result? - | .else // floor(x)? - | sseconst_1 xmm2, RD - | cmpsd xmm0, xmm1, 1 // x < result? 
- | .endif - | andpd xmm0, xmm2 - | subsd xmm1, xmm0 // If yes, subtract +-1. - |.endif - | movaps xmm0, xmm1 - |1: - | ret - |.endmacro - | - | vm_round vm_floor, 0, 1 - | vm_round vm_ceil, 1, JIT - | vm_round vm_trunc, 2, JIT - | - |// FP modulo x%y. Called by BC_MOD* and vm_arith. - |->vm_mod: - |// Args in xmm0/xmm1, return value in xmm0. - |// Caveat: xmm0-xmm5 and RC (eax) modified! - | movaps xmm5, xmm0 - | divsd xmm0, xmm1 - | sseconst_abs xmm2, RD - | sseconst_2p52 xmm3, RD - | movaps xmm4, xmm0 - | andpd xmm4, xmm2 // |x/y| - | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|. - | jbe >1 - | andnpd xmm2, xmm0 // Isolate sign bit. - | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52 - | subsd xmm4, xmm3 - | orpd xmm4, xmm2 // Merge sign bit back in. - | sseconst_1 xmm2, RD - | cmpsd xmm0, xmm4, 1 // x/y < result? - | andpd xmm0, xmm2 - | subsd xmm4, xmm0 // If yes, subtract 1.0. - | movaps xmm0, xmm5 - | mulsd xmm1, xmm4 - | subsd xmm0, xmm1 - | ret - |1: - | mulsd xmm1, xmm0 - | movaps xmm0, xmm5 - | subsd xmm0, xmm1 - | ret - | - |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified. - |->vm_powi_sse: - | cmp eax, 1; jle >6 // i<=1? - | // Now 1 < (unsigned)i <= 0x80000000. - |1: // Handle leading zeros. - | test eax, 1; jnz >2 - | mulsd xmm0, xmm0 - | shr eax, 1 - | jmp <1 - |2: - | shr eax, 1; jz >5 - | movaps xmm1, xmm0 - |3: // Handle trailing bits. - | mulsd xmm0, xmm0 - | shr eax, 1; jz >4 - | jnc <3 - | mulsd xmm1, xmm0 - | jmp <3 - |4: - | mulsd xmm0, xmm1 - |5: - | ret - |6: - | je <5 // x^1 ==> x - | jb >7 // x^0 ==> 1 - | neg eax - | call <1 - | sseconst_1 xmm1, RD - | divsd xmm1, xmm0 - | movaps xmm0, xmm1 - | ret - |7: - | sseconst_1 xmm0, RD - | ret - | - |//----------------------------------------------------------------------- - |//-- Miscellaneous functions -------------------------------------------- - |//----------------------------------------------------------------------- - | - |// int lj_vm_cpuid(uint32_t f, uint32_t res[4]) - |->vm_cpuid: - | mov eax, CARG1d - | .if X64WIN; push rsi; mov rsi, CARG2; .endif - | push rbx - | cpuid - | mov [rsi], eax - | mov [rsi+4], ebx - | mov [rsi+8], ecx - | mov [rsi+12], edx - | pop rbx - | .if X64WIN; pop rsi; .endif - | ret - | - |//----------------------------------------------------------------------- - |//-- Assertions --------------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->assert_bad_for_arg_type: -#ifdef LUA_USE_ASSERT - | int3 -#endif - | int3 - | - |//----------------------------------------------------------------------- - |//-- FFI helper functions ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Handler for callback functions. Callback slot number in ah/al. - |->vm_ffi_callback: - |.if FFI - |.type CTSTATE, CTState, PC - | saveregs_ // ebp/rbp already saved. ebp now holds global_State *. 
- | lea DISPATCH, [ebp+GG_G2DISP] - | mov CTSTATE, GL:ebp->ctype_state - | movzx eax, ax - | mov CTSTATE->cb.slot, eax - | mov CTSTATE->cb.gpr[0], CARG1 - | mov CTSTATE->cb.gpr[1], CARG2 - | mov CTSTATE->cb.gpr[2], CARG3 - | mov CTSTATE->cb.gpr[3], CARG4 - | movsd qword CTSTATE->cb.fpr[0], xmm0 - | movsd qword CTSTATE->cb.fpr[1], xmm1 - | movsd qword CTSTATE->cb.fpr[2], xmm2 - | movsd qword CTSTATE->cb.fpr[3], xmm3 - |.if X64WIN - | lea rax, [rsp+CFRAME_SIZE+4*8] - |.else - | lea rax, [rsp+CFRAME_SIZE] - | mov CTSTATE->cb.gpr[4], CARG5 - | mov CTSTATE->cb.gpr[5], CARG6 - | movsd qword CTSTATE->cb.fpr[4], xmm4 - | movsd qword CTSTATE->cb.fpr[5], xmm5 - | movsd qword CTSTATE->cb.fpr[6], xmm6 - | movsd qword CTSTATE->cb.fpr[7], xmm7 - |.endif - | mov CTSTATE->cb.stack, rax - | mov CARG2, rsp - | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok. - | mov CARG1, CTSTATE - | call extern lj_ccallback_enter // (CTState *cts, void *cf) - | // lua_State * returned in eax (RD). - | set_vmstate INTERP - | mov BASE, L:RD->base - | mov RD, L:RD->top - | sub RD, BASE - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | shr RD, 3 - | add RD, 1 - | ins_callt - |.endif - | - |->cont_ffi_callback: // Return from FFI callback. - |.if FFI - | mov L:RA, SAVE_L - | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)] - | mov aword CTSTATE->L, L:RA - | mov L:RA->base, BASE - | mov L:RA->top, RB - | mov CARG1, CTSTATE - | mov CARG2, RC - | call extern lj_ccallback_leave // (CTState *cts, TValue *o) - | mov rax, CTSTATE->cb.gpr[0] - | movsd xmm0, qword CTSTATE->cb.fpr[0] - | jmp ->vm_leave_unw - |.endif - | - |->vm_ffi_call: // Call C function via FFI. - | // Caveat: needs special frame unwinding, see below. - |.if FFI - | .type CCSTATE, CCallState, rbx - | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1 - | - | // Readjust stack. - | mov eax, CCSTATE->spadj - | sub rsp, rax - | - | // Copy stack slots. - | movzx ecx, byte CCSTATE->nsp - | sub ecx, 1 - | js >2 - |1: - | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)] - | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax - | sub ecx, 1 - | jns <1 - |2: - | - | movzx eax, byte CCSTATE->nfpr - | mov CARG1, CCSTATE->gpr[0] - | mov CARG2, CCSTATE->gpr[1] - | mov CARG3, CCSTATE->gpr[2] - | mov CARG4, CCSTATE->gpr[3] - |.if not X64WIN - | mov CARG5, CCSTATE->gpr[4] - | mov CARG6, CCSTATE->gpr[5] - |.endif - | test eax, eax; jz >5 - | movaps xmm0, CCSTATE->fpr[0] - | movaps xmm1, CCSTATE->fpr[1] - | movaps xmm2, CCSTATE->fpr[2] - | movaps xmm3, CCSTATE->fpr[3] - |.if not X64WIN - | cmp eax, 4; jbe >5 - | movaps xmm4, CCSTATE->fpr[4] - | movaps xmm5, CCSTATE->fpr[5] - | movaps xmm6, CCSTATE->fpr[6] - | movaps xmm7, CCSTATE->fpr[7] - |.endif - |5: - | - | call aword CCSTATE->func - | - | mov CCSTATE->gpr[0], rax - | movaps CCSTATE->fpr[0], xmm0 - |.if not X64WIN - | mov CCSTATE->gpr[1], rdx - | movaps CCSTATE->fpr[1], xmm1 - |.endif - | - | mov rbx, [rbp-8]; leave; ret - |.endif - |// Note: vm_ffi_call must be the last function in this object file! - | - |//----------------------------------------------------------------------- -} - -/* Generate the code for a single instruction. */ -static void build_ins(BuildCtx *ctx, BCOp op, int defop) -{ - int vk = 0; - |// Note: aligning all instructions does not pay off. - |=>defop: - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - /* Remember: all ops branch for a true comparison, fall through otherwise. 
*/ - - |.macro jmp_comp, lt, ge, le, gt, target - ||switch (op) { - ||case BC_ISLT: - | lt target - ||break; - ||case BC_ISGE: - | ge target - ||break; - ||case BC_ISLE: - | le target - ||break; - ||case BC_ISGT: - | gt target - ||break; - ||default: break; /* Shut up GCC. */ - ||} - |.endmacro - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: - | // RA = src1, RD = src2, JMP with RD = target - | ins_AD - | mov ITYPE, [BASE+RA*8] - | mov RB, [BASE+RD*8] - | mov RA, ITYPE - | mov RD, RB - | sar ITYPE, 47 - | sar RB, 47 - |.if DUALNUM - | cmp ITYPEd, LJ_TISNUM; jne >7 - | cmp RBd, LJ_TISNUM; jne >8 - | add PC, 4 - | cmp RAd, RDd - | jmp_comp jge, jl, jg, jle, >9 - |6: - | movzx RDd, PC_RD - | branchPC RD - |9: - | ins_next - | - |7: // RA is not an integer. - | ja ->vmeta_comp - | // RA is a number. - | cmp RBd, LJ_TISNUM; jb >1; jne ->vmeta_comp - | // RA is a number, RD is an integer. - | cvtsi2sd xmm0, RDd - | jmp >2 - | - |8: // RA is an integer, RD is not an integer. - | ja ->vmeta_comp - | // RA is an integer, RD is a number. - | cvtsi2sd xmm1, RAd - | movd xmm0, RD - | jmp >3 - |.else - | cmp ITYPEd, LJ_TISNUM; jae ->vmeta_comp - | cmp RBd, LJ_TISNUM; jae ->vmeta_comp - |.endif - |1: - | movd xmm0, RD - |2: - | movd xmm1, RA - |3: - | add PC, 4 - | ucomisd xmm0, xmm1 - | // Unordered: all of ZF CF PF set, ordered: PF clear. - | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. - |.if DUALNUM - | jmp_comp jbe, ja, jb, jae, <9 - | jmp <6 - |.else - | jmp_comp jbe, ja, jb, jae, >1 - | movzx RDd, PC_RD - | branchPC RD - |1: - | ins_next - |.endif - break; - - case BC_ISEQV: case BC_ISNEV: - vk = op == BC_ISEQV; - | ins_AD // RA = src1, RD = src2, JMP with RD = target - | mov RB, [BASE+RD*8] - | mov ITYPE, [BASE+RA*8] - | add PC, 4 - | mov RD, RB - | mov RA, ITYPE - | sar RB, 47 - | sar ITYPE, 47 - |.if DUALNUM - | cmp RBd, LJ_TISNUM; jne >7 - | cmp ITYPEd, LJ_TISNUM; jne >8 - | cmp RDd, RAd - if (vk) { - | jne >9 - } else { - | je >9 - } - | movzx RDd, PC_RD - | branchPC RD - |9: - | ins_next - | - |7: // RD is not an integer. - | ja >5 - | // RD is a number. - | movd xmm1, RD - | cmp ITYPEd, LJ_TISNUM; jb >1; jne >5 - | // RD is a number, RA is an integer. - | cvtsi2sd xmm0, RAd - | jmp >2 - | - |8: // RD is an integer, RA is not an integer. - | ja >5 - | // RD is an integer, RA is a number. - | cvtsi2sd xmm1, RDd - | jmp >1 - | - |.else - | cmp RBd, LJ_TISNUM; jae >5 - | cmp ITYPEd, LJ_TISNUM; jae >5 - | movd xmm1, RD - |.endif - |1: - | movd xmm0, RA - |2: - | ucomisd xmm0, xmm1 - |4: - iseqne_fp: - if (vk) { - | jp >2 // Unordered means not equal. - | jne >2 - } else { - | jp >2 // Unordered means not equal. - | je >1 - } - iseqne_end: - if (vk) { - |1: // EQ: Branch to the target. - | movzx RDd, PC_RD - | branchPC RD - |2: // NE: Fallthrough to next instruction. - |.if not FFI - |3: - |.endif - } else { - |.if not FFI - |3: - |.endif - |2: // NE: Branch to the target. - | movzx RDd, PC_RD - | branchPC RD - |1: // EQ: Fallthrough to next instruction. - } - if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || - op == BC_ISEQN || op == BC_ISNEN)) { - | jmp <9 - } else { - | ins_next - } - | - if (op == BC_ISEQV || op == BC_ISNEV) { - |5: // Either or both types are not numbers. - |.if FFI - | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd - | cmp ITYPEd, LJ_TCDATA; je ->vmeta_equal_cd - |.endif - | cmp RA, RD - | je <1 // Same GCobjs or pvalues? - | cmp RBd, ITYPEd - | jne <2 // Not the same type? 
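/* Illustrative sketch, not the lj_obj.h macros: the 47-bit shifts used by
** the comparison and equality opcodes above come from the NaN-tagged
** 64-bit value layout of this interpreter (the GC64 x64 variant). The type
** code sits in the upper 17 bits, sign-extended when extracted with
** "sar reg, 47", and the pointer or integer payload occupies the lower
** 47 bits. The helper names below are mine.
*/
#include <stdint.h>
static uint64_t settp_sketch(uint64_t payload, uint64_t itype)
{
  return payload | (itype << 47);            /* cf. the settp macro */
}
static uint64_t cleartp_sketch(uint64_t tv)
{
  return tv & ((1ull << 47) - 1);            /* cf. the cleartp macro */
}
static int32_t itype_sketch(uint64_t tv)
{
  return (int32_t)((int64_t)tv >> 47);       /* cf. "sar ITYPE, 47" */
}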
- | cmp RBd, LJ_TISTABUD - | ja <2 // Different objects and not table/ud? - | - | // Different tables or userdatas. Need to check __eq metamethod. - | // Field metatable must be at same offset for GCtab and GCudata! - | cleartp TAB:RA - | mov TAB:RB, TAB:RA->metatable - | test TAB:RB, TAB:RB - | jz <2 // No metatable? - | test byte TAB:RB->nomm, 1<vmeta_equal // Handle __eq metamethod. - } else { - |.if FFI - |3: - | cmp ITYPEd, LJ_TCDATA - if (LJ_DUALNUM && vk) { - | jne <9 - } else { - | jne <2 - } - | jmp ->vmeta_equal_cd - |.endif - } - break; - case BC_ISEQS: case BC_ISNES: - vk = op == BC_ISEQS; - | ins_AND // RA = src, RD = str const, JMP with RD = target - | mov RB, [BASE+RA*8] - | add PC, 4 - | checkstr RB, >3 - | cmp RB, [KBASE+RD*8] - iseqne_test: - if (vk) { - | jne >2 - } else { - | je >1 - } - goto iseqne_end; - case BC_ISEQN: case BC_ISNEN: - vk = op == BC_ISEQN; - | ins_AD // RA = src, RD = num const, JMP with RD = target - | mov RB, [BASE+RA*8] - | add PC, 4 - |.if DUALNUM - | checkint RB, >7 - | mov RD, [KBASE+RD*8] - | checkint RD, >8 - | cmp RBd, RDd - if (vk) { - | jne >9 - } else { - | je >9 - } - | movzx RDd, PC_RD - | branchPC RD - |9: - | ins_next - | - |7: // RA is not an integer. - | ja >3 - | // RA is a number. - | mov RD, [KBASE+RD*8] - | checkint RD, >1 - | // RA is a number, RD is an integer. - | cvtsi2sd xmm0, RDd - | jmp >2 - | - |8: // RA is an integer, RD is a number. - | cvtsi2sd xmm0, RBd - | movd xmm1, RD - | ucomisd xmm0, xmm1 - | jmp >4 - |1: - | movd xmm0, RD - |.else - | checknum RB, >3 - |1: - | movsd xmm0, qword [KBASE+RD*8] - |.endif - |2: - | ucomisd xmm0, qword [BASE+RA*8] - |4: - goto iseqne_fp; - case BC_ISEQP: case BC_ISNEP: - vk = op == BC_ISEQP; - | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target - | mov RB, [BASE+RA*8] - | sar RB, 47 - | add PC, 4 - | cmp RBd, RDd - if (!LJ_HASFFI) goto iseqne_test; - if (vk) { - | jne >3 - | movzx RDd, PC_RD - | branchPC RD - |2: - | ins_next - |3: - | cmp RBd, LJ_TCDATA; jne <2 - | jmp ->vmeta_equal_cd - } else { - | je >2 - | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd - | movzx RDd, PC_RD - | branchPC RD - |2: - | ins_next - } - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: - | ins_AD // RA = dst or unused, RD = src, JMP with RD = target - | mov ITYPE, [BASE+RD*8] - | add PC, 4 - if (op == BC_ISTC || op == BC_ISFC) { - | mov RB, ITYPE - } - | sar ITYPE, 47 - | cmp ITYPEd, LJ_TISTRUECOND - if (op == BC_IST || op == BC_ISTC) { - | jae >1 - } else { - | jb >1 - } - if (op == BC_ISTC || op == BC_ISFC) { - | mov [BASE+RA*8], RB - } - | movzx RDd, PC_RD - | branchPC RD - |1: // Fallthrough to the next instruction. 
- | ins_next - break; - - case BC_ISTYPE: - | ins_AD // RA = src, RD = -type - | mov RB, [BASE+RA*8] - | sar RB, 47 - | add RBd, RDd - | jne ->vmeta_istype - | ins_next - break; - case BC_ISNUM: - | ins_AD // RA = src, RD = -(TISNUM-1) - | checknumtp [BASE+RA*8], ->vmeta_istype - | ins_next - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_MOV: - | ins_AD // RA = dst, RD = src - | mov RB, [BASE+RD*8] - | mov [BASE+RA*8], RB - | ins_next_ - break; - case BC_NOT: - | ins_AD // RA = dst, RD = src - | mov RB, [BASE+RD*8] - | sar RB, 47 - | mov RCd, 2 - | cmp RB, LJ_TISTRUECOND - | sbb RCd, 0 - | shl RC, 47 - | not RC - | mov [BASE+RA*8], RC - | ins_next - break; - case BC_UNM: - | ins_AD // RA = dst, RD = src - | mov RB, [BASE+RD*8] - |.if DUALNUM - | checkint RB, >5 - | neg RBd - | jo >4 - | setint RB - |9: - | mov [BASE+RA*8], RB - | ins_next - |4: - | mov64 RB, U64x(41e00000,00000000) // 2^31. - | jmp <9 - |5: - | ja ->vmeta_unm - |.else - | checknum RB, ->vmeta_unm - |.endif - | mov64 RD, U64x(80000000,00000000) - | xor RB, RD - |.if DUALNUM - | jmp <9 - |.else - | mov [BASE+RA*8], RB - | ins_next - |.endif - break; - case BC_LEN: - | ins_AD // RA = dst, RD = src - | mov RD, [BASE+RD*8] - | checkstr RD, >2 - |.if DUALNUM - | mov RDd, dword STR:RD->len - |1: - | setint RD - | mov [BASE+RA*8], RD - |.else - | xorps xmm0, xmm0 - | cvtsi2sd xmm0, dword STR:RD->len - |1: - | movsd qword [BASE+RA*8], xmm0 - |.endif - | ins_next - |2: - | cmp ITYPEd, LJ_TTAB; jne ->vmeta_len - | mov TAB:CARG1, TAB:RD -#if LJ_52 - | mov TAB:RB, TAB:RD->metatable - | cmp TAB:RB, 0 - | jnz >9 - |3: -#endif - |->BC_LEN_Z: - | mov RB, BASE // Save BASE. - | call extern lj_tab_len // (GCtab *t) - | // Length of table returned in eax (RD). - |.if DUALNUM - | // Nothing to do. - |.else - | cvtsi2sd xmm0, RDd - |.endif - | mov BASE, RB // Restore BASE. - | movzx RAd, PC_RA - | jmp <1 -#if LJ_52 - |9: // Check for __len. - | test byte TAB:RB->nomm, 1<vmeta_len // 'no __len' flag NOT set: check. 
-#endif - break; - - /* -- Binary ops -------------------------------------------------------- */ - - |.macro ins_arithpre, sseins, ssereg - | ins_ABC - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - ||switch (vk) { - ||case 0: - | checknumtp [BASE+RB*8], ->vmeta_arith_vn - | .if DUALNUM - | checknumtp [KBASE+RC*8], ->vmeta_arith_vn - | .endif - | movsd xmm0, qword [BASE+RB*8] - | sseins ssereg, qword [KBASE+RC*8] - || break; - ||case 1: - | checknumtp [BASE+RB*8], ->vmeta_arith_nv - | .if DUALNUM - | checknumtp [KBASE+RC*8], ->vmeta_arith_nv - | .endif - | movsd xmm0, qword [KBASE+RC*8] - | sseins ssereg, qword [BASE+RB*8] - || break; - ||default: - | checknumtp [BASE+RB*8], ->vmeta_arith_vv - | checknumtp [BASE+RC*8], ->vmeta_arith_vv - | movsd xmm0, qword [BASE+RB*8] - | sseins ssereg, qword [BASE+RC*8] - || break; - ||} - |.endmacro - | - |.macro ins_arithdn, intins - | ins_ABC - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - ||switch (vk) { - ||case 0: - | mov RB, [BASE+RB*8] - | mov RC, [KBASE+RC*8] - | checkint RB, ->vmeta_arith_vno - | checkint RC, ->vmeta_arith_vno - | intins RBd, RCd; jo ->vmeta_arith_vno - || break; - ||case 1: - | mov RB, [BASE+RB*8] - | mov RC, [KBASE+RC*8] - | checkint RB, ->vmeta_arith_nvo - | checkint RC, ->vmeta_arith_nvo - | intins RCd, RBd; jo ->vmeta_arith_nvo - || break; - ||default: - | mov RB, [BASE+RB*8] - | mov RC, [BASE+RC*8] - | checkint RB, ->vmeta_arith_vvo - | checkint RC, ->vmeta_arith_vvo - | intins RBd, RCd; jo ->vmeta_arith_vvo - || break; - ||} - ||if (vk == 1) { - | setint RC - | mov [BASE+RA*8], RC - ||} else { - | setint RB - | mov [BASE+RA*8], RB - ||} - | ins_next - |.endmacro - | - |.macro ins_arithpost - | movsd qword [BASE+RA*8], xmm0 - |.endmacro - | - |.macro ins_arith, sseins - | ins_arithpre sseins, xmm0 - | ins_arithpost - | ins_next - |.endmacro - | - |.macro ins_arith, intins, sseins - |.if DUALNUM - | ins_arithdn intins - |.else - | ins_arith, sseins - |.endif - |.endmacro - - | // RA = dst, RB = src1 or num const, RC = src2 or num const - case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: - | ins_arith add, addsd - break; - case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: - | ins_arith sub, subsd - break; - case BC_MULVN: case BC_MULNV: case BC_MULVV: - | ins_arith imul, mulsd - break; - case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: - | ins_arith divsd - break; - case BC_MODVN: - | ins_arithpre movsd, xmm1 - |->BC_MODVN_Z: - | call ->vm_mod - | ins_arithpost - | ins_next - break; - case BC_MODNV: case BC_MODVV: - | ins_arithpre movsd, xmm1 - | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. - break; - case BC_POW: - | ins_arithpre movsd, xmm1 - | mov RB, BASE - | call extern pow - | movzx RAd, PC_RA - | mov BASE, RB - | ins_arithpost - | ins_next - break; - - case BC_CAT: - | ins_ABC // RA = dst, RB = src_start, RC = src_end - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE - | lea CARG2, [BASE+RC*8] - | mov CARG3d, RCd - | sub CARG3d, RBd - |->BC_CAT_Z: - | mov L:RB, L:CARG1 - | mov SAVE_PC, PC - | call extern lj_meta_cat // (lua_State *L, TValue *top, int left) - | // NULL (finished) or TValue * (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jnz ->vmeta_binop - | movzx RBd, PC_RB // Copy result to Stk[RA] from Stk[RB]. 
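/* Illustrative sketch, not from the LuaJIT sources: BC_MODVN, BC_MODNV and
** BC_MODVV above all funnel into ->vm_mod, which computes the Lua
** definition of the modulo operator on doubles. The helper name is mine.
*/
#include <math.h>
static double lua_mod_sketch(double x, double y)
{
  return x - floor(x / y) * y;   /* remainder takes the sign of y */
}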
- | movzx RAd, PC_RA - | mov RC, [BASE+RB*8] - | mov [BASE+RA*8], RC - | ins_next - break; - - /* -- Constant ops ------------------------------------------------------ */ - - case BC_KSTR: - | ins_AND // RA = dst, RD = str const (~) - | mov RD, [KBASE+RD*8] - | settp RD, LJ_TSTR - | mov [BASE+RA*8], RD - | ins_next - break; - case BC_KCDATA: - |.if FFI - | ins_AND // RA = dst, RD = cdata const (~) - | mov RD, [KBASE+RD*8] - | settp RD, LJ_TCDATA - | mov [BASE+RA*8], RD - | ins_next - |.endif - break; - case BC_KSHORT: - | ins_AD // RA = dst, RD = signed int16 literal - |.if DUALNUM - | movsx RDd, RDW - | setint RD - | mov [BASE+RA*8], RD - |.else - | movsx RDd, RDW // Sign-extend literal. - | cvtsi2sd xmm0, RDd - | movsd qword [BASE+RA*8], xmm0 - |.endif - | ins_next - break; - case BC_KNUM: - | ins_AD // RA = dst, RD = num const - | movsd xmm0, qword [KBASE+RD*8] - | movsd qword [BASE+RA*8], xmm0 - | ins_next - break; - case BC_KPRI: - | ins_AD // RA = dst, RD = primitive type (~) - | shl RD, 47 - | not RD - | mov [BASE+RA*8], RD - | ins_next - break; - case BC_KNIL: - | ins_AD // RA = dst_start, RD = dst_end - | lea RA, [BASE+RA*8+8] - | lea RD, [BASE+RD*8] - | mov RB, LJ_TNIL - | mov [RA-8], RB // Sets minimum 2 slots. - |1: - | mov [RA], RB - | add RA, 8 - | cmp RA, RD - | jbe <1 - | ins_next - break; - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - | ins_AD // RA = dst, RD = upvalue # - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | mov UPVAL:RB, [LFUNC:RB+RD*8+offsetof(GCfuncL, uvptr)] - | mov RB, UPVAL:RB->v - | mov RD, [RB] - | mov [BASE+RA*8], RD - | ins_next - break; - case BC_USETV: -#define TV2MARKOFS \ - ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) - | ins_AD // RA = upvalue #, RD = src - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] - | cmp byte UPVAL:RB->closed, 0 - | mov RB, UPVAL:RB->v - | mov RA, [BASE+RD*8] - | mov [RB], RA - | jz >1 - | // Check barrier for closed upvalue. - | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv) - | jnz >2 - |1: - | ins_next - | - |2: // Upvalue is black. Check if new value is collectable and white. - | mov RD, RA - | sar RD, 47 - | sub RDd, LJ_TISGCV - | cmp RDd, LJ_TNUMX - LJ_TISGCV // tvisgcv(v) - | jbe <1 - | cleartp GCOBJ:RA - | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v) - | jz <1 - | // Crossed a write barrier. Move the barrier forward. - |.if not X64WIN - | mov CARG2, RB - | mov RB, BASE // Save BASE. - |.else - | xchg CARG2, RB // Save BASE (CARG2 == BASE). - |.endif - | lea GL:CARG1, [DISPATCH+GG_DISP2G] - | call extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | mov BASE, RB // Restore BASE. - | jmp <1 - break; -#undef TV2MARKOFS - case BC_USETS: - | ins_AND // RA = upvalue #, RD = str const (~) - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] - | mov STR:RA, [KBASE+RD*8] - | mov RD, UPVAL:RB->v - | settp STR:ITYPE, STR:RA, LJ_TSTR - | mov [RD], STR:ITYPE - | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv) - | jnz >2 - |1: - | ins_next - | - |2: // Check if string is white and ensure upvalue is closed. - | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str) - | jz <1 - | cmp byte UPVAL:RB->closed, 0 - | jz <1 - | // Crossed a write barrier. Move the barrier forward. - | mov RB, BASE // Save BASE (CARG2 == BASE). 
- | mov CARG2, RD - | lea GL:CARG1, [DISPATCH+GG_DISP2G] - | call extern lj_gc_barrieruv // (global_State *g, TValue *tv) - | mov BASE, RB // Restore BASE. - | jmp <1 - break; - case BC_USETN: - | ins_AD // RA = upvalue #, RD = num const - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | movsd xmm0, qword [KBASE+RD*8] - | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] - | mov RA, UPVAL:RB->v - | movsd qword [RA], xmm0 - | ins_next - break; - case BC_USETP: - | ins_AD // RA = upvalue #, RD = primitive type (~) - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] - | shl RD, 47 - | not RD - | mov RA, UPVAL:RB->v - | mov [RA], RD - | ins_next - break; - case BC_UCLO: - | ins_AD // RA = level, RD = target - | branchPC RD // Do this first to free RD. - | mov L:RB, SAVE_L - | cmp dword L:RB->openupval, 0 - | je >1 - | mov L:RB->base, BASE - | lea CARG2, [BASE+RA*8] // Caveat: CARG2 == BASE - | mov L:CARG1, L:RB // Caveat: CARG1 == RA - | call extern lj_func_closeuv // (lua_State *L, TValue *level) - | mov BASE, L:RB->base - |1: - | ins_next - break; - - case BC_FNEW: - | ins_AND // RA = dst, RD = proto const (~) (holding function prototype) - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE. - | mov CARG3, [BASE-16] - | cleartp CARG3 - | mov CARG2, [KBASE+RD*8] // Fetch GCproto *. - | mov CARG1, L:RB - | mov SAVE_PC, PC - | // (lua_State *L, GCproto *pt, GCfuncL *parent) - | call extern lj_func_newL_gc - | // GCfuncL * returned in eax (RC). - | mov BASE, L:RB->base - | movzx RAd, PC_RA - | settp LFUNC:RC, LJ_TFUNC - | mov [BASE+RA*8], LFUNC:RC - | ins_next - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_TNEW: - | ins_AD // RA = dst, RD = hbits|asize - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] - | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] - | mov SAVE_PC, PC - | jae >5 - |1: - | mov CARG3d, RDd - | and RDd, 0x7ff - | shr CARG3d, 11 - | cmp RDd, 0x7ff - | je >3 - |2: - | mov L:CARG1, L:RB - | mov CARG2d, RDd - | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) - | // Table * returned in eax (RC). - | mov BASE, L:RB->base - | movzx RAd, PC_RA - | settp TAB:RC, LJ_TTAB - | mov [BASE+RA*8], TAB:RC - | ins_next - |3: // Turn 0x7ff into 0x801. - | mov RDd, 0x801 - | jmp <2 - |5: - | mov L:CARG1, L:RB - | call extern lj_gc_step_fixtop // (lua_State *L) - | movzx RDd, PC_RD - | jmp <1 - break; - case BC_TDUP: - | ins_AND // RA = dst, RD = table const (~) (holding template table) - | mov L:RB, SAVE_L - | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] - | mov SAVE_PC, PC - | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] - | mov L:RB->base, BASE - | jae >3 - |2: - | mov TAB:CARG2, [KBASE+RD*8] // Caveat: CARG2 == BASE - | mov L:CARG1, L:RB // Caveat: CARG1 == RA - | call extern lj_tab_dup // (lua_State *L, Table *kt) - | // Table * returned in eax (RC). - | mov BASE, L:RB->base - | movzx RAd, PC_RA - | settp TAB:RC, LJ_TTAB - | mov [BASE+RA*8], TAB:RC - | ins_next - |3: - | mov L:CARG1, L:RB - | call extern lj_gc_step_fixtop // (lua_State *L) - | movzx RDd, PC_RD // Need to reload RD. 
- | not RD - | jmp <2 - break; - - case BC_GGET: - | ins_AND // RA = dst, RD = str const (~) - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | mov TAB:RB, LFUNC:RB->env - | mov STR:RC, [KBASE+RD*8] - | jmp ->BC_TGETS_Z - break; - case BC_GSET: - | ins_AND // RA = src, RD = str const (~) - | mov LFUNC:RB, [BASE-16] - | cleartp LFUNC:RB - | mov TAB:RB, LFUNC:RB->env - | mov STR:RC, [KBASE+RD*8] - | jmp ->BC_TSETS_Z - break; - - case BC_TGETV: - | ins_ABC // RA = dst, RB = table, RC = key - | mov TAB:RB, [BASE+RB*8] - | mov RC, [BASE+RC*8] - | checktab TAB:RB, ->vmeta_tgetv - | - | // Integer key? - |.if DUALNUM - | checkint RC, >5 - |.else - | // Convert number to int and back and compare. - | checknum RC, >5 - | movd xmm0, RC - | cvttsd2si RCd, xmm0 - | cvtsi2sd xmm1, RCd - | ucomisd xmm0, xmm1 - | jne ->vmeta_tgetv // Generic numeric key? Use fallback. - |.endif - | cmp RCd, TAB:RB->asize // Takes care of unordered, too. - | jae ->vmeta_tgetv // Not in array part? Use fallback. - | shl RCd, 3 - | add RC, TAB:RB->array - | // Get array slot. - | mov ITYPE, [RC] - | cmp ITYPE, LJ_TNIL // Avoid overwriting RB in fastpath. - | je >2 - |1: - | mov [BASE+RA*8], ITYPE - | ins_next - | - |2: // Check for __index if table value is nil. - | mov TAB:TMPR, TAB:RB->metatable - | test TAB:TMPR, TAB:TMPR - | jz <1 - | test byte TAB:TMPR->nomm, 1<vmeta_tgetv // 'no __index' flag NOT set: check. - | jmp <1 - | - |5: // String key? - | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tgetv - | cleartp STR:RC - | jmp ->BC_TGETS_Z - break; - case BC_TGETS: - | ins_ABC // RA = dst, RB = table, RC = str const (~) - | mov TAB:RB, [BASE+RB*8] - | not RC - | mov STR:RC, [KBASE+RC*8] - | checktab TAB:RB, ->vmeta_tgets - |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr * - | mov TMPRd, TAB:RB->hmask - | and TMPRd, STR:RC->hash - | imul TMPRd, #NODE - | add NODE:TMPR, TAB:RB->node - | settp ITYPE, STR:RC, LJ_TSTR - |1: - | cmp NODE:TMPR->key, ITYPE - | jne >4 - | // Get node value. - | mov ITYPE, NODE:TMPR->val - | cmp ITYPE, LJ_TNIL - | je >5 // Key found, but nil value? - |2: - | mov [BASE+RA*8], ITYPE - | ins_next - | - |4: // Follow hash chain. - | mov NODE:TMPR, NODE:TMPR->next - | test NODE:TMPR, NODE:TMPR - | jnz <1 - | // End of hash chain: key not found, nil result. - | mov ITYPE, LJ_TNIL - | - |5: // Check for __index if table value is nil. - | mov TAB:TMPR, TAB:RB->metatable - | test TAB:TMPR, TAB:TMPR - | jz <2 // No metatable: done. - | test byte TAB:TMPR->nomm, 1<vmeta_tgets // Caveat: preserve STR:RC. - break; - case BC_TGETB: - | ins_ABC // RA = dst, RB = table, RC = byte literal - | mov TAB:RB, [BASE+RB*8] - | checktab TAB:RB, ->vmeta_tgetb - | cmp RCd, TAB:RB->asize - | jae ->vmeta_tgetb - | shl RCd, 3 - | add RC, TAB:RB->array - | // Get array slot. - | mov ITYPE, [RC] - | cmp ITYPE, LJ_TNIL - | je >2 - |1: - | mov [BASE+RA*8], ITYPE - | ins_next - | - |2: // Check for __index if table value is nil. - | mov TAB:TMPR, TAB:RB->metatable - | test TAB:TMPR, TAB:TMPR - | jz <1 - | test byte TAB:TMPR->nomm, 1<vmeta_tgetb // 'no __index' flag NOT set: check. - | jmp <1 - break; - case BC_TGETR: - | ins_ABC // RA = dst, RB = table, RC = key - | mov TAB:RB, [BASE+RB*8] - | cleartp TAB:RB - |.if DUALNUM - | mov RCd, dword [BASE+RC*8] - |.else - | cvttsd2si RCd, qword [BASE+RC*8] - |.endif - | cmp RCd, TAB:RB->asize - | jae ->vmeta_tgetr // Not in array part? Use fallback. - | shl RCd, 3 - | add RC, TAB:RB->array - | // Get array slot. 
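/* Illustrative sketch, not from the LuaJIT sources: the string-keyed
** lookup in ->BC_TGETS_Z above masks the interned string's hash with the
** table's hmask and walks the node chain. The types below are simplified
** stand-ins for GCtab/GCstr/Node, not the real lj_obj.h layout.
*/
typedef struct SketchNode {
  const void *key;              /* stands in for the tagged key TValue */
  const void *val;              /* stands in for the value TValue */
  struct SketchNode *next;      /* hash chain */
} SketchNode;
typedef struct SketchTab {
  SketchNode *node;             /* hash part */
  unsigned int hmask;           /* number of hash slots - 1 */
} SketchTab;

static const void *tgets_sketch(const SketchTab *t, const void *str,
                                unsigned int strhash)
{
  SketchNode *n = &t->node[strhash & t->hmask];
  do {
    if (n->key == str)
      return n->val;            /* may still be nil: then check __index */
  } while ((n = n->next) != NULL);
  return NULL;                  /* end of chain: key not found */
}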
- |->BC_TGETR_Z: - | mov ITYPE, [RC] - |->BC_TGETR2_Z: - | mov [BASE+RA*8], ITYPE - | ins_next - break; - - case BC_TSETV: - | ins_ABC // RA = src, RB = table, RC = key - | mov TAB:RB, [BASE+RB*8] - | mov RC, [BASE+RC*8] - | checktab TAB:RB, ->vmeta_tsetv - | - | // Integer key? - |.if DUALNUM - | checkint RC, >5 - |.else - | // Convert number to int and back and compare. - | checknum RC, >5 - | movd xmm0, RC - | cvttsd2si RCd, xmm0 - | cvtsi2sd xmm1, RCd - | ucomisd xmm0, xmm1 - | jne ->vmeta_tsetv // Generic numeric key? Use fallback. - |.endif - | cmp RCd, TAB:RB->asize // Takes care of unordered, too. - | jae ->vmeta_tsetv - | shl RCd, 3 - | add RC, TAB:RB->array - | cmp aword [RC], LJ_TNIL - | je >3 // Previous value is nil? - |1: - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: // Set array slot. - | mov RB, [BASE+RA*8] - | mov [RC], RB - | ins_next - | - |3: // Check for __newindex if previous value is nil. - | mov TAB:TMPR, TAB:RB->metatable - | test TAB:TMPR, TAB:TMPR - | jz <1 - | test byte TAB:TMPR->nomm, 1<vmeta_tsetv // 'no __newindex' flag NOT set: check. - | jmp <1 - | - |5: // String key? - | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tsetv - | cleartp STR:RC - | jmp ->BC_TSETS_Z - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, TMPR - | jmp <2 - break; - case BC_TSETS: - | ins_ABC // RA = src, RB = table, RC = str const (~) - | mov TAB:RB, [BASE+RB*8] - | not RC - | mov STR:RC, [KBASE+RC*8] - | checktab TAB:RB, ->vmeta_tsets - |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr * - | mov TMPRd, TAB:RB->hmask - | and TMPRd, STR:RC->hash - | imul TMPRd, #NODE - | mov byte TAB:RB->nomm, 0 // Clear metamethod cache. - | add NODE:TMPR, TAB:RB->node - | settp ITYPE, STR:RC, LJ_TSTR - |1: - | cmp NODE:TMPR->key, ITYPE - | jne >5 - | // Ok, key found. Assumes: offsetof(Node, val) == 0 - | cmp aword [TMPR], LJ_TNIL - | je >4 // Previous value is nil? - |2: - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |3: // Set node value. - | mov ITYPE, [BASE+RA*8] - | mov [TMPR], ITYPE - | ins_next - | - |4: // Check for __newindex if previous value is nil. - | mov TAB:ITYPE, TAB:RB->metatable - | test TAB:ITYPE, TAB:ITYPE - | jz <2 - | test byte TAB:ITYPE->nomm, 1<vmeta_tsets // 'no __newindex' flag NOT set: check. - | jmp <2 - | - |5: // Follow hash chain. - | mov NODE:TMPR, NODE:TMPR->next - | test NODE:TMPR, NODE:TMPR - | jnz <1 - | // End of hash chain: key not found, add a new one. - | - | // But check for __newindex first. - | mov TAB:TMPR, TAB:RB->metatable - | test TAB:TMPR, TAB:TMPR - | jz >6 // No metatable: continue. - | test byte TAB:TMPR->nomm, 1<vmeta_tsets // 'no __newindex' flag NOT set: check. - |6: - | mov TMP1, ITYPE - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE - | lea CARG3, TMP1 - | mov CARG2, TAB:RB - | mov SAVE_PC, PC - | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) - | // Handles write barrier for the new key. TValue * returned in eax (RC). - | mov L:CARG1, SAVE_L - | mov BASE, L:CARG1->base - | mov TMPR, rax - | movzx RAd, PC_RA - | jmp <2 // Must check write barrier for value. - | - |7: // Possible table write barrier for the value. Skip valiswhite check. 
- | barrierback TAB:RB, ITYPE - | jmp <3 - break; - case BC_TSETB: - | ins_ABC // RA = src, RB = table, RC = byte literal - | mov TAB:RB, [BASE+RB*8] - | checktab TAB:RB, ->vmeta_tsetb - | cmp RCd, TAB:RB->asize - | jae ->vmeta_tsetb - | shl RCd, 3 - | add RC, TAB:RB->array - | cmp aword [RC], LJ_TNIL - | je >3 // Previous value is nil? - |1: - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: // Set array slot. - | mov ITYPE, [BASE+RA*8] - | mov [RC], ITYPE - | ins_next - | - |3: // Check for __newindex if previous value is nil. - | mov TAB:TMPR, TAB:RB->metatable - | test TAB:TMPR, TAB:TMPR - | jz <1 - | test byte TAB:TMPR->nomm, 1<vmeta_tsetb // 'no __newindex' flag NOT set: check. - | jmp <1 - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, TMPR - | jmp <2 - break; - case BC_TSETR: - | ins_ABC // RA = src, RB = table, RC = key - | mov TAB:RB, [BASE+RB*8] - | cleartp TAB:RB - |.if DUALNUM - | mov RC, [BASE+RC*8] - |.else - | cvttsd2si RCd, qword [BASE+RC*8] - |.endif - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: - | cmp RCd, TAB:RB->asize - | jae ->vmeta_tsetr - | shl RCd, 3 - | add RC, TAB:RB->array - | // Set array slot. - |->BC_TSETR_Z: - | mov ITYPE, [BASE+RA*8] - | mov [RC], ITYPE - | ins_next - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, TMPR - | jmp <2 - break; - - case BC_TSETM: - | ins_AD // RA = base (table at base-1), RD = num const (start index) - |1: - | mov TMPRd, dword [KBASE+RD*8] // Integer constant is in lo-word. - | lea RA, [BASE+RA*8] - | mov TAB:RB, [RA-8] // Guaranteed to be a table. - | cleartp TAB:RB - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: - | mov RDd, MULTRES - | sub RDd, 1 - | jz >4 // Nothing to copy? - | add RDd, TMPRd // Compute needed size. - | cmp RDd, TAB:RB->asize - | ja >5 // Doesn't fit into array part? - | sub RDd, TMPRd - | shl TMPRd, 3 - | add TMPR, TAB:RB->array - |3: // Copy result slots to table. - | mov RB, [RA] - | add RA, 8 - | mov [TMPR], RB - | add TMPR, 8 - | sub RDd, 1 - | jnz <3 - |4: - | ins_next - | - |5: // Need to resize array part. - | mov L:CARG1, SAVE_L - | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE. - | mov CARG2, TAB:RB - | mov CARG3d, RDd - | mov L:RB, L:CARG1 - | mov SAVE_PC, PC - | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) - | mov BASE, L:RB->base - | movzx RAd, PC_RA // Restore RA. - | movzx RDd, PC_RD // Restore RD. - | jmp <1 // Retry. - | - |7: // Possible table write barrier for any value. Skip valiswhite check. - | barrierback TAB:RB, RD - | jmp <2 - break; - - /* -- Calls and vararg handling ----------------------------------------- */ - - case BC_CALL: case BC_CALLM: - | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs - if (op == BC_CALLM) { - | add NARGS:RDd, MULTRES - } - | mov LFUNC:RB, [BASE+RA*8] - | checkfunc LFUNC:RB, ->vmeta_call_ra - | lea BASE, [BASE+RA*8+16] - | ins_call - break; - - case BC_CALLMT: - | ins_AD // RA = base, RD = extra_nargs - | add NARGS:RDd, MULTRES - | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op. - break; - case BC_CALLT: - | ins_AD // RA = base, RD = nargs+1 - | lea RA, [BASE+RA*8+16] - | mov KBASE, BASE // Use KBASE for move + vmeta_call hint. 
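/* Illustrative sketch, not from the LuaJIT sources: the barrierback used
** by the BC_TSET* fast paths above implements the backward write barrier
** of the incremental GC. Storing into a black table re-grays the table and
** queues it on the grayagain list so the collector revisits it. The types
** and names below are simplified stand-ins for GCtab/global_State.
*/
typedef struct SketchGCtab {
  unsigned char marked;             /* GC color bits */
  struct SketchGCtab *gclist;       /* gray-list linkage */
} SketchGCtab;

static void barrierback_sketch(SketchGCtab **grayagain, SketchGCtab *t,
                               unsigned char black_bit)
{
  t->marked &= (unsigned char)~black_bit;   /* black2gray(t) */
  t->gclist = *grayagain;                   /* push onto g->gc.grayagain */
  *grayagain = t;
}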
- | mov LFUNC:RB, [RA-16] - | checktp_nc LFUNC:RB, LJ_TFUNC, ->vmeta_call - |->BC_CALLT_Z: - | mov PC, [BASE-8] - | test PCd, FRAME_TYPE - | jnz >7 - |1: - | mov [BASE-16], LFUNC:RB // Copy func+tag down, reloaded below. - | mov MULTRES, NARGS:RDd - | sub NARGS:RDd, 1 - | jz >3 - |2: // Move args down. - | mov RB, [RA] - | add RA, 8 - | mov [KBASE], RB - | add KBASE, 8 - | sub NARGS:RDd, 1 - | jnz <2 - | - | mov LFUNC:RB, [BASE-16] - |3: - | cleartp LFUNC:RB - | mov NARGS:RDd, MULTRES - | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function? - | ja >5 - |4: - | ins_callt - | - |5: // Tailcall to a fast function. - | test PCd, FRAME_TYPE // Lua frame below? - | jnz <4 - | movzx RAd, PC_RA - | neg RA - | mov LFUNC:KBASE, [BASE+RA*8-32] // Need to prepare KBASE. - | cleartp LFUNC:KBASE - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | jmp <4 - | - |7: // Tailcall from a vararg function. - | sub PC, FRAME_VARG - | test PCd, FRAME_TYPEP - | jnz >8 // Vararg frame below? - | sub BASE, PC // Need to relocate BASE/KBASE down. - | mov KBASE, BASE - | mov PC, [BASE-8] - | jmp <1 - |8: - | add PCd, FRAME_VARG - | jmp <1 - break; - - case BC_ITERC: - | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1) - | lea RA, [BASE+RA*8+16] // fb = base+2 - | mov RB, [RA-32] // Copy state. fb[0] = fb[-4]. - | mov RC, [RA-24] // Copy control var. fb[1] = fb[-3]. - | mov [RA], RB - | mov [RA+8], RC - | mov LFUNC:RB, [RA-40] // Copy callable. fb[-1] = fb[-5] - | mov [RA-16], LFUNC:RB - | mov NARGS:RDd, 2+1 // Handle like a regular 2-arg call. - | checkfunc LFUNC:RB, ->vmeta_call - | mov BASE, RA - | ins_call - break; - - case BC_ITERN: - | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) - |.if JIT - | // NYI: add hotloop, record BC_ITERN. - |.endif - | mov TAB:RB, [BASE+RA*8-16] - | cleartp TAB:RB - | mov RCd, [BASE+RA*8-8] // Get index from control var. - | mov TMPRd, TAB:RB->asize - | add PC, 4 - | mov ITYPE, TAB:RB->array - |1: // Traverse array part. - | cmp RCd, TMPRd; jae >5 // Index points after array part? - | cmp aword [ITYPE+RC*8], LJ_TNIL; je >4 - |.if not DUALNUM - | cvtsi2sd xmm0, RCd - |.endif - | // Copy array slot to returned value. - | mov RB, [ITYPE+RC*8] - | mov [BASE+RA*8+8], RB - | // Return array index as a numeric key. - |.if DUALNUM - | setint ITYPE, RC - | mov [BASE+RA*8], ITYPE - |.else - | movsd qword [BASE+RA*8], xmm0 - |.endif - | add RCd, 1 - | mov [BASE+RA*8-8], RCd // Update control var. - |2: - | movzx RDd, PC_RD // Get target from ITERL. - | branchPC RD - |3: - | ins_next - | - |4: // Skip holes in array part. - | add RCd, 1 - | jmp <1 - | - |5: // Traverse hash part. - | sub RCd, TMPRd - |6: - | cmp RCd, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1. - | imul ITYPEd, RCd, #NODE - | add NODE:ITYPE, TAB:RB->node - | cmp aword NODE:ITYPE->val, LJ_TNIL; je >7 - | lea TMPRd, [RCd+TMPRd+1] - | // Copy key and value from hash slot. - | mov RB, NODE:ITYPE->key - | mov RC, NODE:ITYPE->val - | mov [BASE+RA*8], RB - | mov [BASE+RA*8+8], RC - | mov [BASE+RA*8-8], TMPRd - | jmp <2 - | - |7: // Skip holes in hash part. - | add RCd, 1 - | jmp <6 - break; - - case BC_ISNEXT: - | ins_AD // RA = base, RD = target (points to ITERN) - | mov CFUNC:RB, [BASE+RA*8-24] - | checkfunc CFUNC:RB, >5 - | checktptp [BASE+RA*8-16], LJ_TTAB, >5 - | cmp aword [BASE+RA*8-8], LJ_TNIL; jne >5 - | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5 - | branchPC RD - | mov64 TMPR, U64x(fffe7fff, 00000000) - | mov [BASE+RA*8-8], TMPR // Initialize control var. 
- |1: - | ins_next - |5: // Despecialize bytecode if any of the checks fail. - | mov PC_OP, BC_JMP - | branchPC RD - | mov byte [PC], BC_ITERC - | jmp <1 - break; - - case BC_VARG: - | ins_ABC // RA = base, RB = nresults+1, RC = numparams - | lea TMPR, [BASE+RC*8+(16+FRAME_VARG)] - | lea RA, [BASE+RA*8] - | sub TMPR, [BASE-8] - | // Note: TMPR may now be even _above_ BASE if nargs was < numparams. - | test RB, RB - | jz >5 // Copy all varargs? - | lea RB, [RA+RB*8-8] - | cmp TMPR, BASE // No vararg slots? - | jnb >2 - |1: // Copy vararg slots to destination slots. - | mov RC, [TMPR-16] - | add TMPR, 8 - | mov [RA], RC - | add RA, 8 - | cmp RA, RB // All destination slots filled? - | jnb >3 - | cmp TMPR, BASE // No more vararg slots? - | jb <1 - |2: // Fill up remainder with nil. - | mov aword [RA], LJ_TNIL - | add RA, 8 - | cmp RA, RB - | jb <2 - |3: - | ins_next - | - |5: // Copy all varargs. - | mov MULTRES, 1 // MULTRES = 0+1 - | mov RC, BASE - | sub RC, TMPR - | jbe <3 // No vararg slots? - | mov RBd, RCd - | shr RBd, 3 - | add RBd, 1 - | mov MULTRES, RBd // MULTRES = #varargs+1 - | mov L:RB, SAVE_L - | add RC, RA - | cmp RC, L:RB->maxstack - | ja >7 // Need to grow stack? - |6: // Copy all vararg slots. - | mov RC, [TMPR-16] - | add TMPR, 8 - | mov [RA], RC - | add RA, 8 - | cmp TMPR, BASE // No more vararg slots? - | jb <6 - | jmp <3 - | - |7: // Grow stack for varargs. - | mov L:RB->base, BASE - | mov L:RB->top, RA - | mov SAVE_PC, PC - | sub TMPR, BASE // Need delta, because BASE may change. - | mov TMP1hi, TMPRd - | mov CARG2d, MULTRES - | sub CARG2d, 1 - | mov CARG1, L:RB - | call extern lj_state_growstack // (lua_State *L, int n) - | mov BASE, L:RB->base - | movsxd TMPR, TMP1hi - | mov RA, L:RB->top - | add TMPR, BASE - | jmp <6 - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - | ins_AD // RA = results, RD = extra_nresults - | add RDd, MULTRES // MULTRES >=1, so RD >=1. - | // Fall through. Assumes BC_RET follows and ins_AD is a no-op. - break; - - case BC_RET: case BC_RET0: case BC_RET1: - | ins_AD // RA = results, RD = nresults+1 - if (op != BC_RET0) { - | shl RAd, 3 - } - |1: - | mov PC, [BASE-8] - | mov MULTRES, RDd // Save nresults+1. - | test PCd, FRAME_TYPE // Check frame type marker. - | jnz >7 // Not returning to a fixarg Lua func? - switch (op) { - case BC_RET: - |->BC_RET_Z: - | mov KBASE, BASE // Use KBASE for result move. - | sub RDd, 1 - | jz >3 - |2: // Move results down. - | mov RB, [KBASE+RA] - | mov [KBASE-16], RB - | add KBASE, 8 - | sub RDd, 1 - | jnz <2 - |3: - | mov RDd, MULTRES // Note: MULTRES may be >255. - | movzx RBd, PC_RB // So cannot compare with RDL! - |5: - | cmp RBd, RDd // More results expected? - | ja >6 - break; - case BC_RET1: - | mov RB, [BASE+RA] - | mov [BASE-16], RB - /* fallthrough */ - case BC_RET0: - |5: - | cmp PC_RB, RDL // More results expected? - | ja >6 - default: - break; - } - | movzx RAd, PC_RA - | neg RA - | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8 - | mov LFUNC:KBASE, [BASE-16] - | cleartp LFUNC:KBASE - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | ins_next - | - |6: // Fill up results with nil. - if (op == BC_RET) { - | mov aword [KBASE-16], LJ_TNIL // Note: relies on shifted base. - | add KBASE, 8 - } else { - | mov aword [BASE+RD*8-24], LJ_TNIL - } - | add RD, 1 - | jmp <5 - | - |7: // Non-standard return case. 
- | lea RB, [PC-FRAME_VARG] - | test RBd, FRAME_TYPEP - | jnz ->vm_return - | // Return from vararg function: relocate BASE down and RA up. - | sub BASE, RB - if (op != BC_RET0) { - | add RA, RB - } - | jmp <1 - break; - - /* -- Loops and branches ------------------------------------------------ */ - - |.define FOR_IDX, [RA] - |.define FOR_STOP, [RA+8] - |.define FOR_STEP, [RA+16] - |.define FOR_EXT, [RA+24] - - case BC_FORL: - |.if JIT - | hotloop RBd - |.endif - | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op. - break; - - case BC_JFORI: - case BC_JFORL: -#if !LJ_HASJIT - break; -#endif - case BC_FORI: - case BC_IFORL: - vk = (op == BC_IFORL || op == BC_JFORL); - | ins_AJ // RA = base, RD = target (after end of loop or start of loop) - | lea RA, [BASE+RA*8] - if (LJ_DUALNUM) { - | mov RB, FOR_IDX - | checkint RB, >9 - | mov TMPR, FOR_STOP - if (!vk) { - | checkint TMPR, ->vmeta_for - | mov ITYPE, FOR_STEP - | test ITYPEd, ITYPEd; js >5 - | sar ITYPE, 47; - | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for - } else { -#ifdef LUA_USE_ASSERT - | checkinttp FOR_STOP, ->assert_bad_for_arg_type - | checkinttp FOR_STEP, ->assert_bad_for_arg_type -#endif - | mov ITYPE, FOR_STEP - | test ITYPEd, ITYPEd; js >5 - | add RBd, ITYPEd; jo >1 - | setint RB - | mov FOR_IDX, RB - } - | cmp RBd, TMPRd - | mov FOR_EXT, RB - if (op == BC_FORI) { - | jle >7 - |1: - |6: - | branchPC RD - } else if (op == BC_JFORI) { - | branchPC RD - | movzx RDd, PC_RD - | jle =>BC_JLOOP - |1: - |6: - } else if (op == BC_IFORL) { - | jg >7 - |6: - | branchPC RD - |1: - } else { - | jle =>BC_JLOOP - |1: - |6: - } - |7: - | ins_next - | - |5: // Invert check for negative step. - if (!vk) { - | sar ITYPE, 47; - | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for - } else { - | add RBd, ITYPEd; jo <1 - | setint RB - | mov FOR_IDX, RB - } - | cmp RBd, TMPRd - | mov FOR_EXT, RB - if (op == BC_FORI) { - | jge <7 - } else if (op == BC_JFORI) { - | branchPC RD - | movzx RDd, PC_RD - | jge =>BC_JLOOP - } else if (op == BC_IFORL) { - | jl <7 - } else { - | jge =>BC_JLOOP - } - | jmp <6 - |9: // Fallback to FP variant. - if (!vk) { - | jae ->vmeta_for - } - } else if (!vk) { - | checknumtp FOR_IDX, ->vmeta_for - } - if (!vk) { - | checknumtp FOR_STOP, ->vmeta_for - } else { -#ifdef LUA_USE_ASSERT - | checknumtp FOR_STOP, ->assert_bad_for_arg_type - | checknumtp FOR_STEP, ->assert_bad_for_arg_type -#endif - } - | mov RB, FOR_STEP - if (!vk) { - | checknum RB, ->vmeta_for - } - | movsd xmm0, qword FOR_IDX - | movsd xmm1, qword FOR_STOP - if (vk) { - | addsd xmm0, qword FOR_STEP - | movsd qword FOR_IDX, xmm0 - | test RB, RB; js >3 - } else { - | jl >3 - } - | ucomisd xmm1, xmm0 - |1: - | movsd qword FOR_EXT, xmm0 - if (op == BC_FORI) { - |.if DUALNUM - | jnb <7 - |.else - | jnb >2 - | branchPC RD - |.endif - } else if (op == BC_JFORI) { - | branchPC RD - | movzx RDd, PC_RD - | jnb =>BC_JLOOP - } else if (op == BC_IFORL) { - |.if DUALNUM - | jb <7 - |.else - | jb >2 - | branchPC RD - |.endif - } else { - | jnb =>BC_JLOOP - } - |.if DUALNUM - | jmp <6 - |.else - |2: - | ins_next - |.endif - | - |3: // Invert comparison if step is negative. - | ucomisd xmm0, xmm1 - | jmp <1 - break; - - case BC_ITERL: - |.if JIT - | hotloop RBd - |.endif - | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op. - break; - - case BC_JITERL: -#if !LJ_HASJIT - break; -#endif - case BC_IITERL: - | ins_AJ // RA = base, RD = target - | lea RA, [BASE+RA*8] - | mov RB, [RA] - | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil. 
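/* Illustrative sketch, not from the LuaJIT sources: the BC_FORI/BC_IFORL
** code above inverts the stop comparison when the step is negative, which
** matches the numeric for-loop rule. The helper name is mine.
*/
static int for_continues_sketch(double idx, double stop, double step)
{
  return step >= 0 ? (idx <= stop) : (stop <= idx);
}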
- if (op == BC_JITERL) { - | mov [RA-8], RB - | jmp =>BC_JLOOP - } else { - | branchPC RD // Otherwise save control var + branch. - | mov [RA-8], RB - } - |1: - | ins_next - break; - - case BC_LOOP: - | ins_A // RA = base, RD = target (loop extent) - | // Note: RA/RD is only used by trace recorder to determine scope/extent - | // This opcode does NOT jump, it's only purpose is to detect a hot loop. - |.if JIT - | hotloop RBd - |.endif - | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op. - break; - - case BC_ILOOP: - | ins_A // RA = base, RD = target (loop extent) - | ins_next - break; - - case BC_JLOOP: - |.if JIT - | ins_AD // RA = base (ignored), RD = traceno - | mov RA, [DISPATCH+DISPATCH_J(trace)] - | mov TRACE:RD, [RA+RD*8] - | mov RD, TRACE:RD->mcode - | mov L:RB, SAVE_L - | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE - | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB - | // Save additional callee-save registers only used in compiled code. - |.if X64WIN - | mov CSAVE_4, r12 - | mov CSAVE_3, r13 - | mov CSAVE_2, r14 - | mov CSAVE_1, r15 - | mov RA, rsp - | sub rsp, 10*16+4*8 - | movdqa [RA-1*16], xmm6 - | movdqa [RA-2*16], xmm7 - | movdqa [RA-3*16], xmm8 - | movdqa [RA-4*16], xmm9 - | movdqa [RA-5*16], xmm10 - | movdqa [RA-6*16], xmm11 - | movdqa [RA-7*16], xmm12 - | movdqa [RA-8*16], xmm13 - | movdqa [RA-9*16], xmm14 - | movdqa [RA-10*16], xmm15 - |.else - | sub rsp, 16 - | mov [rsp+16], r12 - | mov [rsp+8], r13 - |.endif - | jmp RD - |.endif - break; - - case BC_JMP: - | ins_AJ // RA = unused, RD = target - | branchPC RD - | ins_next - break; - - /* -- Function headers -------------------------------------------------- */ - - /* - ** Reminder: A function may be called with func/args above L->maxstack, - ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, - ** too. This means all FUNC* ops (including fast functions) must check - ** for stack overflow _before_ adding more slots! - */ - - case BC_FUNCF: - |.if JIT - | hotcall RBd - |.endif - case BC_FUNCV: /* NYI: compiled vararg functions. */ - | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op. - break; - - case BC_JFUNCF: -#if !LJ_HASJIT - break; -#endif - case BC_IFUNCF: - | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 - | mov KBASE, [PC-4+PC2PROTO(k)] - | mov L:RB, SAVE_L - | lea RA, [BASE+RA*8] // Top of frame. - | cmp RA, L:RB->maxstack - | ja ->vm_growstack_f - | movzx RAd, byte [PC-4+PC2PROTO(numparams)] - | cmp NARGS:RDd, RAd // Check for missing parameters. - | jbe >3 - |2: - if (op == BC_JFUNCF) { - | movzx RDd, PC_RD - | jmp =>BC_JLOOP - } else { - | ins_next - } - | - |3: // Clear missing parameters. - | mov aword [BASE+NARGS:RD*8-8], LJ_TNIL - | add NARGS:RDd, 1 - | cmp NARGS:RDd, RAd - | jbe <3 - | jmp <2 - break; - - case BC_JFUNCV: -#if !LJ_HASJIT - break; -#endif - | int3 // NYI: compiled vararg functions - break; /* NYI: compiled vararg functions. */ - - case BC_IFUNCV: - | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 - | lea RBd, [NARGS:RD*8+FRAME_VARG+8] - | lea RD, [BASE+NARGS:RD*8+8] - | mov LFUNC:KBASE, [BASE-16] - | mov [RD-8], RB // Store delta + FRAME_VARG. - | mov [RD-16], LFUNC:KBASE // Store copy of LFUNC. - | mov L:RB, SAVE_L - | lea RA, [RD+RA*8] - | cmp RA, L:RB->maxstack - | ja ->vm_growstack_v // Need to grow stack. - | mov RA, BASE - | mov BASE, RD - | movzx RBd, byte [PC-4+PC2PROTO(numparams)] - | test RBd, RBd - | jz >2 - | add RA, 8 - |1: // Copy fixarg slots up to new frame. 
- | add RA, 8 - | cmp RA, BASE - | jnb >3 // Less args than parameters? - | mov KBASE, [RA-16] - | mov [RD], KBASE - | add RD, 8 - | mov aword [RA-16], LJ_TNIL // Clear old fixarg slot (help the GC). - | sub RBd, 1 - | jnz <1 - |2: - if (op == BC_JFUNCV) { - | movzx RDd, PC_RD - | jmp =>BC_JLOOP - } else { - | mov KBASE, [PC-4+PC2PROTO(k)] - | ins_next - } - | - |3: // Clear missing parameters. - | mov aword [RD], LJ_TNIL - | add RD, 8 - | sub RBd, 1 - | jnz <3 - | jmp <2 - break; - - case BC_FUNCC: - case BC_FUNCCW: - | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1 - | mov CFUNC:RB, [BASE-16] - | cleartp CFUNC:RB - | mov KBASE, CFUNC:RB->f - | mov L:RB, SAVE_L - | lea RD, [BASE+NARGS:RD*8-8] - | mov L:RB->base, BASE - | lea RA, [RD+8*LUA_MINSTACK] - | cmp RA, L:RB->maxstack - | mov L:RB->top, RD - if (op == BC_FUNCC) { - | mov CARG1, L:RB // Caveat: CARG1 may be RA. - } else { - | mov CARG2, KBASE - | mov CARG1, L:RB // Caveat: CARG1 may be RA. - } - | ja ->vm_growstack_c // Need to grow stack. - | set_vmstate C - if (op == BC_FUNCC) { - | call KBASE // (lua_State *L) - } else { - | // (lua_State *L, lua_CFunction f) - | call aword [DISPATCH+DISPATCH_GL(wrapf)] - } - | // nresults returned in eax (RD). - | mov BASE, L:RB->base - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | lea RA, [BASE+RD*8] - | neg RA - | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8 - | mov PC, [BASE-8] // Fetch PC of caller. - | jmp ->vm_returnc - break; - - /* ---------------------------------------------------------------------- */ - - default: - fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); - exit(2); - break; - } -} - -static int build_backend(BuildCtx *ctx) -{ - int op; - dasm_growpc(Dst, BC__MAX); - build_subroutines(ctx); - |.code_op - for (op = 0; op < BC__MAX; op++) - build_ins(ctx, (BCOp)op, op); - return BC__MAX; -} - -/* Emit pseudo frame-info for all assembler functions. 
*/ -static void emit_asm_debug(BuildCtx *ctx) -{ - int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); - switch (ctx->mode) { - case BUILD_elfasm: - fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); - fprintf(ctx->fp, - ".Lframe0:\n" - "\t.long .LECIE0-.LSCIE0\n" - ".LSCIE0:\n" - "\t.long 0xffffffff\n" - "\t.byte 0x1\n" - "\t.string \"\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -8\n" - "\t.byte 0x10\n" - "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n" - "\t.byte 0x80+0x10\n\t.uleb128 0x1\n" - "\t.align 8\n" - ".LECIE0:\n\n"); - fprintf(ctx->fp, - ".LSFDE0:\n" - "\t.long .LEFDE0-.LASFDE0\n" - ".LASFDE0:\n" - "\t.long .Lframe0\n" - "\t.quad .Lbegin\n" - "\t.quad %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ - "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ - "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ -#if LJ_NO_UNWIND - "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */ - "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */ -#endif - "\t.align 8\n" - ".LEFDE0:\n\n", fcofs, CFRAME_SIZE); -#if LJ_HASFFI - fprintf(ctx->fp, - ".LSFDE1:\n" - "\t.long .LEFDE1-.LASFDE1\n" - ".LASFDE1:\n" - "\t.long .Lframe0\n" - "\t.quad lj_vm_ffi_call\n" - "\t.quad %d\n" - "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ - "\t.align 8\n" - ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); -#endif -#if !LJ_NO_UNWIND -#if (defined(__sun__) && defined(__svr4__)) - fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n"); -#else - fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); -#endif - fprintf(ctx->fp, - ".Lframe1:\n" - "\t.long .LECIE1-.LSCIE1\n" - ".LSCIE1:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zPR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -8\n" - "\t.byte 0x10\n" - "\t.uleb128 6\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.long lj_err_unwind_dwarf-.\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n" - "\t.byte 0x80+0x10\n\t.uleb128 0x1\n" - "\t.align 8\n" - ".LECIE1:\n\n"); - fprintf(ctx->fp, - ".LSFDE2:\n" - "\t.long .LEFDE2-.LASFDE2\n" - ".LASFDE2:\n" - "\t.long .LASFDE2-.Lframe1\n" - "\t.long .Lbegin-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ - "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ - "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ - "\t.align 8\n" - ".LEFDE2:\n\n", fcofs, CFRAME_SIZE); -#if LJ_HASFFI - fprintf(ctx->fp, - ".Lframe2:\n" - "\t.long .LECIE2-.LSCIE2\n" - ".LSCIE2:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -8\n" - "\t.byte 0x10\n" - "\t.uleb128 1\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n" - "\t.byte 0x80+0x10\n\t.uleb128 0x1\n" - "\t.align 8\n" - ".LECIE2:\n\n"); - fprintf(ctx->fp, - ".LSFDE3:\n" - "\t.long .LEFDE3-.LASFDE3\n" - ".LASFDE3:\n" - "\t.long .LASFDE3-.Lframe2\n" - "\t.long lj_vm_ffi_call-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 
0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ - "\t.align 8\n" - ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); -#endif -#endif - break; -#if !LJ_NO_UNWIND - /* Mental note: never let Apple design an assembler. - ** Or a linker. Or a plastic case. But I digress. - */ - case BUILD_machasm: { -#if LJ_HASFFI - int fcsize = 0; -#endif - int i; - fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); - fprintf(ctx->fp, - "EH_frame1:\n" - "\t.set L$set$x,LECIEX-LSCIEX\n" - "\t.long L$set$x\n" - "LSCIEX:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.ascii \"zPR\\0\"\n" - "\t.byte 0x1\n" - "\t.byte 128-8\n" - "\t.byte 0x10\n" - "\t.byte 6\n" /* augmentation length */ - "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ - "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n" - "\t.byte 0x80+0x10\n\t.byte 0x1\n" - "\t.align 3\n" - "LECIEX:\n\n"); - for (i = 0; i < ctx->nsym; i++) { - const char *name = ctx->sym[i].name; - int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; - if (size == 0) continue; -#if LJ_HASFFI - if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; } -#endif - fprintf(ctx->fp, - "%s.eh:\n" - "LSFDE%d:\n" - "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" - "\t.long L$set$%d\n" - "LASFDE%d:\n" - "\t.long LASFDE%d-EH_frame1\n" - "\t.long %s-.\n" - "\t.long %d\n" - "\t.byte 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ - "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ - "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ - "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ - "\t.align 3\n" - "LEFDE%d:\n\n", - name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); - } -#if LJ_HASFFI - if (fcsize) { - fprintf(ctx->fp, - "EH_frame2:\n" - "\t.set L$set$y,LECIEY-LSCIEY\n" - "\t.long L$set$y\n" - "LSCIEY:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.ascii \"zR\\0\"\n" - "\t.byte 0x1\n" - "\t.byte 128-8\n" - "\t.byte 0x10\n" - "\t.byte 1\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n" - "\t.byte 0x80+0x10\n\t.byte 0x1\n" - "\t.align 3\n" - "LECIEY:\n\n"); - fprintf(ctx->fp, - "_lj_vm_ffi_call.eh:\n" - "LSFDEY:\n" - "\t.set L$set$yy,LEFDEY-LASFDEY\n" - "\t.long L$set$yy\n" - "LASFDEY:\n" - "\t.long LASFDEY-EH_frame2\n" - "\t.long _lj_vm_ffi_call-.\n" - "\t.long %d\n" - "\t.byte 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ - "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */ - "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ - "\t.align 3\n" - "LEFDEY:\n\n", fcsize); - } -#endif - fprintf(ctx->fp, ".subsections_via_symbols\n"); - } - break; -#endif - default: /* Difficult for other modes. */ - break; - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x86.dasc b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x86.dasc deleted file mode 100644 index f31e595b3f4..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/vm_x86.dasc +++ /dev/null @@ -1,5710 +0,0 @@ -|// Low-level VM code for x86 CPUs. -|// Bytecode interpreter, fast functions and helper functions. -|// Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h -| -|.if P64 -|.arch x64 -|.else -|.arch x86 -|.endif -|.section code_op, code_sub -| -|.actionlist build_actionlist -|.globals GLOB_ -|.globalnames globnames -|.externnames extnames -| -|//----------------------------------------------------------------------- -| -|.if P64 -|.define X64, 1 -|.if WIN -|.define X64WIN, 1 -|.endif -|.endif -| -|// Fixed register assignments for the interpreter. -|// This is very fragile and has many dependencies. Caveat emptor. -|.define BASE, edx // Not C callee-save, refetched anyway. -|.if not X64 -|.define KBASE, edi // Must be C callee-save. -|.define KBASEa, KBASE -|.define PC, esi // Must be C callee-save. -|.define PCa, PC -|.define DISPATCH, ebx // Must be C callee-save. -|.elif X64WIN -|.define KBASE, edi // Must be C callee-save. -|.define KBASEa, rdi -|.define PC, esi // Must be C callee-save. -|.define PCa, rsi -|.define DISPATCH, ebx // Must be C callee-save. -|.else -|.define KBASE, r15d // Must be C callee-save. -|.define KBASEa, r15 -|.define PC, ebx // Must be C callee-save. -|.define PCa, rbx -|.define DISPATCH, r14d // Must be C callee-save. -|.endif -| -|.define RA, ecx -|.define RAH, ch -|.define RAL, cl -|.define RB, ebp // Must be ebp (C callee-save). -|.define RC, eax // Must be eax. -|.define RCW, ax -|.define RCH, ah -|.define RCL, al -|.define OP, RB -|.define RD, RC -|.define RDW, RCW -|.define RDL, RCL -|.if X64 -|.define RAa, rcx -|.define RBa, rbp -|.define RCa, rax -|.define RDa, rax -|.else -|.define RAa, RA -|.define RBa, RB -|.define RCa, RC -|.define RDa, RD -|.endif -| -|.if not X64 -|.define FCARG1, ecx // x86 fastcall arguments. -|.define FCARG2, edx -|.elif X64WIN -|.define CARG1, rcx // x64/WIN64 C call arguments. -|.define CARG2, rdx -|.define CARG3, r8 -|.define CARG4, r9 -|.define CARG1d, ecx -|.define CARG2d, edx -|.define CARG3d, r8d -|.define CARG4d, r9d -|.define FCARG1, CARG1d // Upwards compatible to x86 fastcall. -|.define FCARG2, CARG2d -|.else -|.define CARG1, rdi // x64/POSIX C call arguments. -|.define CARG2, rsi -|.define CARG3, rdx -|.define CARG4, rcx -|.define CARG5, r8 -|.define CARG6, r9 -|.define CARG1d, edi -|.define CARG2d, esi -|.define CARG3d, edx -|.define CARG4d, ecx -|.define CARG5d, r8d -|.define CARG6d, r9d -|.define FCARG1, CARG1d // Simulate x86 fastcall. -|.define FCARG2, CARG2d -|.endif -| -|// Type definitions. Some of these are only used for documentation. -|.type L, lua_State -|.type GL, global_State -|.type TVALUE, TValue -|.type GCOBJ, GCobj -|.type STR, GCstr -|.type TAB, GCtab -|.type LFUNC, GCfuncL -|.type CFUNC, GCfuncC -|.type PROTO, GCproto -|.type UPVAL, GCupval -|.type NODE, Node -|.type NARGS, int -|.type TRACE, GCtrace -|.type SBUF, SBuf -| -|// Stack layout while in interpreter. Must match with lj_frame.h. -|//----------------------------------------------------------------------- -|.if not X64 // x86 stack layout. -| -|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--). -|.macro saveregs_ -| push edi; push esi; push ebx -| sub esp, CFRAME_SPACE -|.endmacro -|.macro saveregs -| push ebp; saveregs_ -|.endmacro -|.macro restoreregs -| add esp, CFRAME_SPACE -| pop ebx; pop esi; pop edi; pop ebp -|.endmacro -| -|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only. -|.define SAVE_NRES, aword [esp+aword*14] -|.define SAVE_CFRAME, aword [esp+aword*13] -|.define SAVE_L, aword [esp+aword*12] -|//----- 16 byte aligned, ^^^ arguments from C caller -|.define SAVE_RET, aword [esp+aword*11] //<-- esp entering interpreter. 
-|.define SAVE_R4, aword [esp+aword*10] -|.define SAVE_R3, aword [esp+aword*9] -|.define SAVE_R2, aword [esp+aword*8] -|//----- 16 byte aligned -|.define SAVE_R1, aword [esp+aword*7] //<-- esp after register saves. -|.define SAVE_PC, aword [esp+aword*6] -|.define TMP2, aword [esp+aword*5] -|.define TMP1, aword [esp+aword*4] -|//----- 16 byte aligned -|.define ARG4, aword [esp+aword*3] -|.define ARG3, aword [esp+aword*2] -|.define ARG2, aword [esp+aword*1] -|.define ARG1, aword [esp] //<-- esp while in interpreter. -|//----- 16 byte aligned, ^^^ arguments for C callee -| -|// FPARGx overlaps ARGx and ARG(x+1) on x86. -|.define FPARG3, qword [esp+qword*1] -|.define FPARG1, qword [esp] -|// TMPQ overlaps TMP1/TMP2. ARG5/MULTRES overlap TMP1/TMP2 (and TMPQ). -|.define TMPQ, qword [esp+aword*4] -|.define TMP3, ARG4 -|.define ARG5, TMP1 -|.define TMPa, TMP1 -|.define MULTRES, TMP2 -| -|// Arguments for vm_call and vm_pcall. -|.define INARG_BASE, SAVE_CFRAME // Overwritten by SAVE_CFRAME! -| -|// Arguments for vm_cpcall. -|.define INARG_CP_CALL, SAVE_ERRF -|.define INARG_CP_UD, SAVE_NRES -|.define INARG_CP_FUNC, SAVE_CFRAME -| -|//----------------------------------------------------------------------- -|.elif X64WIN // x64/Windows stack layout -| -|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). -|.macro saveregs_ -| push rdi; push rsi; push rbx -| sub rsp, CFRAME_SPACE -|.endmacro -|.macro saveregs -| push rbp; saveregs_ -|.endmacro -|.macro restoreregs -| add rsp, CFRAME_SPACE -| pop rbx; pop rsi; pop rdi; pop rbp -|.endmacro -| -|.define SAVE_CFRAME, aword [rsp+aword*13] -|.define SAVE_PC, dword [rsp+dword*25] -|.define SAVE_L, dword [rsp+dword*24] -|.define SAVE_ERRF, dword [rsp+dword*23] -|.define SAVE_NRES, dword [rsp+dword*22] -|.define TMP2, dword [rsp+dword*21] -|.define TMP1, dword [rsp+dword*20] -|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter -|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. -|.define SAVE_R4, aword [rsp+aword*8] -|.define SAVE_R3, aword [rsp+aword*7] -|.define SAVE_R2, aword [rsp+aword*6] -|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. -|.define ARG5, aword [rsp+aword*4] -|.define CSAVE_4, aword [rsp+aword*3] -|.define CSAVE_3, aword [rsp+aword*2] -|.define CSAVE_2, aword [rsp+aword*1] -|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter. -|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee -| -|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ). -|.define TMPQ, qword [rsp+aword*10] -|.define MULTRES, TMP2 -|.define TMPa, ARG5 -|.define ARG5d, dword [rsp+aword*4] -|.define TMP3, ARG5d -| -|//----------------------------------------------------------------------- -|.else // x64/POSIX stack layout -| -|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). -|.macro saveregs_ -| push rbx; push r15; push r14 -|.if NO_UNWIND -| push r13; push r12 -|.endif -| sub rsp, CFRAME_SPACE -|.endmacro -|.macro saveregs -| push rbp; saveregs_ -|.endmacro -|.macro restoreregs -| add rsp, CFRAME_SPACE -|.if NO_UNWIND -| pop r12; pop r13 -|.endif -| pop r14; pop r15; pop rbx; pop rbp -|.endmacro -| -|//----- 16 byte aligned, -|.if NO_UNWIND -|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter. 
-|.define SAVE_R4, aword [rsp+aword*10] -|.define SAVE_R3, aword [rsp+aword*9] -|.define SAVE_R2, aword [rsp+aword*8] -|.define SAVE_R1, aword [rsp+aword*7] -|.define SAVE_RU2, aword [rsp+aword*6] -|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves. -|.else -|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. -|.define SAVE_R4, aword [rsp+aword*8] -|.define SAVE_R3, aword [rsp+aword*7] -|.define SAVE_R2, aword [rsp+aword*6] -|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. -|.endif -|.define SAVE_CFRAME, aword [rsp+aword*4] -|.define SAVE_PC, dword [rsp+dword*7] -|.define SAVE_L, dword [rsp+dword*6] -|.define SAVE_ERRF, dword [rsp+dword*5] -|.define SAVE_NRES, dword [rsp+dword*4] -|.define TMPa, aword [rsp+aword*1] -|.define TMP2, dword [rsp+dword*1] -|.define TMP1, dword [rsp] //<-- rsp while in interpreter. -|//----- 16 byte aligned -| -|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ). -|.define TMPQ, qword [rsp] -|.define TMP3, dword [rsp+aword*1] -|.define MULTRES, TMP2 -| -|.endif -| -|//----------------------------------------------------------------------- -| -|// Instruction headers. -|.macro ins_A; .endmacro -|.macro ins_AD; .endmacro -|.macro ins_AJ; .endmacro -|.macro ins_ABC; movzx RB, RCH; movzx RC, RCL; .endmacro -|.macro ins_AB_; movzx RB, RCH; .endmacro -|.macro ins_A_C; movzx RC, RCL; .endmacro -|.macro ins_AND; not RDa; .endmacro -| -|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster). -|.macro ins_NEXT -| mov RC, [PC] -| movzx RA, RCH -| movzx OP, RCL -| add PC, 4 -| shr RC, 16 -|.if X64 -| jmp aword [DISPATCH+OP*8] -|.else -| jmp aword [DISPATCH+OP*4] -|.endif -|.endmacro -| -|// Instruction footer. -|.if 1 -| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. -| .define ins_next, ins_NEXT -| .define ins_next_, ins_NEXT -|.else -| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. -| // Affects only certain kinds of benchmarks (and only with -j off). -| // Around 10%-30% slower on Core2, a lot more slower on P4. -| .macro ins_next -| jmp ->ins_next -| .endmacro -| .macro ins_next_ -| ->ins_next: -| ins_NEXT -| .endmacro -|.endif -| -|// Call decode and dispatch. -|.macro ins_callt -| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-4] = PC -| mov PC, LFUNC:RB->pc -| mov RA, [PC] -| movzx OP, RAL -| movzx RA, RAH -| add PC, 4 -|.if X64 -| jmp aword [DISPATCH+OP*8] -|.else -| jmp aword [DISPATCH+OP*4] -|.endif -|.endmacro -| -|.macro ins_call -| // BASE = new base, RB = LFUNC, RD = nargs+1 -| mov [BASE-4], PC -| ins_callt -|.endmacro -| -|//----------------------------------------------------------------------- -| -|// Macros to test operand types. -|.macro checktp, reg, tp; cmp dword [BASE+reg*8+4], tp; .endmacro -|.macro checknum, reg, target; checktp reg, LJ_TISNUM; jae target; .endmacro -|.macro checkint, reg, target; checktp reg, LJ_TISNUM; jne target; .endmacro -|.macro checkstr, reg, target; checktp reg, LJ_TSTR; jne target; .endmacro -|.macro checktab, reg, target; checktp reg, LJ_TTAB; jne target; .endmacro -| -|// These operands must be used with movzx. -|.define PC_OP, byte [PC-4] -|.define PC_RA, byte [PC-3] -|.define PC_RB, byte [PC-1] -|.define PC_RC, byte [PC-2] -|.define PC_RD, word [PC-2] -| -|.macro branchPC, reg -| lea PC, [PC+reg*4-BCBIAS_J*4] -|.endmacro -| -|// Assumes DISPATCH is relative to GL. 
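
The chunk above is the interpreter's inner loop in miniature: ins_NEXT loads the 32-bit instruction at PC, splits it into the opcode (low byte), RA, and the 16-bit RD field (ins_ABC further splits RD into its B and C bytes), then jumps through the per-opcode dispatch table addressed by DISPATCH. A minimal C sketch of that shape, purely illustrative (a made-up two-opcode VM, not LuaJIT's actual tables or handlers):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical VM using the same instruction packing as above:
    ** bits 0-7 = opcode, 8-15 = A, 16-31 = D (C in 16-23, B in 24-31). */
    enum { OP_ADD, OP_HALT, OP__MAX };

    typedef struct VM { int32_t reg[256]; const uint32_t *pc; } VM;
    typedef void (*ins_handler)(VM *vm, uint32_t ins);

    static ins_handler dispatch[OP__MAX];     /* per-opcode dispatch table */

    static void ins_next(VM *vm)              /* fetch + decode + dispatch */
    {
      uint32_t ins = *vm->pc++;
      dispatch[ins & 0xff](vm, ins);
    }

    static void op_add(VM *vm, uint32_t ins)  /* ABC format: A = B + C */
    {
      uint32_t a = (ins >> 8) & 0xff, d = ins >> 16;
      uint32_t b = d >> 8, c = d & 0xff;      /* cf. ins_ABC above */
      vm->reg[a] = vm->reg[b] + vm->reg[c];
      ins_next(vm);                           /* replicated dispatch */
    }

    static void op_halt(VM *vm, uint32_t ins) { (void)vm; (void)ins; }

    int main(void)
    {
      const uint32_t code[] = {               /* ADD r0, r1, r2 ; HALT */
        OP_ADD | (0u << 8) | (2u << 16) | (1u << 24), OP_HALT
      };
      VM vm = {{0}, code};
      vm.reg[1] = 40; vm.reg[2] = 2;
      dispatch[OP_ADD] = op_add; dispatch[OP_HALT] = op_halt;
      ins_next(&vm);
      printf("%d\n", vm.reg[0]);              /* 42 */
      return 0;
    }

The "replicated dispatch" footer above trades I-cache footprint for branch prediction: every handler ends with its own copy of the decode, so each indirect jump is predicted per opcode instead of through one shared, highly unpredictable site. The DISPATCH_GL/DISPATCH_J defines that follow reuse the same DISPATCH pointer to reach global_State and jit_State fields at fixed offsets.
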
-#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) -#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) -| -#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) -| -|// Decrement hashed hotcount and trigger trace recorder if zero. -|.macro hotloop, reg -| mov reg, PC -| shr reg, 1 -| and reg, HOTCOUNT_PCMASK -| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP -| jb ->vm_hotloop -|.endmacro -| -|.macro hotcall, reg -| mov reg, PC -| shr reg, 1 -| and reg, HOTCOUNT_PCMASK -| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL -| jb ->vm_hotcall -|.endmacro -| -|// Set current VM state. -|.macro set_vmstate, st -| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st -|.endmacro -| -|// x87 compares. -|.macro fcomparepp // Compare and pop st0 >< st1. -| fucomip st1 -| fpop -|.endmacro -| -|.macro fpop1; fstp st1; .endmacro -| -|// Synthesize SSE FP constants. -|.macro sseconst_abs, reg, tmp // Synthesize abs mask. -|.if X64 -| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp -|.else -| pxor reg, reg; pcmpeqd reg, reg; psrlq reg, 1 -|.endif -|.endmacro -| -|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const. -|.if X64 -| mov64 tmp, U64x(val,00000000); movd reg, tmp -|.else -| mov tmp, 0x .. val; movd reg, tmp; pshufd reg, reg, 0x51 -|.endif -|.endmacro -| -|.macro sseconst_sign, reg, tmp // Synthesize sign mask. -| sseconst_hi reg, tmp, 80000000 -|.endmacro -|.macro sseconst_1, reg, tmp // Synthesize 1.0. -| sseconst_hi reg, tmp, 3ff00000 -|.endmacro -|.macro sseconst_m1, reg, tmp // Synthesize -1.0. -| sseconst_hi reg, tmp, bff00000 -|.endmacro -|.macro sseconst_2p52, reg, tmp // Synthesize 2^52. -| sseconst_hi reg, tmp, 43300000 -|.endmacro -|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51. -| sseconst_hi reg, tmp, 43380000 -|.endmacro -| -|// Move table write barrier back. Overwrites reg. -|.macro barrierback, tab, reg -| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab) -| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)] -| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab -| mov tab->gclist, reg -|.endmacro -| -|//----------------------------------------------------------------------- - -/* Generate subroutines used by opcodes and other parts of the VM. */ -/* The .code_sub section should be last to help static branch prediction. */ -static void build_subroutines(BuildCtx *ctx) -{ - |.code_sub - | - |//----------------------------------------------------------------------- - |//-- Return handling ---------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_returnp: - | test PC, FRAME_P - | jz ->cont_dispatch - | - | // Return from pcall or xpcall fast func. - | and PC, -8 - | sub BASE, PC // Restore caller base. - | lea RAa, [RA+PC-8] // Rebase RA and prepend one result. - | mov PC, [BASE-4] // Fetch PC of previous frame. - | // Prepending may overwrite the pcall frame, so do it at the end. - | mov dword [BASE+RA+4], LJ_TTRUE // Prepend true to results. - | - |->vm_returnc: - | add RD, 1 // RD = nresults+1 - | jz ->vm_unwind_yield - | mov MULTRES, RD - | test PC, FRAME_TYPE - | jz ->BC_RET_Z // Handle regular return to Lua. - | - |->vm_return: - | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return - | xor PC, FRAME_C - | test PC, FRAME_TYPE - | jnz ->vm_returnp - | - | // Return to C. - | set_vmstate C - | and PC, -8 - | sub PC, BASE - | neg PC // Previous base = BASE - delta. 
- | - | sub RD, 1 - | jz >2 - |1: // Move results down. - |.if X64 - | mov RBa, [BASE+RA] - | mov [BASE-8], RBa - |.else - | mov RB, [BASE+RA] - | mov [BASE-8], RB - | mov RB, [BASE+RA+4] - | mov [BASE-4], RB - |.endif - | add BASE, 8 - | sub RD, 1 - | jnz <1 - |2: - | mov L:RB, SAVE_L - | mov L:RB->base, PC - |3: - | mov RD, MULTRES - | mov RA, SAVE_NRES // RA = wanted nresults+1 - |4: - | cmp RA, RD - | jne >6 // More/less results wanted? - |5: - | sub BASE, 8 - | mov L:RB->top, BASE - | - |->vm_leave_cp: - | mov RAa, SAVE_CFRAME // Restore previous C frame. - | mov L:RB->cframe, RAa - | xor eax, eax // Ok return status for vm_pcall. - | - |->vm_leave_unw: - | restoreregs - | ret - | - |6: - | jb >7 // Less results wanted? - | // More results wanted. Check stack size and fill up results with nil. - | cmp BASE, L:RB->maxstack - | ja >8 - | mov dword [BASE-4], LJ_TNIL - | add BASE, 8 - | add RD, 1 - | jmp <4 - | - |7: // Less results wanted. - | test RA, RA - | jz <5 // But check for LUA_MULTRET+1. - | sub RA, RD // Negative result! - | lea BASE, [BASE+RA*8] // Correct top. - | jmp <5 - | - |8: // Corner case: need to grow stack for filling up results. - | // This can happen if: - | // - A C function grows the stack (a lot). - | // - The GC shrinks the stack in between. - | // - A return back from a lua_call() with (high) nresults adjustment. - | mov L:RB->top, BASE // Save current top held in BASE (yes). - | mov MULTRES, RD // Need to fill only remainder with nil. - | mov FCARG2, RA - | mov FCARG1, L:RB - | call extern lj_state_growstack@8 // (lua_State *L, int n) - | mov BASE, L:RB->top // Need the (realloced) L->top in BASE. - | jmp <3 - | - |->vm_unwind_yield: - | mov al, LUA_YIELD - | jmp ->vm_unwind_c_eh - | - |->vm_unwind_c@8: // Unwind C stack, return from vm_pcall. - | // (void *cframe, int errcode) - |.if X64 - | mov eax, CARG2d // Error return status for vm_pcall. - | mov rsp, CARG1 - |.else - | mov eax, FCARG2 // Error return status for vm_pcall. - | mov esp, FCARG1 - |.endif - |->vm_unwind_c_eh: // Landing pad for external unwinder. - | mov L:RB, SAVE_L - | mov GL:RB, L:RB->glref - | mov dword GL:RB->vmstate, ~LJ_VMST_C - | jmp ->vm_leave_unw - | - |->vm_unwind_rethrow: - |.if X64 and not X64WIN - | mov FCARG1, SAVE_L - | mov FCARG2, eax - | restoreregs - | jmp extern lj_err_throw@8 // (lua_State *L, int errcode) - |.endif - | - |->vm_unwind_ff@4: // Unwind C stack, return from ff pcall. - | // (void *cframe) - |.if X64 - | and CARG1, CFRAME_RAWMASK - | mov rsp, CARG1 - |.else - | and FCARG1, CFRAME_RAWMASK - | mov esp, FCARG1 - |.endif - |->vm_unwind_ff_eh: // Landing pad for external unwinder. - | mov L:RB, SAVE_L - | mov RAa, -8 // Results start at BASE+RA = BASE-8. - | mov RD, 1+1 // Really 1+2 results, incr. later. - | mov BASE, L:RB->base - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | add DISPATCH, GG_G2DISP - | mov PC, [BASE-4] // Fetch PC of previous frame. - | mov dword [BASE-4], LJ_TFALSE // Prepend false to error message. - | set_vmstate INTERP - | jmp ->vm_returnc // Increments RD/MULTRES and returns. - | - |//----------------------------------------------------------------------- - |//-- Grow stack for calls ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_growstack_c: // Grow stack for C function. - | mov FCARG2, LUA_MINSTACK - | jmp >2 - | - |->vm_growstack_v: // Grow stack for vararg Lua function. 
- | sub RD, 8 - | jmp >1 - | - |->vm_growstack_f: // Grow stack for fixarg Lua function. - | // BASE = new base, RD = nargs+1, RB = L, PC = first PC - | lea RD, [BASE+NARGS:RD*8-8] - |1: - | movzx RA, byte [PC-4+PC2PROTO(framesize)] - | add PC, 4 // Must point after first instruction. - | mov L:RB->base, BASE - | mov L:RB->top, RD - | mov SAVE_PC, PC - | mov FCARG2, RA - |2: - | // RB = L, L->base = new base, L->top = top - | mov FCARG1, L:RB - | call extern lj_state_growstack@8 // (lua_State *L, int n) - | mov BASE, L:RB->base - | mov RD, L:RB->top - | mov LFUNC:RB, [BASE-8] - | sub RD, BASE - | shr RD, 3 - | add NARGS:RD, 1 - | // BASE = new base, RB = LFUNC, RD = nargs+1 - | ins_callt // Just retry the call. - | - |//----------------------------------------------------------------------- - |//-- Entry points into the assembler VM --------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_resume: // Setup C frame and resume thread. - | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) - | saveregs - |.if X64 - | mov L:RB, CARG1d // Caveat: CARG1d may be RA. - | mov SAVE_L, CARG1d - | mov RA, CARG2d - |.else - | mov L:RB, SAVE_L - | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! - |.endif - | mov PC, FRAME_CP - | xor RD, RD - | lea KBASEa, [esp+CFRAME_RESUME] - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | add DISPATCH, GG_G2DISP - | mov SAVE_PC, RD // Any value outside of bytecode is ok. - | mov SAVE_CFRAME, RDa - |.if X64 - | mov SAVE_NRES, RD - | mov SAVE_ERRF, RD - |.endif - | mov L:RB->cframe, KBASEa - | cmp byte L:RB->status, RDL - | je >2 // Initial resume (like a call). - | - | // Resume after yield (like a return). - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | mov byte L:RB->status, RDL - | mov BASE, L:RB->base - | mov RD, L:RB->top - | sub RD, RA - | shr RD, 3 - | add RD, 1 // RD = nresults+1 - | sub RA, BASE // RA = resultofs - | mov PC, [BASE-4] - | mov MULTRES, RD - | test PC, FRAME_TYPE - | jz ->BC_RET_Z - | jmp ->vm_return - | - |->vm_pcall: // Setup protected C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) - | saveregs - | mov PC, FRAME_CP - |.if X64 - | mov SAVE_ERRF, CARG4d - |.endif - | jmp >1 - | - |->vm_call: // Setup C frame and enter VM. - | // (lua_State *L, TValue *base, int nres1) - | saveregs - | mov PC, FRAME_C - | - |1: // Entry point for vm_pcall above (PC = ftype). - |.if X64 - | mov SAVE_NRES, CARG3d - | mov L:RB, CARG1d // Caveat: CARG1d may be RA. - | mov SAVE_L, CARG1d - | mov RA, CARG2d - |.else - | mov L:RB, SAVE_L - | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! - |.endif - | - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. - | mov SAVE_CFRAME, KBASEa - | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. - | add DISPATCH, GG_G2DISP - |.if X64 - | mov L:RB->cframe, rsp - |.else - | mov L:RB->cframe, esp - |.endif - | - |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype). - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | mov BASE, L:RB->base // BASE = old base (used in vmeta_call). 
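
vm_resume, vm_pcall and vm_call above are the entry points from C into the interpreter: they save the callee-saved registers per the stack layouts defined earlier, link a C frame into L->cframe and fall into the dispatch loop. On the embedding side this machinery sits behind the ordinary Lua C API; a minimal host program (standard Lua 5.1 API as shipped with LuaJIT):

    #include <stdio.h>
    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>

    int main(void)
    {
      lua_State *L = luaL_newstate();
      luaL_openlibs(L);
      /* luaL_loadstring compiles a chunk; lua_pcall runs it inside a
      ** protected frame (compare the FRAME_CP setup in vm_pcall above). */
      if (luaL_loadstring(L, "return 6 * 7") != 0 ||
          lua_pcall(L, 0, 1, 0) != 0) {       /* 0 args, 1 result, no errfunc */
        fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
        lua_close(L);
        return 1;
      }
      printf("%d\n", (int)lua_tointeger(L, -1));   /* 42 */
      lua_close(L);
      return 0;
    }
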
- | add PC, RA - | sub PC, BASE // PC = frame delta + frame type - | - | mov RD, L:RB->top - | sub RD, RA - | shr NARGS:RD, 3 - | add NARGS:RD, 1 // RD = nargs+1 - | - |->vm_call_dispatch: - | mov LFUNC:RB, [RA-8] - | cmp dword [RA-4], LJ_TFUNC - | jne ->vmeta_call // Ensure KBASE defined and != BASE. - | - |->vm_call_dispatch_f: - | mov BASE, RA - | ins_call - | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC - | - |->vm_cpcall: // Setup protected C frame, call C. - | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) - | saveregs - |.if X64 - | mov L:RB, CARG1d // Caveat: CARG1d may be RA. - | mov SAVE_L, CARG1d - |.else - | mov L:RB, SAVE_L - | // Caveat: INARG_CP_* and SAVE_CFRAME/SAVE_NRES/SAVE_ERRF overlap! - | mov RC, INARG_CP_UD // Get args before they are overwritten. - | mov RA, INARG_CP_FUNC - | mov BASE, INARG_CP_CALL - |.endif - | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. - | - | mov KBASE, L:RB->stack // Compute -savestack(L, L->top). - | sub KBASE, L:RB->top - | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. - | mov SAVE_ERRF, 0 // No error function. - | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame. - | add DISPATCH, GG_G2DISP - | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe). - | - |.if X64 - | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. - | mov SAVE_CFRAME, KBASEa - | mov L:RB->cframe, rsp - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | - | call CARG4 // (lua_State *L, lua_CFunction func, void *ud) - |.else - | mov ARG3, RC // Have to copy args downwards. - | mov ARG2, RA - | mov ARG1, L:RB - | - | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. - | mov SAVE_CFRAME, KBASE - | mov L:RB->cframe, esp - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | - | call BASE // (lua_State *L, lua_CFunction func, void *ud) - |.endif - | // TValue * (new base) or NULL returned in eax (RC). - | test RC, RC - | jz ->vm_leave_cp // No base? Just remove C frame. - | mov RA, RC - | mov PC, FRAME_CP - | jmp <2 // Else continue with the call. - | - |//----------------------------------------------------------------------- - |//-- Metamethod handling ------------------------------------------------ - |//----------------------------------------------------------------------- - | - |//-- Continuation dispatch ---------------------------------------------- - | - |->cont_dispatch: - | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES) - | add RA, BASE - | and PC, -8 - | mov RB, BASE - | sub BASE, PC // Restore caller BASE. - | mov dword [RA+RD*8-4], LJ_TNIL // Ensure one valid arg. - | mov RC, RA // ... in [RC] - | mov PC, [RB-12] // Restore PC from [cont|PC]. - |.if X64 - | movsxd RAa, dword [RB-16] // May be negative on WIN64 with debug. - |.if FFI - | cmp RA, 1 - | jbe >1 - |.endif - | lea KBASEa, qword [=>0] - | add RAa, KBASEa - |.else - | mov RA, dword [RB-16] - |.if FFI - | cmp RA, 1 - | jbe >1 - |.endif - |.endif - | mov LFUNC:KBASE, [BASE-8] - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | // BASE = base, RC = result, RB = meta base - | jmp RAa // Jump to continuation. - | - |.if FFI - |1: - | je ->cont_ffi_callback // cont = 1: return from FFI callback. - | // cont = 0: Tail call from C function. 
- | sub RB, BASE - | shr RB, 3 - | lea RD, [RB-1] - | jmp ->vm_call_tail - |.endif - | - |->cont_cat: // BASE = base, RC = result, RB = mbase - | movzx RA, PC_RB - | sub RB, 16 - | lea RA, [BASE+RA*8] - | sub RA, RB - | je ->cont_ra - | neg RA - | shr RA, 3 - |.if X64WIN - | mov CARG3d, RA - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE - | mov RCa, [RC] - | mov [RB], RCa - | mov CARG2d, RB - |.elif X64 - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE - | mov CARG3d, RA - | mov RAa, [RC] - | mov [RB], RAa - | mov CARG2d, RB - |.else - | mov ARG3, RA - | mov RA, [RC+4] - | mov RC, [RC] - | mov [RB+4], RA - | mov [RB], RC - | mov ARG2, RB - |.endif - | jmp ->BC_CAT_Z - | - |//-- Table indexing metamethods ----------------------------------------- - | - |->vmeta_tgets: - | mov TMP1, RC // RC = GCstr * - | mov TMP2, LJ_TSTR - | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2. - | cmp PC_OP, BC_GGET - | jne >1 - | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. - | mov [RA], TAB:RB // RB = GCtab * - | mov dword [RA+4], LJ_TTAB - | mov RB, RA - | jmp >2 - | - |->vmeta_tgetb: - | movzx RC, PC_RC - |.if DUALNUM - | mov TMP2, LJ_TISNUM - | mov TMP1, RC - |.else - | cvtsi2sd xmm0, RC - | movsd TMPQ, xmm0 - |.endif - | lea RCa, TMPQ // Store temp. TValue in TMPQ. - | jmp >1 - | - |->vmeta_tgetv: - | movzx RC, PC_RC // Reload TValue *k from RC. - | lea RC, [BASE+RC*8] - |1: - | movzx RB, PC_RB // Reload TValue *t from RB. - | lea RB, [BASE+RB*8] - |2: - |.if X64 - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. - | mov CARG2d, RB - | mov CARG3, RCa // May be 64 bit ptr to stack. - | mov L:RB, L:CARG1d - |.else - | mov ARG2, RB - | mov L:RB, SAVE_L - | mov ARG3, RC - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) - | // TValue * (finished) or NULL (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jz >3 - |->cont_ra: // BASE = base, RC = result - | movzx RA, PC_RA - |.if X64 - | mov RBa, [RC] - | mov [BASE+RA*8], RBa - |.else - | mov RB, [RC+4] - | mov RC, [RC] - | mov [BASE+RA*8+4], RB - | mov [BASE+RA*8], RC - |.endif - | ins_next - | - |3: // Call __index metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k - | mov RA, L:RB->top - | mov [RA-12], PC // [cont|PC] - | lea PC, [RA+FRAME_CONT] - | sub PC, BASE - | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here. - | mov NARGS:RD, 2+1 // 2 args for func(t, k). - | jmp ->vm_call_dispatch_f - | - |->vmeta_tgetr: - | mov FCARG1, TAB:RB - | mov RB, BASE // Save BASE. - | mov FCARG2, RC // Caveat: FCARG2 == BASE - | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key) - | // cTValue * or NULL returned in eax (RC). - | movzx RA, PC_RA - | mov BASE, RB // Restore BASE. - | test RC, RC - | jnz ->BC_TGETR_Z - | mov dword [BASE+RA*8+4], LJ_TNIL - | jmp ->BC_TGETR2_Z - | - |//----------------------------------------------------------------------- - | - |->vmeta_tsets: - | mov TMP1, RC // RC = GCstr * - | mov TMP2, LJ_TSTR - | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2. - | cmp PC_OP, BC_GSET - | jne >1 - | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. 
- | mov [RA], TAB:RB // RB = GCtab * - | mov dword [RA+4], LJ_TTAB - | mov RB, RA - | jmp >2 - | - |->vmeta_tsetb: - | movzx RC, PC_RC - |.if DUALNUM - | mov TMP2, LJ_TISNUM - | mov TMP1, RC - |.else - | cvtsi2sd xmm0, RC - | movsd TMPQ, xmm0 - |.endif - | lea RCa, TMPQ // Store temp. TValue in TMPQ. - | jmp >1 - | - |->vmeta_tsetv: - | movzx RC, PC_RC // Reload TValue *k from RC. - | lea RC, [BASE+RC*8] - |1: - | movzx RB, PC_RB // Reload TValue *t from RB. - | lea RB, [BASE+RB*8] - |2: - |.if X64 - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. - | mov CARG2d, RB - | mov CARG3, RCa // May be 64 bit ptr to stack. - | mov L:RB, L:CARG1d - |.else - | mov ARG2, RB - | mov L:RB, SAVE_L - | mov ARG3, RC - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) - | // TValue * (finished) or NULL (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jz >3 - | // NOBARRIER: lj_meta_tset ensures the table is not black. - | movzx RA, PC_RA - |.if X64 - | mov RBa, [BASE+RA*8] - | mov [RC], RBa - |.else - | mov RB, [BASE+RA*8+4] - | mov RA, [BASE+RA*8] - | mov [RC+4], RB - | mov [RC], RA - |.endif - |->cont_nop: // BASE = base, (RC = result) - | ins_next - | - |3: // Call __newindex metamethod. - | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) - | mov RA, L:RB->top - | mov [RA-12], PC // [cont|PC] - | movzx RC, PC_RA - | // Copy value to third argument. - |.if X64 - | mov RBa, [BASE+RC*8] - | mov [RA+16], RBa - |.else - | mov RB, [BASE+RC*8+4] - | mov RC, [BASE+RC*8] - | mov [RA+20], RB - | mov [RA+16], RC - |.endif - | lea PC, [RA+FRAME_CONT] - | sub PC, BASE - | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here. - | mov NARGS:RD, 3+1 // 3 args for func(t, k, v). - | jmp ->vm_call_dispatch_f - | - |->vmeta_tsetr: - |.if X64WIN - | mov L:CARG1d, SAVE_L - | mov CARG3d, RC - | mov L:CARG1d->base, BASE - | xchg CARG2d, TAB:RB // Caveat: CARG2d == BASE. - |.elif X64 - | mov L:CARG1d, SAVE_L - | mov CARG2d, TAB:RB - | mov L:CARG1d->base, BASE - | mov RB, BASE // Save BASE. - | mov CARG3d, RC // Caveat: CARG3d == BASE. - |.else - | mov L:RA, SAVE_L - | mov ARG2, TAB:RB - | mov RB, BASE // Save BASE. - | mov ARG3, RC - | mov ARG1, L:RA - | mov L:RA->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) - | // TValue * returned in eax (RC). - | movzx RA, PC_RA - | mov BASE, RB // Restore BASE. - | jmp ->BC_TSETR_Z - | - |//-- Comparison metamethods --------------------------------------------- - | - |->vmeta_comp: - |.if X64 - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d == BASE. - |.if X64WIN - | lea CARG3d, [BASE+RD*8] - | lea CARG2d, [BASE+RA*8] - |.else - | lea CARG2d, [BASE+RA*8] - | lea CARG3d, [BASE+RD*8] - |.endif - | mov CARG1d, L:RB // Caveat: CARG1d/CARG4d == RA. - | movzx CARG4d, PC_OP - |.else - | movzx RB, PC_OP - | lea RD, [BASE+RD*8] - | lea RA, [BASE+RA*8] - | mov ARG4, RB - | mov L:RB, SAVE_L - | mov ARG3, RD - | mov ARG2, RA - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) - | // 0/1 or TValue * (metamethod) returned in eax (RC). 
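
The vmeta_* stubs above only marshal BASE, key and value into the calling convention and then defer to C helpers (lj_meta_tget, lj_meta_tset, lj_meta_comp) which resolve the metamethod and either finish the operation or hand back a prepared call. The resolution itself follows the standard Lua rules; a self-contained toy of the __index chain for tables (data structures and limits here are made up, not LuaJIT's GCtab or lj_meta_tget):

    #include <stdio.h>
    #include <string.h>

    /* Toy table: a few string->int slots plus an optional __index table. */
    typedef struct Tab {
      struct Tab *mt_index;              /* metatable.__index (table case only) */
      const char *key[4];
      int         val[4];
    } Tab;

    static const int *tab_rawget(const Tab *t, const char *k)
    {
      for (int i = 0; i < 4; i++)
        if (t->key[i] && strcmp(t->key[i], k) == 0) return &t->val[i];
      return NULL;
    }

    /* The __index chain: try the table, then follow __index tables.  Real Lua
    ** also allows __index to be a function and caps the chain depth. */
    static const int *index_event(const Tab *t, const char *k)
    {
      for (int depth = 0; t && depth < 100; t = t->mt_index, depth++) {
        const int *slot = tab_rawget(t, k);
        if (slot) return slot;
      }
      return NULL;                       /* nil */
    }

    int main(void)
    {
      Tab base    = { NULL,  { "answer" }, { 42 } };
      Tab derived = { &base, { "name"   }, { 7 } };
      const int *v = index_event(&derived, "answer");
      printf("%d\n", v ? *v : -1);       /* inherited via __index: 42 */
      return 0;
    }
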
- |3: - | mov BASE, L:RB->base - | cmp RC, 1 - | ja ->vmeta_binop - |4: - | lea PC, [PC+4] - | jb >6 - |5: - | movzx RD, PC_RD - | branchPC RD - |6: - | ins_next - | - |->cont_condt: // BASE = base, RC = result - | add PC, 4 - | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is true. - | jb <5 - | jmp <6 - | - |->cont_condf: // BASE = base, RC = result - | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is false. - | jmp <4 - | - |->vmeta_equal: - | sub PC, 4 - |.if X64WIN - | mov CARG3d, RD - | mov CARG4d, RB - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2d == BASE. - | mov CARG2d, RA - | mov CARG1d, L:RB // Caveat: CARG1d == RA. - |.elif X64 - | mov CARG2d, RA - | mov CARG4d, RB // Caveat: CARG4d == RA. - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG3d == BASE. - | mov CARG3d, RD - | mov CARG1d, L:RB - |.else - | mov ARG4, RB - | mov L:RB, SAVE_L - | mov ARG3, RD - | mov ARG2, RA - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) - | // 0/1 or TValue * (metamethod) returned in eax (RC). - | jmp <3 - | - |->vmeta_equal_cd: - |.if FFI - | sub PC, 4 - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov FCARG1, L:RB - | mov FCARG2, dword [PC-4] - | mov SAVE_PC, PC - | call extern lj_meta_equal_cd@8 // (lua_State *L, BCIns ins) - | // 0/1 or TValue * (metamethod) returned in eax (RC). - | jmp <3 - |.endif - | - |->vmeta_istype: - |.if X64 - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. - | mov CARG2d, RA - | movzx CARG3d, PC_RD - | mov L:CARG1d, L:RB - |.else - | movzx RD, PC_RD - | mov ARG2, RA - | mov L:RB, SAVE_L - | mov ARG3, RD - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) - | mov BASE, L:RB->base - | jmp <6 - | - |//-- Arithmetic metamethods --------------------------------------------- - | - |->vmeta_arith_vno: - |.if DUALNUM - | movzx RB, PC_RB - |.endif - |->vmeta_arith_vn: - | lea RC, [KBASE+RC*8] - | jmp >1 - | - |->vmeta_arith_nvo: - |.if DUALNUM - | movzx RC, PC_RC - |.endif - |->vmeta_arith_nv: - | lea RC, [KBASE+RC*8] - | lea RB, [BASE+RB*8] - | xchg RB, RC - | jmp >2 - | - |->vmeta_unm: - | lea RC, [BASE+RD*8] - | mov RB, RC - | jmp >2 - | - |->vmeta_arith_vvo: - |.if DUALNUM - | movzx RB, PC_RB - |.endif - |->vmeta_arith_vv: - | lea RC, [BASE+RC*8] - |1: - | lea RB, [BASE+RB*8] - |2: - | lea RA, [BASE+RA*8] - |.if X64WIN - | mov CARG3d, RB - | mov CARG4d, RC - | movzx RC, PC_OP - | mov ARG5d, RC - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2d == BASE. - | mov CARG2d, RA - | mov CARG1d, L:RB // Caveat: CARG1d == RA. - |.elif X64 - | movzx CARG5d, PC_OP - | mov CARG2d, RA - | mov CARG4d, RC // Caveat: CARG4d == RA. - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE // Caveat: CARG3d == BASE. - | mov CARG3d, RB - | mov L:RB, L:CARG1d - |.else - | mov ARG3, RB - | mov L:RB, SAVE_L - | mov ARG4, RC - | movzx RC, PC_OP - | mov ARG2, RA - | mov ARG5, RC - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) - | // NULL (finished) or TValue * (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jz ->cont_nop - | - | // Call metamethod for binary op. 
- |->vmeta_binop: - | // BASE = base, RC = new base, stack = cont/func/o1/o2 - | mov RA, RC - | sub RC, BASE - | mov [RA-12], PC // [cont|PC] - | lea PC, [RC+FRAME_CONT] - | mov NARGS:RD, 2+1 // 2 args for func(o1, o2). - | jmp ->vm_call_dispatch - | - |->vmeta_len: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | lea FCARG2, [BASE+RD*8] // Caveat: FCARG2 == BASE - | mov L:FCARG1, L:RB - | mov SAVE_PC, PC - | call extern lj_meta_len@8 // (lua_State *L, TValue *o) - | // NULL (retry) or TValue * (metamethod) returned in eax (RC). - | mov BASE, L:RB->base -#if LJ_52 - | test RC, RC - | jne ->vmeta_binop // Binop call for compatibility. - | movzx RD, PC_RD - | mov TAB:FCARG1, [BASE+RD*8] - | jmp ->BC_LEN_Z -#else - | jmp ->vmeta_binop // Binop call for compatibility. -#endif - | - |//-- Call metamethod ---------------------------------------------------- - | - |->vmeta_call_ra: - | lea RA, [BASE+RA*8+8] - |->vmeta_call: // Resolve and call __call metamethod. - | // BASE = old base, RA = new base, RC = nargs+1, PC = return - | mov TMP2, RA // Save RA, RC for us. - | mov TMP1, NARGS:RD - | sub RA, 8 - |.if X64 - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. - | mov CARG2d, RA - | lea CARG3d, [RA+NARGS:RD*8] - | mov CARG1d, L:RB // Caveat: CARG1d may be RA. - |.else - | lea RC, [RA+NARGS:RD*8] - | mov L:RB, SAVE_L - | mov ARG2, RA - | mov ARG3, RC - | mov ARG1, L:RB - | mov L:RB->base, BASE // This is the callers base! - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) - | mov BASE, L:RB->base - | mov RA, TMP2 - | mov NARGS:RD, TMP1 - | mov LFUNC:RB, [RA-8] - | add NARGS:RD, 1 - | // This is fragile. L->base must not move, KBASE must always be defined. - | cmp KBASE, BASE // Continue with CALLT if flag set. - | je ->BC_CALLT_Z - | mov BASE, RA - | ins_call // Otherwise call resolved metamethod. - | - |//-- Argument coercion for 'for' statement ------------------------------ - | - |->vmeta_for: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov FCARG2, RA // Caveat: FCARG2 == BASE - | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA - | mov SAVE_PC, PC - | call extern lj_meta_for@8 // (lua_State *L, TValue *base) - | mov BASE, L:RB->base - | mov RC, [PC-4] - | movzx RA, RCH - | movzx OP, RCL - | shr RC, 16 - |.if X64 - | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI. - |.else - | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Retry FORI or JFORI. - |.endif - | - |//----------------------------------------------------------------------- - |//-- Fast functions ----------------------------------------------------- - |//----------------------------------------------------------------------- - | - |.macro .ffunc, name - |->ff_ .. name: - |.endmacro - | - |.macro .ffunc_1, name - |->ff_ .. name: - | cmp NARGS:RD, 1+1; jb ->fff_fallback - |.endmacro - | - |.macro .ffunc_2, name - |->ff_ .. 
name: - | cmp NARGS:RD, 2+1; jb ->fff_fallback - |.endmacro - | - |.macro .ffunc_nsse, name, op - | .ffunc_1 name - | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback - | op xmm0, qword [BASE] - |.endmacro - | - |.macro .ffunc_nsse, name - | .ffunc_nsse name, movsd - |.endmacro - | - |.macro .ffunc_nnsse, name - | .ffunc_2 name - | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback - | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback - | movsd xmm0, qword [BASE] - | movsd xmm1, qword [BASE+8] - |.endmacro - | - |.macro .ffunc_nnr, name - | .ffunc_2 name - | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback - | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback - | fld qword [BASE+8] - | fld qword [BASE] - |.endmacro - | - |// Inlined GC threshold check. Caveat: uses label 1. - |.macro ffgccheck - | mov RB, [DISPATCH+DISPATCH_GL(gc.total)] - | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)] - | jb >1 - | call ->fff_gcstep - |1: - |.endmacro - | - |//-- Base library: checks ----------------------------------------------- - | - |.ffunc_1 assert - | mov RB, [BASE+4] - | cmp RB, LJ_TISTRUECOND; jae ->fff_fallback - | mov PC, [BASE-4] - | mov MULTRES, RD - | mov [BASE-4], RB - | mov RB, [BASE] - | mov [BASE-8], RB - | sub RD, 2 - | jz >2 - | mov RA, BASE - |1: - | add RA, 8 - |.if X64 - | mov RBa, [RA] - | mov [RA-8], RBa - |.else - | mov RB, [RA+4] - | mov [RA-4], RB - | mov RB, [RA] - | mov [RA-8], RB - |.endif - | sub RD, 1 - | jnz <1 - |2: - | mov RD, MULTRES - | jmp ->fff_res_ - | - |.ffunc_1 type - | mov RB, [BASE+4] - |.if X64 - | mov RA, RB - | sar RA, 15 - | cmp RA, -2 - | je >3 - |.endif - | mov RC, ~LJ_TNUMX - | not RB - | cmp RC, RB - | cmova RC, RB - |2: - | mov CFUNC:RB, [BASE-8] - | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))] - | mov PC, [BASE-4] - | mov dword [BASE-4], LJ_TSTR - | mov [BASE-8], STR:RC - | jmp ->fff_res1 - |.if X64 - |3: - | mov RC, ~LJ_TLIGHTUD - | jmp <2 - |.endif - | - |//-- Base library: getters and setters --------------------------------- - | - |.ffunc_1 getmetatable - | mov RB, [BASE+4] - | mov PC, [BASE-4] - | cmp RB, LJ_TTAB; jne >6 - |1: // Field metatable must be at same offset for GCtab and GCudata! - | mov TAB:RB, [BASE] - | mov TAB:RB, TAB:RB->metatable - |2: - | test TAB:RB, TAB:RB - | mov dword [BASE-4], LJ_TNIL - | jz ->fff_res1 - | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable)] - | mov dword [BASE-4], LJ_TTAB // Store metatable as default result. - | mov [BASE-8], TAB:RB - | mov RA, TAB:RB->hmask - | and RA, STR:RC->hash - | imul RA, #NODE - | add NODE:RA, TAB:RB->node - |3: // Rearranged logic, because we expect _not_ to find the key. - | cmp dword NODE:RA->key.it, LJ_TSTR - | jne >4 - | cmp dword NODE:RA->key.gcr, STR:RC - | je >5 - |4: - | mov NODE:RA, NODE:RA->next - | test NODE:RA, NODE:RA - | jnz <3 - | jmp ->fff_res1 // Not found, keep default result. - |5: - | mov RB, [RA+4] - | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value. - | mov RC, [RA] - | mov [BASE-4], RB // Return value of mt.__metatable. - | mov [BASE-8], RC - | jmp ->fff_res1 - | - |6: - | cmp RB, LJ_TUDATA; je <1 - |.if X64 - | cmp RB, LJ_TNUMX; ja >8 - | cmp RB, LJ_TISNUM; jbe >7 - | mov RB, LJ_TLIGHTUD - | jmp >8 - |7: - |.else - | cmp RB, LJ_TISNUM; ja >8 - |.endif - | mov RB, LJ_TNUMX - |8: - | not RB - | mov TAB:RB, [DISPATCH+RB*4+DISPATCH_GL(gcroot[GCROOT_BASEMT])] - | jmp <2 - | - |.ffunc_2 setmetatable - | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback - | // Fast path: no mt for table yet and not clearing the mt. 
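
Nearly every fast function above starts with the same tag test: the high dword of an 8-byte stack slot ([BASE+n*8+4]) holds either the upper half of a double or a type tag, and the tags are ordered so that a single unsigned compare against LJ_TISNUM ("jae ->fff_fallback") separates numbers from everything else. A little-endian toy with made-up tag values (LuaJIT's LJ_T* constants and exact encoding differ, and a real VM must also canonicalize NaNs so their high word stays below the tag range):

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_BASE  0xffff0000u        /* made-up: above any canonical double */
    #define TAG_NIL   (TAG_BASE + 0)
    #define TAG_TRUE  (TAG_BASE + 1)
    #define TAG_ISNUM TAG_BASE           /* high word below this => double */

    /* 8-byte slot: either a double or (payload, tag), little-endian like x86. */
    typedef union TV { double n; struct { uint32_t lo, hi; } u32; } TV;

    static TV  setnum(double d) { TV tv; tv.n = d; return tv; }
    static TV  setnil(void)     { TV tv; tv.u32.lo = 0; tv.u32.hi = TAG_NIL; return tv; }
    static int tvisnum(TV tv)   { return tv.u32.hi < TAG_ISNUM; }  /* "checknum" */

    int main(void)
    {
      TV a = setnum(3.5), b = setnil();
      printf("%d %d\n", tvisnum(a), tvisnum(b));   /* 1 0 */
      return 0;
    }
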
- | mov TAB:RB, [BASE] - | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback - | cmp dword [BASE+12], LJ_TTAB; jne ->fff_fallback - | mov TAB:RC, [BASE+8] - | mov TAB:RB->metatable, TAB:RC - | mov PC, [BASE-4] - | mov dword [BASE-4], LJ_TTAB // Return original table. - | mov [BASE-8], TAB:RB - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jz >1 - | // Possible write barrier. Table is black, but skip iswhite(mt) check. - | barrierback TAB:RB, RC - |1: - | jmp ->fff_res1 - | - |.ffunc_2 rawget - | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback - |.if X64WIN - | mov RB, BASE // Save BASE. - | lea CARG3d, [BASE+8] - | mov CARG2d, [BASE] // Caveat: CARG2d == BASE. - | mov CARG1d, SAVE_L - |.elif X64 - | mov RB, BASE // Save BASE. - | mov CARG2d, [BASE] - | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE. - | mov CARG1d, SAVE_L - |.else - | mov TAB:RD, [BASE] - | mov L:RB, SAVE_L - | mov ARG2, TAB:RD - | mov ARG1, L:RB - | mov RB, BASE // Save BASE. - | add BASE, 8 - | mov ARG3, BASE - |.endif - | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) - | // cTValue * returned in eax (RD). - | mov BASE, RB // Restore BASE. - | // Copy table slot. - |.if X64 - | mov RBa, [RD] - | mov PC, [BASE-4] - | mov [BASE-8], RBa - |.else - | mov RB, [RD] - | mov RD, [RD+4] - | mov PC, [BASE-4] - | mov [BASE-8], RB - | mov [BASE-4], RD - |.endif - | jmp ->fff_res1 - | - |//-- Base library: conversions ------------------------------------------ - | - |.ffunc tonumber - | // Only handles the number case inline (without a base argument). - | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument. - | cmp dword [BASE+4], LJ_TISNUM - |.if DUALNUM - | jne >1 - | mov RB, dword [BASE]; jmp ->fff_resi - |1: - | ja ->fff_fallback - |.else - | jae ->fff_fallback - |.endif - | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0 - | - |.ffunc_1 tostring - | // Only handles the string or number case inline. - | mov PC, [BASE-4] - | cmp dword [BASE+4], LJ_TSTR; jne >3 - | // A __tostring method in the string base metatable is ignored. - | mov STR:RD, [BASE] - |2: - | mov dword [BASE-4], LJ_TSTR - | mov [BASE-8], STR:RD - | jmp ->fff_res1 - |3: // Handle numbers inline, unless a number base metatable is present. - | cmp dword [BASE+4], LJ_TISNUM; ja ->fff_fallback - | cmp dword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0 - | jne ->fff_fallback - | ffgccheck // Caveat: uses label 1. - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Add frame since C call can throw. - | mov SAVE_PC, PC // Redundant (but a defined value). - |.if X64 and not X64WIN - | mov FCARG2, BASE // Otherwise: FCARG2 == BASE - |.endif - | mov L:FCARG1, L:RB - |.if DUALNUM - | call extern lj_strfmt_number@8 // (lua_State *L, cTValue *o) - |.else - | call extern lj_strfmt_num@8 // (lua_State *L, lua_Number *np) - |.endif - | // GCstr returned in eax (RD). - | mov BASE, L:RB->base - | jmp <2 - | - |//-- Base library: iterators ------------------------------------------- - | - |.ffunc_1 next - | je >2 // Missing 2nd arg? - |1: - | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Add frame since C call can throw. - | mov L:RB->top, BASE // Dummy frame length is ok. - | mov PC, [BASE-4] - |.if X64WIN - | lea CARG3d, [BASE+8] - | mov CARG2d, [BASE] // Caveat: CARG2d == BASE. - | mov CARG1d, L:RB - |.elif X64 - | mov CARG2d, [BASE] - | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE. 
- | mov CARG1d, L:RB - |.else - | mov TAB:RD, [BASE] - | mov ARG2, TAB:RD - | mov ARG1, L:RB - | add BASE, 8 - | mov ARG3, BASE - |.endif - | mov SAVE_PC, PC // Needed for ITERN fallback. - | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) - | // Flag returned in eax (RD). - | mov BASE, L:RB->base - | test RD, RD; jz >3 // End of traversal? - | // Copy key and value to results. - |.if X64 - | mov RBa, [BASE+8] - | mov RDa, [BASE+16] - | mov [BASE-8], RBa - | mov [BASE], RDa - |.else - | mov RB, [BASE+8] - | mov RD, [BASE+12] - | mov [BASE-8], RB - | mov [BASE-4], RD - | mov RB, [BASE+16] - | mov RD, [BASE+20] - | mov [BASE], RB - | mov [BASE+4], RD - |.endif - |->fff_res2: - | mov RD, 1+2 - | jmp ->fff_res - |2: // Set missing 2nd arg to nil. - | mov dword [BASE+12], LJ_TNIL - | jmp <1 - |3: // End of traversal: return nil. - | mov dword [BASE-4], LJ_TNIL - | jmp ->fff_res1 - | - |.ffunc_1 pairs - | mov TAB:RB, [BASE] - | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback -#if LJ_52 - | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback -#endif - | mov CFUNC:RB, [BASE-8] - | mov CFUNC:RD, CFUNC:RB->upvalue[0] - | mov PC, [BASE-4] - | mov dword [BASE-4], LJ_TFUNC - | mov [BASE-8], CFUNC:RD - | mov dword [BASE+12], LJ_TNIL - | mov RD, 1+3 - | jmp ->fff_res - | - |.ffunc_2 ipairs_aux - | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback - | cmp dword [BASE+12], LJ_TISNUM - |.if DUALNUM - | jne ->fff_fallback - |.else - | jae ->fff_fallback - |.endif - | mov PC, [BASE-4] - |.if DUALNUM - | mov RD, dword [BASE+8] - | add RD, 1 - | mov dword [BASE-4], LJ_TISNUM - | mov dword [BASE-8], RD - |.else - | movsd xmm0, qword [BASE+8] - | sseconst_1 xmm1, RBa - | addsd xmm0, xmm1 - | cvttsd2si RD, xmm0 - | movsd qword [BASE-8], xmm0 - |.endif - | mov TAB:RB, [BASE] - | cmp RD, TAB:RB->asize; jae >2 // Not in array part? - | shl RD, 3 - | add RD, TAB:RB->array - |1: - | cmp dword [RD+4], LJ_TNIL; je ->fff_res0 - | // Copy array slot. - |.if X64 - | mov RBa, [RD] - | mov [BASE], RBa - |.else - | mov RB, [RD] - | mov RD, [RD+4] - | mov [BASE], RB - | mov [BASE+4], RD - |.endif - | jmp ->fff_res2 - |2: // Check for empty hash part first. Otherwise call C function. - | cmp dword TAB:RB->hmask, 0; je ->fff_res0 - | mov FCARG1, TAB:RB - | mov RB, BASE // Save BASE. - | mov FCARG2, RD // Caveat: FCARG2 == BASE - | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key) - | // cTValue * or NULL returned in eax (RD). - | mov BASE, RB - | test RD, RD - | jnz <1 - |->fff_res0: - | mov RD, 1+0 - | jmp ->fff_res - | - |.ffunc_1 ipairs - | mov TAB:RB, [BASE] - | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback -#if LJ_52 - | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback -#endif - | mov CFUNC:RB, [BASE-8] - | mov CFUNC:RD, CFUNC:RB->upvalue[0] - | mov PC, [BASE-4] - | mov dword [BASE-4], LJ_TFUNC - | mov [BASE-8], CFUNC:RD - |.if DUALNUM - | mov dword [BASE+12], LJ_TISNUM - | mov dword [BASE+8], 0 - |.else - | xorps xmm0, xmm0 - | movsd qword [BASE+8], xmm0 - |.endif - | mov RD, 1+3 - | jmp ->fff_res - | - |//-- Base library: catch errors ---------------------------------------- - | - |.ffunc_1 pcall - | lea RA, [BASE+8] - | sub NARGS:RD, 1 - | mov PC, 8+FRAME_PCALL - |1: - | movzx RB, byte [DISPATCH+DISPATCH_GL(hookmask)] - | shr RB, HOOK_ACTIVE_SHIFT - | and RB, 1 - | add PC, RB // Remember active hook before pcall. - | jmp ->vm_call_dispatch - | - |.ffunc_2 xpcall - | cmp dword [BASE+12], LJ_TFUNC; jne ->fff_fallback - | mov RB, [BASE+4] // Swap function and traceback. 
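
The next/pairs/ipairs fast paths above walk the table's array and hash parts directly (lj_tab_next, lj_tab_getinth) and only fall back to C when needed. From the C API the same traversal is the classic lua_next() loop (standard Lua 5.1 API; note the key stays on the stack between iterations):

    #include <stdio.h>
    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>

    static void dump_table(lua_State *L, int idx)
    {
      lua_pushnil(L);                     /* first key */
      while (lua_next(L, idx) != 0) {     /* pushes key, value */
        printf("%s -> %s\n", luaL_typename(L, -2), luaL_typename(L, -1));
        lua_pop(L, 1);                    /* drop value, keep key for next call */
      }
    }

    int main(void)
    {
      lua_State *L = luaL_newstate();
      luaL_openlibs(L);
      luaL_dostring(L, "t = { 1, 2, x = 'y' }");
      lua_getglobal(L, "t");
      dump_table(L, lua_gettop(L));
      lua_close(L);
      return 0;
    }
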
- | mov [BASE+12], RB - | mov dword [BASE+4], LJ_TFUNC - | mov LFUNC:RB, [BASE] - | mov PC, [BASE+8] - | mov [BASE+8], LFUNC:RB - | mov [BASE], PC - | lea RA, [BASE+16] - | sub NARGS:RD, 2 - | mov PC, 16+FRAME_PCALL - | jmp <1 - | - |//-- Coroutine library -------------------------------------------------- - | - |.macro coroutine_resume_wrap, resume - |.if resume - |.ffunc_1 coroutine_resume - | mov L:RB, [BASE] - |.else - |.ffunc coroutine_wrap_aux - | mov CFUNC:RB, [BASE-8] - | mov L:RB, CFUNC:RB->upvalue[0].gcr - |.endif - | mov PC, [BASE-4] - | mov SAVE_PC, PC - |.if X64 - | mov TMP1, L:RB - |.else - | mov ARG1, L:RB - |.endif - |.if resume - | cmp dword [BASE+4], LJ_TTHREAD; jne ->fff_fallback - |.endif - | cmp aword L:RB->cframe, 0; jne ->fff_fallback - | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback - | mov RA, L:RB->top - | je >1 // Status != LUA_YIELD (i.e. 0)? - | cmp RA, L:RB->base // Check for presence of initial func. - | je ->fff_fallback - |1: - |.if resume - | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread). - |.else - | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1). - |.endif - | cmp PC, L:RB->maxstack; ja ->fff_fallback - | mov L:RB->top, PC - | - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - |.if resume - | add BASE, 8 // Keep resumed thread in stack for GC. - |.endif - | mov L:RB->top, BASE - |.if resume - | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move. - |.else - | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move. - |.endif - | sub RBa, PCa // Relative to PC. - | - | cmp PC, RA - | je >3 - |2: // Move args to coroutine. - |.if X64 - | mov RCa, [PC+RB] - | mov [PC-8], RCa - |.else - | mov RC, [PC+RB+4] - | mov [PC-4], RC - | mov RC, [PC+RB] - | mov [PC-8], RC - |.endif - | sub PC, 8 - | cmp PC, RA - | jne <2 - |3: - |.if X64 - | mov CARG2d, RA - | mov CARG1d, TMP1 - |.else - | mov ARG2, RA - | xor RA, RA - | mov ARG4, RA - | mov ARG3, RA - |.endif - | call ->vm_resume // (lua_State *L, TValue *base, 0, 0) - | - | mov L:RB, SAVE_L - |.if X64 - | mov L:PC, TMP1 - |.else - | mov L:PC, ARG1 // The callee doesn't modify SAVE_L. - |.endif - | mov BASE, L:RB->base - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | - | cmp eax, LUA_YIELD - | ja >8 - |4: - | mov RA, L:PC->base - | mov KBASE, L:PC->top - | mov L:PC->top, RA // Clear coroutine stack. - | mov PC, KBASE - | sub PC, RA - | je >6 // No results? - | lea RD, [BASE+PC] - | shr PC, 3 - | cmp RD, L:RB->maxstack - | ja >9 // Need to grow stack? - | - | mov RB, BASE - | sub RBa, RAa - |5: // Move results from coroutine. - |.if X64 - | mov RDa, [RA] - | mov [RA+RB], RDa - |.else - | mov RD, [RA] - | mov [RA+RB], RD - | mov RD, [RA+4] - | mov [RA+RB+4], RD - |.endif - | add RA, 8 - | cmp RA, KBASE - | jne <5 - |6: - |.if resume - | lea RD, [PC+2] // nresults+1 = 1 + true + results. - | mov dword [BASE-4], LJ_TTRUE // Prepend true to results. - |.else - | lea RD, [PC+1] // nresults+1 = 1 + results. - |.endif - |7: - | mov PC, SAVE_PC - | mov MULTRES, RD - |.if resume - | mov RAa, -8 - |.else - | xor RA, RA - |.endif - | test PC, FRAME_TYPE - | jz ->BC_RET_Z - | jmp ->vm_return - | - |8: // Coroutine returned with error (at co->top-1). - |.if resume - | mov dword [BASE-4], LJ_TFALSE // Prepend false to results. - | mov RA, L:PC->top - | sub RA, 8 - | mov L:PC->top, RA // Clear error from coroutine stack. - | // Copy error message. 
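
coroutine_resume_wrap above implements coroutine.resume and coroutine.wrap by moving the arguments onto the target thread's stack, calling back into ->vm_resume, and then moving the results (or the error) back, prepending true/false for resume. The C-level equivalent uses lua_newthread plus the Lua 5.1 form of lua_resume that LuaJIT ships:

    #include <stdio.h>
    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>

    int main(void)
    {
      lua_State *L = luaL_newstate();
      luaL_openlibs(L);

      lua_State *co = lua_newthread(L);   /* new coroutine, anchored on L's stack */
      luaL_loadstring(co, "coroutine.yield(1); return 2");

      int st = lua_resume(co, 0);         /* run until the yield */
      printf("%s %d\n", st == LUA_YIELD ? "yielded" : "done",
             (int)lua_tointeger(co, -1)); /* yielded 1 */
      lua_pop(co, 1);                     /* consume the yielded value */

      st = lua_resume(co, 0);             /* resume after the yield */
      printf("%s %d\n", st == LUA_YIELD ? "yielded" : "done",
             (int)lua_tointeger(co, -1)); /* done 2 */

      lua_close(L);
      return 0;
    }
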
- |.if X64 - | mov RDa, [RA] - | mov [BASE], RDa - |.else - | mov RD, [RA] - | mov [BASE], RD - | mov RD, [RA+4] - | mov [BASE+4], RD - |.endif - | mov RD, 1+2 // nresults+1 = 1 + false + error. - | jmp <7 - |.else - | mov FCARG2, L:PC - | mov FCARG1, L:RB - | call extern lj_ffh_coroutine_wrap_err@8 // (lua_State *L, lua_State *co) - | // Error function does not return. - |.endif - | - |9: // Handle stack expansion on return from yield. - |.if X64 - | mov L:RA, TMP1 - |.else - | mov L:RA, ARG1 // The callee doesn't modify SAVE_L. - |.endif - | mov L:RA->top, KBASE // Undo coroutine stack clearing. - | mov FCARG2, PC - | mov FCARG1, L:RB - | call extern lj_state_growstack@8 // (lua_State *L, int n) - |.if X64 - | mov L:PC, TMP1 - |.else - | mov L:PC, ARG1 - |.endif - | mov BASE, L:RB->base - | jmp <4 // Retry the stack move. - |.endmacro - | - | coroutine_resume_wrap 1 // coroutine.resume - | coroutine_resume_wrap 0 // coroutine.wrap - | - |.ffunc coroutine_yield - | mov L:RB, SAVE_L - | test aword L:RB->cframe, CFRAME_RESUME - | jz ->fff_fallback - | mov L:RB->base, BASE - | lea RD, [BASE+NARGS:RD*8-8] - | mov L:RB->top, RD - | xor RD, RD - | mov aword L:RB->cframe, RDa - | mov al, LUA_YIELD - | mov byte L:RB->status, al - | jmp ->vm_leave_unw - | - |//-- Math library ------------------------------------------------------- - | - |.if not DUALNUM - |->fff_resi: // Dummy. - |.endif - | - |->fff_resn: - | mov PC, [BASE-4] - | fstp qword [BASE-8] - | jmp ->fff_res1 - | - | .ffunc_1 math_abs - |.if DUALNUM - | cmp dword [BASE+4], LJ_TISNUM; jne >2 - | mov RB, dword [BASE] - | cmp RB, 0; jns ->fff_resi - | neg RB; js >1 - |->fff_resbit: - |->fff_resi: - | mov PC, [BASE-4] - | mov dword [BASE-4], LJ_TISNUM - | mov dword [BASE-8], RB - | jmp ->fff_res1 - |1: - | mov PC, [BASE-4] - | mov dword [BASE-4], 0x41e00000 // 2^31. - | mov dword [BASE-8], 0 - | jmp ->fff_res1 - |2: - | ja ->fff_fallback - |.else - | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback - |.endif - | movsd xmm0, qword [BASE] - | sseconst_abs xmm1, RDa - | andps xmm0, xmm1 - |->fff_resxmm0: - | mov PC, [BASE-4] - | movsd qword [BASE-8], xmm0 - | // fallthrough - | - |->fff_res1: - | mov RD, 1+1 - |->fff_res: - | mov MULTRES, RD - |->fff_res_: - | test PC, FRAME_TYPE - | jnz >7 - |5: - | cmp PC_RB, RDL // More results expected? - | ja >6 - | // Adjust BASE. KBASE is assumed to be set for the calling frame. - | movzx RA, PC_RA - | not RAa // Note: ~RA = -(RA+1) - | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8 - | ins_next - | - |6: // Fill up results with nil. - | mov dword [BASE+RD*8-12], LJ_TNIL - | add RD, 1 - | jmp <5 - | - |7: // Non-standard return case. - | mov RAa, -8 // Results start at BASE+RA = BASE-8. - | jmp ->vm_return - | - |.if X64 - |.define fff_resfp, fff_resxmm0 - |.else - |.define fff_resfp, fff_resn - |.endif - | - |.macro math_round, func - | .ffunc math_ .. func - |.if DUALNUM - | cmp dword [BASE+4], LJ_TISNUM; jne >1 - | mov RB, dword [BASE]; jmp ->fff_resi - |1: - | ja ->fff_fallback - |.else - | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback - |.endif - | movsd xmm0, qword [BASE] - | call ->vm_ .. func .. 
_sse - |.if DUALNUM - | cvttsd2si RB, xmm0 - | cmp RB, 0x80000000 - | jne ->fff_resi - | cvtsi2sd xmm1, RB - | ucomisd xmm0, xmm1 - | jp ->fff_resxmm0 - | je ->fff_resi - |.endif - | jmp ->fff_resxmm0 - |.endmacro - | - | math_round floor - | math_round ceil - | - |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0 - | - |.ffunc math_log - | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument. - | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback - | movsd xmm0, qword [BASE] - |.if not X64 - | movsd FPARG1, xmm0 - |.endif - | mov RB, BASE - | call extern log - | mov BASE, RB - | jmp ->fff_resfp - | - |.macro math_extern, func - | .ffunc_nsse math_ .. func - |.if not X64 - | movsd FPARG1, xmm0 - |.endif - | mov RB, BASE - | call extern func - | mov BASE, RB - | jmp ->fff_resfp - |.endmacro - | - |.macro math_extern2, func - | .ffunc_nnsse math_ .. func - |.if not X64 - | movsd FPARG1, xmm0 - | movsd FPARG3, xmm1 - |.endif - | mov RB, BASE - | call extern func - | mov BASE, RB - | jmp ->fff_resfp - |.endmacro - | - | math_extern log10 - | math_extern exp - | math_extern sin - | math_extern cos - | math_extern tan - | math_extern asin - | math_extern acos - | math_extern atan - | math_extern sinh - | math_extern cosh - | math_extern tanh - | math_extern2 pow - | math_extern2 atan2 - | math_extern2 fmod - | - |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn - | - |.ffunc_1 math_frexp - | mov RB, [BASE+4] - | cmp RB, LJ_TISNUM; jae ->fff_fallback - | mov PC, [BASE-4] - | mov RC, [BASE] - | mov [BASE-4], RB; mov [BASE-8], RC - | shl RB, 1; cmp RB, 0xffe00000; jae >3 - | or RC, RB; jz >3 - | mov RC, 1022 - | cmp RB, 0x00200000; jb >4 - |1: - | shr RB, 21; sub RB, RC // Extract and unbias exponent. - | cvtsi2sd xmm0, RB - | mov RB, [BASE-4] - | and RB, 0x800fffff // Mask off exponent. - | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0. - | mov [BASE-4], RB - |2: - | movsd qword [BASE], xmm0 - | mov RD, 1+2 - | jmp ->fff_res - |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0. - | xorps xmm0, xmm0; jmp <2 - |4: // Handle denormals by multiplying with 2^54 and adjusting the bias. - | movsd xmm0, qword [BASE] - | sseconst_hi xmm1, RBa, 43500000 // 2^54. - | mulsd xmm0, xmm1 - | movsd qword [BASE-8], xmm0 - | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1 - | - |.ffunc_nsse math_modf - | mov RB, [BASE+4] - | mov PC, [BASE-4] - | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf? - | movaps xmm4, xmm0 - | call ->vm_trunc_sse - | subsd xmm4, xmm0 - |1: - | movsd qword [BASE-8], xmm0 - | movsd qword [BASE], xmm4 - | mov RC, [BASE-4]; mov RB, [BASE+4] - | xor RC, RB; js >3 // Need to adjust sign? - |2: - | mov RD, 1+2 - | jmp ->fff_res - |3: - | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction. - | jmp <2 - |4: - | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0. - | - |.macro math_minmax, name, cmovop, sseop - | .ffunc name - | mov RA, 2 - | cmp dword [BASE+4], LJ_TISNUM - |.if DUALNUM - | jne >4 - | mov RB, dword [BASE] - |1: // Handle integers. - | cmp RA, RD; jae ->fff_resi - | cmp dword [BASE+RA*8-4], LJ_TISNUM; jne >3 - | cmp RB, dword [BASE+RA*8-8] - | cmovop RB, dword [BASE+RA*8-8] - | add RA, 1 - | jmp <1 - |3: - | ja ->fff_fallback - | // Convert intermediate result to number and continue below. - | cvtsi2sd xmm0, RB - | jmp >6 - |4: - | ja ->fff_fallback - |.else - | jae ->fff_fallback - |.endif - | - | movsd xmm0, qword [BASE] - |5: // Handle numbers or integers. 
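
math_abs, math_frexp and friends above work directly on the IEEE-754 bit pattern: abs is an andps with the 0x7fffffff,ffffffff mask from sseconst_abs, and frexp extracts the biased exponent (minus 1022, so the mantissa lands in [0.5,1)) and rewrites the exponent field to 0x3fe. A small C check of both tricks for normal doubles (the assembly above has extra paths for zero, infinities and denormals):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <math.h>

    static uint64_t bits(double d)   { uint64_t u; memcpy(&u, &d, 8); return u; }
    static double   make(uint64_t u) { double d; memcpy(&d, &u, 8); return d; }

    int main(void)
    {
      double x = -123.456;

      /* math.abs: clear the sign bit (andps with sseconst_abs). */
      double ax = make(bits(x) & 0x7fffffffffffffffull);

      /* math.frexp: exponent bits 52..62 minus 1022 gives e with m in [0.5,1);
      ** keeping sign+fraction and forcing exponent 0x3fe rebuilds m. */
      int    e = (int)((bits(x) >> 52) & 0x7ff) - 1022;
      double m = make((bits(x) & 0x800fffffffffffffull) | 0x3fe0000000000000ull);

      int e2; double m2 = frexp(x, &e2);
      printf("abs %g %g   e %d %d   m %g %g\n", ax, fabs(x), e, e2, m, m2);
      return 0;
    }

Built with -lm, the bit-level path and the libm frexp()/fabs() results agree for this input.
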
- | cmp RA, RD; jae ->fff_resxmm0 - | cmp dword [BASE+RA*8-4], LJ_TISNUM - |.if DUALNUM - | jb >6 - | ja ->fff_fallback - | cvtsi2sd xmm1, dword [BASE+RA*8-8] - | jmp >7 - |.else - | jae ->fff_fallback - |.endif - |6: - | movsd xmm1, qword [BASE+RA*8-8] - |7: - | sseop xmm0, xmm1 - | add RA, 1 - | jmp <5 - |.endmacro - | - | math_minmax math_min, cmovg, minsd - | math_minmax math_max, cmovl, maxsd - | - |//-- String library ----------------------------------------------------- - | - |.ffunc string_byte // Only handle the 1-arg case here. - | cmp NARGS:RD, 1+1; jne ->fff_fallback - | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback - | mov STR:RB, [BASE] - | mov PC, [BASE-4] - | cmp dword STR:RB->len, 1 - | jb ->fff_res0 // Return no results for empty string. - | movzx RB, byte STR:RB[1] - |.if DUALNUM - | jmp ->fff_resi - |.else - | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0 - |.endif - | - |.ffunc string_char // Only handle the 1-arg case here. - | ffgccheck - | cmp NARGS:RD, 1+1; jne ->fff_fallback // *Exactly* 1 arg. - | cmp dword [BASE+4], LJ_TISNUM - |.if DUALNUM - | jne ->fff_fallback - | mov RB, dword [BASE] - | cmp RB, 255; ja ->fff_fallback - | mov TMP2, RB - |.else - | jae ->fff_fallback - | cvttsd2si RB, qword [BASE] - | cmp RB, 255; ja ->fff_fallback - | mov TMP2, RB - |.endif - |.if X64 - | mov TMP3, 1 - |.else - | mov ARG3, 1 - |.endif - | lea RDa, TMP2 // Points to stack. Little-endian. - |->fff_newstr: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - |.if X64 - | mov CARG3d, TMP3 // Zero-extended to size_t. - | mov CARG2, RDa // May be 64 bit ptr to stack. - | mov CARG1d, L:RB - |.else - | mov ARG2, RD - | mov ARG1, L:RB - |.endif - | mov SAVE_PC, PC - | call extern lj_str_new // (lua_State *L, char *str, size_t l) - |->fff_resstr: - | // GCstr * returned in eax (RD). - | mov BASE, L:RB->base - | mov PC, [BASE-4] - | mov dword [BASE-4], LJ_TSTR - | mov [BASE-8], STR:RD - | jmp ->fff_res1 - | - |.ffunc string_sub - | ffgccheck - | mov TMP2, -1 - | cmp NARGS:RD, 1+2; jb ->fff_fallback - | jna >1 - | cmp dword [BASE+20], LJ_TISNUM - |.if DUALNUM - | jne ->fff_fallback - | mov RB, dword [BASE+16] - | mov TMP2, RB - |.else - | jae ->fff_fallback - | cvttsd2si RB, qword [BASE+16] - | mov TMP2, RB - |.endif - |1: - | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback - | cmp dword [BASE+12], LJ_TISNUM - |.if DUALNUM - | jne ->fff_fallback - |.else - | jae ->fff_fallback - |.endif - | mov STR:RB, [BASE] - | mov TMP3, STR:RB - | mov RB, STR:RB->len - |.if DUALNUM - | mov RA, dword [BASE+8] - |.else - | cvttsd2si RA, qword [BASE+8] - |.endif - | mov RC, TMP2 - | cmp RB, RC // len < end? (unsigned compare) - | jb >5 - |2: - | test RA, RA // start <= 0? - | jle >7 - |3: - | mov STR:RB, TMP3 - | sub RC, RA // start > end? - | jl ->fff_emptystr - | lea RB, [STR:RB+RA+#STR-1] - | add RC, 1 - |4: - |.if X64 - | mov TMP3, RC - |.else - | mov ARG3, RC - |.endif - | mov RD, RB - | jmp ->fff_newstr - | - |5: // Negative end or overflow. - | jl >6 - | lea RC, [RC+RB+1] // end = end+(len+1) - | jmp <2 - |6: // Overflow. - | mov RC, RB // end = len - | jmp <2 - | - |7: // Negative start or underflow. - | je >8 - | add RA, RB // start = start+(len+1) - | add RA, 1 - | jg <3 // start > 0? - |8: // Underflow. - | mov RA, 1 // start = 1 - | jmp <3 - | - |->fff_emptystr: // Range underflow. - | xor RC, RC // Zero length. Any ptr in RB is ok. - | jmp <4 - | - |.macro ffstring_op, name - | .ffunc_1 string_ .. 
name - | ffgccheck - | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback - | mov L:RB, SAVE_L - | lea SBUF:FCARG1, [DISPATCH+DISPATCH_GL(tmpbuf)] - | mov L:RB->base, BASE - | mov STR:FCARG2, [BASE] // Caveat: FCARG2 == BASE - | mov RC, SBUF:FCARG1->b - | mov SBUF:FCARG1->L, L:RB - | mov SBUF:FCARG1->p, RC - | mov SAVE_PC, PC - | call extern lj_buf_putstr_ .. name .. @8 - | mov FCARG1, eax - | call extern lj_buf_tostr@4 - | jmp ->fff_resstr - |.endmacro - | - |ffstring_op reverse - |ffstring_op lower - |ffstring_op upper - | - |//-- Bit library -------------------------------------------------------- - | - |.macro .ffunc_bit, name, kind, fdef - | fdef name - |.if kind == 2 - | sseconst_tobit xmm1, RBa - |.endif - | cmp dword [BASE+4], LJ_TISNUM - |.if DUALNUM - | jne >1 - | mov RB, dword [BASE] - |.if kind > 0 - | jmp >2 - |.else - | jmp ->fff_resbit - |.endif - |1: - | ja ->fff_fallback - |.else - | jae ->fff_fallback - |.endif - | movsd xmm0, qword [BASE] - |.if kind < 2 - | sseconst_tobit xmm1, RBa - |.endif - | addsd xmm0, xmm1 - | movd RB, xmm0 - |2: - |.endmacro - | - |.macro .ffunc_bit, name, kind - | .ffunc_bit name, kind, .ffunc_1 - |.endmacro - | - |.ffunc_bit bit_tobit, 0 - | jmp ->fff_resbit - | - |.macro .ffunc_bit_op, name, ins - | .ffunc_bit name, 2 - | mov TMP2, NARGS:RD // Save for fallback. - | lea RD, [BASE+NARGS:RD*8-16] - |1: - | cmp RD, BASE - | jbe ->fff_resbit - | cmp dword [RD+4], LJ_TISNUM - |.if DUALNUM - | jne >2 - | ins RB, dword [RD] - | sub RD, 8 - | jmp <1 - |2: - | ja ->fff_fallback_bit_op - |.else - | jae ->fff_fallback_bit_op - |.endif - | movsd xmm0, qword [RD] - | addsd xmm0, xmm1 - | movd RA, xmm0 - | ins RB, RA - | sub RD, 8 - | jmp <1 - |.endmacro - | - |.ffunc_bit_op bit_band, and - |.ffunc_bit_op bit_bor, or - |.ffunc_bit_op bit_bxor, xor - | - |.ffunc_bit bit_bswap, 1 - | bswap RB - | jmp ->fff_resbit - | - |.ffunc_bit bit_bnot, 1 - | not RB - |.if DUALNUM - | jmp ->fff_resbit - |.else - |->fff_resbit: - | cvtsi2sd xmm0, RB - | jmp ->fff_resxmm0 - |.endif - | - |->fff_fallback_bit_op: - | mov NARGS:RD, TMP2 // Restore for fallback - | jmp ->fff_fallback - | - |.macro .ffunc_bit_sh, name, ins - |.if DUALNUM - | .ffunc_bit name, 1, .ffunc_2 - | // Note: no inline conversion from number for 2nd argument! - | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback - | mov RA, dword [BASE+8] - |.else - | .ffunc_nnsse name - | sseconst_tobit xmm2, RBa - | addsd xmm0, xmm2 - | addsd xmm1, xmm2 - | movd RB, xmm0 - | movd RA, xmm1 - |.endif - | ins RB, cl // Assumes RA is ecx. - | jmp ->fff_resbit - |.endmacro - | - |.ffunc_bit_sh bit_lshift, shl - |.ffunc_bit_sh bit_rshift, shr - |.ffunc_bit_sh bit_arshift, sar - |.ffunc_bit_sh bit_rol, rol - |.ffunc_bit_sh bit_ror, ror - | - |//----------------------------------------------------------------------- - | - |->fff_fallback_2: - | mov NARGS:RD, 1+2 // Other args are ignored, anyway. - | jmp ->fff_fallback - |->fff_fallback_1: - | mov NARGS:RD, 1+1 // Other args are ignored, anyway. - |->fff_fallback: // Call fast function fallback handler. - | // BASE = new base, RD = nargs+1 - | mov L:RB, SAVE_L - | mov PC, [BASE-4] // Fallback may overwrite PC. - | mov SAVE_PC, PC // Redundant (but a defined value). - | mov L:RB->base, BASE - | lea RD, [BASE+NARGS:RD*8-8] - | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler. - | mov L:RB->top, RD - | mov CFUNC:RD, [BASE-8] - | cmp RA, L:RB->maxstack - | ja >5 // Need to grow stack. 
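
The bit.* fast paths above convert their number arguments with sseconst_tobit: adding 2^52+2^51 to a double pushes its integer part into the low mantissa bits, so the low 32 bits of the sum's bit pattern are the argument reduced modulo 2^32, which is what bit.tobit returns. A standalone check (valid for inputs within roughly +-2^51; non-integral inputs get rounded by the addition):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int32_t tobit(double d)
    {
      d += 6755399441055744.0;           /* 2^52 + 2^51, cf. sseconst_tobit */
      uint64_t u; memcpy(&u, &d, 8);
      return (int32_t)(uint32_t)u;       /* low 32 bits of the mantissa */
    }

    int main(void)
    {
      printf("%d %d %d\n", tobit(1.0), tobit(-1.0), tobit(4294967296.0 + 5));
      /* 1 -1 5  (2^32+5 wraps to 5) */
      return 0;
    }
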
- |.if X64 - | mov CARG1d, L:RB - |.else - | mov ARG1, L:RB - |.endif - | call aword CFUNC:RD->f // (lua_State *L) - | mov BASE, L:RB->base - | // Either throws an error, or recovers and returns -1, 0 or nresults+1. - | test RD, RD; jg ->fff_res // Returned nresults+1? - |1: - | mov RA, L:RB->top - | sub RA, BASE - | shr RA, 3 - | test RD, RD - | lea NARGS:RD, [RA+1] - | mov LFUNC:RB, [BASE-8] - | jne ->vm_call_tail // Returned -1? - | ins_callt // Returned 0: retry fast path. - | - |// Reconstruct previous base for vmeta_call during tailcall. - |->vm_call_tail: - | mov RA, BASE - | test PC, FRAME_TYPE - | jnz >3 - | movzx RB, PC_RA - | not RBa // Note: ~RB = -(RB+1) - | lea BASE, [BASE+RB*8] // base = base - (RB+1)*8 - | jmp ->vm_call_dispatch // Resolve again for tailcall. - |3: - | mov RB, PC - | and RB, -8 - | sub BASE, RB - | jmp ->vm_call_dispatch // Resolve again for tailcall. - | - |5: // Grow stack for fallback handler. - | mov FCARG2, LUA_MINSTACK - | mov FCARG1, L:RB - | call extern lj_state_growstack@8 // (lua_State *L, int n) - | mov BASE, L:RB->base - | xor RD, RD // Simulate a return 0. - | jmp <1 // Dumb retry (goes through ff first). - | - |->fff_gcstep: // Call GC step function. - | // BASE = new base, RD = nargs+1 - | pop RBa // Must keep stack at same level. - | mov TMPa, RBa // Save return address - | mov L:RB, SAVE_L - | mov SAVE_PC, PC // Redundant (but a defined value). - | mov L:RB->base, BASE - | lea RD, [BASE+NARGS:RD*8-8] - | mov FCARG1, L:RB - | mov L:RB->top, RD - | call extern lj_gc_step@4 // (lua_State *L) - | mov BASE, L:RB->base - | mov RD, L:RB->top - | sub RD, BASE - | shr RD, 3 - | add NARGS:RD, 1 - | mov RBa, TMPa - | push RBa // Restore return address. - | ret - | - |//----------------------------------------------------------------------- - |//-- Special dispatch targets ------------------------------------------- - |//----------------------------------------------------------------------- - | - |->vm_record: // Dispatch target for recording phase. - |.if JIT - | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] - | test RDL, HOOK_VMEVENT // No recording while in vmevent. - | jnz >5 - | // Decrement the hookcount for consistency, but always do the call. - | test RDL, HOOK_ACTIVE - | jnz >1 - | test RDL, LUA_MASKLINE|LUA_MASKCOUNT - | jz >1 - | dec dword [DISPATCH+DISPATCH_GL(hookcount)] - | jmp >1 - |.endif - | - |->vm_rethook: // Dispatch target for return hooks. - | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] - | test RDL, HOOK_ACTIVE // Hook already active? - | jnz >5 - | jmp >1 - | - |->vm_inshook: // Dispatch target for instr/line hooks. - | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] - | test RDL, HOOK_ACTIVE // Hook already active? - | jnz >5 - | - | test RDL, LUA_MASKLINE|LUA_MASKCOUNT - | jz >5 - | dec dword [DISPATCH+DISPATCH_GL(hookcount)] - | jz >1 - | test RDL, LUA_MASKLINE - | jz >5 - |1: - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov FCARG2, PC // Caveat: FCARG2 == BASE - | mov FCARG1, L:RB - | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. - | call extern lj_dispatch_ins@8 // (lua_State *L, const BCIns *pc) - |3: - | mov BASE, L:RB->base - |4: - | movzx RA, PC_RA - |5: - | movzx OP, PC_OP - | movzx RD, PC_RD - |.if X64 - | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins. - |.else - | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Re-dispatch to static ins. - |.endif - | - |->cont_hook: // Continue from hook yield. 
- | add PC, 4 - | mov RA, [RB-24] - | mov MULTRES, RA // Restore MULTRES for *M ins. - | jmp <4 - | - |->vm_hotloop: // Hot loop counter underflow. - |.if JIT - | mov LFUNC:RB, [BASE-8] // Same as curr_topL(L). - | mov RB, LFUNC:RB->pc - | movzx RD, byte [RB+PC2PROTO(framesize)] - | lea RD, [BASE+RD*8] - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov L:RB->top, RD - | mov FCARG2, PC - | lea FCARG1, [DISPATCH+GG_DISP2J] - | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa - | mov SAVE_PC, PC - | call extern lj_trace_hot@8 // (jit_State *J, const BCIns *pc) - | jmp <3 - |.endif - | - |->vm_callhook: // Dispatch target for call hooks. - | mov SAVE_PC, PC - |.if JIT - | jmp >1 - |.endif - | - |->vm_hotcall: // Hot call counter underflow. - |.if JIT - | mov SAVE_PC, PC - | or PC, 1 // Marker for hot call. - |1: - |.endif - | lea RD, [BASE+NARGS:RD*8-8] - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov L:RB->top, RD - | mov FCARG2, PC - | mov FCARG1, L:RB - | call extern lj_dispatch_call@8 // (lua_State *L, const BCIns *pc) - | // ASMFunction returned in eax/rax (RDa). - | mov SAVE_PC, 0 // Invalidate for subsequent line hook. - |.if JIT - | and PC, -2 - |.endif - | mov BASE, L:RB->base - | mov RAa, RDa - | mov RD, L:RB->top - | sub RD, BASE - | mov RBa, RAa - | movzx RA, PC_RA - | shr RD, 3 - | add NARGS:RD, 1 - | jmp RBa - | - |->cont_stitch: // Trace stitching. - |.if JIT - | // BASE = base, RC = result, RB = mbase - | mov RA, [RB-24] // Save previous trace number. - | mov TMP1, RA - | mov TMP3, DISPATCH // Need one more register. - | mov DISPATCH, MULTRES - | movzx RA, PC_RA - | lea RA, [BASE+RA*8] // Call base. - | sub DISPATCH, 1 - | jz >2 - |1: // Move results down. - |.if X64 - | mov RBa, [RC] - | mov [RA], RBa - |.else - | mov RB, [RC] - | mov [RA], RB - | mov RB, [RC+4] - | mov [RA+4], RB - |.endif - | add RC, 8 - | add RA, 8 - | sub DISPATCH, 1 - | jnz <1 - |2: - | movzx RC, PC_RA - | movzx RB, PC_RB - | add RC, RB - | lea RC, [BASE+RC*8-8] - |3: - | cmp RC, RA - | ja >9 // More results wanted? - | - | mov DISPATCH, TMP3 - | mov RB, TMP1 // Get previous trace number. - | mov RA, [DISPATCH+DISPATCH_J(trace)] - | mov TRACE:RD, [RA+RB*4] - | test TRACE:RD, TRACE:RD - | jz ->cont_nop - | movzx RD, word TRACE:RD->link - | cmp RD, RB - | je ->cont_nop // Blacklisted. - | test RD, RD - | jne =>BC_JLOOP // Jump to stitched trace. - | - | // Stitch a new trace to the previous trace. - | mov [DISPATCH+DISPATCH_J(exitno)], RB - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov FCARG2, PC - | lea FCARG1, [DISPATCH+GG_DISP2J] - | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa - | call extern lj_dispatch_stitch@8 // (jit_State *J, const BCIns *pc) - | mov BASE, L:RB->base - | jmp ->cont_nop - | - |9: // Fill up results with nil. - | mov dword [RA+4], LJ_TNIL - | add RA, 8 - | jmp <3 - |.endif - | - |->vm_profhook: // Dispatch target for profiler hook. -#if LJ_HASPROFILE - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov FCARG2, PC // Caveat: FCARG2 == BASE - | mov FCARG1, L:RB - | call extern lj_dispatch_profile@8 // (lua_State *L, const BCIns *pc) - | mov BASE, L:RB->base - | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. - | sub PC, 4 - | jmp ->cont_nop -#endif - | - |//----------------------------------------------------------------------- - |//-- Trace exit handler ------------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Called from an exit stub with the exit number on the stack. 
- |// The 16 bit exit number is stored with two (sign-extended) push imm8. - |->vm_exit_handler: - |.if JIT - |.if X64 - | push r13; push r12 - | push r11; push r10; push r9; push r8 - | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp - | push rbx; push rdx; push rcx; push rax - | movzx RC, byte [rbp-8] // Reconstruct exit number. - | mov RCH, byte [rbp-16] - | mov [rbp-8], r15; mov [rbp-16], r14 - |.else - | push ebp; lea ebp, [esp+12]; push ebp - | push ebx; push edx; push ecx; push eax - | movzx RC, byte [ebp-4] // Reconstruct exit number. - | mov RCH, byte [ebp-8] - | mov [ebp-4], edi; mov [ebp-8], esi - |.endif - | // Caveat: DISPATCH is ebx. - | mov DISPATCH, [ebp] - | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number. - | set_vmstate EXIT - | mov [DISPATCH+DISPATCH_J(exitno)], RC - | mov [DISPATCH+DISPATCH_J(parent)], RA - |.if X64 - |.if X64WIN - | sub rsp, 16*8+4*8 // Room for SSE regs + save area. - |.else - | sub rsp, 16*8 // Room for SSE regs. - |.endif - | add rbp, -128 - | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14 - | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12 - | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10 - | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8 - | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6 - | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4 - | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2 - | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0 - |.else - | sub esp, 8*8+16 // Room for SSE regs + args. - | movsd qword [ebp-40], xmm7; movsd qword [ebp-48], xmm6 - | movsd qword [ebp-56], xmm5; movsd qword [ebp-64], xmm4 - | movsd qword [ebp-72], xmm3; movsd qword [ebp-80], xmm2 - | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0 - |.endif - | // Caveat: RB is ebp. - | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)] - | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)] - | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa - | mov L:RB->base, BASE - |.if X64WIN - | lea CARG2, [rsp+4*8] - |.elif X64 - | mov CARG2, rsp - |.else - | lea FCARG2, [esp+16] - |.endif - | lea FCARG1, [DISPATCH+GG_DISP2J] - | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 - | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex) - | // MULTRES or negated error code returned in eax (RD). - | mov RAa, L:RB->cframe - | and RAa, CFRAME_RAWMASK - |.if X64WIN - | // Reposition stack later. - |.elif X64 - | mov rsp, RAa // Reposition stack to C frame. - |.else - | mov esp, RAa // Reposition stack to C frame. - |.endif - | mov [RAa+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield). - | mov BASE, L:RB->base - | mov PC, [RAa+CFRAME_OFS_PC] // Get SAVE_PC. - |.if X64 - | jmp >1 - |.endif - |.endif - |->vm_exit_interp: - | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set. - |.if JIT - |.if X64 - | // Restore additional callee-save registers only used in compiled code. - |.if X64WIN - | lea RAa, [rsp+9*16+4*8] - |1: - | movdqa xmm15, [RAa-9*16] - | movdqa xmm14, [RAa-8*16] - | movdqa xmm13, [RAa-7*16] - | movdqa xmm12, [RAa-6*16] - | movdqa xmm11, [RAa-5*16] - | movdqa xmm10, [RAa-4*16] - | movdqa xmm9, [RAa-3*16] - | movdqa xmm8, [RAa-2*16] - | movdqa xmm7, [RAa-1*16] - | mov rsp, RAa // Reposition stack to C frame. - | movdqa xmm6, [RAa] - | mov r15, CSAVE_3 - | mov r14, CSAVE_4 - |.else - | add rsp, 16 // Reposition stack to C frame. - |1: - |.endif - | mov r13, TMPa - | mov r12, TMPQ - |.endif - | test RD, RD; js >9 // Check for error from exit. 
- | mov L:RB, SAVE_L - | mov MULTRES, RD - | mov LFUNC:KBASE, [BASE-8] - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | mov L:RB->base, BASE - | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 - | set_vmstate INTERP - | // Modified copy of ins_next which handles function header dispatch, too. - | mov RC, [PC] - | movzx RA, RCH - | movzx OP, RCL - | add PC, 4 - | shr RC, 16 - | cmp OP, BC_FUNCF // Function header? - | jb >3 - | cmp OP, BC_FUNCC+2 // Fast function? - | jae >4 - |2: - | mov RC, MULTRES // RC/RD holds nres+1. - |3: - |.if X64 - | jmp aword [DISPATCH+OP*8] - |.else - | jmp aword [DISPATCH+OP*4] - |.endif - | - |4: // Check frame below fast function. - | mov RC, [BASE-4] - | test RC, FRAME_TYPE - | jnz <2 // Trace stitching continuation? - | // Otherwise set KBASE for Lua function below fast function. - | movzx RC, byte [RC-3] - | not RCa - | mov LFUNC:KBASE, [BASE+RC*8-8] - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | jmp <2 - | - |9: // Rethrow error from the right C frame. - | neg RD - | mov FCARG1, L:RB - | mov FCARG2, RD - | call extern lj_err_throw@8 // (lua_State *L, int errcode) - |.endif - | - |//----------------------------------------------------------------------- - |//-- Math helper functions ---------------------------------------------- - |//----------------------------------------------------------------------- - | - |// FP value rounding. Called by math.floor/math.ceil fast functions - |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified. - |.macro vm_round, name, mode, cond - |->name: - |.if not X64 and cond - | movsd xmm0, qword [esp+4] - | call ->name .. _sse - | movsd qword [esp+4], xmm0 // Overwrite callee-owned arg. - | fld qword [esp+4] - | ret - |.endif - | - |->name .. _sse: - | sseconst_abs xmm2, RDa - | sseconst_2p52 xmm3, RDa - | movaps xmm1, xmm0 - | andpd xmm1, xmm2 // |x| - | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|. - | jbe >1 - | andnpd xmm2, xmm0 // Isolate sign bit. - |.if mode == 2 // trunc(x)? - | movaps xmm0, xmm1 - | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 - | subsd xmm1, xmm3 - | sseconst_1 xmm3, RDa - | cmpsd xmm0, xmm1, 1 // |x| < result? - | andpd xmm0, xmm3 - | subsd xmm1, xmm0 // If yes, subtract -1. - | orpd xmm1, xmm2 // Merge sign bit back in. - |.else - | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 - | subsd xmm1, xmm3 - | orpd xmm1, xmm2 // Merge sign bit back in. - | .if mode == 1 // ceil(x)? - | sseconst_m1 xmm2, RDa // Must subtract -1 to preserve -0. - | cmpsd xmm0, xmm1, 6 // x > result? - | .else // floor(x)? - | sseconst_1 xmm2, RDa - | cmpsd xmm0, xmm1, 1 // x < result? - | .endif - | andpd xmm0, xmm2 - | subsd xmm1, xmm0 // If yes, subtract +-1. - |.endif - | movaps xmm0, xmm1 - |1: - | ret - |.endmacro - | - | vm_round vm_floor, 0, 1 - | vm_round vm_ceil, 1, JIT - | vm_round vm_trunc, 2, JIT - | - |// FP modulo x%y. Called by BC_MOD* and vm_arith. - |->vm_mod: - |// Args in xmm0/xmm1, return value in xmm0. - |// Caveat: xmm0-xmm5 and RC (eax) modified! - | movaps xmm5, xmm0 - | divsd xmm0, xmm1 - | sseconst_abs xmm2, RDa - | sseconst_2p52 xmm3, RDa - | movaps xmm4, xmm0 - | andpd xmm4, xmm2 // |x/y| - | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|. - | jbe >1 - | andnpd xmm2, xmm0 // Isolate sign bit. - | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52 - | subsd xmm4, xmm3 - | orpd xmm4, xmm2 // Merge sign bit back in. - | sseconst_1 xmm2, RDa - | cmpsd xmm0, xmm4, 1 // x/y < result? - | andpd xmm0, xmm2 - | subsd xmm4, xmm0 // If yes, subtract 1.0. 
- | movaps xmm0, xmm5 - | mulsd xmm1, xmm4 - | subsd xmm0, xmm1 - | ret - |1: - | mulsd xmm1, xmm0 - | movaps xmm0, xmm5 - | subsd xmm0, xmm1 - | ret - | - |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified. - |->vm_powi_sse: - | cmp eax, 1; jle >6 // i<=1? - | // Now 1 < (unsigned)i <= 0x80000000. - |1: // Handle leading zeros. - | test eax, 1; jnz >2 - | mulsd xmm0, xmm0 - | shr eax, 1 - | jmp <1 - |2: - | shr eax, 1; jz >5 - | movaps xmm1, xmm0 - |3: // Handle trailing bits. - | mulsd xmm0, xmm0 - | shr eax, 1; jz >4 - | jnc <3 - | mulsd xmm1, xmm0 - | jmp <3 - |4: - | mulsd xmm0, xmm1 - |5: - | ret - |6: - | je <5 // x^1 ==> x - | jb >7 // x^0 ==> 1 - | neg eax - | call <1 - | sseconst_1 xmm1, RDa - | divsd xmm1, xmm0 - | movaps xmm0, xmm1 - | ret - |7: - | sseconst_1 xmm0, RDa - | ret - | - |//----------------------------------------------------------------------- - |//-- Miscellaneous functions -------------------------------------------- - |//----------------------------------------------------------------------- - | - |// int lj_vm_cpuid(uint32_t f, uint32_t res[4]) - |->vm_cpuid: - |.if X64 - | mov eax, CARG1d - | .if X64WIN; push rsi; mov rsi, CARG2; .endif - | push rbx - | cpuid - | mov [rsi], eax - | mov [rsi+4], ebx - | mov [rsi+8], ecx - | mov [rsi+12], edx - | pop rbx - | .if X64WIN; pop rsi; .endif - | ret - |.else - | pushfd - | pop edx - | mov ecx, edx - | xor edx, 0x00200000 // Toggle ID bit in flags. - | push edx - | popfd - | pushfd - | pop edx - | xor eax, eax // Zero means no features supported. - | cmp ecx, edx - | jz >1 // No ID toggle means no CPUID support. - | mov eax, [esp+4] // Argument 1 is function number. - | push edi - | push ebx - | cpuid - | mov edi, [esp+16] // Argument 2 is result area. - | mov [edi], eax - | mov [edi+4], ebx - | mov [edi+8], ecx - | mov [edi+12], edx - | pop ebx - | pop edi - |1: - | ret - |.endif - | - |//----------------------------------------------------------------------- - |//-- Assertions --------------------------------------------------------- - |//----------------------------------------------------------------------- - | - |->assert_bad_for_arg_type: -#ifdef LUA_USE_ASSERT - | int3 -#endif - | int3 - | - |//----------------------------------------------------------------------- - |//-- FFI helper functions ----------------------------------------------- - |//----------------------------------------------------------------------- - | - |// Handler for callback functions. Callback slot number in ah/al. - |->vm_ffi_callback: - |.if FFI - |.type CTSTATE, CTState, PC - |.if not X64 - | sub esp, 16 // Leave room for SAVE_ERRF etc. - |.endif - | saveregs_ // ebp/rbp already saved. ebp now holds global_State *. 
- | lea DISPATCH, [ebp+GG_G2DISP] - | mov CTSTATE, GL:ebp->ctype_state - | movzx eax, ax - | mov CTSTATE->cb.slot, eax - |.if X64 - | mov CTSTATE->cb.gpr[0], CARG1 - | mov CTSTATE->cb.gpr[1], CARG2 - | mov CTSTATE->cb.gpr[2], CARG3 - | mov CTSTATE->cb.gpr[3], CARG4 - | movsd qword CTSTATE->cb.fpr[0], xmm0 - | movsd qword CTSTATE->cb.fpr[1], xmm1 - | movsd qword CTSTATE->cb.fpr[2], xmm2 - | movsd qword CTSTATE->cb.fpr[3], xmm3 - |.if X64WIN - | lea rax, [rsp+CFRAME_SIZE+4*8] - |.else - | lea rax, [rsp+CFRAME_SIZE] - | mov CTSTATE->cb.gpr[4], CARG5 - | mov CTSTATE->cb.gpr[5], CARG6 - | movsd qword CTSTATE->cb.fpr[4], xmm4 - | movsd qword CTSTATE->cb.fpr[5], xmm5 - | movsd qword CTSTATE->cb.fpr[6], xmm6 - | movsd qword CTSTATE->cb.fpr[7], xmm7 - |.endif - | mov CTSTATE->cb.stack, rax - | mov CARG2, rsp - |.else - | lea eax, [esp+CFRAME_SIZE+16] - | mov CTSTATE->cb.gpr[0], FCARG1 - | mov CTSTATE->cb.gpr[1], FCARG2 - | mov CTSTATE->cb.stack, eax - | mov FCARG1, [esp+CFRAME_SIZE+12] // Move around misplaced retaddr/ebp. - | mov FCARG2, [esp+CFRAME_SIZE+8] - | mov SAVE_RET, FCARG1 - | mov SAVE_R4, FCARG2 - | mov FCARG2, esp - |.endif - | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok. - | mov FCARG1, CTSTATE - | call extern lj_ccallback_enter@8 // (CTState *cts, void *cf) - | // lua_State * returned in eax (RD). - | set_vmstate INTERP - | mov BASE, L:RD->base - | mov RD, L:RD->top - | sub RD, BASE - | mov LFUNC:RB, [BASE-8] - | shr RD, 3 - | add RD, 1 - | ins_callt - |.endif - | - |->cont_ffi_callback: // Return from FFI callback. - |.if FFI - | mov L:RA, SAVE_L - | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)] - | mov aword CTSTATE->L, L:RAa - | mov L:RA->base, BASE - | mov L:RA->top, RB - | mov FCARG1, CTSTATE - | mov FCARG2, RC - | call extern lj_ccallback_leave@8 // (CTState *cts, TValue *o) - |.if X64 - | mov rax, CTSTATE->cb.gpr[0] - | movsd xmm0, qword CTSTATE->cb.fpr[0] - | jmp ->vm_leave_unw - |.else - | mov L:RB, SAVE_L - | mov eax, CTSTATE->cb.gpr[0] - | mov edx, CTSTATE->cb.gpr[1] - | cmp dword CTSTATE->cb.gpr[2], 1 - | jb >7 - | je >6 - | fld qword CTSTATE->cb.fpr[0].d - | jmp >7 - |6: - | fld dword CTSTATE->cb.fpr[0].f - |7: - | mov ecx, L:RB->top - | movzx ecx, word [ecx+6] // Get stack adjustment and copy up. - | mov SAVE_L, ecx // Must be one slot above SAVE_RET - | restoreregs - | pop ecx // Move return addr from SAVE_RET. - | add esp, [esp] // Adjust stack. - | add esp, 16 - | push ecx - | ret - |.endif - |.endif - | - |->vm_ffi_call@4: // Call C function via FFI. - | // Caveat: needs special frame unwinding, see below. - |.if FFI - |.if X64 - | .type CCSTATE, CCallState, rbx - | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1 - |.else - | .type CCSTATE, CCallState, ebx - | push ebp; mov ebp, esp; push ebx; mov CCSTATE, FCARG1 - |.endif - | - | // Readjust stack. - |.if X64 - | mov eax, CCSTATE->spadj - | sub rsp, rax - |.else - | sub esp, CCSTATE->spadj - |.if WIN - | mov CCSTATE->spadj, esp - |.endif - |.endif - | - | // Copy stack slots. 
- | movzx ecx, byte CCSTATE->nsp - | sub ecx, 1 - | js >2 - |1: - |.if X64 - | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)] - | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax - |.else - | mov eax, [CCSTATE+ecx*4+offsetof(CCallState, stack)] - | mov [esp+ecx*4], eax - |.endif - | sub ecx, 1 - | jns <1 - |2: - | - |.if X64 - | movzx eax, byte CCSTATE->nfpr - | mov CARG1, CCSTATE->gpr[0] - | mov CARG2, CCSTATE->gpr[1] - | mov CARG3, CCSTATE->gpr[2] - | mov CARG4, CCSTATE->gpr[3] - |.if not X64WIN - | mov CARG5, CCSTATE->gpr[4] - | mov CARG6, CCSTATE->gpr[5] - |.endif - | test eax, eax; jz >5 - | movaps xmm0, CCSTATE->fpr[0] - | movaps xmm1, CCSTATE->fpr[1] - | movaps xmm2, CCSTATE->fpr[2] - | movaps xmm3, CCSTATE->fpr[3] - |.if not X64WIN - | cmp eax, 4; jbe >5 - | movaps xmm4, CCSTATE->fpr[4] - | movaps xmm5, CCSTATE->fpr[5] - | movaps xmm6, CCSTATE->fpr[6] - | movaps xmm7, CCSTATE->fpr[7] - |.endif - |5: - |.else - | mov FCARG1, CCSTATE->gpr[0] - | mov FCARG2, CCSTATE->gpr[1] - |.endif - | - | call aword CCSTATE->func - | - |.if X64 - | mov CCSTATE->gpr[0], rax - | movaps CCSTATE->fpr[0], xmm0 - |.if not X64WIN - | mov CCSTATE->gpr[1], rdx - | movaps CCSTATE->fpr[1], xmm1 - |.endif - |.else - | mov CCSTATE->gpr[0], eax - | mov CCSTATE->gpr[1], edx - | cmp byte CCSTATE->resx87, 1 - | jb >7 - | je >6 - | fstp qword CCSTATE->fpr[0].d[0] - | jmp >7 - |6: - | fstp dword CCSTATE->fpr[0].f[0] - |7: - |.if WIN - | sub CCSTATE->spadj, esp - |.endif - |.endif - | - |.if X64 - | mov rbx, [rbp-8]; leave; ret - |.else - | mov ebx, [ebp-4]; leave; ret - |.endif - |.endif - |// Note: vm_ffi_call must be the last function in this object file! - | - |//----------------------------------------------------------------------- -} - -/* Generate the code for a single instruction. */ -static void build_ins(BuildCtx *ctx, BCOp op, int defop) -{ - int vk = 0; - |// Note: aligning all instructions does not pay off. - |=>defop: - - switch (op) { - - /* -- Comparison ops ---------------------------------------------------- */ - - /* Remember: all ops branch for a true comparison, fall through otherwise. */ - - |.macro jmp_comp, lt, ge, le, gt, target - ||switch (op) { - ||case BC_ISLT: - | lt target - ||break; - ||case BC_ISGE: - | ge target - ||break; - ||case BC_ISLE: - | le target - ||break; - ||case BC_ISGT: - | gt target - ||break; - ||default: break; /* Shut up GCC. */ - ||} - |.endmacro - - case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: - | // RA = src1, RD = src2, JMP with RD = target - | ins_AD - |.if DUALNUM - | checkint RA, >7 - | checkint RD, >8 - | mov RB, dword [BASE+RA*8] - | add PC, 4 - | cmp RB, dword [BASE+RD*8] - | jmp_comp jge, jl, jg, jle, >9 - |6: - | movzx RD, PC_RD - | branchPC RD - |9: - | ins_next - | - |7: // RA is not an integer. - | ja ->vmeta_comp - | // RA is a number. - | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp - | // RA is a number, RD is an integer. - | cvtsi2sd xmm0, dword [BASE+RD*8] - | jmp >2 - | - |8: // RA is an integer, RD is not an integer. - | ja ->vmeta_comp - | // RA is an integer, RD is a number. - | cvtsi2sd xmm1, dword [BASE+RA*8] - | movsd xmm0, qword [BASE+RD*8] - | add PC, 4 - | ucomisd xmm0, xmm1 - | jmp_comp jbe, ja, jb, jae, <9 - | jmp <6 - |.else - | checknum RA, ->vmeta_comp - | checknum RD, ->vmeta_comp - |.endif - |1: - | movsd xmm0, qword [BASE+RD*8] - |2: - | add PC, 4 - | ucomisd xmm0, qword [BASE+RA*8] - |3: - | // Unordered: all of ZF CF PF set, ordered: PF clear. 
- | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. - |.if DUALNUM - | jmp_comp jbe, ja, jb, jae, <9 - | jmp <6 - |.else - | jmp_comp jbe, ja, jb, jae, >1 - | movzx RD, PC_RD - | branchPC RD - |1: - | ins_next - |.endif - break; - - case BC_ISEQV: case BC_ISNEV: - vk = op == BC_ISEQV; - | ins_AD // RA = src1, RD = src2, JMP with RD = target - | mov RB, [BASE+RD*8+4] - | add PC, 4 - |.if DUALNUM - | cmp RB, LJ_TISNUM; jne >7 - | checkint RA, >8 - | mov RB, dword [BASE+RD*8] - | cmp RB, dword [BASE+RA*8] - if (vk) { - | jne >9 - } else { - | je >9 - } - | movzx RD, PC_RD - | branchPC RD - |9: - | ins_next - | - |7: // RD is not an integer. - | ja >5 - | // RD is a number. - | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5 - | // RD is a number, RA is an integer. - | cvtsi2sd xmm0, dword [BASE+RA*8] - | jmp >2 - | - |8: // RD is an integer, RA is not an integer. - | ja >5 - | // RD is an integer, RA is a number. - | cvtsi2sd xmm0, dword [BASE+RD*8] - | ucomisd xmm0, qword [BASE+RA*8] - | jmp >4 - | - |.else - | cmp RB, LJ_TISNUM; jae >5 - | checknum RA, >5 - |.endif - |1: - | movsd xmm0, qword [BASE+RA*8] - |2: - | ucomisd xmm0, qword [BASE+RD*8] - |4: - iseqne_fp: - if (vk) { - | jp >2 // Unordered means not equal. - | jne >2 - } else { - | jp >2 // Unordered means not equal. - | je >1 - } - iseqne_end: - if (vk) { - |1: // EQ: Branch to the target. - | movzx RD, PC_RD - | branchPC RD - |2: // NE: Fallthrough to next instruction. - |.if not FFI - |3: - |.endif - } else { - |.if not FFI - |3: - |.endif - |2: // NE: Branch to the target. - | movzx RD, PC_RD - | branchPC RD - |1: // EQ: Fallthrough to next instruction. - } - if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || - op == BC_ISEQN || op == BC_ISNEN)) { - | jmp <9 - } else { - | ins_next - } - | - if (op == BC_ISEQV || op == BC_ISNEV) { - |5: // Either or both types are not numbers. - |.if FFI - | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd - | checktp RA, LJ_TCDATA; je ->vmeta_equal_cd - |.endif - | checktp RA, RB // Compare types. - | jne <2 // Not the same type? - | cmp RB, LJ_TISPRI - | jae <1 // Same type and primitive type? - | - | // Same types and not a primitive type. Compare GCobj or pvalue. - | mov RA, [BASE+RA*8] - | mov RD, [BASE+RD*8] - | cmp RA, RD - | je <1 // Same GCobjs or pvalues? - | cmp RB, LJ_TISTABUD - | ja <2 // Different objects and not table/ud? - |.if X64 - | cmp RB, LJ_TUDATA // And not 64 bit lightuserdata. - | jb <2 - |.endif - | - | // Different tables or userdatas. Need to check __eq metamethod. - | // Field metatable must be at same offset for GCtab and GCudata! - | mov TAB:RB, TAB:RA->metatable - | test TAB:RB, TAB:RB - | jz <2 // No metatable? - | test byte TAB:RB->nomm, 1<vmeta_equal // Handle __eq metamethod. 
- } else { - |.if FFI - |3: - | cmp RB, LJ_TCDATA - if (LJ_DUALNUM && vk) { - | jne <9 - } else { - | jne <2 - } - | jmp ->vmeta_equal_cd - |.endif - } - break; - case BC_ISEQS: case BC_ISNES: - vk = op == BC_ISEQS; - | ins_AND // RA = src, RD = str const, JMP with RD = target - | mov RB, [BASE+RA*8+4] - | add PC, 4 - | cmp RB, LJ_TSTR; jne >3 - | mov RA, [BASE+RA*8] - | cmp RA, [KBASE+RD*4] - iseqne_test: - if (vk) { - | jne >2 - } else { - | je >1 - } - goto iseqne_end; - case BC_ISEQN: case BC_ISNEN: - vk = op == BC_ISEQN; - | ins_AD // RA = src, RD = num const, JMP with RD = target - | mov RB, [BASE+RA*8+4] - | add PC, 4 - |.if DUALNUM - | cmp RB, LJ_TISNUM; jne >7 - | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jne >8 - | mov RB, dword [KBASE+RD*8] - | cmp RB, dword [BASE+RA*8] - if (vk) { - | jne >9 - } else { - | je >9 - } - | movzx RD, PC_RD - | branchPC RD - |9: - | ins_next - | - |7: // RA is not an integer. - | ja >3 - | // RA is a number. - | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1 - | // RA is a number, RD is an integer. - | cvtsi2sd xmm0, dword [KBASE+RD*8] - | jmp >2 - | - |8: // RA is an integer, RD is a number. - | cvtsi2sd xmm0, dword [BASE+RA*8] - | ucomisd xmm0, qword [KBASE+RD*8] - | jmp >4 - |.else - | cmp RB, LJ_TISNUM; jae >3 - |.endif - |1: - | movsd xmm0, qword [KBASE+RD*8] - |2: - | ucomisd xmm0, qword [BASE+RA*8] - |4: - goto iseqne_fp; - case BC_ISEQP: case BC_ISNEP: - vk = op == BC_ISEQP; - | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target - | mov RB, [BASE+RA*8+4] - | add PC, 4 - | cmp RB, RD - if (!LJ_HASFFI) goto iseqne_test; - if (vk) { - | jne >3 - | movzx RD, PC_RD - | branchPC RD - |2: - | ins_next - |3: - | cmp RB, LJ_TCDATA; jne <2 - | jmp ->vmeta_equal_cd - } else { - | je >2 - | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd - | movzx RD, PC_RD - | branchPC RD - |2: - | ins_next - } - break; - - /* -- Unary test and copy ops ------------------------------------------- */ - - case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: - | ins_AD // RA = dst or unused, RD = src, JMP with RD = target - | mov RB, [BASE+RD*8+4] - | add PC, 4 - | cmp RB, LJ_TISTRUECOND - if (op == BC_IST || op == BC_ISTC) { - | jae >1 - } else { - | jb >1 - } - if (op == BC_ISTC || op == BC_ISFC) { - | mov [BASE+RA*8+4], RB - | mov RB, [BASE+RD*8] - | mov [BASE+RA*8], RB - } - | movzx RD, PC_RD - | branchPC RD - |1: // Fallthrough to the next instruction. - | ins_next - break; - - case BC_ISTYPE: - | ins_AD // RA = src, RD = -type - | add RD, [BASE+RA*8+4] - | jne ->vmeta_istype - | ins_next - break; - case BC_ISNUM: - | ins_AD // RA = src, RD = -(TISNUM-1) - | checknum RA, ->vmeta_istype - | ins_next - break; - - /* -- Unary ops --------------------------------------------------------- */ - - case BC_MOV: - | ins_AD // RA = dst, RD = src - |.if X64 - | mov RBa, [BASE+RD*8] - | mov [BASE+RA*8], RBa - |.else - | mov RB, [BASE+RD*8+4] - | mov RD, [BASE+RD*8] - | mov [BASE+RA*8+4], RB - | mov [BASE+RA*8], RD - |.endif - | ins_next_ - break; - case BC_NOT: - | ins_AD // RA = dst, RD = src - | xor RB, RB - | checktp RD, LJ_TISTRUECOND - | adc RB, LJ_TTRUE - | mov [BASE+RA*8+4], RB - | ins_next - break; - case BC_UNM: - | ins_AD // RA = dst, RD = src - |.if DUALNUM - | checkint RD, >5 - | mov RB, [BASE+RD*8] - | neg RB - | jo >4 - | mov dword [BASE+RA*8+4], LJ_TISNUM - | mov dword [BASE+RA*8], RB - |9: - | ins_next - |4: - | mov dword [BASE+RA*8+4], 0x41e00000 // 2^31. 
- | mov dword [BASE+RA*8], 0 - | jmp <9 - |5: - | ja ->vmeta_unm - |.else - | checknum RD, ->vmeta_unm - |.endif - | movsd xmm0, qword [BASE+RD*8] - | sseconst_sign xmm1, RDa - | xorps xmm0, xmm1 - | movsd qword [BASE+RA*8], xmm0 - |.if DUALNUM - | jmp <9 - |.else - | ins_next - |.endif - break; - case BC_LEN: - | ins_AD // RA = dst, RD = src - | checkstr RD, >2 - | mov STR:RD, [BASE+RD*8] - |.if DUALNUM - | mov RD, dword STR:RD->len - |1: - | mov dword [BASE+RA*8+4], LJ_TISNUM - | mov dword [BASE+RA*8], RD - |.else - | xorps xmm0, xmm0 - | cvtsi2sd xmm0, dword STR:RD->len - |1: - | movsd qword [BASE+RA*8], xmm0 - |.endif - | ins_next - |2: - | checktab RD, ->vmeta_len - | mov TAB:FCARG1, [BASE+RD*8] -#if LJ_52 - | mov TAB:RB, TAB:FCARG1->metatable - | cmp TAB:RB, 0 - | jnz >9 - |3: -#endif - |->BC_LEN_Z: - | mov RB, BASE // Save BASE. - | call extern lj_tab_len@4 // (GCtab *t) - | // Length of table returned in eax (RD). - |.if DUALNUM - | // Nothing to do. - |.else - | cvtsi2sd xmm0, RD - |.endif - | mov BASE, RB // Restore BASE. - | movzx RA, PC_RA - | jmp <1 -#if LJ_52 - |9: // Check for __len. - | test byte TAB:RB->nomm, 1<vmeta_len // 'no __len' flag NOT set: check. -#endif - break; - - /* -- Binary ops -------------------------------------------------------- */ - - |.macro ins_arithpre, sseins, ssereg - | ins_ABC - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - ||switch (vk) { - ||case 0: - | checknum RB, ->vmeta_arith_vn - | .if DUALNUM - | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn - | .endif - | movsd xmm0, qword [BASE+RB*8] - | sseins ssereg, qword [KBASE+RC*8] - || break; - ||case 1: - | checknum RB, ->vmeta_arith_nv - | .if DUALNUM - | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv - | .endif - | movsd xmm0, qword [KBASE+RC*8] - | sseins ssereg, qword [BASE+RB*8] - || break; - ||default: - | checknum RB, ->vmeta_arith_vv - | checknum RC, ->vmeta_arith_vv - | movsd xmm0, qword [BASE+RB*8] - | sseins ssereg, qword [BASE+RC*8] - || break; - ||} - |.endmacro - | - |.macro ins_arithdn, intins - | ins_ABC - ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); - ||switch (vk) { - ||case 0: - | checkint RB, ->vmeta_arith_vn - | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_vn - | mov RB, [BASE+RB*8] - | intins RB, [KBASE+RC*8]; jo ->vmeta_arith_vno - || break; - ||case 1: - | checkint RB, ->vmeta_arith_nv - | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_nv - | mov RC, [KBASE+RC*8] - | intins RC, [BASE+RB*8]; jo ->vmeta_arith_nvo - || break; - ||default: - | checkint RB, ->vmeta_arith_vv - | checkint RC, ->vmeta_arith_vv - | mov RB, [BASE+RB*8] - | intins RB, [BASE+RC*8]; jo ->vmeta_arith_vvo - || break; - ||} - | mov dword [BASE+RA*8+4], LJ_TISNUM - ||if (vk == 1) { - | mov dword [BASE+RA*8], RC - ||} else { - | mov dword [BASE+RA*8], RB - ||} - | ins_next - |.endmacro - | - |.macro ins_arithpost - | movsd qword [BASE+RA*8], xmm0 - |.endmacro - | - |.macro ins_arith, sseins - | ins_arithpre sseins, xmm0 - | ins_arithpost - | ins_next - |.endmacro - | - |.macro ins_arith, intins, sseins - |.if DUALNUM - | ins_arithdn intins - |.else - | ins_arith, sseins - |.endif - |.endmacro - - | // RA = dst, RB = src1 or num const, RC = src2 or num const - case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: - | ins_arith add, addsd - break; - case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: - | ins_arith sub, subsd - break; - case BC_MULVN: case BC_MULNV: case BC_MULVV: - | ins_arith imul, mulsd - break; - case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: - | 
ins_arith divsd - break; - case BC_MODVN: - | ins_arithpre movsd, xmm1 - |->BC_MODVN_Z: - | call ->vm_mod - | ins_arithpost - | ins_next - break; - case BC_MODNV: case BC_MODVV: - | ins_arithpre movsd, xmm1 - | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. - break; - case BC_POW: - | ins_arithpre movsd, xmm1 - | mov RB, BASE - |.if not X64 - | movsd FPARG1, xmm0 - | movsd FPARG3, xmm1 - |.endif - | call extern pow - | movzx RA, PC_RA - | mov BASE, RB - |.if X64 - | ins_arithpost - |.else - | fstp qword [BASE+RA*8] - |.endif - | ins_next - break; - - case BC_CAT: - | ins_ABC // RA = dst, RB = src_start, RC = src_end - |.if X64 - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE - | lea CARG2d, [BASE+RC*8] - | mov CARG3d, RC - | sub CARG3d, RB - |->BC_CAT_Z: - | mov L:RB, L:CARG1d - |.else - | lea RA, [BASE+RC*8] - | sub RC, RB - | mov ARG2, RA - | mov ARG3, RC - |->BC_CAT_Z: - | mov L:RB, SAVE_L - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_meta_cat // (lua_State *L, TValue *top, int left) - | // NULL (finished) or TValue * (metamethod) returned in eax (RC). - | mov BASE, L:RB->base - | test RC, RC - | jnz ->vmeta_binop - | movzx RB, PC_RB // Copy result to Stk[RA] from Stk[RB]. - | movzx RA, PC_RA - |.if X64 - | mov RCa, [BASE+RB*8] - | mov [BASE+RA*8], RCa - |.else - | mov RC, [BASE+RB*8+4] - | mov RB, [BASE+RB*8] - | mov [BASE+RA*8+4], RC - | mov [BASE+RA*8], RB - |.endif - | ins_next - break; - - /* -- Constant ops ------------------------------------------------------ */ - - case BC_KSTR: - | ins_AND // RA = dst, RD = str const (~) - | mov RD, [KBASE+RD*4] - | mov dword [BASE+RA*8+4], LJ_TSTR - | mov [BASE+RA*8], RD - | ins_next - break; - case BC_KCDATA: - |.if FFI - | ins_AND // RA = dst, RD = cdata const (~) - | mov RD, [KBASE+RD*4] - | mov dword [BASE+RA*8+4], LJ_TCDATA - | mov [BASE+RA*8], RD - | ins_next - |.endif - break; - case BC_KSHORT: - | ins_AD // RA = dst, RD = signed int16 literal - |.if DUALNUM - | movsx RD, RDW - | mov dword [BASE+RA*8+4], LJ_TISNUM - | mov dword [BASE+RA*8], RD - |.else - | movsx RD, RDW // Sign-extend literal. - | cvtsi2sd xmm0, RD - | movsd qword [BASE+RA*8], xmm0 - |.endif - | ins_next - break; - case BC_KNUM: - | ins_AD // RA = dst, RD = num const - | movsd xmm0, qword [KBASE+RD*8] - | movsd qword [BASE+RA*8], xmm0 - | ins_next - break; - case BC_KPRI: - | ins_AND // RA = dst, RD = primitive type (~) - | mov [BASE+RA*8+4], RD - | ins_next - break; - case BC_KNIL: - | ins_AD // RA = dst_start, RD = dst_end - | lea RA, [BASE+RA*8+12] - | lea RD, [BASE+RD*8+4] - | mov RB, LJ_TNIL - | mov [RA-8], RB // Sets minimum 2 slots. 
- |1: - | mov [RA], RB - | add RA, 8 - | cmp RA, RD - | jbe <1 - | ins_next - break; - - /* -- Upvalue and function ops ------------------------------------------ */ - - case BC_UGET: - | ins_AD // RA = dst, RD = upvalue # - | mov LFUNC:RB, [BASE-8] - | mov UPVAL:RB, [LFUNC:RB+RD*4+offsetof(GCfuncL, uvptr)] - | mov RB, UPVAL:RB->v - |.if X64 - | mov RDa, [RB] - | mov [BASE+RA*8], RDa - |.else - | mov RD, [RB+4] - | mov RB, [RB] - | mov [BASE+RA*8+4], RD - | mov [BASE+RA*8], RB - |.endif - | ins_next - break; - case BC_USETV: -#define TV2MARKOFS \ - ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) - | ins_AD // RA = upvalue #, RD = src - | mov LFUNC:RB, [BASE-8] - | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] - | cmp byte UPVAL:RB->closed, 0 - | mov RB, UPVAL:RB->v - | mov RA, [BASE+RD*8] - | mov RD, [BASE+RD*8+4] - | mov [RB], RA - | mov [RB+4], RD - | jz >1 - | // Check barrier for closed upvalue. - | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv) - | jnz >2 - |1: - | ins_next - | - |2: // Upvalue is black. Check if new value is collectable and white. - | sub RD, LJ_TISGCV - | cmp RD, LJ_TNUMX - LJ_TISGCV // tvisgcv(v) - | jbe <1 - | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v) - | jz <1 - | // Crossed a write barrier. Move the barrier forward. - |.if X64 and not X64WIN - | mov FCARG2, RB - | mov RB, BASE // Save BASE. - |.else - | xchg FCARG2, RB // Save BASE (FCARG2 == BASE). - |.endif - | lea GL:FCARG1, [DISPATCH+GG_DISP2G] - | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv) - | mov BASE, RB // Restore BASE. - | jmp <1 - break; -#undef TV2MARKOFS - case BC_USETS: - | ins_AND // RA = upvalue #, RD = str const (~) - | mov LFUNC:RB, [BASE-8] - | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] - | mov GCOBJ:RA, [KBASE+RD*4] - | mov RD, UPVAL:RB->v - | mov [RD], GCOBJ:RA - | mov dword [RD+4], LJ_TSTR - | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv) - | jnz >2 - |1: - | ins_next - | - |2: // Check if string is white and ensure upvalue is closed. - | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str) - | jz <1 - | cmp byte UPVAL:RB->closed, 0 - | jz <1 - | // Crossed a write barrier. Move the barrier forward. - | mov RB, BASE // Save BASE (FCARG2 == BASE). - | mov FCARG2, RD - | lea GL:FCARG1, [DISPATCH+GG_DISP2G] - | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv) - | mov BASE, RB // Restore BASE. - | jmp <1 - break; - case BC_USETN: - | ins_AD // RA = upvalue #, RD = num const - | mov LFUNC:RB, [BASE-8] - | movsd xmm0, qword [KBASE+RD*8] - | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] - | mov RA, UPVAL:RB->v - | movsd qword [RA], xmm0 - | ins_next - break; - case BC_USETP: - | ins_AND // RA = upvalue #, RD = primitive type (~) - | mov LFUNC:RB, [BASE-8] - | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] - | mov RA, UPVAL:RB->v - | mov [RA+4], RD - | ins_next - break; - case BC_UCLO: - | ins_AD // RA = level, RD = target - | branchPC RD // Do this first to free RD. - | mov L:RB, SAVE_L - | cmp dword L:RB->openupval, 0 - | je >1 - | mov L:RB->base, BASE - | lea FCARG2, [BASE+RA*8] // Caveat: FCARG2 == BASE - | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA - | call extern lj_func_closeuv@8 // (lua_State *L, TValue *level) - | mov BASE, L:RB->base - |1: - | ins_next - break; - - case BC_FNEW: - | ins_AND // RA = dst, RD = proto const (~) (holding function prototype) - |.if X64 - | mov L:RB, SAVE_L - | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. 
- | mov CARG3d, [BASE-8] - | mov CARG2d, [KBASE+RD*4] // Fetch GCproto *. - | mov CARG1d, L:RB - |.else - | mov LFUNC:RA, [BASE-8] - | mov PROTO:RD, [KBASE+RD*4] // Fetch GCproto *. - | mov L:RB, SAVE_L - | mov ARG3, LFUNC:RA - | mov ARG2, PROTO:RD - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | // (lua_State *L, GCproto *pt, GCfuncL *parent) - | call extern lj_func_newL_gc - | // GCfuncL * returned in eax (RC). - | mov BASE, L:RB->base - | movzx RA, PC_RA - | mov [BASE+RA*8], LFUNC:RC - | mov dword [BASE+RA*8+4], LJ_TFUNC - | ins_next - break; - - /* -- Table ops --------------------------------------------------------- */ - - case BC_TNEW: - | ins_AD // RA = dst, RD = hbits|asize - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] - | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] - | mov SAVE_PC, PC - | jae >5 - |1: - |.if X64 - | mov CARG3d, RD - | and RD, 0x7ff - | shr CARG3d, 11 - |.else - | mov RA, RD - | and RD, 0x7ff - | shr RA, 11 - | mov ARG3, RA - |.endif - | cmp RD, 0x7ff - | je >3 - |2: - |.if X64 - | mov L:CARG1d, L:RB - | mov CARG2d, RD - |.else - | mov ARG1, L:RB - | mov ARG2, RD - |.endif - | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) - | // Table * returned in eax (RC). - | mov BASE, L:RB->base - | movzx RA, PC_RA - | mov [BASE+RA*8], TAB:RC - | mov dword [BASE+RA*8+4], LJ_TTAB - | ins_next - |3: // Turn 0x7ff into 0x801. - | mov RD, 0x801 - | jmp <2 - |5: - | mov L:FCARG1, L:RB - | call extern lj_gc_step_fixtop@4 // (lua_State *L) - | movzx RD, PC_RD - | jmp <1 - break; - case BC_TDUP: - | ins_AND // RA = dst, RD = table const (~) (holding template table) - | mov L:RB, SAVE_L - | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] - | mov SAVE_PC, PC - | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] - | mov L:RB->base, BASE - | jae >3 - |2: - | mov TAB:FCARG2, [KBASE+RD*4] // Caveat: FCARG2 == BASE - | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA - | call extern lj_tab_dup@8 // (lua_State *L, Table *kt) - | // Table * returned in eax (RC). - | mov BASE, L:RB->base - | movzx RA, PC_RA - | mov [BASE+RA*8], TAB:RC - | mov dword [BASE+RA*8+4], LJ_TTAB - | ins_next - |3: - | mov L:FCARG1, L:RB - | call extern lj_gc_step_fixtop@4 // (lua_State *L) - | movzx RD, PC_RD // Need to reload RD. - | not RDa - | jmp <2 - break; - - case BC_GGET: - | ins_AND // RA = dst, RD = str const (~) - | mov LFUNC:RB, [BASE-8] - | mov TAB:RB, LFUNC:RB->env - | mov STR:RC, [KBASE+RD*4] - | jmp ->BC_TGETS_Z - break; - case BC_GSET: - | ins_AND // RA = src, RD = str const (~) - | mov LFUNC:RB, [BASE-8] - | mov TAB:RB, LFUNC:RB->env - | mov STR:RC, [KBASE+RD*4] - | jmp ->BC_TSETS_Z - break; - - case BC_TGETV: - | ins_ABC // RA = dst, RB = table, RC = key - | checktab RB, ->vmeta_tgetv - | mov TAB:RB, [BASE+RB*8] - | - | // Integer key? - |.if DUALNUM - | checkint RC, >5 - | mov RC, dword [BASE+RC*8] - |.else - | // Convert number to int and back and compare. - | checknum RC, >5 - | movsd xmm0, qword [BASE+RC*8] - | cvttsd2si RC, xmm0 - | cvtsi2sd xmm1, RC - | ucomisd xmm0, xmm1 - | jne ->vmeta_tgetv // Generic numeric key? Use fallback. - |.endif - | cmp RC, TAB:RB->asize // Takes care of unordered, too. - | jae ->vmeta_tgetv // Not in array part? Use fallback. - | shl RC, 3 - | add RC, TAB:RB->array - | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath. - | je >2 - | // Get array slot. 
- |.if X64 - | mov RBa, [RC] - | mov [BASE+RA*8], RBa - |.else - | mov RB, [RC] - | mov RC, [RC+4] - | mov [BASE+RA*8], RB - | mov [BASE+RA*8+4], RC - |.endif - |1: - | ins_next - | - |2: // Check for __index if table value is nil. - | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. - | jz >3 - | mov TAB:RA, TAB:RB->metatable - | test byte TAB:RA->nomm, 1<vmeta_tgetv // 'no __index' flag NOT set: check. - | movzx RA, PC_RA // Restore RA. - |3: - | mov dword [BASE+RA*8+4], LJ_TNIL - | jmp <1 - | - |5: // String key? - | checkstr RC, ->vmeta_tgetv - | mov STR:RC, [BASE+RC*8] - | jmp ->BC_TGETS_Z - break; - case BC_TGETS: - | ins_ABC // RA = dst, RB = table, RC = str const (~) - | not RCa - | mov STR:RC, [KBASE+RC*4] - | checktab RB, ->vmeta_tgets - | mov TAB:RB, [BASE+RB*8] - |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA. - | mov RA, TAB:RB->hmask - | and RA, STR:RC->hash - | imul RA, #NODE - | add NODE:RA, TAB:RB->node - |1: - | cmp dword NODE:RA->key.it, LJ_TSTR - | jne >4 - | cmp dword NODE:RA->key.gcr, STR:RC - | jne >4 - | // Ok, key found. Assumes: offsetof(Node, val) == 0 - | cmp dword [RA+4], LJ_TNIL // Avoid overwriting RB in fastpath. - | je >5 // Key found, but nil value? - | movzx RC, PC_RA - | // Get node value. - |.if X64 - | mov RBa, [RA] - | mov [BASE+RC*8], RBa - |.else - | mov RB, [RA] - | mov RA, [RA+4] - | mov [BASE+RC*8], RB - | mov [BASE+RC*8+4], RA - |.endif - |2: - | ins_next - | - |3: - | movzx RC, PC_RA - | mov dword [BASE+RC*8+4], LJ_TNIL - | jmp <2 - | - |4: // Follow hash chain. - | mov NODE:RA, NODE:RA->next - | test NODE:RA, NODE:RA - | jnz <1 - | // End of hash chain: key not found, nil result. - | - |5: // Check for __index if table value is nil. - | mov TAB:RA, TAB:RB->metatable - | test TAB:RA, TAB:RA - | jz <3 // No metatable: done. - | test byte TAB:RA->nomm, 1<vmeta_tgets // Caveat: preserve STR:RC. - break; - case BC_TGETB: - | ins_ABC // RA = dst, RB = table, RC = byte literal - | checktab RB, ->vmeta_tgetb - | mov TAB:RB, [BASE+RB*8] - | cmp RC, TAB:RB->asize - | jae ->vmeta_tgetb - | shl RC, 3 - | add RC, TAB:RB->array - | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath. - | je >2 - | // Get array slot. - |.if X64 - | mov RBa, [RC] - | mov [BASE+RA*8], RBa - |.else - | mov RB, [RC] - | mov RC, [RC+4] - | mov [BASE+RA*8], RB - | mov [BASE+RA*8+4], RC - |.endif - |1: - | ins_next - | - |2: // Check for __index if table value is nil. - | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. - | jz >3 - | mov TAB:RA, TAB:RB->metatable - | test byte TAB:RA->nomm, 1<vmeta_tgetb // 'no __index' flag NOT set: check. - | movzx RA, PC_RA // Restore RA. - |3: - | mov dword [BASE+RA*8+4], LJ_TNIL - | jmp <1 - break; - case BC_TGETR: - | ins_ABC // RA = dst, RB = table, RC = key - | mov TAB:RB, [BASE+RB*8] - |.if DUALNUM - | mov RC, dword [BASE+RC*8] - |.else - | cvttsd2si RC, qword [BASE+RC*8] - |.endif - | cmp RC, TAB:RB->asize - | jae ->vmeta_tgetr // Not in array part? Use fallback. - | shl RC, 3 - | add RC, TAB:RB->array - | // Get array slot. - |->BC_TGETR_Z: - |.if X64 - | mov RBa, [RC] - | mov [BASE+RA*8], RBa - |.else - | mov RB, [RC] - | mov RC, [RC+4] - | mov [BASE+RA*8], RB - | mov [BASE+RA*8+4], RC - |.endif - |->BC_TGETR2_Z: - | ins_next - break; - - case BC_TSETV: - | ins_ABC // RA = src, RB = table, RC = key - | checktab RB, ->vmeta_tsetv - | mov TAB:RB, [BASE+RB*8] - | - | // Integer key? 
- |.if DUALNUM - | checkint RC, >5 - | mov RC, dword [BASE+RC*8] - |.else - | // Convert number to int and back and compare. - | checknum RC, >5 - | movsd xmm0, qword [BASE+RC*8] - | cvttsd2si RC, xmm0 - | cvtsi2sd xmm1, RC - | ucomisd xmm0, xmm1 - | jne ->vmeta_tsetv // Generic numeric key? Use fallback. - |.endif - | cmp RC, TAB:RB->asize // Takes care of unordered, too. - | jae ->vmeta_tsetv - | shl RC, 3 - | add RC, TAB:RB->array - | cmp dword [RC+4], LJ_TNIL - | je >3 // Previous value is nil? - |1: - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: // Set array slot. - |.if X64 - | mov RBa, [BASE+RA*8] - | mov [RC], RBa - |.else - | mov RB, [BASE+RA*8+4] - | mov RA, [BASE+RA*8] - | mov [RC+4], RB - | mov [RC], RA - |.endif - | ins_next - | - |3: // Check for __newindex if previous value is nil. - | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. - | jz <1 - | mov TAB:RA, TAB:RB->metatable - | test byte TAB:RA->nomm, 1<vmeta_tsetv // 'no __newindex' flag NOT set: check. - | movzx RA, PC_RA // Restore RA. - | jmp <1 - | - |5: // String key? - | checkstr RC, ->vmeta_tsetv - | mov STR:RC, [BASE+RC*8] - | jmp ->BC_TSETS_Z - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, RA - | movzx RA, PC_RA // Restore RA. - | jmp <2 - break; - case BC_TSETS: - | ins_ABC // RA = src, RB = table, RC = str const (~) - | not RCa - | mov STR:RC, [KBASE+RC*4] - | checktab RB, ->vmeta_tsets - | mov TAB:RB, [BASE+RB*8] - |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA. - | mov RA, TAB:RB->hmask - | and RA, STR:RC->hash - | imul RA, #NODE - | mov byte TAB:RB->nomm, 0 // Clear metamethod cache. - | add NODE:RA, TAB:RB->node - |1: - | cmp dword NODE:RA->key.it, LJ_TSTR - | jne >5 - | cmp dword NODE:RA->key.gcr, STR:RC - | jne >5 - | // Ok, key found. Assumes: offsetof(Node, val) == 0 - | cmp dword [RA+4], LJ_TNIL - | je >4 // Previous value is nil? - |2: - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |3: // Set node value. - | movzx RC, PC_RA - |.if X64 - | mov RBa, [BASE+RC*8] - | mov [RA], RBa - |.else - | mov RB, [BASE+RC*8+4] - | mov RC, [BASE+RC*8] - | mov [RA+4], RB - | mov [RA], RC - |.endif - | ins_next - | - |4: // Check for __newindex if previous value is nil. - | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. - | jz <2 - | mov TMP1, RA // Save RA. - | mov TAB:RA, TAB:RB->metatable - | test byte TAB:RA->nomm, 1<vmeta_tsets // 'no __newindex' flag NOT set: check. - | mov RA, TMP1 // Restore RA. - | jmp <2 - | - |5: // Follow hash chain. - | mov NODE:RA, NODE:RA->next - | test NODE:RA, NODE:RA - | jnz <1 - | // End of hash chain: key not found, add a new one. - | - | // But check for __newindex first. - | mov TAB:RA, TAB:RB->metatable - | test TAB:RA, TAB:RA - | jz >6 // No metatable: continue. - | test byte TAB:RA->nomm, 1<vmeta_tsets // 'no __newindex' flag NOT set: check. - |6: - | mov TMP1, STR:RC - | mov TMP2, LJ_TSTR - | mov TMP3, TAB:RB // Save TAB:RB for us. - |.if X64 - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE - | lea CARG3, TMP1 - | mov CARG2d, TAB:RB - | mov L:RB, L:CARG1d - |.else - | lea RC, TMP1 // Store temp. TValue in TMP1/TMP2. - | mov ARG2, TAB:RB - | mov L:RB, SAVE_L - | mov ARG3, RC - | mov ARG1, L:RB - | mov L:RB->base, BASE - |.endif - | mov SAVE_PC, PC - | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) - | // Handles write barrier for the new key. TValue * returned in eax (RC). 
- | mov BASE, L:RB->base - | mov TAB:RB, TMP3 // Need TAB:RB for barrier. - | mov RA, eax - | jmp <2 // Must check write barrier for value. - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, RC // Destroys STR:RC. - | jmp <3 - break; - case BC_TSETB: - | ins_ABC // RA = src, RB = table, RC = byte literal - | checktab RB, ->vmeta_tsetb - | mov TAB:RB, [BASE+RB*8] - | cmp RC, TAB:RB->asize - | jae ->vmeta_tsetb - | shl RC, 3 - | add RC, TAB:RB->array - | cmp dword [RC+4], LJ_TNIL - | je >3 // Previous value is nil? - |1: - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: // Set array slot. - |.if X64 - | mov RAa, [BASE+RA*8] - | mov [RC], RAa - |.else - | mov RB, [BASE+RA*8+4] - | mov RA, [BASE+RA*8] - | mov [RC+4], RB - | mov [RC], RA - |.endif - | ins_next - | - |3: // Check for __newindex if previous value is nil. - | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. - | jz <1 - | mov TAB:RA, TAB:RB->metatable - | test byte TAB:RA->nomm, 1<vmeta_tsetb // 'no __newindex' flag NOT set: check. - | movzx RA, PC_RA // Restore RA. - | jmp <1 - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, RA - | movzx RA, PC_RA // Restore RA. - | jmp <2 - break; - case BC_TSETR: - | ins_ABC // RA = src, RB = table, RC = key - | mov TAB:RB, [BASE+RB*8] - |.if DUALNUM - | mov RC, dword [BASE+RC*8] - |.else - | cvttsd2si RC, qword [BASE+RC*8] - |.endif - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: - | cmp RC, TAB:RB->asize - | jae ->vmeta_tsetr - | shl RC, 3 - | add RC, TAB:RB->array - | // Set array slot. - |->BC_TSETR_Z: - |.if X64 - | mov RBa, [BASE+RA*8] - | mov [RC], RBa - |.else - | mov RB, [BASE+RA*8+4] - | mov RA, [BASE+RA*8] - | mov [RC+4], RB - | mov [RC], RA - |.endif - | ins_next - | - |7: // Possible table write barrier for the value. Skip valiswhite check. - | barrierback TAB:RB, RA - | movzx RA, PC_RA // Restore RA. - | jmp <2 - break; - - case BC_TSETM: - | ins_AD // RA = base (table at base-1), RD = num const (start index) - | mov TMP1, KBASE // Need one more free register. - | mov KBASE, dword [KBASE+RD*8] // Integer constant is in lo-word. - |1: - | lea RA, [BASE+RA*8] - | mov TAB:RB, [RA-8] // Guaranteed to be a table. - | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) - | jnz >7 - |2: - | mov RD, MULTRES - | sub RD, 1 - | jz >4 // Nothing to copy? - | add RD, KBASE // Compute needed size. - | cmp RD, TAB:RB->asize - | ja >5 // Doesn't fit into array part? - | sub RD, KBASE - | shl KBASE, 3 - | add KBASE, TAB:RB->array - |3: // Copy result slots to table. - |.if X64 - | mov RBa, [RA] - | add RA, 8 - | mov [KBASE], RBa - |.else - | mov RB, [RA] - | mov [KBASE], RB - | mov RB, [RA+4] - | add RA, 8 - | mov [KBASE+4], RB - |.endif - | add KBASE, 8 - | sub RD, 1 - | jnz <3 - |4: - | mov KBASE, TMP1 - | ins_next - | - |5: // Need to resize array part. - |.if X64 - | mov L:CARG1d, SAVE_L - | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. - | mov CARG2d, TAB:RB - | mov CARG3d, RD - | mov L:RB, L:CARG1d - |.else - | mov ARG2, TAB:RB - | mov L:RB, SAVE_L - | mov L:RB->base, BASE - | mov ARG3, RD - | mov ARG1, L:RB - |.endif - | mov SAVE_PC, PC - | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) - | mov BASE, L:RB->base - | movzx RA, PC_RA // Restore RA. - | jmp <1 // Retry. - | - |7: // Possible table write barrier for any value. Skip valiswhite check. 
- | barrierback TAB:RB, RD - | jmp <2 - break; - - /* -- Calls and vararg handling ----------------------------------------- */ - - case BC_CALL: case BC_CALLM: - | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs - if (op == BC_CALLM) { - | add NARGS:RD, MULTRES - } - | cmp dword [BASE+RA*8+4], LJ_TFUNC - | mov LFUNC:RB, [BASE+RA*8] - | jne ->vmeta_call_ra - | lea BASE, [BASE+RA*8+8] - | ins_call - break; - - case BC_CALLMT: - | ins_AD // RA = base, RD = extra_nargs - | add NARGS:RD, MULTRES - | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op. - break; - case BC_CALLT: - | ins_AD // RA = base, RD = nargs+1 - | lea RA, [BASE+RA*8+8] - | mov KBASE, BASE // Use KBASE for move + vmeta_call hint. - | mov LFUNC:RB, [RA-8] - | cmp dword [RA-4], LJ_TFUNC - | jne ->vmeta_call - |->BC_CALLT_Z: - | mov PC, [BASE-4] - | test PC, FRAME_TYPE - | jnz >7 - |1: - | mov [BASE-8], LFUNC:RB // Copy function down, reloaded below. - | mov MULTRES, NARGS:RD - | sub NARGS:RD, 1 - | jz >3 - |2: // Move args down. - |.if X64 - | mov RBa, [RA] - | add RA, 8 - | mov [KBASE], RBa - |.else - | mov RB, [RA] - | mov [KBASE], RB - | mov RB, [RA+4] - | add RA, 8 - | mov [KBASE+4], RB - |.endif - | add KBASE, 8 - | sub NARGS:RD, 1 - | jnz <2 - | - | mov LFUNC:RB, [BASE-8] - |3: - | mov NARGS:RD, MULTRES - | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function? - | ja >5 - |4: - | ins_callt - | - |5: // Tailcall to a fast function. - | test PC, FRAME_TYPE // Lua frame below? - | jnz <4 - | movzx RA, PC_RA - | not RAa - | mov LFUNC:KBASE, [BASE+RA*8-8] // Need to prepare KBASE. - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | jmp <4 - | - |7: // Tailcall from a vararg function. - | sub PC, FRAME_VARG - | test PC, FRAME_TYPEP - | jnz >8 // Vararg frame below? - | sub BASE, PC // Need to relocate BASE/KBASE down. - | mov KBASE, BASE - | mov PC, [BASE-4] - | jmp <1 - |8: - | add PC, FRAME_VARG - | jmp <1 - break; - - case BC_ITERC: - | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1) - | lea RA, [BASE+RA*8+8] // fb = base+1 - |.if X64 - | mov RBa, [RA-24] // Copy state. fb[0] = fb[-3]. - | mov RCa, [RA-16] // Copy control var. fb[1] = fb[-2]. - | mov [RA], RBa - | mov [RA+8], RCa - |.else - | mov RB, [RA-24] // Copy state. fb[0] = fb[-3]. - | mov RC, [RA-20] - | mov [RA], RB - | mov [RA+4], RC - | mov RB, [RA-16] // Copy control var. fb[1] = fb[-2]. - | mov RC, [RA-12] - | mov [RA+8], RB - | mov [RA+12], RC - |.endif - | mov LFUNC:RB, [RA-32] // Copy callable. fb[-1] = fb[-4] - | mov RC, [RA-28] - | mov [RA-8], LFUNC:RB - | mov [RA-4], RC - | cmp RC, LJ_TFUNC // Handle like a regular 2-arg call. - | mov NARGS:RD, 2+1 - | jne ->vmeta_call - | mov BASE, RA - | ins_call - break; - - case BC_ITERN: - | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) - |.if JIT - | // NYI: add hotloop, record BC_ITERN. - |.endif - | mov TMP1, KBASE // Need two more free registers. - | mov TMP2, DISPATCH - | mov TAB:RB, [BASE+RA*8-16] - | mov RC, [BASE+RA*8-8] // Get index from control var. - | mov DISPATCH, TAB:RB->asize - | add PC, 4 - | mov KBASE, TAB:RB->array - |1: // Traverse array part. - | cmp RC, DISPATCH; jae >5 // Index points after array part? - | cmp dword [KBASE+RC*8+4], LJ_TNIL; je >4 - |.if DUALNUM - | mov dword [BASE+RA*8+4], LJ_TISNUM - | mov dword [BASE+RA*8], RC - |.else - | cvtsi2sd xmm0, RC - |.endif - | // Copy array slot to returned value. 
- |.if X64 - | mov RBa, [KBASE+RC*8] - | mov [BASE+RA*8+8], RBa - |.else - | mov RB, [KBASE+RC*8+4] - | mov [BASE+RA*8+12], RB - | mov RB, [KBASE+RC*8] - | mov [BASE+RA*8+8], RB - |.endif - | add RC, 1 - | // Return array index as a numeric key. - |.if DUALNUM - | // See above. - |.else - | movsd qword [BASE+RA*8], xmm0 - |.endif - | mov [BASE+RA*8-8], RC // Update control var. - |2: - | movzx RD, PC_RD // Get target from ITERL. - | branchPC RD - |3: - | mov DISPATCH, TMP2 - | mov KBASE, TMP1 - | ins_next - | - |4: // Skip holes in array part. - | add RC, 1 - | jmp <1 - | - |5: // Traverse hash part. - | sub RC, DISPATCH - |6: - | cmp RC, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1. - | imul KBASE, RC, #NODE - | add NODE:KBASE, TAB:RB->node - | cmp dword NODE:KBASE->val.it, LJ_TNIL; je >7 - | lea DISPATCH, [RC+DISPATCH+1] - | // Copy key and value from hash slot. - |.if X64 - | mov RBa, NODE:KBASE->key - | mov RCa, NODE:KBASE->val - | mov [BASE+RA*8], RBa - | mov [BASE+RA*8+8], RCa - |.else - | mov RB, NODE:KBASE->key.gcr - | mov RC, NODE:KBASE->key.it - | mov [BASE+RA*8], RB - | mov [BASE+RA*8+4], RC - | mov RB, NODE:KBASE->val.gcr - | mov RC, NODE:KBASE->val.it - | mov [BASE+RA*8+8], RB - | mov [BASE+RA*8+12], RC - |.endif - | mov [BASE+RA*8-8], DISPATCH - | jmp <2 - | - |7: // Skip holes in hash part. - | add RC, 1 - | jmp <6 - break; - - case BC_ISNEXT: - | ins_AD // RA = base, RD = target (points to ITERN) - | cmp dword [BASE+RA*8-20], LJ_TFUNC; jne >5 - | mov CFUNC:RB, [BASE+RA*8-24] - | cmp dword [BASE+RA*8-12], LJ_TTAB; jne >5 - | cmp dword [BASE+RA*8-4], LJ_TNIL; jne >5 - | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5 - | branchPC RD - | mov dword [BASE+RA*8-8], 0 // Initialize control var. - | mov dword [BASE+RA*8-4], 0xfffe7fff - |1: - | ins_next - |5: // Despecialize bytecode if any of the checks fail. - | mov PC_OP, BC_JMP - | branchPC RD - | mov byte [PC], BC_ITERC - | jmp <1 - break; - - case BC_VARG: - | ins_ABC // RA = base, RB = nresults+1, RC = numparams - | mov TMP1, KBASE // Need one more free register. - | lea KBASE, [BASE+RC*8+(8+FRAME_VARG)] - | lea RA, [BASE+RA*8] - | sub KBASE, [BASE-4] - | // Note: KBASE may now be even _above_ BASE if nargs was < numparams. - | test RB, RB - | jz >5 // Copy all varargs? - | lea RB, [RA+RB*8-8] - | cmp KBASE, BASE // No vararg slots? - | jnb >2 - |1: // Copy vararg slots to destination slots. - |.if X64 - | mov RCa, [KBASE-8] - | add KBASE, 8 - | mov [RA], RCa - |.else - | mov RC, [KBASE-8] - | mov [RA], RC - | mov RC, [KBASE-4] - | add KBASE, 8 - | mov [RA+4], RC - |.endif - | add RA, 8 - | cmp RA, RB // All destination slots filled? - | jnb >3 - | cmp KBASE, BASE // No more vararg slots? - | jb <1 - |2: // Fill up remainder with nil. - | mov dword [RA+4], LJ_TNIL - | add RA, 8 - | cmp RA, RB - | jb <2 - |3: - | mov KBASE, TMP1 - | ins_next - | - |5: // Copy all varargs. - | mov MULTRES, 1 // MULTRES = 0+1 - | mov RC, BASE - | sub RC, KBASE - | jbe <3 // No vararg slots? - | mov RB, RC - | shr RB, 3 - | add RB, 1 - | mov MULTRES, RB // MULTRES = #varargs+1 - | mov L:RB, SAVE_L - | add RC, RA - | cmp RC, L:RB->maxstack - | ja >7 // Need to grow stack? - |6: // Copy all vararg slots. - |.if X64 - | mov RCa, [KBASE-8] - | add KBASE, 8 - | mov [RA], RCa - |.else - | mov RC, [KBASE-8] - | mov [RA], RC - | mov RC, [KBASE-4] - | add KBASE, 8 - | mov [RA+4], RC - |.endif - | add RA, 8 - | cmp KBASE, BASE // No more vararg slots? - | jb <6 - | jmp <3 - | - |7: // Grow stack for varargs. 
- | mov L:RB->base, BASE - | mov L:RB->top, RA - | mov SAVE_PC, PC - | sub KBASE, BASE // Need delta, because BASE may change. - | mov FCARG2, MULTRES - | sub FCARG2, 1 - | mov FCARG1, L:RB - | call extern lj_state_growstack@8 // (lua_State *L, int n) - | mov BASE, L:RB->base - | mov RA, L:RB->top - | add KBASE, BASE - | jmp <6 - break; - - /* -- Returns ----------------------------------------------------------- */ - - case BC_RETM: - | ins_AD // RA = results, RD = extra_nresults - | add RD, MULTRES // MULTRES >=1, so RD >=1. - | // Fall through. Assumes BC_RET follows and ins_AD is a no-op. - break; - - case BC_RET: case BC_RET0: case BC_RET1: - | ins_AD // RA = results, RD = nresults+1 - if (op != BC_RET0) { - | shl RA, 3 - } - |1: - | mov PC, [BASE-4] - | mov MULTRES, RD // Save nresults+1. - | test PC, FRAME_TYPE // Check frame type marker. - | jnz >7 // Not returning to a fixarg Lua func? - switch (op) { - case BC_RET: - |->BC_RET_Z: - | mov KBASE, BASE // Use KBASE for result move. - | sub RD, 1 - | jz >3 - |2: // Move results down. - |.if X64 - | mov RBa, [KBASE+RA] - | mov [KBASE-8], RBa - |.else - | mov RB, [KBASE+RA] - | mov [KBASE-8], RB - | mov RB, [KBASE+RA+4] - | mov [KBASE-4], RB - |.endif - | add KBASE, 8 - | sub RD, 1 - | jnz <2 - |3: - | mov RD, MULTRES // Note: MULTRES may be >255. - | movzx RB, PC_RB // So cannot compare with RDL! - |5: - | cmp RB, RD // More results expected? - | ja >6 - break; - case BC_RET1: - |.if X64 - | mov RBa, [BASE+RA] - | mov [BASE-8], RBa - |.else - | mov RB, [BASE+RA+4] - | mov [BASE-4], RB - | mov RB, [BASE+RA] - | mov [BASE-8], RB - |.endif - /* fallthrough */ - case BC_RET0: - |5: - | cmp PC_RB, RDL // More results expected? - | ja >6 - default: - break; - } - | movzx RA, PC_RA - | not RAa // Note: ~RA = -(RA+1) - | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8 - | mov LFUNC:KBASE, [BASE-8] - | mov KBASE, LFUNC:KBASE->pc - | mov KBASE, [KBASE+PC2PROTO(k)] - | ins_next - | - |6: // Fill up results with nil. - if (op == BC_RET) { - | mov dword [KBASE-4], LJ_TNIL // Note: relies on shifted base. - | add KBASE, 8 - } else { - | mov dword [BASE+RD*8-12], LJ_TNIL - } - | add RD, 1 - | jmp <5 - | - |7: // Non-standard return case. - | lea RB, [PC-FRAME_VARG] - | test RB, FRAME_TYPEP - | jnz ->vm_return - | // Return from vararg function: relocate BASE down and RA up. - | sub BASE, RB - if (op != BC_RET0) { - | add RA, RB - } - | jmp <1 - break; - - /* -- Loops and branches ------------------------------------------------ */ - - |.define FOR_IDX, [RA]; .define FOR_TIDX, dword [RA+4] - |.define FOR_STOP, [RA+8]; .define FOR_TSTOP, dword [RA+12] - |.define FOR_STEP, [RA+16]; .define FOR_TSTEP, dword [RA+20] - |.define FOR_EXT, [RA+24]; .define FOR_TEXT, dword [RA+28] - - case BC_FORL: - |.if JIT - | hotloop RB - |.endif - | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op. 
- break; - - case BC_JFORI: - case BC_JFORL: -#if !LJ_HASJIT - break; -#endif - case BC_FORI: - case BC_IFORL: - vk = (op == BC_IFORL || op == BC_JFORL); - | ins_AJ // RA = base, RD = target (after end of loop or start of loop) - | lea RA, [BASE+RA*8] - if (LJ_DUALNUM) { - | cmp FOR_TIDX, LJ_TISNUM; jne >9 - if (!vk) { - | cmp FOR_TSTOP, LJ_TISNUM; jne ->vmeta_for - | cmp FOR_TSTEP, LJ_TISNUM; jne ->vmeta_for - | mov RB, dword FOR_IDX - | cmp dword FOR_STEP, 0; jl >5 - } else { -#ifdef LUA_USE_ASSERT - | cmp FOR_TSTOP, LJ_TISNUM; jne ->assert_bad_for_arg_type - | cmp FOR_TSTEP, LJ_TISNUM; jne ->assert_bad_for_arg_type -#endif - | mov RB, dword FOR_STEP - | test RB, RB; js >5 - | add RB, dword FOR_IDX; jo >1 - | mov dword FOR_IDX, RB - } - | cmp RB, dword FOR_STOP - | mov FOR_TEXT, LJ_TISNUM - | mov dword FOR_EXT, RB - if (op == BC_FORI) { - | jle >7 - |1: - |6: - | branchPC RD - } else if (op == BC_JFORI) { - | branchPC RD - | movzx RD, PC_RD - | jle =>BC_JLOOP - |1: - |6: - } else if (op == BC_IFORL) { - | jg >7 - |6: - | branchPC RD - |1: - } else { - | jle =>BC_JLOOP - |1: - |6: - } - |7: - | ins_next - | - |5: // Invert check for negative step. - if (vk) { - | add RB, dword FOR_IDX; jo <1 - | mov dword FOR_IDX, RB - } - | cmp RB, dword FOR_STOP - | mov FOR_TEXT, LJ_TISNUM - | mov dword FOR_EXT, RB - if (op == BC_FORI) { - | jge <7 - } else if (op == BC_JFORI) { - | branchPC RD - | movzx RD, PC_RD - | jge =>BC_JLOOP - } else if (op == BC_IFORL) { - | jl <7 - } else { - | jge =>BC_JLOOP - } - | jmp <6 - |9: // Fallback to FP variant. - } else if (!vk) { - | cmp FOR_TIDX, LJ_TISNUM - } - if (!vk) { - | jae ->vmeta_for - | cmp FOR_TSTOP, LJ_TISNUM; jae ->vmeta_for - } else { -#ifdef LUA_USE_ASSERT - | cmp FOR_TSTOP, LJ_TISNUM; jae ->assert_bad_for_arg_type - | cmp FOR_TSTEP, LJ_TISNUM; jae ->assert_bad_for_arg_type -#endif - } - | mov RB, FOR_TSTEP // Load type/hiword of for step. - if (!vk) { - | cmp RB, LJ_TISNUM; jae ->vmeta_for - } - | movsd xmm0, qword FOR_IDX - | movsd xmm1, qword FOR_STOP - if (vk) { - | addsd xmm0, qword FOR_STEP - | movsd qword FOR_IDX, xmm0 - | test RB, RB; js >3 - } else { - | jl >3 - } - | ucomisd xmm1, xmm0 - |1: - | movsd qword FOR_EXT, xmm0 - if (op == BC_FORI) { - |.if DUALNUM - | jnb <7 - |.else - | jnb >2 - | branchPC RD - |.endif - } else if (op == BC_JFORI) { - | branchPC RD - | movzx RD, PC_RD - | jnb =>BC_JLOOP - } else if (op == BC_IFORL) { - |.if DUALNUM - | jb <7 - |.else - | jb >2 - | branchPC RD - |.endif - } else { - | jnb =>BC_JLOOP - } - |.if DUALNUM - | jmp <6 - |.else - |2: - | ins_next - |.endif - | - |3: // Invert comparison if step is negative. - | ucomisd xmm0, xmm1 - | jmp <1 - break; - - case BC_ITERL: - |.if JIT - | hotloop RB - |.endif - | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op. - break; - - case BC_JITERL: -#if !LJ_HASJIT - break; -#endif - case BC_IITERL: - | ins_AJ // RA = base, RD = target - | lea RA, [BASE+RA*8] - | mov RB, [RA+4] - | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil. - if (op == BC_JITERL) { - | mov [RA-4], RB - | mov RB, [RA] - | mov [RA-8], RB - | jmp =>BC_JLOOP - } else { - | branchPC RD // Otherwise save control var + branch. - | mov RD, [RA] - | mov [RA-4], RB - | mov [RA-8], RD - } - |1: - | ins_next - break; - - case BC_LOOP: - | ins_A // RA = base, RD = target (loop extent) - | // Note: RA/RD is only used by trace recorder to determine scope/extent - | // This opcode does NOT jump, it's only purpose is to detect a hot loop. 
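/* Illustrative sketch, not part of the patch: what the hotloop/hotcall
** macros used by BC_LOOP/BC_FORL/BC_FUNCF amount to. The interpreter keeps
** a small table of counters indexed by a hash of the bytecode PC and hands
** the site to the JIT trace recorder once it has executed often enough.
** All names and the threshold below are assumptions for this sketch only,
** not LuaJIT's actual identifiers.
*/
#include <stdint.h>

#define HOTCOUNT_SIZE 64        /* assumed number of counter slots */
#define HOTCOUNT_LOOP 57        /* assumed executions before a loop is "hot" */

static uint16_t hotcount[HOTCOUNT_SIZE];  /* zero-initialized, refilled lazily */

/* Returns 1 when the loop at bytecode address pc should start being recorded. */
static int hotloop_hit(uintptr_t pc)
{
  uint16_t *c = &hotcount[(pc >> 2) & (HOTCOUNT_SIZE - 1)];
  if (*c == 0)
    *c = HOTCOUNT_LOOP;         /* fresh slot: refill the countdown */
  if (--*c == 0) {
    *c = HOTCOUNT_LOOP;         /* reset so the site can trigger again later */
    return 1;                   /* hot: hand this loop to the trace recorder */
  }
  return 0;                     /* not hot yet: keep interpreting */
}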
- |.if JIT - | hotloop RB - |.endif - | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op. - break; - - case BC_ILOOP: - | ins_A // RA = base, RD = target (loop extent) - | ins_next - break; - - case BC_JLOOP: - |.if JIT - | ins_AD // RA = base (ignored), RD = traceno - | mov RA, [DISPATCH+DISPATCH_J(trace)] - | mov TRACE:RD, [RA+RD*4] - | mov RDa, TRACE:RD->mcode - | mov L:RB, SAVE_L - | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE - | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB - | // Save additional callee-save registers only used in compiled code. - |.if X64WIN - | mov TMPQ, r12 - | mov TMPa, r13 - | mov CSAVE_4, r14 - | mov CSAVE_3, r15 - | mov RAa, rsp - | sub rsp, 9*16+4*8 - | movdqa [RAa], xmm6 - | movdqa [RAa-1*16], xmm7 - | movdqa [RAa-2*16], xmm8 - | movdqa [RAa-3*16], xmm9 - | movdqa [RAa-4*16], xmm10 - | movdqa [RAa-5*16], xmm11 - | movdqa [RAa-6*16], xmm12 - | movdqa [RAa-7*16], xmm13 - | movdqa [RAa-8*16], xmm14 - | movdqa [RAa-9*16], xmm15 - |.elif X64 - | mov TMPQ, r12 - | mov TMPa, r13 - | sub rsp, 16 - |.endif - | jmp RDa - |.endif - break; - - case BC_JMP: - | ins_AJ // RA = unused, RD = target - | branchPC RD - | ins_next - break; - - /* -- Function headers -------------------------------------------------- */ - - /* - ** Reminder: A function may be called with func/args above L->maxstack, - ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, - ** too. This means all FUNC* ops (including fast functions) must check - ** for stack overflow _before_ adding more slots! - */ - - case BC_FUNCF: - |.if JIT - | hotcall RB - |.endif - case BC_FUNCV: /* NYI: compiled vararg functions. */ - | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op. - break; - - case BC_JFUNCF: -#if !LJ_HASJIT - break; -#endif - case BC_IFUNCF: - | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 - | mov KBASE, [PC-4+PC2PROTO(k)] - | mov L:RB, SAVE_L - | lea RA, [BASE+RA*8] // Top of frame. - | cmp RA, L:RB->maxstack - | ja ->vm_growstack_f - | movzx RA, byte [PC-4+PC2PROTO(numparams)] - | cmp NARGS:RD, RA // Check for missing parameters. - | jbe >3 - |2: - if (op == BC_JFUNCF) { - | movzx RD, PC_RD - | jmp =>BC_JLOOP - } else { - | ins_next - } - | - |3: // Clear missing parameters. - | mov dword [BASE+NARGS:RD*8-4], LJ_TNIL - | add NARGS:RD, 1 - | cmp NARGS:RD, RA - | jbe <3 - | jmp <2 - break; - - case BC_JFUNCV: -#if !LJ_HASJIT - break; -#endif - | int3 // NYI: compiled vararg functions - break; /* NYI: compiled vararg functions. */ - - case BC_IFUNCV: - | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 - | lea RB, [NARGS:RD*8+FRAME_VARG] - | lea RD, [BASE+NARGS:RD*8] - | mov LFUNC:KBASE, [BASE-8] - | mov [RD-4], RB // Store delta + FRAME_VARG. - | mov [RD-8], LFUNC:KBASE // Store copy of LFUNC. - | mov L:RB, SAVE_L - | lea RA, [RD+RA*8] - | cmp RA, L:RB->maxstack - | ja ->vm_growstack_v // Need to grow stack. - | mov RA, BASE - | mov BASE, RD - | movzx RB, byte [PC-4+PC2PROTO(numparams)] - | test RB, RB - | jz >2 - |1: // Copy fixarg slots up to new frame. - | add RA, 8 - | cmp RA, BASE - | jnb >3 // Less args than parameters? - | mov KBASE, [RA-8] - | mov [RD], KBASE - | mov KBASE, [RA-4] - | mov [RD+4], KBASE - | add RD, 8 - | mov dword [RA-4], LJ_TNIL // Clear old fixarg slot (help the GC). - | sub RB, 1 - | jnz <1 - |2: - if (op == BC_JFUNCV) { - | movzx RD, PC_RD - | jmp =>BC_JLOOP - } else { - | mov KBASE, [PC-4+PC2PROTO(k)] - | ins_next - } - | - |3: // Clear missing parameters. 
- | mov dword [RD+4], LJ_TNIL - | add RD, 8 - | sub RB, 1 - | jnz <3 - | jmp <2 - break; - - case BC_FUNCC: - case BC_FUNCCW: - | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1 - | mov CFUNC:RB, [BASE-8] - | mov KBASEa, CFUNC:RB->f - | mov L:RB, SAVE_L - | lea RD, [BASE+NARGS:RD*8-8] - | mov L:RB->base, BASE - | lea RA, [RD+8*LUA_MINSTACK] - | cmp RA, L:RB->maxstack - | mov L:RB->top, RD - if (op == BC_FUNCC) { - |.if X64 - | mov CARG1d, L:RB // Caveat: CARG1d may be RA. - |.else - | mov ARG1, L:RB - |.endif - } else { - |.if X64 - | mov CARG2, KBASEa - | mov CARG1d, L:RB // Caveat: CARG1d may be RA. - |.else - | mov ARG2, KBASEa - | mov ARG1, L:RB - |.endif - } - | ja ->vm_growstack_c // Need to grow stack. - | set_vmstate C - if (op == BC_FUNCC) { - | call KBASEa // (lua_State *L) - } else { - | // (lua_State *L, lua_CFunction f) - | call aword [DISPATCH+DISPATCH_GL(wrapf)] - } - | // nresults returned in eax (RD). - | mov BASE, L:RB->base - | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB - | set_vmstate INTERP - | lea RA, [BASE+RD*8] - | neg RA - | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8 - | mov PC, [BASE-4] // Fetch PC of caller. - | jmp ->vm_returnc - break; - - /* ---------------------------------------------------------------------- */ - - default: - fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); - exit(2); - break; - } -} - -static int build_backend(BuildCtx *ctx) -{ - int op; - dasm_growpc(Dst, BC__MAX); - build_subroutines(ctx); - |.code_op - for (op = 0; op < BC__MAX; op++) - build_ins(ctx, (BCOp)op, op); - return BC__MAX; -} - -/* Emit pseudo frame-info for all assembler functions. */ -static void emit_asm_debug(BuildCtx *ctx) -{ - int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); -#if LJ_64 -#define SZPTR "8" -#define BSZPTR "3" -#define REG_SP "0x7" -#define REG_RA "0x10" -#else -#define SZPTR "4" -#define BSZPTR "2" -#define REG_SP "0x4" -#define REG_RA "0x8" -#endif - switch (ctx->mode) { - case BUILD_elfasm: - fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); - fprintf(ctx->fp, - ".Lframe0:\n" - "\t.long .LECIE0-.LSCIE0\n" - ".LSCIE0:\n" - "\t.long 0xffffffff\n" - "\t.byte 0x1\n" - "\t.string \"\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -" SZPTR "\n" - "\t.byte " REG_RA "\n" - "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" - "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" - "\t.align " SZPTR "\n" - ".LECIE0:\n\n"); - fprintf(ctx->fp, - ".LSFDE0:\n" - "\t.long .LEFDE0-.LASFDE0\n" - ".LASFDE0:\n" - "\t.long .Lframe0\n" -#if LJ_64 - "\t.quad .Lbegin\n" - "\t.quad %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ - "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ - "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ -#if LJ_NO_UNWIND - "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */ - "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */ -#endif -#else - "\t.long .Lbegin\n" - "\t.long %d\n" - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ - "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ - "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ - "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ - "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ -#endif - "\t.align " SZPTR "\n" - ".LEFDE0:\n\n", fcofs, CFRAME_SIZE); -#if LJ_HASFFI - fprintf(ctx->fp, - ".LSFDE1:\n" - "\t.long .LEFDE1-.LASFDE1\n" - ".LASFDE1:\n" - "\t.long .Lframe0\n" -#if LJ_64 - "\t.quad lj_vm_ffi_call\n" - 
"\t.quad %d\n" - "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ -#else - "\t.long lj_vm_ffi_call\n" - "\t.long %d\n" - "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */ - "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ - "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */ -#endif - "\t.align " SZPTR "\n" - ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); -#endif -#if !LJ_NO_UNWIND -#if (defined(__sun__) && defined(__svr4__)) -#if LJ_64 - fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n"); -#else - fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); -#endif -#else - fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); -#endif - fprintf(ctx->fp, - ".Lframe1:\n" - "\t.long .LECIE1-.LSCIE1\n" - ".LSCIE1:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zPR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -" SZPTR "\n" - "\t.byte " REG_RA "\n" - "\t.uleb128 6\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.long lj_err_unwind_dwarf-.\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" - "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" - "\t.align " SZPTR "\n" - ".LECIE1:\n\n"); - fprintf(ctx->fp, - ".LSFDE2:\n" - "\t.long .LEFDE2-.LASFDE2\n" - ".LASFDE2:\n" - "\t.long .LASFDE2-.Lframe1\n" - "\t.long .Lbegin-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ -#if LJ_64 - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ - "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ - "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ -#else - "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ - "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ - "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ - "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ -#endif - "\t.align " SZPTR "\n" - ".LEFDE2:\n\n", fcofs, CFRAME_SIZE); -#if LJ_HASFFI - fprintf(ctx->fp, - ".Lframe2:\n" - "\t.long .LECIE2-.LSCIE2\n" - ".LSCIE2:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.string \"zR\"\n" - "\t.uleb128 0x1\n" - "\t.sleb128 -" SZPTR "\n" - "\t.byte " REG_RA "\n" - "\t.uleb128 1\n" /* augmentation length */ - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" - "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" - "\t.align " SZPTR "\n" - ".LECIE2:\n\n"); - fprintf(ctx->fp, - ".LSFDE3:\n" - "\t.long .LEFDE3-.LASFDE3\n" - ".LASFDE3:\n" - "\t.long .LASFDE3-.Lframe2\n" - "\t.long lj_vm_ffi_call-.\n" - "\t.long %d\n" - "\t.uleb128 0\n" /* augmentation length */ -#if LJ_64 - "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ - "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ -#else - "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */ - "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ - "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */ - "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */ -#endif - "\t.align " SZPTR "\n" - ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); -#endif -#endif - break; -#if !LJ_NO_UNWIND - /* Mental note: never let Apple design an assembler. - ** Or a linker. Or a plastic case. But I digress. 
- */ - case BUILD_machasm: { -#if LJ_HASFFI - int fcsize = 0; -#endif - int i; - fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); - fprintf(ctx->fp, - "EH_frame1:\n" - "\t.set L$set$x,LECIEX-LSCIEX\n" - "\t.long L$set$x\n" - "LSCIEX:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.ascii \"zPR\\0\"\n" - "\t.byte 0x1\n" - "\t.byte 128-" SZPTR "\n" - "\t.byte " REG_RA "\n" - "\t.byte 6\n" /* augmentation length */ - "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ -#if LJ_64 - "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" -#else - "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n" - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */ -#endif - "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" - "\t.align " BSZPTR "\n" - "LECIEX:\n\n"); - for (i = 0; i < ctx->nsym; i++) { - const char *name = ctx->sym[i].name; - int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; - if (size == 0) continue; -#if LJ_HASFFI - if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; } -#endif - fprintf(ctx->fp, - "%s.eh:\n" - "LSFDE%d:\n" - "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" - "\t.long L$set$%d\n" - "LASFDE%d:\n" - "\t.long LASFDE%d-EH_frame1\n" - "\t.long %s-.\n" - "\t.long %d\n" - "\t.byte 0\n" /* augmentation length */ - "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ -#if LJ_64 - "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ - "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ - "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ - "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ -#else - "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ - "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */ - "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */ - "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */ -#endif - "\t.align " BSZPTR "\n" - "LEFDE%d:\n\n", - name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); - } -#if LJ_HASFFI - if (fcsize) { - fprintf(ctx->fp, - "EH_frame2:\n" - "\t.set L$set$y,LECIEY-LSCIEY\n" - "\t.long L$set$y\n" - "LSCIEY:\n" - "\t.long 0\n" - "\t.byte 0x1\n" - "\t.ascii \"zR\\0\"\n" - "\t.byte 0x1\n" - "\t.byte 128-" SZPTR "\n" - "\t.byte " REG_RA "\n" - "\t.byte 1\n" /* augmentation length */ -#if LJ_64 - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" -#else - "\t.byte 0x1b\n" /* pcrel|sdata4 */ - "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH. 
*/ -#endif - "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" - "\t.align " BSZPTR "\n" - "LECIEY:\n\n"); - fprintf(ctx->fp, - "_lj_vm_ffi_call.eh:\n" - "LSFDEY:\n" - "\t.set L$set$yy,LEFDEY-LASFDEY\n" - "\t.long L$set$yy\n" - "LASFDEY:\n" - "\t.long LASFDEY-EH_frame2\n" - "\t.long _lj_vm_ffi_call-.\n" - "\t.long %d\n" - "\t.byte 0\n" /* augmentation length */ -#if LJ_64 - "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */ - "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ - "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */ - "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ -#else - "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */ - "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ - "\t.byte 0xd\n\t.byte 0x4\n" /* def_cfa_register ebp */ - "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */ -#endif - "\t.align " BSZPTR "\n" - "LEFDEY:\n\n", fcsize); - } -#endif -#if !LJ_64 - fprintf(ctx->fp, - "\t.non_lazy_symbol_pointer\n" - "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" - ".indirect_symbol _lj_err_unwind_dwarf\n" - ".long 0\n\n"); - fprintf(ctx->fp, "\t.section __IMPORT,__jump_table,symbol_stubs,pure_instructions+self_modifying_code,5\n"); - { - const char *const *xn; - for (xn = ctx->extnames; *xn; xn++) - if (strncmp(*xn, LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) - fprintf(ctx->fp, "L_%s$stub:\n\t.indirect_symbol _%s\n\t.ascii \"\\364\\364\\364\\364\\364\"\n", *xn, *xn); - } -#endif - fprintf(ctx->fp, ".subsections_via_symbols\n"); - } - break; -#endif - default: /* Difficult for other modes. */ - break; - } -} - diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xb1build.bat b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xb1build.bat deleted file mode 100755 index 847e84a5557..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xb1build.bat +++ /dev/null @@ -1,101 +0,0 @@ -@rem Script to build LuaJIT with the Xbox One SDK. -@rem Donated to the public domain. -@rem -@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler) -@rem Then cd to this directory and run this script. - -@if not defined INCLUDE goto :FAIL -@if not defined DurangoXDK goto :FAIL - -@setlocal -@echo ---- Host compiler ---- -@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /DLUAJIT_ENABLE_GC64 -@set LJLINK=link /nologo -@set LJMT=mt /nologo -@set DASMDIR=..\dynasm -@set DASM=%DASMDIR%\dynasm.lua -@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - -%LJCOMPILE% host\minilua.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:minilua.exe minilua.obj -@if errorlevel 1 goto :BAD -if exist minilua.exe.manifest^ - %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe - -@rem Error out for 64 bit host compiler -@minilua -@if not errorlevel 8 goto :FAIL - -@set DASMFLAGS=-D WIN -D FFI -D P64 -minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x64.dasc -@if errorlevel 1 goto :BAD - -%LJCOMPILE% /I "." 
/I %DASMDIR% /D_DURANGO host\buildvm*.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:buildvm.exe buildvm*.obj -@if errorlevel 1 goto :BAD -if exist buildvm.exe.manifest^ - %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe - -buildvm -m peobj -o lj_vm.obj -@if errorlevel 1 goto :BAD -buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m libdef -o lj_libdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m recdef -o lj_recdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m folddef -o lj_folddef.h lj_opt_fold.c -@if errorlevel 1 goto :BAD - -@echo ---- Cross compiler ---- - -@set CWD=%cd% -@call "%DurangoXDK%\xdk\DurangoVars.cmd" XDK -@cd /D "%CWD%" -@shift - -@set LJCOMPILE="cl" /nologo /c /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /D_LIB /D_UNICODE /D_DURANGO -@set LJLIB="lib" /nologo - -@if "%1"=="debug" ( - @shift - @set LJCOMPILE=%LJCOMPILE% /Zi /MDd /Od - @set LJLINK=%LJLINK% /debug -) else ( - @set LJCOMPILE=%LJCOMPILE% /MD /O2 /DNDEBUG -) - -@if "%1"=="amalg" goto :AMALG -%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -@if errorlevel 1 goto :BAD -%LJLIB% /OUT:luajit.lib lj_*.obj lib_*.obj -@if errorlevel 1 goto :BAD -@goto :NOAMALG -:AMALG -%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c -@if errorlevel 1 goto :BAD -%LJLIB% /OUT:luajit.lib ljamalg.obj lj_vm.obj -@if errorlevel 1 goto :BAD -:NOAMALG - -@del *.obj *.manifest minilua.exe buildvm.exe -@echo. -@echo === Successfully built LuaJIT for Xbox One === - -@goto :END -:BAD -@echo. -@echo ******************************************************* -@echo *** Build FAILED -- Please check the error messages *** -@echo ******************************************************* -@goto :END -:FAIL -@echo To run this script you must open a "Visual Studio .NET Command Prompt" -@echo (64 bit host compiler). The Xbox One SDK must be installed, too. -:END diff --git a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xedkbuild.bat b/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xedkbuild.bat deleted file mode 100755 index 240ec878daf..00000000000 --- a/Build/source/libs/luajit/LuaJIT-2.1.0-beta1/src/xedkbuild.bat +++ /dev/null @@ -1,92 +0,0 @@ -@rem Script to build LuaJIT with the Xbox 360 SDK. -@rem Donated to the public domain. -@rem -@rem Open a "Visual Studio .NET Command Prompt" (32 bit host compiler) -@rem Then cd to this directory and run this script. - -@if not defined INCLUDE goto :FAIL -@if not defined XEDK goto :FAIL - -@setlocal -@rem ---- Host compiler ---- -@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE -@set LJLINK=link /nologo -@set LJMT=mt /nologo -@set DASMDIR=..\dynasm -@set DASM=%DASMDIR%\dynasm.lua -@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - -%LJCOMPILE% host\minilua.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:minilua.exe minilua.obj -@if errorlevel 1 goto :BAD -if exist minilua.exe.manifest^ - %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe - -@rem Error out for 64 bit host compiler -@minilua -@if errorlevel 8 goto :FAIL - -@set DASMFLAGS=-D GPR64 -D FRAME32 -D PPE -D SQRT -D DUALNUM -minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_ppc.dasc -@if errorlevel 1 goto :BAD - -%LJCOMPILE% /I "." 
/I %DASMDIR% /D_XBOX_VER=200 /DLUAJIT_TARGET=LUAJIT_ARCH_PPC host\buildvm*.c -@if errorlevel 1 goto :BAD -%LJLINK% /out:buildvm.exe buildvm*.obj -@if errorlevel 1 goto :BAD -if exist buildvm.exe.manifest^ - %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe - -buildvm -m peobj -o lj_vm.obj -@if errorlevel 1 goto :BAD -buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m libdef -o lj_libdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m recdef -o lj_recdef.h %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% -@if errorlevel 1 goto :BAD -buildvm -m folddef -o lj_folddef.h lj_opt_fold.c -@if errorlevel 1 goto :BAD - -@rem ---- Cross compiler ---- -@set LJCOMPILE="%XEDK%\bin\win32\cl" /nologo /c /MT /O2 /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /DNDEBUG /D_XBOX /D_LIB /DLUAJIT_USE_SYSMALLOC -@set LJLIB="%XEDK%\bin\win32\lib" /nologo -@set "INCLUDE=%XEDK%\include\xbox" - -@if "%1" neq "debug" goto :NODEBUG -@shift -@set "LJCOMPILE=%LJCOMPILE% /Zi" -:NODEBUG -@if "%1"=="amalg" goto :AMALG -%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -@if errorlevel 1 goto :BAD -%LJLIB% /OUT:luajit20.lib lj_*.obj lib_*.obj -@if errorlevel 1 goto :BAD -@goto :NOAMALG -:AMALG -%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c -@if errorlevel 1 goto :BAD -%LJLIB% /OUT:luajit20.lib ljamalg.obj lj_vm.obj -@if errorlevel 1 goto :BAD -:NOAMALG - -@del *.obj *.manifest minilua.exe buildvm.exe -@echo. -@echo === Successfully built LuaJIT for Xbox 360 === - -@goto :END -:BAD -@echo. -@echo ******************************************************* -@echo *** Build FAILED -- Please check the error messages *** -@echo ******************************************************* -@goto :END -:FAIL -@echo To run this script you must open a "Visual Studio .NET Command Prompt" -@echo (32 bit host compiler). The Xbox 360 SDK must be installed, too. -:END diff --git a/Build/source/libs/luajit/LuaJIT-PATCHES/ChangeLog b/Build/source/libs/luajit/LuaJIT-PATCHES/ChangeLog new file mode 100644 index 00000000000..0306037354a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-PATCHES/ChangeLog @@ -0,0 +1,61 @@ +2015-09-27 Peter Breitenlohner + + * patch-07-x86_64-cygwin (new): Backport from upstream git. + +2015-09-07 Peter Breitenlohner + + Import LuaJIT-2.1.0-beta1. + + * patch-01-LuaJITTeX: Adapted. + * patch-05-LuaJITTeX: Adapted. + * patch-06-ppc-darwin: Adapted. + +2015-06-12 Peter Breitenlohner + + Import LuaJIT-2.0.4. + + * patch-01-LuaJITTeX: Adapted. + * patch-05-LuaJITTeX: Adapted. + * patch-06-ppc-darwin: Adapted. + +2015-01-19 Peter Breitenlohner + + * patch-06-ppc-darwin (new): Build fix for PPC OS X. + From http://permalink.gmane.org/gmane.comp.lang.lua.luajit/5936. + +2014-06-01 Peter Breitenlohner + + * patch-05-LuaJITTeX (lj_str_new): Replace '6' by LUAI_HASHLIMIT + and use '#define LUAI_HASHLIMIT 5' as default as in + ../../libs/lua52/lua-5.2.3/src/lstring.c. + +2014-03-14 Peter Breitenlohner + + Import LuaJIT-2.0.3. + + * patch-01-LuaJITTeX: Adapted. + * patch-05-LuaJITTeX (new): More changes from Luigi and Taco. + + * patch-02-unwind, patch-03-OSX-build, patch-04-NetBSD64-build + (removed): Now in distribution. + +2014-02-23 Peter Breitenlohner + + * patch-04-NetBSD64-build (new): Fix NetBSD/amd64 build from + Marc Baudoin . + +2014-02-07 Peter Breitenlohner + + * patch-03-OSX-build (new): Backport from upstream git. + Fix OSX build issue by Mike Pall. 
+ +2014-02-06 Peter Breitenlohner + + * patch-02-unwind (new): Bug fix. + +2014-02-05 Peter Breitenlohner + + Import LuaJIT-2.0.2. + + * patch-01-LuaJITTeX (new): Adapt for use in LuaJITTeX. + diff --git a/Build/source/libs/luajit/LuaJIT-PATCHES/patch-01-LuaJITTeX b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-01-LuaJITTeX new file mode 100644 index 00000000000..4853f00cc52 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-01-LuaJITTeX @@ -0,0 +1,175 @@ +diff -ur LuaJIT-2.1.0-beta1.orig/src/lauxlib.h LuaJIT-2.1.0-beta1/src/lauxlib.h +--- LuaJIT-2.1.0-beta1.orig/src/lauxlib.h 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lauxlib.h 2015-09-04 08:42:39.000000000 +0200 +@@ -86,6 +86,32 @@ + int level); + + ++ ++/* ++** {====================================================== ++** File handles for IO library ++** ======================================================= ++*/ ++ ++/* ++** A file handle is a userdata with metatable 'LUA_FILEHANDLE' and ++** initial structure 'luaL_Stream' (it may contain other fields ++** after that initial structure). ++*/ ++ ++#define LUA_FILEHANDLE "FILE*" ++ ++ ++typedef struct luaL_Stream { ++ FILE *f; /* stream (NULL for incompletely created streams) */ ++ lua_CFunction closef; /* to close stream (NULL for closed streams) */ ++} luaL_Stream; ++ ++/* }====================================================== */ ++ ++ ++ ++ + /* + ** =============================================================== + ** some useful macros +diff -ur LuaJIT-2.1.0-beta1.orig/src/lib_init.c LuaJIT-2.1.0-beta1/src/lib_init.c +--- LuaJIT-2.1.0-beta1.orig/src/lib_init.c 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lib_init.c 2015-09-04 08:42:39.000000000 +0200 +@@ -26,6 +26,7 @@ + { LUA_DBLIBNAME, luaopen_debug }, + { LUA_BITLIBNAME, luaopen_bit }, + { LUA_JITLIBNAME, luaopen_jit }, ++ { LUA_BITLIBNAME_32, luaopen_bit32 }, + { NULL, NULL } + }; + +diff -ur LuaJIT-2.1.0-beta1.orig/src/lib_package.c LuaJIT-2.1.0-beta1/src/lib_package.c +--- LuaJIT-2.1.0-beta1.orig/src/lib_package.c 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lib_package.c 2015-09-04 08:42:39.000000000 +0200 +@@ -362,6 +362,29 @@ + return 1; /* library loaded successfully */ + } + ++#define LUA_POF "luaopen_" ++#define LUA_OFSEP "_" ++#define POF LUA_POF ++ ++static const char *mkfuncname (lua_State *L, const char *modname) { ++ const char *funcname; ++ const char *mark = strchr(modname, *LUA_IGMARK); ++ if (mark) modname = mark + 1; ++ funcname = luaL_gsub(L, modname, ".", LUA_OFSEP); ++ funcname = lua_pushfstring(L, POF"%s", funcname); ++ lua_remove(L, -2); /* remove 'gsub' result */ ++ return funcname; ++} ++ ++ ++int loader_C_luatex (lua_State *L, const char *name, const char *filename) { ++ const char *funcname; ++ funcname = mkfuncname(L, name); ++ if (ll_loadfunc(L, filename, funcname,0) != 0) ++ loaderror(L, filename); ++ return 1; /* library loaded successfully */ ++} ++ + static int lj_cf_package_loader_croot(lua_State *L) + { + const char *filename; +@@ -381,6 +404,21 @@ + return 1; + } + ++int loader_Call_luatex (lua_State *L, const char *name, const char *filename) { ++ const char *funcname; ++ int stat; ++ if (filename == NULL) return 1; /* root not found */ ++ funcname = mkfuncname(L, name); ++ if ((stat = ll_loadfunc(L, filename, funcname,0)) != 0) { ++ if (stat != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ ++ lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, ++ name, filename); ++ return 1; /* function not 
found */ ++ } ++ return 1; /* library loaded successfully */ ++} ++ ++ + static int lj_cf_package_loader_preload(lua_State *L) + { + const char *name = luaL_checkstring(L, 1); +diff -ur LuaJIT-2.1.0-beta1.orig/src/lua.h LuaJIT-2.1.0-beta1/src/lua.h +--- LuaJIT-2.1.0-beta1.orig/src/lua.h 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lua.h 2015-09-04 08:42:39.000000000 +0200 +@@ -348,6 +348,16 @@ + const char *chunkname, const char *mode); + + ++#define LUA_OPEQ 0 ++#define LUA_OPLT 1 ++#define LUA_OPLE 2 ++#define LUA_OK 0 ++ ++/* see http://comments.gmane.org/gmane.comp.programming.swig/18673 */ ++# define lua_rawlen lua_objlen ++ ++ ++ + struct lua_Debug { + int event; + const char *name; /* (n) */ +diff -ur LuaJIT-2.1.0-beta1.orig/src/lualib.h LuaJIT-2.1.0-beta1/src/lualib.h +--- LuaJIT-2.1.0-beta1.orig/src/lualib.h 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lualib.h 2015-09-04 08:42:39.000000000 +0200 +@@ -22,6 +22,8 @@ + #define LUA_JITLIBNAME "jit" + #define LUA_FFILIBNAME "ffi" + ++#define LUA_BITLIBNAME_32 "bit32" ++ + LUALIB_API int luaopen_base(lua_State *L); + LUALIB_API int luaopen_math(lua_State *L); + LUALIB_API int luaopen_string(lua_State *L); +@@ -34,6 +36,8 @@ + LUALIB_API int luaopen_jit(lua_State *L); + LUALIB_API int luaopen_ffi(lua_State *L); + ++LUALIB_API int luaopen_bit32(lua_State *L); ++ + LUALIB_API void luaL_openlibs(lua_State *L); + + #ifndef lua_assert +diff -ur LuaJIT-2.1.0-beta1.orig/src/Makefile LuaJIT-2.1.0-beta1/src/Makefile +--- LuaJIT-2.1.0-beta1.orig/src/Makefile 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/Makefile 2015-09-04 08:42:39.000000000 +0200 +@@ -97,7 +97,7 @@ + # enabled by default. Some other features that *might* break some existing + # code (e.g. __pairs or os.execute() return values) can be enabled here. + # Note: this does not provide full compatibility with Lua 5.2 at this time. +-#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT ++XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT + # + # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. 
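/* Illustrative sketch, not part of the patch: what the lua.h/lualib.h
** compatibility additions in patch-01 above (lua_rawlen mapped to
** lua_objlen, LUA_OK, luaopen_bit32 registered via LUA_BITLIBNAME_32) buy
** LuaJITTeX. C code written against the Lua 5.2 API compiles unchanged.
** The function name here is made up for the example and error handling is
** omitted.
*/
#include "lua.h"

static int rawlen_demo(lua_State *L)
{
  size_t n;
  lua_newtable(L);                    /* stack: {} */
  lua_pushinteger(L, 42);
  lua_rawseti(L, -2, 1);              /* t[1] = 42 */
  n = lua_rawlen(L, -1);              /* 5.2-style call; expands to lua_objlen here */
  lua_pushinteger(L, (lua_Integer)n); /* return #t (== 1) */
  return 1;
}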
+ #XCFLAGS+= -DLUAJIT_DISABLE_JIT +@@ -456,7 +456,7 @@ + LJVM_BOUT= $(LJVM_S) + LJVM_MODE= elfasm + +-LJLIB_O= lib_base.o lib_math.o lib_bit.o lib_string.o lib_table.o \ ++LJLIB_O= lib_base.o lib_math.o lbitlib.o lib_bit.o lib_string.o lib_table.o \ + lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o + LJLIB_C= $(LJLIB_O:.o=.c) + +diff -ur LuaJIT-2.1.0-beta1.orig/src/Makefile.dep LuaJIT-2.1.0-beta1/src/Makefile.dep +--- LuaJIT-2.1.0-beta1.orig/src/Makefile.dep 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/Makefile.dep 2015-09-04 08:46:10.000000000 +0200 +@@ -6,6 +6,7 @@ + lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \ + lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \ + lj_strfmt.h lj_lib.h lj_libdef.h ++lbitlib.o: lbitlib.c lua.h luaconf.h lauxlib.h lualib.h + lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ + lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_strscan.h \ + lj_strfmt.h lj_ctype.h lj_cdata.h lj_cconv.h lj_carith.h lj_ff.h \ diff --git a/Build/source/libs/luajit/LuaJIT-PATCHES/patch-05-LuaJITTeX b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-05-LuaJITTeX new file mode 100644 index 00000000000..3a05a52d896 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-05-LuaJITTeX @@ -0,0 +1,108 @@ +diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_def.h LuaJIT-2.1.0-beta1/src/lj_def.h +--- LuaJIT-2.1.0-beta1.orig/src/lj_def.h 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lj_def.h 2015-09-04 08:51:52.000000000 +0200 +@@ -66,7 +66,7 @@ + #define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */ + #define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */ + #define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */ +-#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */ ++#define LJ_MAX_UPVAL 249 /* Max. # of upvalues. */ + + #define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */ + #define LJ_STACK_EXTRA (5+2*LJ_FR2) /* Extra stack space (metamethods). */ +diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_str.c LuaJIT-2.1.0-beta1/src/lj_str.c +--- LuaJIT-2.1.0-beta1.orig/src/lj_str.c 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lj_str.c 2015-09-04 08:51:52.000000000 +0200 +@@ -118,6 +118,16 @@ + g->strhash = newhash; + } + ++/* ++** Lua will use at most ~(2^LUAI_HASHLIMIT) bytes from a string to ++** compute its hash ++*/ ++#if !defined(LUAI_HASHLIMIT) ++#define LUAI_HASHLIMIT 5 ++#endif ++ ++#define cast(t, exp) ((t)(exp)) ++int luajittex_choose_hash_function = 0 ; + /* Intern a string and return string object. */ + GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx) + { +@@ -126,27 +136,44 @@ + GCobj *o; + MSize len = (MSize)lenx; + MSize a, b, h = len; ++ size_t step ; ++ size_t l1 ; + if (lenx >= LJ_MAX_STR) + lj_err_msg(L, LJ_ERR_STROV); + g = G(L); ++ ++ if (len==0) ++ return &g->strempty; ++ if (luajittex_choose_hash_function==0) { ++ /* Lua 5.1.5 hash function */ ++ /* for 5.2 max methods we also need to patch the vm eq */ ++ step = (len>>LUAI_HASHLIMIT)+1; /* if string is too long, don't hash all its chars */ ++ for (l1=len; l1>=step; l1-=step) /* compute hash */ ++ h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1])); ++ } else { ++ /* LuaJIT 2.0.2 hash function */ + /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */ +- if (len >= 4) { /* Caveat: unaligned access! 
*/ +- a = lj_getu32(str); +- h ^= lj_getu32(str+len-4); +- b = lj_getu32(str+(len>>1)-2); +- h ^= b; h -= lj_rol(b, 14); +- b += lj_getu32(str+(len>>2)-1); +- } else if (len > 0) { +- a = *(const uint8_t *)str; +- h ^= *(const uint8_t *)(str+len-1); +- b = *(const uint8_t *)(str+(len>>1)); +- h ^= b; h -= lj_rol(b, 14); +- } else { +- return &g->strempty; +- } +- a ^= h; a -= lj_rol(h, 11); +- b ^= a; b -= lj_rol(a, 25); +- h ^= b; h -= lj_rol(b, 16); ++ if (len >= 4) { /* Caveat: unaligned access! */ ++ a = lj_getu32(str); ++ h ^= lj_getu32(str+len-4); ++ b = lj_getu32(str+(len>>1)-2); ++ h ^= b; h -= lj_rol(b, 14); ++ b += lj_getu32(str+(len>>2)-1); ++ } else if (len > 0) { ++ a = *(const uint8_t *)str; ++ h ^= *(const uint8_t *)(str+len-1); ++ b = *(const uint8_t *)(str+(len>>1)); ++ h ^= b; h -= lj_rol(b, 14); ++ } else { ++ /* Already done, kept for reference */ ++ return &g->strempty; ++ } ++ a ^= h; a -= lj_rol(h, 11); ++ b ^= a; b -= lj_rol(a, 25); ++ h ^= b; h -= lj_rol(b, 16); ++ } ++ ++ + /* Check if the string has already been interned. */ + o = gcref(g->strhash[h & g->strmask]); + if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) { +Only in LuaJIT-2.1.0-beta1/src: lj_str.c.orig +diff -ur LuaJIT-2.1.0-beta1.orig/src/lua.h LuaJIT-2.1.0-beta1/src/lua.h +--- LuaJIT-2.1.0-beta1.orig/src/lua.h 2015-09-04 08:42:39.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lua.h 2015-09-04 08:51:52.000000000 +0200 +@@ -103,6 +103,9 @@ + typedef LUA_INTEGER lua_Integer; + + ++/* communication with LuaJiTTeX */ ++LUA_API int luajittex_choose_hash_function; ++ + + /* + ** state manipulation diff --git a/Build/source/libs/luajit/LuaJIT-PATCHES/patch-06-ppc-darwin b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-06-ppc-darwin new file mode 100644 index 00000000000..17f7e4a1c08 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-06-ppc-darwin @@ -0,0 +1,24 @@ +diff -ur LuaJIT-2.1.0-beta1.orig/src/host/buildvm.c LuaJIT-2.1.0-beta1/src/host/buildvm.c +--- LuaJIT-2.1.0-beta1.orig/src/host/buildvm.c 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/host/buildvm.c 2015-09-04 09:03:00.000000000 +0200 +@@ -113,7 +113,7 @@ + name[0] = '@'; + else + *p = '\0'; +-#elif LJ_TARGET_PPC && !LJ_TARGET_CONSOLE ++#elif LJ_TARGET_PPC && !LJ_TARGET_OSX && !LJ_TARGET_CONSOLE + /* Keep @plt etc. 
*/ + #else + *p = '\0'; +diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_arch.h LuaJIT-2.1.0-beta1/src/lj_arch.h +--- LuaJIT-2.1.0-beta1.orig/src/lj_arch.h 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lj_arch.h 2015-09-04 08:58:07.000000000 +0200 +@@ -324,7 +324,7 @@ + #if __GNUC__ < 4 + #error "Need at least GCC 4.0 or newer" + #endif +-#elif LJ_TARGET_ARM ++#elif LJ_TARGET_ARM || LJ_TARGET_PPC + #if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2) + #error "Need at least GCC 4.2 or newer" + #endif diff --git a/Build/source/libs/luajit/LuaJIT-PATCHES/patch-07-x86_64-cygwin b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-07-x86_64-cygwin new file mode 100644 index 00000000000..b3bdead9dd6 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-PATCHES/patch-07-x86_64-cygwin @@ -0,0 +1,59 @@ +diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_alloc.c LuaJIT-2.1.0-beta1/src/lj_alloc.c +--- LuaJIT-2.1.0-beta1.orig/src/lj_alloc.c 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lj_alloc.c 2015-09-27 19:05:37.000000000 +0200 +@@ -196,7 +196,7 @@ + return ptr; + } + +-#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) ++#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) || defined(__CYGWIN__) + + /* OSX and FreeBSD mmap() use a naive first-fit linear search. + ** That's perfect for us. Except that -pagezero_size must be set for OSX, +diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_arch.h LuaJIT-2.1.0-beta1/src/lj_arch.h +--- LuaJIT-2.1.0-beta1.orig/src/lj_arch.h 2015-09-04 08:58:07.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lj_arch.h 2015-09-27 19:05:37.000000000 +0200 +@@ -155,7 +155,11 @@ + #define LJ_ARCH_NAME "x64" + #define LJ_ARCH_BITS 64 + #define LJ_ARCH_ENDIAN LUAJIT_LE +-#define LJ_ABI_WIN LJ_TARGET_WINDOWS ++#if LJ_TARGET_WINDOWS || __CYGWIN__ ++#define LJ_ABI_WIN 1 ++#else ++#define LJ_ABI_WIN 0 ++#endif + #define LJ_TARGET_X64 1 + #define LJ_TARGET_X86ORX64 1 + #define LJ_TARGET_EHRETREG 0 +diff -ur LuaJIT-2.1.0-beta1.orig/src/lj_err.c LuaJIT-2.1.0-beta1/src/lj_err.c +--- LuaJIT-2.1.0-beta1.orig/src/lj_err.c 2015-08-25 23:35:00.000000000 +0200 ++++ LuaJIT-2.1.0-beta1/src/lj_err.c 2015-09-27 19:05:37.000000000 +0200 +@@ -183,7 +183,7 @@ + + /* -- External frame unwinding -------------------------------------------- */ + +-#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_TARGET_WINDOWS ++#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_ABI_WIN + + /* + ** We have to use our own definitions instead of the mandatory (!) unwind.h, +@@ -349,7 +349,7 @@ + + #endif + +-#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS ++#elif LJ_TARGET_X64 && LJ_ABI_WIN + + /* + ** Someone in Redmond owes me several days of my life. A lot of this is +@@ -414,7 +414,9 @@ + if (cf2) { /* We catch it, so start unwinding the upper frames. */ + if (rec->ExceptionCode == LJ_MSVC_EXCODE || + rec->ExceptionCode == LJ_GCC_EXCODE) { ++#if LJ_TARGET_WINDOWS + __DestructExceptionObject(rec, 1); ++#endif + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); + } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) { + /* Don't catch access violations etc. 
*/ diff --git a/Build/source/libs/luajit/LuaJIT-src/COPYRIGHT b/Build/source/libs/luajit/LuaJIT-src/COPYRIGHT new file mode 100644 index 00000000000..1ef7df62446 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/COPYRIGHT @@ -0,0 +1,56 @@ +=============================================================================== +LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ + +Copyright (C) 2005-2015 Mike Pall. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +[ MIT license: http://www.opensource.org/licenses/mit-license.php ] + +=============================================================================== +[ LuaJIT includes code from Lua 5.1/5.2, which has this license statement: ] + +Copyright (C) 1994-2012 Lua.org, PUC-Rio. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== +[ LuaJIT includes code from dlmalloc, which has this license statement: ] + +This is a version (aka dlmalloc) of malloc/free/realloc written by +Doug Lea and released to the public domain, as explained at +http://creativecommons.org/licenses/publicdomain + +=============================================================================== diff --git a/Build/source/libs/luajit/LuaJIT-src/Makefile b/Build/source/libs/luajit/LuaJIT-src/Makefile new file mode 100644 index 00000000000..8ce773e3936 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/Makefile @@ -0,0 +1,159 @@ +############################################################################## +# LuaJIT top level Makefile for installation. Requires GNU Make. +# +# Please read doc/install.html before changing any variables! +# +# Suitable for POSIX platforms (Linux, *BSD, OSX etc.). +# Note: src/Makefile has many more configurable options. +# +# ##### This Makefile is NOT useful for Windows! ##### +# For MSVC, please follow the instructions given in src/msvcbuild.bat. +# For MinGW and Cygwin, cd to src and run make with the Makefile there. +# +# Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +############################################################################## + +MAJVER= 2 +MINVER= 1 +RELVER= 0 +PREREL= -beta1 +VERSION= $(MAJVER).$(MINVER).$(RELVER)$(PREREL) +ABIVER= 5.1 + +############################################################################## +# +# Change the installation path as needed. This automatically adjusts +# the paths in src/luaconf.h, too. Note: PREFIX must be an absolute path! +# +export PREFIX= /usr/local +export MULTILIB= lib +############################################################################## + +DPREFIX= $(DESTDIR)$(PREFIX) +INSTALL_BIN= $(DPREFIX)/bin +INSTALL_LIB= $(DPREFIX)/$(MULTILIB) +INSTALL_SHARE= $(DPREFIX)/share +INSTALL_INC= $(DPREFIX)/include/luajit-$(MAJVER).$(MINVER) + +INSTALL_LJLIBD= $(INSTALL_SHARE)/luajit-$(VERSION) +INSTALL_JITLIB= $(INSTALL_LJLIBD)/jit +INSTALL_LMODD= $(INSTALL_SHARE)/lua +INSTALL_LMOD= $(INSTALL_LMODD)/$(ABIVER) +INSTALL_CMODD= $(INSTALL_LIB)/lua +INSTALL_CMOD= $(INSTALL_CMODD)/$(ABIVER) +INSTALL_MAN= $(INSTALL_SHARE)/man/man1 +INSTALL_PKGCONFIG= $(INSTALL_LIB)/pkgconfig + +INSTALL_TNAME= luajit-$(VERSION) +INSTALL_TSYMNAME= luajit +INSTALL_ANAME= libluajit-$(ABIVER).a +INSTALL_SONAME= libluajit-$(ABIVER).so.$(MAJVER).$(MINVER).$(RELVER) +INSTALL_SOSHORT= libluajit-$(ABIVER).so +INSTALL_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).$(MINVER).$(RELVER).dylib +INSTALL_DYLIBSHORT1= libluajit-$(ABIVER).dylib +INSTALL_DYLIBSHORT2= libluajit-$(ABIVER).$(MAJVER).dylib +INSTALL_PCNAME= luajit.pc + +INSTALL_STATIC= $(INSTALL_LIB)/$(INSTALL_ANAME) +INSTALL_DYN= $(INSTALL_LIB)/$(INSTALL_SONAME) +INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_SOSHORT) +INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_SOSHORT) +INSTALL_T= $(INSTALL_BIN)/$(INSTALL_TNAME) +INSTALL_TSYM= $(INSTALL_BIN)/$(INSTALL_TSYMNAME) +INSTALL_PC= $(INSTALL_PKGCONFIG)/$(INSTALL_PCNAME) + +INSTALL_DIRS= $(INSTALL_BIN) $(INSTALL_LIB) $(INSTALL_INC) $(INSTALL_MAN) \ + $(INSTALL_PKGCONFIG) $(INSTALL_JITLIB) $(INSTALL_LMOD) $(INSTALL_CMOD) +UNINSTALL_DIRS= $(INSTALL_JITLIB) $(INSTALL_LJLIBD) $(INSTALL_INC) \ + $(INSTALL_LMOD) $(INSTALL_LMODD) $(INSTALL_CMOD) $(INSTALL_CMODD) + +RM= rm -f +MKDIR= mkdir -p +RMDIR= rmdir 2>/dev/null +SYMLINK= ln -sf +INSTALL_X= install -m 0755 +INSTALL_F= install -m 0644 +UNINSTALL= $(RM) 
+LDCONFIG= ldconfig -n +SED_PC= sed -e "s|^prefix=.*|prefix=$(PREFIX)|" \ + -e "s|^multilib=.*|multilib=$(MULTILIB)|" + +FILE_T= luajit +FILE_A= libluajit.a +FILE_SO= libluajit.so +FILE_MAN= luajit.1 +FILE_PC= luajit.pc +FILES_INC= lua.h lualib.h lauxlib.h luaconf.h lua.hpp luajit.h +FILES_JITLIB= bc.lua bcsave.lua dump.lua p.lua v.lua zone.lua \ + dis_x86.lua dis_x64.lua dis_arm.lua dis_ppc.lua \ + dis_mips.lua dis_mipsel.lua vmdef.lua + +ifeq (,$(findstring Windows,$(OS))) + ifeq (Darwin,$(shell uname -s)) + INSTALL_SONAME= $(INSTALL_DYLIBNAME) + INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT1) + INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT2) + LDCONFIG= : + endif +endif + +############################################################################## + +INSTALL_DEP= src/luajit + +default all $(INSTALL_DEP): + @echo "==== Building LuaJIT $(VERSION) ====" + $(MAKE) -C src + @echo "==== Successfully built LuaJIT $(VERSION) ====" + +install: $(INSTALL_DEP) + @echo "==== Installing LuaJIT $(VERSION) to $(PREFIX) ====" + $(MKDIR) $(INSTALL_DIRS) + cd src && $(INSTALL_X) $(FILE_T) $(INSTALL_T) + cd src && test -f $(FILE_A) && $(INSTALL_F) $(FILE_A) $(INSTALL_STATIC) || : + $(RM) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2) + cd src && test -f $(FILE_SO) && \ + $(INSTALL_X) $(FILE_SO) $(INSTALL_DYN) && \ + $(LDCONFIG) $(INSTALL_LIB) && \ + $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT1) && \ + $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT2) || : + cd etc && $(INSTALL_F) $(FILE_MAN) $(INSTALL_MAN) + cd etc && $(SED_PC) $(FILE_PC) > $(FILE_PC).tmp && \ + $(INSTALL_F) $(FILE_PC).tmp $(INSTALL_PC) && \ + $(RM) $(FILE_PC).tmp + cd src && $(INSTALL_F) $(FILES_INC) $(INSTALL_INC) + cd src/jit && $(INSTALL_F) $(FILES_JITLIB) $(INSTALL_JITLIB) + @echo "==== Successfully installed LuaJIT $(VERSION) to $(PREFIX) ====" + @echo "" + @echo "Note: the development releases deliberately do NOT install a symlink for luajit" + @echo "You can do this now by running this command (with sudo):" + @echo "" + @echo " $(SYMLINK) $(INSTALL_TNAME) $(INSTALL_TSYM)" + @echo "" + + +uninstall: + @echo "==== Uninstalling LuaJIT $(VERSION) from $(PREFIX) ====" + $(UNINSTALL) $(INSTALL_T) $(INSTALL_STATIC) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2) $(INSTALL_MAN)/$(FILE_MAN) $(INSTALL_PC) + for file in $(FILES_JITLIB); do \ + $(UNINSTALL) $(INSTALL_JITLIB)/$$file; \ + done + for file in $(FILES_INC); do \ + $(UNINSTALL) $(INSTALL_INC)/$$file; \ + done + $(LDCONFIG) $(INSTALL_LIB) + $(RMDIR) $(UNINSTALL_DIRS) || : + @echo "==== Successfully uninstalled LuaJIT $(VERSION) from $(PREFIX) ====" + +############################################################################## + +amalg: + @echo "Building LuaJIT $(VERSION)" + $(MAKE) -C src amalg + +clean: + $(MAKE) -C src clean + +.PHONY: all install amalg clean + +############################################################################## diff --git a/Build/source/libs/luajit/LuaJIT-src/README b/Build/source/libs/luajit/LuaJIT-src/README new file mode 100644 index 00000000000..951ddbf9c7f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/README @@ -0,0 +1,16 @@ +README for LuaJIT 2.1.0-beta1 +----------------------------- + +LuaJIT is a Just-In-Time (JIT) compiler for the Lua programming language. + +Project Homepage: http://luajit.org/ + +LuaJIT is Copyright (C) 2005-2015 Mike Pall. +LuaJIT is free software, released under the MIT license. +See full Copyright Notice in the COPYRIGHT file or in luajit.h. 
+ +Documentation for LuaJIT is available in HTML format. +Please point your favorite browser to: + + doc/luajit.html + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/bluequad-print.css b/Build/source/libs/luajit/LuaJIT-src/doc/bluequad-print.css new file mode 100644 index 00000000000..dc62ce5c4ca --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/bluequad-print.css @@ -0,0 +1,166 @@ +/* Copyright (C) 2004-2015 Mike Pall. + * + * You are welcome to use the general ideas of this design for your own sites. + * But please do not steal the stylesheet, the layout or the color scheme. + */ +body { + font-family: serif; + font-size: 11pt; + margin: 0 3em; + padding: 0; + border: none; +} +a:link, a:visited, a:hover, a:active { + text-decoration: none; + background: transparent; + color: #0000ff; +} +h1, h2, h3 { + font-family: sans-serif; + font-weight: bold; + text-align: left; + margin: 0.5em 0; + padding: 0; +} +h1 { + font-size: 200%; +} +h2 { + font-size: 150%; +} +h3 { + font-size: 125%; +} +p { + margin: 0 0 0.5em 0; + padding: 0; +} +ul, ol { + margin: 0.5em 0; + padding: 0 0 0 2em; +} +ul { + list-style: outside square; +} +ol { + list-style: outside decimal; +} +li { + margin: 0; + padding: 0; +} +dl { + margin: 1em 0; + padding: 1em; + border: 1px solid black; +} +dt { + font-weight: bold; + margin: 0; + padding: 0; +} +dt sup { + float: right; + margin-left: 1em; +} +dd { + margin: 0.5em 0 0 2em; + padding: 0; +} +table { + table-layout: fixed; + width: 100%; + margin: 1em 0; + padding: 0; + border: 1px solid black; + border-spacing: 0; + border-collapse: collapse; +} +tr { + margin: 0; + padding: 0; + border: none; +} +td { + text-align: left; + margin: 0; + padding: 0.2em 0.5em; + border-top: 1px solid black; + border-bottom: 1px solid black; +} +tr.separate td { + border-top: double; +} +tt, pre, code, kbd, samp { + font-family: monospace; + font-size: 75%; +} +kbd { + font-weight: bolder; +} +blockquote, pre { + margin: 1em 2em; + padding: 0; +} +img { + border: none; + vertical-align: baseline; + margin: 0; + padding: 0; +} +img.left { + float: left; + margin: 0.5em 1em 0.5em 0; +} +img.right { + float: right; + margin: 0.5em 0 0.5em 1em; +} +.flush { + clear: both; + visibility: hidden; +} +.hide, .noprint, #nav { + display: none !important; +} +.pagebreak { + page-break-before: always; +} +#site { + text-align: right; + font-family: sans-serif; + font-weight: bold; + margin: 0 1em; + border-bottom: 1pt solid black; +} +#site a { + font-size: 1.2em; +} +#site a:link, #site a:visited { + text-decoration: none; + font-weight: bold; + background: transparent; + color: #ffffff; +} +#logo { + color: #ff8000; +} +#head { + clear: both; + margin: 0 1em; +} +#main { + line-height: 1.3; + text-align: justify; + margin: 1em; +} +#foot { + clear: both; + font-size: 80%; + text-align: center; + margin: 0 1.25em; + padding: 0.5em 0 0 0; + border-top: 1pt solid black; + page-break-before: avoid; + page-break-after: avoid; +} diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/bluequad.css b/Build/source/libs/luajit/LuaJIT-src/doc/bluequad.css new file mode 100644 index 00000000000..6839e0a76b4 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/bluequad.css @@ -0,0 +1,325 @@ +/* Copyright (C) 2004-2015 Mike Pall. + * + * You are welcome to use the general ideas of this design for your own sites. + * But please do not steal the stylesheet, the layout or the color scheme. 
+ */ +/* colorscheme: + * + * site | head #4162bf/white | #6078bf/#e6ecff + * ------+------ ----------------+------------------- + * nav | main #bfcfff | #e6ecff/black + * + * nav: hiback loback #c5d5ff #b9c9f9 + * hiborder loborder #e6ecff #97a7d7 + * link hover #2142bf #ff0000 + * + * link: link visited hover #2142bf #8122bf #ff0000 + * + * main: boxback boxborder #f0f4ff #bfcfff + */ +body { + font-family: Verdana, Arial, Helvetica, sans-serif; + font-size: 10pt; + margin: 0; + padding: 0; + border: none; + background: #e0e0e0; + color: #000000; +} +a:link { + text-decoration: none; + background: transparent; + color: #2142bf; +} +a:visited { + text-decoration: none; + background: transparent; + color: #8122bf; +} +a:hover, a:active { + text-decoration: underline; + background: transparent; + color: #ff0000; +} +h1, h2, h3 { + font-weight: bold; + text-align: left; + margin: 0.5em 0; + padding: 0; + background: transparent; +} +h1 { + font-size: 200%; + line-height: 3em; /* really 6em relative to body, match #site span */ + margin: 0; +} +h2 { + font-size: 150%; + color: #606060; +} +h3 { + font-size: 125%; + color: #404040; +} +p { + max-width: 600px; + margin: 0 0 0.5em 0; + padding: 0; +} +b { + color: #404040; +} +ul, ol { + max-width: 600px; + margin: 0.5em 0; + padding: 0 0 0 2em; +} +ul { + list-style: outside square; +} +ol { + list-style: outside decimal; +} +li { + margin: 0; + padding: 0; +} +dl { + max-width: 600px; + margin: 1em 0; + padding: 1em; + border: 1px solid #bfcfff; + background: #f0f4ff; +} +dt { + font-weight: bold; + margin: 0; + padding: 0; +} +dt sup { + float: right; + margin-left: 1em; + color: #808080; +} +dt a:visited { + text-decoration: none; + color: #2142bf; +} +dt a:hover, dt a:active { + text-decoration: none; + color: #ff0000; +} +dd { + margin: 0.5em 0 0 2em; + padding: 0; +} +div.tablewrap { /* for IE *sigh* */ + max-width: 600px; +} +table { + table-layout: fixed; + border-spacing: 0; + border-collapse: collapse; + max-width: 600px; + width: 100%; + margin: 1em 0; + padding: 0; + border: 1px solid #bfcfff; +} +tr { + margin: 0; + padding: 0; + border: none; +} +tr.odd { + background: #f0f4ff; +} +tr.separate td { + border-top: 1px solid #bfcfff; +} +td { + text-align: left; + margin: 0; + padding: 0.2em 0.5em; + border: none; +} +tt, code, kbd, samp { + font-family: Courier New, Courier, monospace; + line-height: 1.2; + font-size: 110%; +} +kbd { + font-weight: bolder; +} +blockquote, pre { + max-width: 600px; + margin: 1em 2em; + padding: 0; +} +pre { + line-height: 1.1; +} +pre.code { + line-height: 1.4; + margin: 0.5em 0 1em 0.5em; + padding: 0.5em 1em; + border: 1px solid #bfcfff; + background: #f0f4ff; +} +pre.mark { + padding-left: 2em; +} +span.codemark { + position:absolute; + left: 16em; + color: #4040c0; +} +span.mark { + color: #4040c0; + font-family: Courier New, Courier, monospace; + line-height: 1.1; +} +img { + border: none; + vertical-align: baseline; + margin: 0; + padding: 0; +} +img.left { + float: left; + margin: 0.5em 1em 0.5em 0; +} +img.right { + float: right; + margin: 0.5em 0 0.5em 1em; +} +.indent { + padding-left: 1em; +} +.flush { + clear: both; + visibility: hidden; +} +.hide, .noscreen { + display: none !important; +} +.ext { + color: #ff8000; +} +.new { + font-size: 6pt; + vertical-align: middle; + background: #ff8000; + color: #ffffff; +} +#site { + clear: both; + float: left; + width: 13em; + text-align: center; + font-weight: bold; + margin: 0; + padding: 0; + background: transparent; + color: #ffffff; +} +#site 
a { + font-size: 200%; +} +#site a:link, #site a:visited { + text-decoration: none; + font-weight: bold; + background: transparent; + color: #ffffff; +} +#site span { + line-height: 3em; /* really 6em relative to body, match h1 */ +} +#logo { + color: #ffb380; +} +#head { + margin: 0; + padding: 0 0 0 2em; + border-left: solid 13em #4162bf; + border-right: solid 3em #6078bf; + background: #6078bf; + color: #e6ecff; +} +#nav { + clear: both; + float: left; + overflow: hidden; + text-align: left; + line-height: 1.5; + width: 13em; + padding-top: 1em; + background: transparent; +} +#nav ul { + list-style: none outside; + margin: 0; + padding: 0; +} +#nav li { + margin: 0; + padding: 0; +} +#nav a { + display: block; + text-decoration: none; + font-weight: bold; + margin: 0; + padding: 2px 1em; + border-top: 1px solid transparent; + border-bottom: 1px solid transparent; + background: transparent; + color: #2142bf; +} +#nav a:hover, #nav a:active { + text-decoration: none; + border-top: 1px solid #97a7d7; + border-bottom: 1px solid #e6ecff; + background: #b9c9f9; + color: #ff0000; +} +#nav a.current, #nav a.current:hover, #nav a.current:active { + border-top: 1px solid #e6ecff; + border-bottom: 1px solid #97a7d7; + background: #c5d5ff; + color: #2142bf; +} +#nav ul ul a { + padding: 0 1em 0 1.7em; +} +#nav ul ul ul a { + padding: 0 0.5em 0 2.4em; +} +#main { + line-height: 1.5; + text-align: left; + margin: 0; + padding: 1em 2em; + border-left: solid 13em #bfcfff; + border-right: solid 3em #e6ecff; + background: #e6ecff; +} +#foot { + clear: both; + font-size: 80%; + text-align: center; + margin: 0; + padding: 0.5em; + background: #6078bf; + color: #ffffff; +} +#foot a:link, #foot a:visited { + text-decoration: underline; + background: transparent; + color: #ffffff; +} +#foot a:hover, #foot a:active { + text-decoration: underline; + background: transparent; + color: #bfcfff; +} diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/changes.html b/Build/source/libs/luajit/LuaJIT-src/doc/changes.html new file mode 100644 index 00000000000..dcf60b68ddd --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/changes.html @@ -0,0 +1,804 @@ + + + +LuaJIT Change History + + + + + + + + + +

+This is a list of changes between the released versions of LuaJIT.
+The current stable version is LuaJIT 2.0.4.
+

+

+Please check the +» Online Change History +to see whether newer versions are available. +

+ +
+

LuaJIT 2.1.0-beta1 — 2015-08-25

+

+This is a brief summary of the major changes in LuaJIT 2.1 compared to 2.0. +Please take a look at the commit history for more details. +

+
    +
  • Changes to the VM core: +
      +
    • Add low-overhead profiler (-jp).
    • +
    • Add LJ_GC64 mode: 64 bit GC object references (really: 47 bit). Interpreter-only for now.
    • +
    • Add LJ_FR2 mode: Two-slot frame info. Required by LJ_GC64 mode.
    • +
    • Add table.new() and table.clear().
    • +
    • Parse binary number literals (0bxxx).
    • +
  • +
  • Improvements to the JIT compiler: +
      +
    • Add trace stitching (disabled for now).
    • +
    • Compile various builtins: string.char(), string.reverse(), string.lower(), string.upper(), string.rep(), string.format(), table.concat(), bit.tohex(), getfenv(0), debug.getmetatable().
    • +
    • Compile string.find() for fixed string searches (no patterns).
    • +
    • Compile BC_TSETM, e.g. {1,2,3,f()}.
    • +
    • Compile string concatenations (BC_CAT).
    • +
    • Compile __concat metamethod.
    • +
    • Various minor optimizations.
    • +
  • +
  • Internal Changes: +
      +
    • Add support for embedding LuaJIT bytecode for builtins.
    • +
    • Replace various builtins with embedded bytecode.
    • +
    • Refactor string buffers and string formatting.
    • +
    • Remove obsolete non-truncating number to integer conversions.
    • +
  • +
  • Ports: +
      +
    • Add Xbox One port (LJ_GC64 mode).
    • +
    • ARM64: Add port of the interpreter (LJ_GC64 mode).
    • +
    • x64: Add separate port of the interpreter to LJ_GC64 mode.
    • +
    • x86/x64: Drop internal x87 math functions. Use libm functions.
    • +
    • x86: Remove x87 support from interpreter. SSE2 is mandatory now.
    • +
    • PPC/e500: Drop support for this architecture.
    • +
  • +
  • FFI library: +
      +
    • FFI: Add 64 bit bitwise operations.
    • +
    • FFI: Compile VLA/VLS and large cdata allocations with default initialization.
    • +
    • FFI: Compile conversions from functions to function pointers.
    • +
    • FFI: Compile lightuserdata to void * conversion.
    • +
    • FFI: Compile ffi.gc(cdata, nil), too.
    • +
    • FFI: Add ffi.typeinfo().
    • +
  • +
+
+ +
+

LuaJIT 2.0.4 — 2015-05-14

+
    +
  • Fix stack check in narrowing optimization.
  • +
  • Fix Lua/C API typecheck error for special indexes.
  • +
  • Fix string to number conversion.
  • +
  • Fix lexer error for chunks without tokens.
  • +
  • Don't compile IR_RETF after CALLT to ff with side effects.
  • +
  • Fix BC_UCLO/BC_JMP join optimization in Lua parser.
  • +
  • Fix corner case in string to number conversion.
  • +
  • Gracefully handle lua_error() for a suspended coroutine.
  • +
  • Avoid error messages when building with Clang.
  • +
  • Fix snapshot #0 handling for traces with a stack check on entry.
  • +
  • Fix fused constant loads under high register pressure.
  • +
  • Invalidate backpropagation cache after DCE.
  • +
  • Fix ABC elimination.
  • +
  • Fix debug info for main chunk of stripped bytecode.
  • +
  • Fix FOLD rule for string.sub(s, ...) == k.
  • +
  • Fix FOLD rule for STRREF of SNEW.
  • +
  • Fix frame traversal while searching for error function.
  • +
  • Prevent GC estimate miscalculation due to buffer growth.
  • +
  • Prevent adding side traces for stack checks.
  • +
  • Fix top slot calculation for snapshots with continuations.
  • +
  • Fix check for reuse of SCEV results in FORL.
  • +
  • Add PS Vita port.
  • +
  • Fix compatibility issues with Illumos.
  • +
  • Fix DragonFly build (unsupported).
  • +
  • OpenBSD/x86: Better executable memory allocation for W^X mode.
  • +
  • x86: Fix argument checks for ipairs() iterator.
  • +
  • x86: lj_math_random_step() clobbers XMM regs on OSX Clang.
  • +
  • x86: Fix code generation for unused result of math.random().
  • +
  • x64: Allow building with LUAJIT_USE_SYSMALLOC and LUAJIT_USE_VALGRIND.
  • +
  • x86/x64: Fix argument check for bit shifts.
  • +
  • x86/x64: Fix code generation for fused test/arith ops.
  • +
  • ARM: Fix write barrier check in BC_USETS.
  • +
  • PPC: Fix red zone overflow in machine code generation.
  • +
  • PPC: Don't use mcrxr on PPE.
  • +
  • Various archs: Fix excess stack growth in interpreter.
  • +
  • FFI: Fix FOLD rule for TOBIT + CONV num.u32.
  • +
  • FFI: Prevent DSE across ffi.string().
  • +
  • FFI: No meta fallback when indexing pointer to incomplete struct.
  • +
  • FFI: Fix initialization of unions of subtypes.
  • +
  • FFI: Fix cdata vs. non-cdata arithmetic and comparisons.
  • +
  • FFI: Fix __index/__newindex metamethod resolution for ctypes.
  • +
  • FFI: Fix compilation of reference field access.
  • +
  • FFI: Fix frame traversal for backtraces with FFI callbacks.
  • +
  • FFI: Fix recording of indexing a struct pointer ctype object itself.
  • +
  • FFI: Allow non-scalar cdata to be compared for equality by address.
  • +
  • FFI: Fix pseudo type conversions for type punning.
  • +
+ +

LuaJIT 2.0.3 — 2014-03-12

+
    +
  • Add PS4 port.
  • +
  • Add support for multilib distro builds.
  • +
  • Fix OSX build.
  • +
  • Fix MinGW build.
  • +
  • Fix Xbox 360 build.
  • +
  • Improve ULOAD forwarding for open upvalues.
  • +
  • Fix GC steps threshold handling when called by JIT-compiled code.
  • +
  • Fix argument checks for math.deg() and math.rad().
  • +
  • Fix jit.flush(func|true).
  • +
  • Respect jit.off(func) when returning to a function, too.
  • +
  • Fix compilation of string.byte(s, nil, n).
  • +
  • Fix line number for relocated bytecode after closure fixup.
  • +
  • Fix frame traversal for backtraces.
  • +
  • Fix ABC elimination.
  • +
  • Fix handling of redundant PHIs.
  • +
  • Fix snapshot restore for exit to function header.
  • +
  • Fix type punning alias analysis for constified pointers.
  • +
  • Fix call unroll checks in the presence of metamethod frames.
  • +
  • Fix initial maxslot for down-recursive traces.
  • +
  • Prevent BASE register coalescing if parent uses IR_RETF.
  • +
  • Don't purge modified function from stack slots in BC_RET.
  • +
  • Fix recording of BC_VARG.
  • +
  • Don't access dangling reference to reallocated IR.
  • +
  • Fix frame depth display for bytecode dump in -jdump.
  • +
  • ARM: Fix register allocation when rematerializing FPRs.
  • +
  • x64: Fix store to upvalue for lightuserdata values.
  • +
  • FFI: Add missing GC steps for callback argument conversions.
  • +
  • FFI: Properly unload loaded DLLs.
  • +
  • FFI: Fix argument checks for ffi.string().
  • +
  • FFI/x64: Fix passing of vector arguments to calls.
  • +
  • FFI: Rehash finalizer table after GC cycle, if needed.
  • +
  • FFI: Fix cts->L for cdata unsinking in snapshot restore.
  • +
+ +

LuaJIT 2.0.2 — 2013-06-03

+
    +
  • Fix memory access check for fast string interning.
  • +
  • Fix MSVC intrinsics for older versions.
  • +
  • Add missing GC steps for io.* functions.
  • +
  • Fix spurious red zone overflows in machine code generation.
  • +
  • Fix jump-range constrained mcode allocation.
  • +
  • Inhibit DSE for implicit loads via calls.
  • +
  • Fix builtin string to number conversion for overflow digits.
  • +
  • Fix optional argument handling while recording builtins.
  • +
  • Fix optional argument handling in table.concat().
  • +
  • Add partial support for building with MingW64 GCC 4.8-SEH.
  • +
  • Add missing PHI barrier to string.sub(str, a, b) == kstr FOLD rule.
  • +
  • Fix compatibility issues with Illumos.
  • +
  • ARM: Fix cache flush/sync for exit stubs of JIT-compiled code.
  • +
  • MIPS: Fix cache flush/sync for JIT-compiled code jump area.
  • +
  • PPC: Add plt suffix for external calls from assembler code.
  • +
  • FFI: Fix snapshot substitution in SPLIT pass.
  • +
  • FFI/x86: Fix register allocation for 64 bit comparisons.
  • +
  • FFI: Fix tailcall in lowest frame to C function with bool result.
  • +
  • FFI: Ignore long type specifier in ffi.istype().
  • +
  • FFI: Fix calling conventions for 32 bit OSX and iOS simulator (struct returns).
  • +
  • FFI: Fix calling conventions for ARM hard-float EABI (nested structs).
  • +
  • FFI: Improve error messages for arithmetic and comparison operators.
  • +
  • FFI: Insert no-op type conversion for pointer to integer cast.
  • +
  • FFI: Fix unroll limit for ffi.fill().
  • +
  • FFI: Must sink XBAR together with XSTOREs.
  • +
  • FFI: Preserve intermediate string for const char * conversion.
  • +
+ +

LuaJIT 2.0.1 — 2013-02-19

+
    +
  • Don't clear frame for out-of-memory error.
  • +
  • Leave hook when resume catches error thrown from hook.
  • +
  • Add missing GC steps for template table creation.
  • +
  • Fix discharge order of comparisons in Lua parser.
  • +
  • Improve buffer handling for io.read().
  • +
  • OSX: Add support for Mach-O object files to -b option.
  • +
  • Fix PS3 port.
  • +
  • Fix/enable Xbox 360 port.
  • +
  • x86/x64: Always mark ref for shift count as non-weak.
  • +
  • x64: Don't fuse implicitly 32-to-64 extended operands.
  • +
  • ARM: Fix armhf call argument handling.
  • +
  • ARM: Fix code generation for integer math.min/math.max.
  • +
  • PPC/e500: Fix lj_vm_floor() for Inf/NaN.
  • +
  • FFI: Change priority of table initializer variants for structs.
  • +
  • FFI: Fix code generation for bool call result check on x86/x64.
  • +
  • FFI: Load FFI library on-demand for bytecode with cdata literals.
  • +
  • FFI: Fix handling of qualified transparent structs/unions.
  • +
+ +

LuaJIT 2.0.0 — 2012-11-08

+
    +
  • Correctness and completeness: +
      +
    • Fix Android/x86 build.
    • +
    • Fix recording of equality comparisons with __eq metamethods.
    • +
    • Fix detection of immutable upvalues.
    • +
    • Replace error with PANIC for callbacks from JIT-compiled code.
    • +
    • Fix builtin string to number conversion for INT_MIN.
    • +
    • Don't create unneeded array part for template tables.
    • +
    • Fix CONV.num.int sinking.
    • +
    • Don't propagate implicitly widened number to index metamethods.
    • +
    • ARM: Fix ordered comparisons of number vs. non-number.
    • +
    • FFI: Fix code generation for replay of sunk float fields.
    • +
    • FFI: Fix signedness of bool.
    • +
    • FFI: Fix recording of bool call result check on x86/x64.
    • +
    • FFI: Fix stack-adjustment for __thiscall callbacks.
    • +
  • +
+ +

LuaJIT 2.0.0-beta11 — 2012-10-16

+
    +
  • New features: +
      +
    • Use ARM VFP instructions, if available (build-time detection).
    • +
    • Add support for ARM hard-float EABI (armhf).
    • +
    • Add PS3 port.
    • +
    • Add many features from Lua 5.2, e.g. goto/labels. + Refer to this list.
    • +
    • FFI: Add parameterized C types.
    • +
    • FFI: Add support for copy constructors.
    • +
    • FFI: Equality comparisons never raise an error (treat as unequal instead).
    • +
    • FFI: Box all accessed or returned enums.
    • +
    • FFI: Check for __new metamethod when calling a constructor.
    • +
    • FFI: Handle __pairs/__ipairs metamethods for cdata objects.
    • +
    • FFI: Convert io.* file handle to FILE * pointer (but as a void *).
    • +
    • FFI: Detect and support type punning through unions.
    • +
    • FFI: Improve various error messages.
    • +
  • +
  • Build-system reorganization: +
      +
    • Reorganize directory layout:
      + lib/*src/jit/*
      + src/buildvm_*.dascsrc/vm_*.dasc
      + src/buildvm_*.h → removed
      + src/buildvm*src/host/*
    • +
    • Add minified Lua interpreter plus Lua BitOp (minilua) to run DynASM.
    • +
    • Change DynASM bit operations to use Lua BitOp
    • +
    • Translate only vm_*.dasc for detected target architecture.
    • +
    • Improve target detection for msvcbuild.bat.
    • +
    • Fix build issues on Cygwin and MinGW with optional MSys.
    • +
    • Handle cross-compiles with FPU/no-FPU or hard-fp/soft-fp ABI mismatch.
    • +
    • Remove some library functions for no-JIT/no-FFI builds.
    • +
    • Add uninstall target to top-level Makefile.
    • +
  • +
  • Correctness and completeness: +
      +
    • Preserve snapshot #0 PC for all traces.
    • +
    • Fix argument checks for coroutine.create().
    • +
    • Command line prints version and JIT status to stdout, not stderr.
    • +
    • Fix userdata __gc separations at Lua state close.
    • +
    • Fix TDUP to HLOAD forwarding for LJ_DUALNUM builds.
    • +
    • Fix buffer check in bytecode writer.
    • +
    • Make os.date() thread-safe.
    • +
    • Add missing declarations for MSVC intrinsics.
    • +
    • Fix dispatch table modifications for return hooks.
    • +
    • Workaround for MSVC conversion bug (doubleuint32_tint32_t).
    • +
    • Fix FOLD rule (i-j)-i => 0-j.
    • +
    • Never use DWARF unwinder on Windows.
    • +
    • Fix shrinking of direct mapped blocks in builtin allocator.
    • +
    • Limit recursion depth in string.match() et al.
    • +
    • Fix late despecialization of ITERN after loop has been entered.
    • +
    • Fix 'f' and 'L' options for debug.getinfo() and lua_getinfo().
    • +
    • Fix package.searchpath().
    • +
    • OSX: Change dylib names to be consistent with other platforms.
    • +
    • Android: Workaround for broken sprintf("%g", -0.0).
    • +
    • x86: Remove support for ancient CPUs without CMOV (before Pentium Pro).
    • +
    • x86: Fix register allocation for calls returning register pair.
    • +
    • x86/x64: Fix fusion of unsigned byte comparisons with swapped operands.
    • +
    • ARM: Fix tonumber() argument check.
    • +
    • ARM: Fix modulo operator and math.floor()/math.ceil() for inf/nan.
    • +
    • ARM: Invoke SPLIT pass for leftover IR_TOBIT.
    • +
    • ARM: Fix BASE register coalescing.
    • +
    • PPC: Fix interpreter state setup in callbacks.
    • +
    • PPC: Fix string.sub() range check.
    • +
    • MIPS: Support generation of MIPS/MIPSEL bytecode object files.
    • +
    • MIPS: Fix calls to floor()/ceil()/trunc().
    • +
    • ARM/PPC: Detect more target architecture variants.
    • +
    • ARM/PPC/e500/MIPS: Fix tailcalls from fast functions, esp. tostring().
    • +
    • ARM/PPC/MIPS: Fix rematerialization of FP constants.
    • +
    • FFI: Don't call FreeLibrary() on our own EXE/DLL.
    • +
    • FFI: Resolve metamethods for constructors, too.
    • +
    • FFI: Properly disable callbacks on iOS (would require executable memory).
    • +
    • FFI: Fix cdecl string parsing during recording.
    • +
    • FFI: Show address pointed to for tostring(ref), too.
    • +
    • FFI: Fix alignment of C call argument/return structure.
    • +
    • FFI: Initialize all fields of standard types.
    • +
    • FFI: Fix callback handling when new C types are declared in callback.
    • +
    • FFI: Fix recording of constructors for pointers.
    • +
    • FFI: Always resolve metamethods for pointers to structs.
    • +
    • FFI: Correctly propagate alignment when interning nested types.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Add allocation sinking and store sinking optimization.
    • +
    • Constify immutable upvalues.
    • +
    • Add builtin string to integer or FP number conversion. Improves cross-platform consistency and correctness.
    • +
    • Create string hash slots in template tables for non-const values, too. Avoids later table resizes.
    • +
    • Eliminate HREFK guard for template table references.
    • +
    • Add various new FOLD rules.
    • +
    • Don't use stack unwinding for lua_yield() (slow on x64).
    • +
    • ARM, PPC, MIPS: Improve XLOAD operand fusion and register hinting.
    • +
    • PPC, MIPS: Compile math.sqrt() to sqrt instruction, if available.
    • +
    • FFI: Fold KPTR + constant offset in SPLIT pass.
    • +
    • FFI: Optimize/inline ffi.copy() and ffi.fill().
    • +
    • FFI: Compile and optimize array/struct copies.
    • +
    • FFI: Compile ffi.typeof(cdata|ctype), ffi.sizeof(), ffi.alignof(), ffi.offsetof() and ffi.gc().
    • +
  • +
+ +

LuaJIT 2.0.0-beta10 — 2012-05-09

+
    +
  • New features: +
      +
    • The MIPS port of LuaJIT is complete. It requires a CPU conforming to the MIPS32 R1 architecture with hardware FPU. O32 hard-fp ABI, little-endian or big-endian.
    • +
    • Auto-detect target arch via cross-compiler. No need for +TARGET=arch anymore.
    • +
    • Make DynASM compatible with Lua 5.2.
    • +
    • From Lua 5.2: Try __tostring metamethod on non-string error messages.
    • +
  • +
  • Correctness and completeness: +
      +
    • Fix parsing of hex literals with exponents.
    • +
    • Fix bytecode dump for certain number constants.
    • +
    • Fix argument type in error message for relative arguments.
    • +
    • Fix argument error handling on Lua stacks without a frame.
    • +
    • Add missing mcode limit check in assembler backend.
    • +
    • Fix compilation on OpenBSD.
    • +
    • Avoid recursive GC steps after GC-triggered trace exit.
    • +
    • Replace <unwind.h> definitions with our own.
    • +
    • Fix OSX build issues. Bump minimum required OSX version to 10.4.
    • +
    • Fix discharge order of comparisons in Lua parser.
    • +
    • Ensure running __gc of userdata created in __gc +at state close.
    • +
    • Limit number of userdata __gc separations at state close.
    • +
    • Fix bytecode JMP slot range when optimizing +and/or with constant LHS.
    • +
    • Fix DSE of USTORE.
    • +
    • Make lua_concat() work from C hook with partial frame.
    • +
    • Add required PHIs for implicit conversions, e.g. via XREF +forwarding.
    • +
    • Add more comparison variants to Valgrind suppressions file.
    • +
    • Disable loading bytecode with an extra header (BOM or #!).
    • +
    • Fix PHI stack slot syncing.
    • +
    • ARM: Reorder type/value tests to silence Valgrind.
    • +
    • ARM: Fix register allocation for ldrd-optimized +HREFK.
    • +
    • ARM: Fix conditional branch fixup for OBAR.
    • +
    • ARM: Invoke SPLIT pass for double args in FFI call.
    • +
    • ARM: Handle all CALL* ops with double results in +SPLIT pass.
    • +
    • ARM: Fix rejoin of POW in SPLIT pass.
    • +
    • ARM: Fix compilation of math.sinh, math.cosh, +math.tanh.
    • +
    • ARM, PPC: Avoid pointless arg clearing in BC_IFUNCF.
    • +
    • PPC: Fix resume after yield from hook.
    • +
    • PPC: Fix argument checking for rawget().
    • +
    • PPC: Fix fusion of floating-point XLOAD/XSTORE.
    • +
    • PPC: Fix HREFK code generation for huge tables.
    • +
    • PPC: Use builtin D-Cache/I-Cache sync code.
    • +
  • +
  • FFI library: +
      +
    • Ignore empty statements in ffi.cdef().
    • +
    • Ignore number parsing errors while skipping definitions.
    • +
    • Don't touch frame in callbacks with tailcalls to fast functions.
    • +
    • Fix library unloading on POSIX systems.
    • +
    • Finalize cdata before userdata when closing the state.
    • +
    • Change ffi.load() library name resolution for Cygwin.
    • +
    • Fix resolving of function name redirects on Windows/x86.
    • +
    • Fix symbol resolving error messages on Windows.
    • +
    • Fix blacklisting of C functions calling callbacks.
    • +
    • Fix result type of pointer difference.
    • +
    • Use correct PC in FFI metamethod error message.
    • +
    • Allow 'typedef _Bool int BOOL;' for the Windows API.
    • +
    • Don't record test for bool result of call, if ignored.
    • +
  • +
+ +

LuaJIT 2.0.0-beta9 — 2011-12-14

+
    +
  • New features: +
      +
    • PPC port of LuaJIT is complete. Default is the dual-number port +(usually faster). Single-number port selectable via src/Makefile +at build time.
    • +
    • Add FFI callback support.
    • +
    • Extend -b to generate .c, .h or .obj/.o +files with embedded bytecode.
    • +
    • Allow loading embedded bytecode with require().
    • +
    • From Lua 5.2: Change to '\z' escape. Reject undefined escape +sequences.
    • +
  • +
  • Correctness and completeness: +
      +
    • Fix OSX 10.7 build. Fix install_name and versioning on OSX.
    • +
    • Fix iOS build.
    • +
    • Install dis_arm.lua, too.
    • +
    • Mark installed shared library as executable.
    • +
    • Add debug option to msvcbuild.bat and improve error handling.
    • +
    • Fix data-flow analysis for iterators.
    • +
    • Fix forced unwinding triggered by external unwinder.
    • +
    • Record missing for loop slot loads (return to lower frame).
    • +
    • Always use ANSI variants of Windows system functions.
    • +
    • Fix GC barrier for multi-result table constructor (TSETM).
    • +
    • Fix/add various FOLD rules.
    • +
    • Add potential PHI for number conversions due to type instability.
    • +
    • Do not eliminate PHIs only referenced from other PHIs.
    • +
    • Correctly anchor implicit number to string conversions in Lua/C API.
    • +
    • Fix various stack limit checks.
    • +
    • x64: Use thread-safe exceptions for external unwinding (GCC platforms).
    • +
    • x64: Fix result type of cdata index conversions.
    • +
    • x64: Fix math.random() and bit.bswap() code generation.
    • +
    • x64: Fix lightuserdata comparisons.
    • +
    • x64: Always extend stack-passed arguments to pointer size.
    • +
    • ARM: Many fixes to code generation backend.
    • +
    • PPC/e500: Fix dispatch for binop metamethods.
    • +
    • PPC/e500: Save/restore condition registers when entering/leaving the VM.
    • +
    • PPC/e500: Fix write barrier in stores of strings to upvalues.
    • +
  • +
  • FFI library: +
      +
    • Fix C comment parsing.
    • +
    • Fix snapshot optimization for cdata comparisons.
    • +
    • Fix recording of const/enum lookups in namespaces.
    • +
    • Fix call argument and return handling for I8/U8/I16/U16 types.
    • +
    • Fix unfused loads of float fields.
    • +
    • Fix ffi.string() recording.
    • +
    • Save GetLastError() around ffi.load() and symbol +resolving, too.
    • +
    • Improve ld script detection in ffi.load().
    • +
    • Record loads/stores to external variables in namespaces.
    • +
    • Compile calls to stdcall, fastcall and vararg functions.
    • +
    • Treat function ctypes like pointers in comparisons.
    • +
    • Resolve __call metamethod for pointers, too.
    • +
    • Record C function calls with bool return values.
    • +
    • Record ffi.errno().
    • +
    • x86: Fix number to uint32_t conversion rounding.
    • +
    • x86: Fix 64 bit arithmetic in assembler backend.
    • +
    • x64: Fix struct-by-value calling conventions.
    • +
    • ARM: Ensure invocation of SPLIT pass for float conversions.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Display trace types with -jv and -jdump.
    • +
    • Record isolated calls. But prefer recording loops over calls.
    • +
    • Specialize to prototype for non-monomorphic functions. Solves the +trace-explosion problem for closure-heavy programming styles.
    • +
    • Always generate a portable vmdef.lua. Easier for distros.
    • +
  • +
+ +

LuaJIT 2.0.0-beta8 — 2011-06-23

+
    +
  • New features: +
      +
    • Soft-float ARM port of LuaJIT is complete.
    • +
    • Add support for bytecode loading/saving and -b command line +option.
    • +
    • From Lua 5.2: __len metamethod for tables +(disabled by default).
    • +
  • +
  • Correctness and completeness: +
      +
    • ARM: Misc. fixes for interpreter.
    • +
    • x86/x64: Fix bit.* argument checking in interpreter.
    • +
    • Catch early out-of-memory in memory allocator initialization.
    • +
    • Fix data-flow analysis for paths leading to an upvalue close.
    • +
    • Fix check for missing arguments in string.format().
    • +
    • Fix Solaris/x86 build (note: not a supported target).
    • +
    • Fix recording of loops with unstable directions in side traces.
    • +
    • x86/x64: Fix fusion of comparisons with u8/u16 +XLOAD.
    • +
    • x86/x64: Fix register allocation for variable shifts.
    • +
  • +
  • FFI library: +
      +
    • Add ffi.errno(). Save errno/GetLastError() +around allocations etc.
    • +
    • Fix __gc for VLA/VLS cdata objects.
    • +
    • Fix recording of casts from 32 bit cdata pointers to integers.
    • +
    • tonumber(cdata) returns nil for non-numbers.
    • +
    • Show address pointed to for tostring(pointer).
    • +
    • Print NULL pointers as "cdata<... *>: NULL".
    • +
    • Support __tostring metamethod for pointers to structs, too.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • More tuning for loop unrolling heuristics.
    • +
    • Flatten and compress in-memory debug info (saves ~70%).
    • +
  • +
+ +

LuaJIT 2.0.0-beta7 — 2011-05-05

+
    +
  • New features: +
      +
    • ARM port of the LuaJIT interpreter is complete.
    • +
    • FFI library: Add ffi.gc(), ffi.metatype(), +ffi.istype().
    • +
    • FFI library: Resolve ld script redirection in ffi.load().
    • +
    • From Lua 5.2: package.searchpath(), fp:read("*L"), +load(string).
    • +
    • From Lua 5.2, disabled by default: empty statement, +table.unpack(), modified coroutine.running().
    • +
  • +
  • Correctness and completeness: +
      +
    • FFI library: numerous fixes.
    • +
    • Fix type mismatches in store-to-load forwarding.
    • +
    • Fix error handling within metamethods.
    • +
    • Fix table.maxn().
    • +
    • Improve accuracy of x^-k on x64.
    • +
    • Fix code generation for Intel Atom in x64 mode.
    • +
    • Fix narrowing of POW.
    • +
    • Fix recording of retried fast functions.
    • +
    • Fix code generation for bit.bnot() and multiplies.
    • +
    • Fix error location within cpcall frames.
    • +
    • Add workaround for old libgcc unwind bug.
    • +
    • Fix lua_yield() and getmetatable(lightuserdata) on x64.
    • +
    • Misc. fixes for PPC/e500 interpreter.
    • +
    • Fix stack slot updates for down-recursion.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Add dual-number mode (int/double) for the VM. Enabled for ARM.
    • +
    • Improve narrowing of arithmetic operators and for loops.
    • +
    • Tune loop unrolling heuristics and increase trace recorder limits.
    • +
    • Eliminate dead slots in snapshots using bytecode data-flow analysis.
    • +
    • Avoid phantom stores to proxy tables.
    • +
    • Optimize lookups in empty proxy tables.
    • +
    • Improve bytecode optimization of and/or operators.
    • +
  • +
+ +

LuaJIT 2.0.0-beta6 — 2011-02-11

+
    +
  • New features: +
      +
    • PowerPC/e500v2 port of the LuaJIT interpreter is complete.
    • +
    • Various minor features from Lua 5.2: Hex escapes in literals, +'\*' escape, reversible string.format("%q",s), +"%g" pattern, table.sort checks callbacks, +os.exit(status|true|false[,close]).
    • +
    • Lua 5.2 __pairs and __ipairs metamethods +(disabled by default).
    • +
    • Initial release of the FFI library.
    • +
  • +
  • Correctness and completeness: +
      +
    • Fix string.format() for non-finite numbers.
    • +
    • Fix memory leak when compiled to use the built-in allocator.
    • +
    • x86/x64: Fix unnecessary resize in TSETM bytecode.
    • +
    • Fix various GC issues with traces and jit.flush().
    • +
    • x64: Fix fusion of indexes for array references.
    • +
    • x86/x64: Fix stack overflow handling for coroutine results.
    • +
    • Enable low-2GB memory allocation on FreeBSD/x64.
    • +
    • Fix collectgarbage("count") result if more than 2GB is in use.
    • +
    • Fix parsing of hex floats.
    • +
    • x86/x64: Fix loop branch inversion with trailing +HREF+NE/EQ.
    • +
    • Add jit.os string.
    • +
    • coroutine.create() permits running C functions, too.
    • +
    • Fix OSX build to work with newer ld64 versions.
    • +
    • Fix bytecode optimization of and/or operators.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Emit specialized bytecode for pairs()/next().
    • +
    • Improve bytecode coalescing of nil constants.
    • +
    • Compile calls to vararg functions.
    • +
    • Compile select().
    • +
    • Improve alias analysis, esp. for loads from allocations.
    • +
    • Tuning of various compiler heuristics.
    • +
    • Refactor and extend IR conversion instructions.
    • +
    • x86/x64: Various backend enhancements related to the FFI.
    • +
    • Add SPLIT pass to split 64 bit IR instructions for 32 bit CPUs.
    • +
  • +
+ +

LuaJIT 2.0.0-beta5 — 2010-08-24

+
    +
  • Correctness and completeness: +
      +
    • Fix trace exit dispatch to function headers.
    • +
    • Fix Windows and OSX builds with LUAJIT_DISABLE_JIT.
    • +
    • Reorganize and fix placement of generated machine code on x64.
    • +
    • Fix TNEW in x64 interpreter.
    • +
    • Do not eliminate PHIs for values only referenced from side exits.
    • +
    • OS-independent canonicalization of strings for non-finite numbers.
    • +
    • Fix string.char() range check on x64.
    • +
    • Fix tostring() resolving within print().
    • +
    • Fix error handling for next().
    • +
    • Fix passing of constant arguments to external calls on x64.
    • +
    • Fix interpreter argument check for two-argument SSE math functions.
    • +
    • Fix C frame chain corruption caused by lua_cpcall().
    • +
    • Fix return from pcall() within active hook.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Replace on-trace GC frame syncing with interpreter exit.
    • +
    • Improve hash lookup specialization by not removing dead keys during GC.
    • +
    • Turn traces into true GC objects.
    • +
    • Avoid starting a GC cycle immediately after library init.
    • +
    • Add weak guards to improve dead-code elimination.
    • +
    • Speed up string interning.
    • +
  • +
+ +

LuaJIT 2.0.0-beta4 — 2010-03-28

+
    +
  • Correctness and completeness: +
      +
    • Fix precondition for on-trace creation of table keys.
    • +
    • Fix {f()} on x64 when table is resized.
    • +
    • Fix folding of ordered comparisons with same references.
    • +
    • Fix snapshot restores for multi-result bytecodes.
    • +
    • Fix potential hang when recording bytecode with nested closures.
    • +
    • Fix recording of getmetatable(), tonumber() and bad argument types.
    • +
    • Fix SLOAD fusion across returns to lower frames.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Add array bounds check elimination. -Oabc is enabled by default.
    • +
    • More tuning for x64, e.g. smaller table objects.
    • +
  • +
+ +

LuaJIT 2.0.0-beta3 — 2010-03-07

+
    +
  • LuaJIT x64 port: +
      +
    • Port integrated memory allocator to Linux/x64, Windows/x64 and OSX/x64.
    • +
    • Port interpreter and JIT compiler to x64.
    • +
    • Port DynASM to x64.
    • +
    • Many 32/64 bit cleanups in the VM.
    • +
    • Allow building the interpreter with either x87 or SSE2 arithmetic.
    • +
    • Add external unwinding and C++ exception interop (default on x64).
    • +
  • +
  • Correctness and completeness: +
      +
    • Fix constructor bytecode generation for certain conditional values.
    • +
    • Fix some cases of ordered string comparisons.
    • +
    • Fix lua_tocfunction().
    • +
    • Fix cutoff register in JMP bytecode for some conditional expressions.
    • +
    • Fix PHI marking algorithm for references from variant slots.
    • +
    • Fix package.cpath for non-default PREFIX.
    • +
    • Fix DWARF2 frame unwind information for interpreter on OSX.
    • +
    • Drive the GC forward on string allocations in the parser.
    • +
    • Implement call/return hooks (zero-cost if disabled).
    • +
    • Implement yield from C hooks.
    • +
    • Disable JIT compiler on older non-SSE2 CPUs instead of aborting.
    • +
  • +
  • Structural and performance enhancements: +
      +
    • Compile recursive code (tail-, up- and down-recursion).
    • +
    • Improve heuristics for bytecode penalties and blacklisting.
    • +
    • Split CALL/FUNC recording and clean up fast function call semantics.
    • +
    • Major redesign of internal function call handling.
    • +
    • Improve FOR loop const specialization and integerness checks.
    • +
    • Switch to pre-initialized stacks. Avoid frame-clearing.
    • +
    • Colocation of prototypes and related data: bytecode, constants, debug info.
    • +
    • Cleanup parser and streamline bytecode generation.
    • +
    • Add support for weak IR references to register allocator.
    • +
    • Switch to compressed, extensible snapshots.
    • +
    • Compile returns to frames below the start frame.
    • +
    • Improve alias analysis of upvalues using a disambiguation hash value.
    • +
    • Compile floor/ceil/trunc to SSE2 helper calls or SSE4.1 instructions.
    • +
    • Add generic C call handling to IR and backend.
    • +
    • Improve KNUM fuse vs. load heuristics.
    • +
    • Compile various io.*() functions.
    • +
    • Compile math.sinh(), math.cosh(), math.tanh() +and math.random().
    • +
  • +
+ +

LuaJIT 2.0.0-beta2 — 2009-11-09

+
    +
  • Reorganize build system. Build static+shared library on POSIX.
  • +
  • Allow C++ exception conversion on all platforms +using a wrapper function.
  • +
  • Automatically catch C++ exceptions and rethrow Lua error +(DWARF2 only).
  • +
  • Check for the correct x87 FPU precision at strategic points.
  • +
  • Always use wrappers for libm functions.
  • +
  • Resurrect metamethod name strings before copying them.
  • +
  • Mark current trace, even if compiler is idle.
  • +
  • Ensure FILE metatable is created only once.
  • +
  • Fix type comparisons when different integer types are involved.
  • +
  • Fix getmetatable() recording.
  • +
  • Fix TDUP with dead keys in template table.
  • +
  • jit.flush(tr) returns status. +Prevent manual flush of a trace that's still linked.
  • +
  • Improve register allocation heuristics for invariant references.
  • +
  • Compile the push/pop variants of table.insert() and +table.remove().
  • +
  • Compatibility with MSVC link /debug.
  • +
  • Fix lua_iscfunction().
  • +
  • Fix math.random() when compiled with -fpic (OSX).
  • +
  • Fix table.maxn().
  • +
  • Bump MACOSX_DEPLOYMENT_TARGET to 10.4
  • +
  • luaL_check*() and luaL_opt*() now support +negative arguments, too.
    +This matches the behavior of Lua 5.1, but not the specification.
  • +
+ +

LuaJIT 2.0.0-beta1 — 2009-10-31

+
    +
  • This is the first public release of LuaJIT 2.0.
  • +
  • The whole VM has been rewritten from the ground up, so there's +no point in listing differences over earlier versions.
  • +
+
+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/contact.html b/Build/source/libs/luajit/LuaJIT-src/doc/contact.html new file mode 100644 index 00000000000..4bba550f9cd --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/contact.html @@ -0,0 +1,104 @@ + + + +Contact + + + + + + + + +

+Please send general questions to the +» LuaJIT mailing list. +You can also send any questions you have directly to me: +


Copyright

+

+All documentation is +Copyright © 2005-2015 Mike Pall. +

+ + +
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_c_api.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_c_api.html new file mode 100644 index 00000000000..650032c29b1 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_c_api.html @@ -0,0 +1,189 @@ + + + +Lua/C API Extensions + + + + + + + + +

+LuaJIT adds some extensions to the standard Lua/C API. The LuaJIT include +directory must be in the compiler search path (-Ipath) +to be able to include the required header for C code: +

+
+#include "luajit.h"
+
+

+Or for C++ code: +

+
+#include "lua.hpp"
+
+ +

luaJIT_setmode(L, idx, mode) +— Control VM

+

+This is a C API extension to allow control of the VM from C code. The full prototype of luaJIT_setmode is:

+
+LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
+
+

+The returned status is either success (1) or failure (0). +The second argument is either 0 or a stack index (similar to the +other Lua/C API functions). +

+

+The third argument specifies the mode, which is 'or'ed with a flag. The flag can be LUAJIT_MODE_ON to turn a feature on, LUAJIT_MODE_OFF to turn a feature off, or LUAJIT_MODE_FLUSH to flush cached code.

+

+The following modes are defined: +

+ +

luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|flag)

+

+Turn the whole JIT compiler on or off or flush the whole cache of compiled code. +

+ +
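For Lua code (as opposed to host applications embedding LuaJIT), the standard jit.* library documented in ext_jit.html provides the same engine-wide control; a minimal sketch:

local jit = require("jit")

jit.off()            -- disable the JIT compiler, like LUAJIT_MODE_ENGINE|LUAJIT_MODE_OFF
jit.on()             -- re-enable it, like LUAJIT_MODE_ENGINE|LUAJIT_MODE_ON
jit.flush()          -- flush all compiled code, like LUAJIT_MODE_ENGINE|LUAJIT_MODE_FLUSH
print(jit.status())  -- running status plus detected CPU features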

luaJIT_setmode(L, idx, LUAJIT_MODE_FUNC|flag)
+luaJIT_setmode(L, idx, LUAJIT_MODE_ALLFUNC|flag)
+luaJIT_setmode(L, idx, LUAJIT_MODE_ALLSUBFUNC|flag)

+

+This sets the mode for the function at the stack index idx or +the parent of the calling function (idx = 0). It either +enables JIT compilation for a function, disables it and flushes any +already compiled code or only flushes already compiled code. This +applies recursively to all sub-functions of the function with +LUAJIT_MODE_ALLFUNC or only to the sub-functions with +LUAJIT_MODE_ALLSUBFUNC. +

+ +

luaJIT_setmode(L, trace,
+  LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH)

+

+Flushes the specified root trace and all of its side traces from the cache. +The code for the trace will be retained as long as there are any other +traces which link to it. +

+ +

luaJIT_setmode(L, idx, LUAJIT_MODE_WRAPCFUNC|flag)

+

+This mode defines a wrapper function for calls to C functions. If +called with LUAJIT_MODE_ON, the stack index at idx +must be a lightuserdata object holding a pointer to the wrapper +function. From now on all C functions are called through the wrapper +function. If called with LUAJIT_MODE_OFF this mode is turned +off and all C functions are directly called. +

+

+The wrapper function can be used for debugging purposes or to catch +and convert foreign exceptions. But please read the section on +C++ exception interoperability +first. Recommended usage can be seen in this C++ code excerpt: +

+
+#include <exception>
+#include "lua.hpp"
+
+// Catch C++ exceptions and convert them to Lua error messages.
+// Customize as needed for your own exception classes.
+static int wrap_exceptions(lua_State *L, lua_CFunction f)
+{
+  try {
+    return f(L);  // Call wrapped function and return result.
+  } catch (const char *s) {  // Catch and convert exceptions.
+    lua_pushstring(L, s);
+  } catch (std::exception& e) {
+    lua_pushstring(L, e.what());
+  } catch (...) {
+    lua_pushliteral(L, "caught (...)");
+  }
+  return lua_error(L);  // Rethrow as a Lua error.
+}
+
+static int myinit(lua_State *L)
+{
+  ...
+  // Define wrapper function and enable it.
+  lua_pushlightuserdata(L, (void *)wrap_exceptions);
+  luaJIT_setmode(L, -1, LUAJIT_MODE_WRAPCFUNC|LUAJIT_MODE_ON);
+  lua_pop(L, 1);
+  ...
+}
+
+

+Note that you can only define a single global wrapper function, +so be careful when using this mechanism from multiple C++ modules. +Also note that this mechanism is not without overhead. +

+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi.html new file mode 100644 index 00000000000..652df6cbaf0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi.html @@ -0,0 +1,332 @@ + + + +FFI Library + + + + + + + + +

+ +The FFI library allows calling external C functions and +using C data structures from pure Lua code. + +

+

+ +The FFI library largely obviates the need to write tedious manual +Lua/C bindings in C. No need to learn a separate binding language +— it parses plain C declarations! These can be +cut-n-pasted from C header files or reference manuals. It's up to +the task of binding large libraries without the need for dealing with +fragile binding generators. + +

+

+The FFI library is tightly integrated into LuaJIT (it's not available +as a separate module). The code generated by the JIT-compiler for +accesses to C data structures from Lua code is on par with the +code a C compiler would generate. Calls to C functions can +be inlined in JIT-compiled code, unlike calls to functions bound via +the classic Lua/C API. +

+

+This page gives a short introduction to the usage of the FFI library. +Please use the FFI sub-topics in the navigation bar to learn more. +

+ +

Motivating Example: Calling External C Functions

+

+It's really easy to call an external C library function: +

+
+local ffi = require("ffi")             -- ①
+ffi.cdef[[
+int printf(const char *fmt, ...);      /* ② */
+]]
+ffi.C.printf("Hello %s!", "world")     -- ③
+
+

+So, let's pick that apart: +

+

+ Load the FFI library. +

+

+ Add a C declaration +for the function. The part inside the double-brackets (in green) is +just standard C syntax. +

+

+ Call the named +C function — Yes, it's that simple! +

+

+Actually, what goes on behind the scenes is far from simple: the call in ③ makes use of the standard C library namespace ffi.C. Indexing this namespace with a symbol name ("printf") automatically binds it to the standard C library. The result is a special kind of object which, when called, runs the printf function. The arguments passed to this function are automatically converted from Lua objects to the corresponding C types.

+

+Ok, so maybe the use of printf() wasn't such a spectacular +example. You could have done that with io.write() and +string.format(), too. But you get the idea ... +

+

+So here's something to pop up a message box on Windows: +

+
+local ffi = require("ffi")
+ffi.cdef[[
+int MessageBoxA(void *w, const char *txt, const char *cap, int type);
+]]
+ffi.C.MessageBoxA(nil, "Hello world!", "Test", 0)
+
+

+Bing! Again, that was far too easy, no? +

+

+Compare this with the effort required to bind that function using the +classic Lua/C API: create an extra C file, add a C function +that retrieves and checks the argument types passed from Lua and calls +the actual C function, add a list of module functions and their +names, add a luaopen_* function and register all module +functions, compile and link it into a shared library (DLL), move it to +the proper path, add Lua code that loads the module aaaand ... finally +call the binding function. Phew! +

+ +

Motivating Example: Using C Data Structures

+

+The FFI library allows you to create and access C data +structures. Of course the main use for this is for interfacing with +C functions. But they can be used stand-alone, too. +

+

+Lua is built upon high-level data types. They are flexible, extensible +and dynamic. That's why we all love Lua so much. Alas, this can be +inefficient for certain tasks, where you'd really want a low-level +data type. E.g. a large array of a fixed structure needs to be +implemented with a big table holding lots of tiny tables. This imposes +both a substantial memory overhead as well as a performance overhead. +

+

+Here's a sketch of a library that operates on color images plus a +simple benchmark. First, the plain Lua version: +

+
+local floor = math.floor
+
+local function image_ramp_green(n)
+  local img = {}
+  local f = 255/(n-1)
+  for i=1,n do
+    img[i] = { red = 0, green = floor((i-1)*f), blue = 0, alpha = 255 }
+  end
+  return img
+end
+
+local function image_to_grey(img, n)
+  for i=1,n do
+    local y = floor(0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue)
+    img[i].red = y; img[i].green = y; img[i].blue = y
+  end
+end
+
+local N = 400*400
+local img = image_ramp_green(N)
+for i=1,1000 do
+  image_to_grey(img, N)
+end
+
+

+This creates a table with 160,000 pixels, each of which is a table holding four number values in the range of 0-255. First an image with a green ramp is created (1D for simplicity), then the image is converted to greyscale 1000 times. Yes, that's silly, but I was in need of a simple example ...

+

+And here's the FFI version. The modified parts have been marked in +bold: +

+
+local ffi = require("ffi")                             -- ①
+ffi.cdef[[
+typedef struct { uint8_t red, green, blue, alpha; } rgba_pixel;
+]]
+
+local function image_ramp_green(n)
+  local img = ffi.new("rgba_pixel[?]", n)              -- ②
+  local f = 255/(n-1)
+  for i=0,n-1 do                                       -- ③
+    img[i].green = i*f                                 -- ④
+    img[i].alpha = 255
+  end
+  return img
+end
+
+local function image_to_grey(img, n)
+  for i=0,n-1 do                                       -- ③
+    local y = 0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue  -- ⑤
+    img[i].red = y; img[i].green = y; img[i].blue = y
+  end
+end
+
+local N = 400*400
+local img = image_ramp_green(N)
+for i=1,1000 do
+  image_to_grey(img, N)
+end
+
+

+Ok, so that wasn't too difficult: +

+

+ First, load the FFI +library and declare the low-level data type. Here we choose a +struct which holds four byte fields, one for each component +of a 4x8 bit RGBA pixel. +

+

+ Creating the data +structure with ffi.new() is straightforward — the +'?' is a placeholder for the number of elements of a +variable-length array. +

+

+ C arrays are +zero-based, so the indexes have to run from 0 to +n-1. One might want to allocate one more element instead to +simplify converting legacy code. +

+

+ Since ffi.new() +zero-fills the array by default, we only need to set the green and the +alpha fields. +

+

+ The calls to +math.floor() can be omitted here, because floating-point +numbers are already truncated towards zero when converting them to an +integer. This happens implicitly when the number is stored in the +fields of each pixel. +

+

+Now let's have a look at the impact of the changes: first, memory +consumption for the image is down from 22 Megabytes to +640 Kilobytes (400*400*4 bytes). That's a factor of 35x less! So, +yes, tables do have a noticeable overhead. BTW: The original program +would consume 40 Megabytes in plain Lua (on x64). +

+

+Next, performance: the pure Lua version runs in 9.57 seconds (52.9 +seconds with the Lua interpreter) and the FFI version runs in 0.48 +seconds on my machine (YMMV). That's a factor of 20x faster (110x +faster than the Lua interpreter). +

+

+The avid reader may notice that converting the pure Lua version over +to use array indexes for the colors ([1] instead of +.red, [2] instead of .green etc.) ought to +be more compact and faster. This is certainly true (by a factor of +~1.7x). Switching to a struct-of-arrays would help, too. +

+

+However the resulting code would be less idiomatic and rather +error-prone. And it still doesn't get even close to the performance of +the FFI version of the code. Also, high-level data structures cannot +be easily passed to other C functions, especially I/O functions, +without undue conversion penalties. +

+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_api.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_api.html new file mode 100644 index 00000000000..b27e4675f96 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_api.html @@ -0,0 +1,570 @@ + + + +ffi.* API Functions + + + + + + + + + +

+This page describes the API functions provided by the FFI library in +detail. It's recommended to read through the +introduction and the +FFI tutorial first. +

+ +

Glossary

+
    +
  • cdecl — An abstract C type declaration (a Lua +string).
  • +
  • ctype — A C type object. This is a special kind of +cdata returned by ffi.typeof(). It serves as a +cdata constructor when called.
  • +
  • cdata — A C data object. It holds a value of the +corresponding ctype.
  • +
  • ct — A C type specification which can be used for +most of the API functions. Either a cdecl, a ctype or a +cdata serving as a template type.
  • +
  • cb — A callback object. This is a C data object +holding a special function pointer. Calling this function from +C code runs an associated Lua function.
  • +
  • VLA — A variable-length array is declared with a +? instead of the number of elements, e.g. "int[?]". +The number of elements (nelem) must be given when it's +created.
  • +
  • VLS — A variable-length struct is a struct C +type where the last element is a VLA. The same rules for +declaration and creation apply.
  • +
+ +
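To tie these terms together, a minimal sketch (the variable names are arbitrary):

local ffi = require("ffi")
local cdecl = "struct { int x, y; }"       -- a cdecl: just a Lua string
local ctype = ffi.typeof(cdecl)            -- a ctype: constructor for that C type
local cdata = ctype(1, 2)                  -- a cdata: holds a value of the ctype
local vla   = ffi.new("int[?]", 4)         -- a VLA: nelem given at creation time
print(ffi.sizeof(cdata), cdata.x, vla[3])  -- a ct argument may be a cdecl, ctype or cdata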

Declaring and Accessing External Symbols

+

+External symbols must be declared first and can then be accessed by +indexing a C library +namespace, which automatically binds the symbol to a specific +library. +

+ +

ffi.cdef(def)

+

+Adds multiple C declarations for types or external symbols (named +variables or functions). def must be a Lua string. It's +recommended to use the syntactic sugar for string arguments as +follows: +

+
+ffi.cdef[[
+typedef struct foo { int a, b; } foo_t;  // Declare a struct and typedef.
+int dofoo(foo_t *f, int n);  /* Declare an external C function. */
+]]
+
+

+The contents of the string (the part in green above) must be a +sequence of +C declarations, +separated by semicolons. The trailing semicolon for a single +declaration may be omitted. +

+

+Please note that external symbols are only declared, but they +are not bound to any specific address, yet. Binding is +achieved with C library namespaces (see below). +

+

+C declarations are not passed through a C pre-processor, +yet. No pre-processor tokens are allowed, except for +#pragma pack. Replace #define in existing +C header files with enum, static const +or typedef and/or pass the files through an external +C pre-processor (once). Be careful not to include unneeded or +redundant declarations from unrelated header files. +

+ +

ffi.C

+

+This is the default C library namespace — note the +uppercase 'C'. It binds to the default set of symbols or +libraries on the target system. These are more or less the same as a +C compiler would offer by default, without specifying extra link +libraries. +

+

+On POSIX systems, this binds to symbols in the default or global +namespace. This includes all exported symbols from the executable and +any libraries loaded into the global namespace. This includes at least +libc, libm, libdl (on Linux), +libgcc (if compiled with GCC), as well as any exported +symbols from the Lua/C API provided by LuaJIT itself. +

+

+On Windows systems, this binds to symbols exported from the +*.exe, the lua51.dll (i.e. the Lua/C API +provided by LuaJIT itself), the C runtime library LuaJIT was linked +with (msvcrt*.dll), kernel32.dll, +user32.dll and gdi32.dll. +

+ +
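For illustration, a minimal sketch that declares and calls a function from the default symbol set through ffi.C (sqrt is exported by the standard C runtime on common platforms):

local ffi = require("ffi")
ffi.cdef[[
double sqrt(double x);   /* provided by libm/libc, part of the default namespace */
]]
print(ffi.C.sqrt(2))     --> 1.4142135623731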

clib = ffi.load(name [,global])

+

+This loads the dynamic library given by name and returns +a new C library namespace which binds to its symbols. On POSIX +systems, if global is true, the library symbols are +loaded into the global namespace, too. +

+

+If name is a path, the library is loaded from this path. +Otherwise name is canonicalized in a system-dependent way and +searched in the default search path for dynamic libraries: +

+

+On POSIX systems, if the name contains no dot, the extension +.so is appended. Also, the lib prefix is prepended +if necessary. So ffi.load("z") looks for "libz.so" +in the default shared library search path. +

+

+On Windows systems, if the name contains no dot, the extension +.dll is appended. So ffi.load("ws2_32") looks for +"ws2_32.dll" in the default DLL search path. +
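    +
    +For example, the following sketch loads zlib under its platform-dependent
    +name (assuming zlib1.dll is available on Windows; see the FFI tutorial
    +for the complete example): +
    +
    +local ffi = require("ffi")
    +local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")
    +ffi.cdef[[
    +unsigned long compressBound(unsigned long sourceLen);
    +]]
    +print(tonumber(zlib.compressBound(100)))
    +
    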

+ +

Creating cdata Objects

+

+The following API functions create cdata objects (type() +returns "cdata"). All created cdata objects are +garbage collected. +

+ +

cdata = ffi.new(ct [,nelem] [,init...])
+cdata = ctype([nelem,] [init...])

+

+Creates a cdata object for the given ct. VLA/VLS types +require the nelem argument. The second syntax uses a ctype as +a constructor and is otherwise fully equivalent. +

+

+The cdata object is initialized according to the +rules for initializers, +using the optional init arguments. Excess initializers cause +an error. +

+

+Performance notice: if you want to create many objects of one kind, +parse the cdecl only once and get its ctype with +ffi.typeof(). Then use the ctype as a constructor repeatedly. +

+

+Please note that an anonymous struct declaration implicitly +creates a new and distinguished ctype every time you use it for +ffi.new(). This is probably not what you want, +especially if you create more than one cdata object. Different anonymous +structs are not considered assignment-compatible by the +C standard, even though they may have the same fields! Also, they +are considered different types by the JIT-compiler, which may cause an +excessive number of traces. It's strongly suggested to either declare +a named struct or typedef with ffi.cdef() +or to create a single ctype object for an anonymous struct +with ffi.typeof(). +
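    +
    +A minimal sketch of the recommended pattern (point_t is just an example
    +name): declare a named type once, fetch its ctype with ffi.typeof()
    +and then use that ctype as a constructor: +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +typedef struct { double x, y; } point_t;
    +]]
    +local point = ffi.typeof("point_t")  -- parse the cdecl only once
    +local a = point(1, 2)                -- same as ffi.new("point_t", 1, 2)
    +local b = point()                    -- zero-filled
    +print(a.x, a.y, b.x)                 --> 1  2  0
    +
    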

+ +

ctype = ffi.typeof(ct)

+

+Creates a ctype object for the given ct. +

+

+This function is especially useful to parse a cdecl only once and then +use the resulting ctype object as a constructor. +

+ +

cdata = ffi.cast(ct, init)

+

+Creates a scalar cdata object for the given ct. The cdata +object is initialized with init using the "cast" variant of +the C type conversion +rules. +

+

    +This function is mainly useful to override the pointer compatibility +checks or to convert pointers to addresses or vice versa. +
    
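    +
    +A short sketch of casting between pointers and addresses (the concrete
    +address is of course target-dependent): +
    +
    +local ffi = require("ffi")
    +local buf = ffi.new("int[4]")
    +local p = ffi.cast("void *", buf)      -- override pointer compatibility
    +local addr = ffi.cast("uintptr_t", p)  -- pointer to integer address
    +local q = ffi.cast("int *", addr)      -- and back to a typed pointer
    +print(ffi.istype("int *", q))          --> true
    +
    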

+ +

ctype = ffi.metatype(ct, metatable)

+

+Creates a ctype object for the given ct and associates it with +a metatable. Only struct/union types, complex numbers +and vectors are allowed. Other types may be wrapped in a +struct, if needed. +

+

    +The association with a metatable is permanent and cannot be changed +afterwards. Neither the contents of the metatable nor the +contents of an __index table (if any) may be modified +afterwards. The associated metatable automatically applies to all uses +of this type, no matter how the objects are created or where they +originate from. Note that pre-defined operations on types have +precedence (e.g. declared field names cannot be overridden). +
    

+

+All standard Lua metamethods are implemented. These are called directly, +without shortcuts and on any mix of types. For binary operations, the +left operand is checked first for a valid ctype metamethod. The +__gc metamethod only applies to struct/union +types and performs an implicit ffi.gc() +call during creation of an instance. +
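    +
    +A sketch of attaching an operator and a method to a struct type
    +(vec2_t and its length method are made-up names): +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +typedef struct { double x, y; } vec2_t;
    +]]
    +local vec2
    +vec2 = ffi.metatype("vec2_t", {
    +  __add = function(a, b) return vec2(a.x + b.x, a.y + b.y) end,
    +  __index = {
    +    length = function(v) return math.sqrt(v.x*v.x + v.y*v.y) end,
    +  },
    +})
    +local v = vec2(3, 4) + vec2(1, -1)
    +print(v.x, v.y, v:length())  --> 4  3  5
    +
    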

+ +

cdata = ffi.gc(cdata, finalizer)

+

+Associates a finalizer with a pointer or aggregate cdata object. The +cdata object is returned unchanged. +

+

+This function allows safe integration of unmanaged resources into the +automatic memory management of the LuaJIT garbage collector. Typical +usage: +

+
+local p = ffi.gc(ffi.C.malloc(n), ffi.C.free)
+...
+p = nil -- Last reference to p is gone.
+-- GC will eventually run finalizer: ffi.C.free(p)
+
+

+A cdata finalizer works like the __gc metamethod for userdata +objects: when the last reference to a cdata object is gone, the +associated finalizer is called with the cdata object as an argument. The +finalizer can be a Lua function or a cdata function or cdata function +pointer. An existing finalizer can be removed by setting a nil +finalizer, e.g. right before explicitly deleting a resource: +

+
+ffi.C.free(ffi.gc(p, nil)) -- Manually free the memory.
+
+ +

C Type Information

+

+The following API functions return information about C types. +They are most useful for inspecting cdata objects. +

+ +

size = ffi.sizeof(ct [,nelem])

+

+Returns the size of ct in bytes. Returns nil if +the size is not known (e.g. for "void" or function types). +Requires nelem for VLA/VLS types, except for cdata objects. +

+ +

align = ffi.alignof(ct)

+

+Returns the minimum required alignment for ct in bytes. +

+ +

ofs [,bpos,bsize] = ffi.offsetof(ct, field)

+

+Returns the offset (in bytes) of field relative to the start +of ct, which must be a struct. Additionally returns +the position and the field size (in bits) for bit fields. +
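    +
    +A combined sketch of the three functions above; the exact numbers depend on
    +the target ABI, the annotated values are what a typical x86/x64 ABI gives: +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +struct hdr { char tag; int value; };
    +]]
    +print(ffi.sizeof("struct hdr"))             --> typically 8 (3 bytes padding)
    +print(ffi.alignof("struct hdr"))            --> typically 4
    +print(ffi.offsetof("struct hdr", "value"))  --> typically 4
    +print(ffi.sizeof("int[?]", 10))             --> 40 for a 32 bit int
    +
    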

+ +

status = ffi.istype(ct, obj)

+

+Returns true if obj has the C type given by +ct. Returns false otherwise. +

+

+C type qualifiers (const etc.) are ignored. Pointers are +checked with the standard pointer compatibility rules, but without any +special treatment for void *. If ct specifies a +struct/union, then a pointer to this type is accepted, +too. Otherwise the types must match exactly. +

+

+Note: this function accepts all kinds of Lua objects for the +obj argument, but always returns false for non-cdata +objects. +
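    +
    +A few illustrative checks: +
    +
    +local ffi = require("ffi")
    +local int_t = ffi.typeof("int")
    +print(ffi.istype(int_t, ffi.new("int", 42)))    --> true
    +print(ffi.istype("const int", ffi.new("int")))  --> true  (qualifiers ignored)
    +print(ffi.istype(int_t, 42))                    --> false (not a cdata object)
    +
    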

+ +

Utility Functions

+ +

err = ffi.errno([newerr])

+

+Returns the error number set by the last C function call which +indicated an error condition. If the optional newerr argument +is present, the error number is set to the new value and the previous +value is returned. +

+

+This function offers a portable and OS-independent way to get and set the +error number. Note that only some C functions set the error +number. And it's only significant if the function actually indicated an +error condition (e.g. with a return value of -1 or +NULL). Otherwise, it may or may not contain any previously set +value. +

+

+You're advised to call this function only when needed and as close as +possible after the return of the related C function. The +errno value is preserved across hooks, memory allocations, +invocations of the JIT compiler and other internal VM activity. The same +applies to the value returned by GetLastError() on Windows, but +you need to declare and call it yourself. +
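    +
    +A sketch, assuming a POSIX libc where opening a nonexistent file sets the
    +error number to ENOENT (the FILE * return type is simplified to
    +void * here): +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +void *fopen(const char *path, const char *mode);
    +]]
    +local f = ffi.C.fopen("/no/such/file", "r")
    +if f == nil then
    +  print("fopen failed, errno = " .. ffi.errno())  -- e.g. 2 (ENOENT)
    +end
    +
    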

+ +

str = ffi.string(ptr [,len])

+

+Creates an interned Lua string from the data pointed to by +ptr. +

+

+If the optional argument len is missing, ptr is +converted to a "char *" and the data is assumed to be +zero-terminated. The length of the string is computed with +strlen(). +

+

    +Otherwise ptr is converted to a "void *" and +len gives the length of the data. The data may contain +embedded zeros and need not be byte-oriented (though this may cause +endianness issues). +
    

+

+This function is mainly useful to convert (temporary) +"const char *" pointers returned by +C functions to Lua strings and store them or pass them to other +functions expecting a Lua string. The Lua string is an (interned) copy +of the data and bears no relation to the original data area anymore. +Lua strings are 8 bit clean and may be used to hold arbitrary, +non-character data. +

+

+Performance notice: it's faster to pass the length of the string, if +it's known. E.g. when the length is returned by a C call like +sprintf(). +
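    +
    +Two common uses, sketched under the assumption that getenv() is
    +available in the default namespace: +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +char *getenv(const char *name);
    +]]
    +local p = ffi.C.getenv("PATH")
    +if p ~= nil then
    +  print(#ffi.string(p))       -- zero-terminated, length via strlen()
    +end
    +
    +local buf = ffi.new("uint8_t[4]", {65, 0, 66, 67})
    +print(#ffi.string(buf, 4))    --> 4, the embedded zero byte is kept
    +
    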

+ +

ffi.copy(dst, src, len)
+ffi.copy(dst, str)

+

+Copies the data pointed to by src to dst. +dst is converted to a "void *" and src +is converted to a "const void *". +

+

+In the first syntax, len gives the number of bytes to copy. +Caveat: if src is a Lua string, then len must not +exceed #src+1. +

+

+In the second syntax, the source of the copy must be a Lua string. All +bytes of the string plus a zero-terminator are copied to +dst (i.e. #src+1 bytes). +

+

+Performance notice: ffi.copy() may be used as a faster +(inlinable) replacement for the C library functions +memcpy(), strcpy() and strncpy(). +

+ +

ffi.fill(dst, len [,c])

+

+Fills the data pointed to by dst with len constant +bytes, given by c. If c is omitted, the data is +zero-filled. +

+

+Performance notice: ffi.fill() may be used as a faster +(inlinable) replacement for the C library function +memset(dst, c, len). Please note the different +order of arguments! +
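    +
    +A combined sketch of both functions: +
    +
    +local ffi = require("ffi")
    +local buf = ffi.new("char[16]")
    +ffi.copy(buf, "hello")         -- copies 5 bytes plus the zero-terminator
    +ffi.fill(buf + 8, 8, 0x2e)     -- fill the upper half with '.' bytes
    +print(ffi.string(buf))         --> hello
    +print(ffi.string(buf + 8, 8))  --> ........
    +
    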

+ +

Target-specific Information

+ +

status = ffi.abi(param)

+

+Returns true if param (a Lua string) applies for the +target ABI (Application Binary Interface). Returns false +otherwise. The following parameters are currently defined: +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
    Parameter | Description
    32bit | 32 bit architecture
    64bit | 64 bit architecture
    le | Little-endian architecture
    be | Big-endian architecture
    fpu | Target has a hardware FPU
    softfp | softfp calling conventions
    hardfp | hardfp calling conventions
    eabi | EABI variant of the standard ABI
    win | Windows variant of the standard ABI
    gc64 | 64 bit GC references
    
+ +
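    +
    +For example, a quick capability check: +
    +
    +local ffi = require("ffi")
    +if ffi.abi("64bit") then
    +  print("64 bit, " .. (ffi.abi("le") and "little" or "big") .. "-endian target")
    +end
    +
    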

ffi.os

+

+Contains the target OS name. Same contents as +jit.os. +

+ +

ffi.arch

+

+Contains the target architecture name. Same contents as +jit.arch. +

+ +

Methods for Callbacks

+

+The C types for callbacks +have some extra methods: +

+ +

cb:free()

+

+Free the resources associated with a callback. The associated Lua +function is unanchored and may be garbage collected. The callback +function pointer is no longer valid and must not be called anymore +(it may be reused by a subsequently created callback). +

+ +

cb:set(func)

+

+Associate a new Lua function with a callback. The C type of the +callback and the callback function pointer are unchanged. +

+

+This method is useful to dynamically switch the receiver of callbacks +without creating a new callback each time and registering it again (e.g. +with a GUI library). +

+ +

Extended Standard Library Functions

+

+The following standard library functions have been extended to work +with cdata objects: +

+ +

n = tonumber(cdata)

+

+Converts a number cdata object to a double and returns it as +a Lua number. This is particularly useful for boxed 64 bit +integer values. Caveat: this conversion may incur a precision loss. +

+ +

s = tostring(cdata)

+

+Returns a string representation of the value of 64 bit integers +("nnnLL" or "nnnULL") or +complex numbers ("re±imi"). Otherwise +returns a string representation of the C type of a ctype object +("ctype<type>") or a cdata object +("cdata<type>: address"), unless you +override it with a __tostring metamethod (see +ffi.metatype()). +
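    +
    +A short sketch of both extensions: +
    +
    +local ffi = require("ffi")
    +local x = ffi.new("uint64_t", 10)
    +print(tostring(x))                        --> 10ULL
    +print(tonumber(x) + 0.5)                  --> 10.5
    +print(tostring(ffi.new("complex", 1, 2))) --> 1+2i
    +print(tostring(ffi.typeof("int")))        --> ctype<int>
    +
    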

+ +

iter, obj, start = pairs(cdata)
+iter, obj, start = ipairs(cdata)

+

+Calls the __pairs or __ipairs metamethod of the +corresponding ctype. +

+ +

Extensions to the Lua Parser

+

+The parser for Lua source code treats numeric literals with the +suffixes LL or ULL as signed or unsigned 64 bit +integers. Case doesn't matter, but uppercase is recommended for +readability. It handles decimal (42LL), hexadecimal +(0x2aLL) and binary (0b101010LL) literals. +

+

+The imaginary part of complex numbers can be specified by suffixing +number literals with i or I, e.g. 12.5i. +Caveat: you'll need to use 1i to get an imaginary part with +the value one, since i itself still refers to a variable +named i. +
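    +
    +For example: +
    +
    +print(42LL, 0x2aULL)   --> 42LL    42ULL
    +print(0b101010LL)      --> 42LL
    +print(12.5i)           --> 0+12.5i
    +local i = 3
    +print(1i, i)           --> 0+1i    3   ('i' alone still names the variable)
    +
    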

+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_semantics.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_semantics.html new file mode 100644 index 00000000000..b231ac289d5 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_semantics.html @@ -0,0 +1,1261 @@ + + + +FFI Semantics + + + + + + + + + +
+Lua +
+ + +
+

+This page describes the detailed semantics underlying the FFI library +and its interaction with both Lua and C code. +

+

+Given that the FFI library is designed to interface with C code +and that declarations can be written in plain C syntax, it +closely follows the C language semantics, wherever possible. +Some minor concessions are needed for smoother interoperation with Lua +language semantics. +

+

+Please don't be overwhelmed by the contents of this page — this +is a reference and you may need to consult it, if in doubt. It doesn't +hurt to skim this page, but most of the semantics "just work" as you'd +expect them to work. It should be straightforward to write +applications using the LuaJIT FFI for developers with a C or C++ +background. +

+ +

C Language Support

+

+The FFI library has a built-in C parser with a minimal memory +footprint. It's used by the ffi.* library +functions to declare C types or external symbols. +

+

    +Its only purpose is to parse C declarations, as found e.g. in +C header files. Although it does evaluate constant expressions, +it's not a C compiler. The body of inline +C function definitions is simply ignored. +
    

+

+Also, this is not a validating C parser. It expects and +accepts correctly formed C declarations, but it may choose to +ignore bad declarations or show rather generic error messages. If in +doubt, please check the input against your favorite C compiler. +

+

+The C parser complies to the C99 language standard plus +the following extensions: +

+
    + +
  • The '\e' escape in character and string literals.
  • + +
  • The C99/C++ boolean type, declared with the keywords bool +or _Bool.
  • + +
  • Complex numbers, declared with the keywords complex or +_Complex.
  • + +
  • Two complex number types: complex (aka +complex double) and complex float.
  • + +
  • Vector types, declared with the GCC mode or +vector_size attribute.
  • + +
  • Unnamed ('transparent') struct/union fields +inside a struct/union.
  • + +
  • Incomplete enum declarations, handled like incomplete +struct declarations.
  • + +
  • Unnamed enum fields inside a +struct/union. This is similar to a scoped C++ +enum, except that declared constants are visible in the +global namespace, too.
  • + +
  • Scoped static const declarations inside a +struct/union (from C++).
  • + +
  • Zero-length arrays ([0]), empty +struct/union, variable-length arrays (VLA, +[?]) and variable-length structs (VLS, with a trailing +VLA).
  • + +
  • C++ reference types (int &x).
  • + +
  • Alternate GCC keywords with '__', e.g. +__const__.
  • + +
  • GCC __attribute__ with the following attributes: +aligned, packed, mode, +vector_size, cdecl, fastcall, +stdcall, thiscall.
  • + +
  • The GCC __extension__ keyword and the GCC +__alignof__ operator.
  • + +
  • GCC __asm__("symname") symbol name redirection for +function declarations.
  • + +
  • MSVC keywords for fixed-length types: __int8, +__int16, __int32 and __int64.
  • + +
  • MSVC __cdecl, __fastcall, __stdcall, +__thiscall, __ptr32, __ptr64, +__declspec(align(n)) and #pragma pack.
  • + +
  • All other GCC/MSVC-specific attributes are ignored.
  • + +
+

+The following C types are pre-defined by the C parser (like +a typedef, except re-declarations will be ignored): +

+
    + +
  • Vararg handling: va_list, __builtin_va_list, +__gnuc_va_list.
  • + +
  • From <stddef.h>: ptrdiff_t, +size_t, wchar_t.
  • + +
  • From <stdint.h>: int8_t, int16_t, +int32_t, int64_t, uint8_t, +uint16_t, uint32_t, uint64_t, +intptr_t, uintptr_t.
  • + +
+

+You're encouraged to use these types in preference to +compiler-specific extensions or target-dependent standard types. +E.g. char differs in signedness and long differs in +size, depending on the target architecture and platform ABI. +

+

+The following C features are not supported: +

+
    + +
  • A declaration must always have a type specifier; it doesn't +default to an int type.
  • + +
  • Old-style empty function declarations (K&R) are not allowed. +All C functions must have a proper prototype declaration. A +function declared without parameters (int foo();) is +treated as a function taking zero arguments, like in C++.
  • + +
  • The long double C type is parsed correctly, but +there's no support for the related conversions, accesses or arithmetic +operations.
  • + +
  • Wide character strings and character literals are not +supported.
  • + +
  • See below for features that are currently +not implemented.
  • + +
+ +

C Type Conversion Rules

+ +

Conversions from C types to Lua objects

+

+These conversion rules apply for read accesses to +C types: indexing pointers, arrays or +struct/union types; reading external variables or +constant values; retrieving return values from C calls: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Input | Conversion | Output
    int8_t, int16_t | sign-ext int32_t → double | number
    uint8_t, uint16_t | zero-ext int32_t → double | number
    int32_t, uint32_t | double | number
    int64_t, uint64_t | boxed value | 64 bit int cdata
    double, float | double | number
    bool | 0 → false, otherwise true | boolean
    enum | boxed value | enum cdata
    Complex number | boxed value | complex cdata
    Vector | boxed value | vector cdata
    Pointer | boxed value | pointer cdata
    Array | boxed reference | reference cdata
    struct/union | boxed reference | reference cdata
    
+

+Bitfields are treated like their underlying type. +

+

+Reference types are dereferenced before a conversion can take +place — the conversion is applied to the C type pointed to +by the reference. +

+ +

Conversions from Lua objects to C types

+

+These conversion rules apply for write accesses to +C types: indexing pointers, arrays or +struct/union types; initializing cdata objects; +casts to C types; writing to external variables; passing +arguments to C calls: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Input | Conversion | Output
    number | double
    boolean | false → 0, true → 1 | bool
    nil | NULL | (void *)
    lightuserdata | lightuserdata address | (void *)
    userdata | userdata payload | (void *)
    io.* file | get FILE * handle | (void *)
    string | match against enum constant | enum
    string | copy string data + zero-byte | int8_t[], uint8_t[]
    string | string data | const char[]
    function | create callback | C function type
    table | table initializer | Array
    table | table initializer | struct/union
    cdata | cdata payload | C type
    
+

+If the result type of this conversion doesn't match the +C type of the destination, the +conversion rules between C types +are applied. +

+

+Reference types are immutable after initialization ("no re-seating of +references"). For initialization purposes or when passing values to +reference parameters, they are treated like pointers. Note that unlike +in C++, there's no way to implement automatic reference generation of +variables under the Lua language semantics. If you want to call a +function with a reference parameter, you need to explicitly pass a +one-element array. +

+ +

Conversions between C types

+

+These conversion rules are more or less the same as the standard +C conversion rules. Some rules only apply to casts, or require +pointer or type compatibility: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Input | Conversion | Output
    Signed integer | narrow or sign-extend | Integer
    Unsigned integer | narrow or zero-extend | Integer
    Integer | round | double, float
    double, float | trunc int32_t → narrow | (u)int8_t, (u)int16_t
    double, float | trunc | (u)int32_t, (u)int64_t
    double, float | round | float, double
    Number | n == 0 → 0, otherwise 1 | bool
    bool | false → 0, true → 1 | Number
    Complex number | convert real part | Number
    Number | convert real part, imag = 0 | Complex number
    Complex number | convert real and imag part | Complex number
    Number | convert scalar and replicate | Vector
    Vector | copy (same size) | Vector
    struct/union | take base address (compat) | Pointer
    Array | take base address (compat) | Pointer
    Function | take function address | Function pointer
    Number | convert via uintptr_t (cast) | Pointer
    Pointer | convert address (compat/cast) | Pointer
    Pointer | convert address (cast) | Integer
    Array | convert base address (cast) | Integer
    Array | copy (compat) | Array
    struct/union | copy (identical type) | struct/union
    
+

+Bitfields or enum types are treated like their underlying +type. +

+

+Conversions not listed above will raise an error. E.g. it's not +possible to convert a pointer to a complex number or vice versa. +

+ +

Conversions for vararg C function arguments

+

+The following default conversion rules apply when passing Lua objects +to the variable argument part of vararg C functions: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Input | Conversion | Output
    number | double
    boolean | false → 0, true → 1 | bool
    nil | NULL | (void *)
    userdata | userdata payload | (void *)
    lightuserdata | lightuserdata address | (void *)
    string | string data | const char *
    float cdata | double
    Array cdata | take base address | Element pointer
    struct/union cdata | take base address | struct/union pointer
    Function cdata | take function address | Function pointer
    Any other cdata | no conversion | C type
    
+

+To pass a Lua object, other than a cdata object, as a specific type, +you need to override the conversion rules: create a temporary cdata +object with a constructor or a cast and initialize it with the value +to pass: +

+

+Assuming x is a Lua number, here's how to pass it as an +integer to a vararg function: +

+
+ffi.cdef[[
+int printf(const char *fmt, ...);
+]]
+ffi.C.printf("integer value: %d\n", ffi.new("int", x))
+
+

+If you don't do this, the default Lua number → double +conversion rule applies. A vararg C function expecting an integer +will see a garbled or uninitialized value. +

+ +

Initializers

+

+Creating a cdata object with +ffi.new() or the +equivalent constructor syntax always initializes its contents, too. +Different rules apply, depending on the number of optional +initializers and the C types involved: +

+
    +
  • If no initializers are given, the object is filled with zero bytes.
  • + +
  • Scalar types (numbers and pointers) accept a single initializer. +The Lua object is converted to the scalar +C type.
  • + +
  • Valarrays (complex numbers and vectors) are treated like scalars +when a single initializer is given. Otherwise they are treated like +regular arrays.
  • + +
  • Aggregate types (arrays and structs) accept either a single cdata +initializer of the same type (copy constructor), a single +table initializer, or a flat list of +initializers.
  • + +
  • The elements of an array are initialized, starting at index zero. +If a single initializer is given for an array, it's repeated for all +remaining elements. This doesn't happen if two or more initializers +are given: all remaining uninitialized elements are filled with zero +bytes.
  • + +
  • Byte arrays may also be initialized with a Lua string. This copies +the whole string plus a terminating zero-byte. The copy stops early only +if the array has a known, fixed size.
  • + +
  • The fields of a struct are initialized in the order of +their declaration. Uninitialized fields are filled with zero +bytes.
  • + +
  • Only the first field of a union can be initialized with a +flat initializer.
  • + +
  • Elements or fields which are aggregates themselves are initialized +with a single initializer, but this may be a table +initializer or a compatible aggregate.
  • + +
  • Excess initializers cause an error.
  • + +
+ +

Table Initializers

+

+The following rules apply if a Lua table is used to initialize an +Array or a struct/union: +

+
    + +
  • If the table index [0] is non-nil, then the +table is assumed to be zero-based. Otherwise it's assumed to be +one-based.
  • + +
  • Array elements, starting at index zero, are initialized one-by-one +with the consecutive table elements, starting at either index +[0] or [1]. This process stops at the first +nil table element.
  • + +
  • If exactly one array element was initialized, it's repeated for +all the remaining elements. Otherwise all remaining uninitialized +elements are filled with zero bytes.
  • + +
  • The above logic only applies to arrays with a known fixed size. +A VLA is only initialized with the element(s) given in the table. +Depending on the use case, you may need to explicitly add a +NULL or 0 terminator to a VLA.
  • + +
  • A struct/union can be initialized in the +order of the declaration of its fields. Each field is initialized with +consecutive table elements, starting at either index [0] +or [1]. This process stops at the first nil table +element.
  • + +
  • Otherwise, if neither index [0] nor [1] is present, +a struct/union is initialized by looking up each field +name (as a string key) in the table. Each non-nil value is +used to initialize the corresponding field.
  • + +
  • Uninitialized fields of a struct are filled with zero +bytes, except for the trailing VLA of a VLS.
  • + +
  • Initialization of a union stops after one field has been +initialized. If no field has been initialized, the union is +filled with zero bytes.
  • + +
  • Elements or fields which are aggregates themselves are initialized +with a single initializer, but this may be a nested table +initializer (or a compatible aggregate).
  • + +
  • Excess initializers for an array cause an error. Excess +initializers for a struct/union are ignored. +Unrelated table entries are ignored, too.
  • + +
+

+Example: +

+
+local ffi = require("ffi")
+
+ffi.cdef[[
+struct foo { int a, b; };
+union bar { int i; double d; };
+struct nested { int x; struct foo y; };
+]]
+
+ffi.new("int[3]", {})            --> 0, 0, 0
+ffi.new("int[3]", {1})           --> 1, 1, 1
+ffi.new("int[3]", {1,2})         --> 1, 2, 0
+ffi.new("int[3]", {1,2,3})       --> 1, 2, 3
+ffi.new("int[3]", {[0]=1})       --> 1, 1, 1
+ffi.new("int[3]", {[0]=1,2})     --> 1, 2, 0
+ffi.new("int[3]", {[0]=1,2,3})   --> 1, 2, 3
+ffi.new("int[3]", {[0]=1,2,3,4}) --> error: too many initializers
+
+ffi.new("struct foo", {})            --> a = 0, b = 0
+ffi.new("struct foo", {1})           --> a = 1, b = 0
+ffi.new("struct foo", {1,2})         --> a = 1, b = 2
+ffi.new("struct foo", {[0]=1,2})     --> a = 1, b = 2
+ffi.new("struct foo", {b=2})         --> a = 0, b = 2
+ffi.new("struct foo", {a=1,b=2,c=3}) --> a = 1, b = 2  'c' is ignored
+
+ffi.new("union bar", {})        --> i = 0, d = 0.0
+ffi.new("union bar", {1})       --> i = 1, d = ?
+ffi.new("union bar", {[0]=1,2}) --> i = 1, d = ?    '2' is ignored
+ffi.new("union bar", {d=2})     --> i = ?, d = 2.0
+
+ffi.new("struct nested", {1,{2,3}})     --> x = 1, y.a = 2, y.b = 3
+ffi.new("struct nested", {x=1,y={2,3}}) --> x = 1, y.a = 2, y.b = 3
+
+ +

Operations on cdata Objects

+

+All of the standard Lua operators can be applied to cdata objects or a +mix of a cdata object and another Lua object. The following list shows +the pre-defined operations. +

+

+Reference types are dereferenced before performing each of +the operations below — the operation is applied to the +C type pointed to by the reference. +

+

+The pre-defined operations are always tried first before deferring to a +metamethod or index table (if any) for the corresponding ctype (except +for __new). An error is raised if the metamethod lookup or +index table lookup fails. +

+ +

Indexing a cdata object

+
    + +
  • Indexing a pointer/array: a cdata pointer/array can be +indexed by a cdata number or a Lua number. The element address is +computed as the base address plus the number value multiplied by the +element size in bytes. A read access loads the element value and +converts it to a Lua object. A write +access converts a Lua object to the element +type and stores the converted value to the element. An error is +raised if the element size is undefined or a write access to a +constant element is attempted.
  • + +
  • Dereferencing a struct/union field: a +cdata struct/union or a pointer to a +struct/union can be dereferenced by a string key, +giving the field name. The field address is computed as the base +address plus the relative offset of the field. A read access loads the +field value and converts it to a Lua +object. A write access converts a Lua +object to the field type and stores the converted value to the +field. An error is raised if a write access to a constant +struct/union or a constant field is attempted. +Scoped enum constants or static constants are treated like a constant +field.
  • + +
  • Indexing a complex number: a complex number can be indexed +either by a cdata number or a Lua number with the values 0 or 1, or by +the strings "re" or "im". A read access loads the +real part ([0], .re) or the imaginary part +([1], .im) part of a complex number and +converts it to a Lua number. The +sub-parts of a complex number are immutable — assigning to an +index of a complex number raises an error. Accessing out-of-bound +indexes returns unspecified results, but is guaranteed not to trigger +memory access violations.
  • + +
  • Indexing a vector: a vector is treated like an array for +indexing purposes, except the vector elements are immutable — +assigning to an index of a vector raises an error.
  • + +
+
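    +
    +A sketch illustrating the indexing rules above: +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +struct pixel { uint8_t r, g, b, a; };
    +]]
    +local p = ffi.new("struct pixel[2]")
    +p[0].r = 255                 -- array index, then field dereference (write)
    +local pp = ffi.cast("struct pixel *", p)
    +print(pp[0].r)               --> 255
    +
    +local z = ffi.new("complex", 3, 4)
    +print(z.re, z[1])            --> 3  4  (read-only sub-parts)
    +
    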

+A ctype object can be indexed with a string key, too. The only +pre-defined operation is reading scoped constants of +struct/union types. All other accesses defer +to the corresponding metamethods or index tables (if any). +

+

+Note: since there's (deliberately) no address-of operator, a cdata +object holding a value type is effectively immutable after +initialization. The JIT compiler benefits from this fact when applying +certain optimizations. +

+

+As a consequence, the elements of complex numbers and +vectors are immutable. But the elements of an aggregate holding these +types may be modified of course. I.e. you cannot assign to +foo.c.im, but you can assign a (newly created) complex number +to foo.c. +

+

+The JIT compiler implements strict aliasing rules: accesses to different +types do not alias, except for differences in signedness (this +applies even to char pointers, unlike C99). Type punning +through unions is explicitly detected and allowed. +

+ +

Calling a cdata object

+
    + +
      • Constructor: a ctype object can be called and used as a +constructor. This is equivalent +to ffi.new(ct, ...), unless a __new metamethod is +defined. The __new metamethod is called with the ctype object +plus any other arguments passed to the constructor. Note that you have to +use ffi.new inside of it, since calling ct(...) would +cause infinite recursion.
    
  • + +
  • C function call: a cdata function or cdata function +pointer can be called. The passed arguments are +converted to the C types of the +parameters given by the function declaration. Arguments passed to the +variable argument part of vararg C function use +special conversion rules. This +C function is called and the return value (if any) is +converted to a Lua object.
    +On Windows/x86 systems, __stdcall functions are automatically +detected and a function declared as __cdecl (the default) is +silently fixed up after the first call.
  • + +
+ +
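    +
    +A sketch of the __new metamethod (counter_t is a made-up example
    +type): +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +struct counter { int n; };
    +]]
    +local counter_t = ffi.metatype("struct counter", {
    +  __new = function(ct, start)
    +    -- ffi.new() must be used here; calling ct(...) again would recurse
    +    return ffi.new(ct, start or 0)
    +  end,
    +})
    +print(counter_t(5).n, counter_t().n)  --> 5  0
    +
    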

Arithmetic on cdata objects

+
    + +
  • Pointer arithmetic: a cdata pointer/array and a cdata +number or a Lua number can be added or subtracted. The number must be +on the right hand side for a subtraction. The result is a pointer of +the same type with an address plus or minus the number value +multiplied by the element size in bytes. An error is raised if the +element size is undefined.
  • + +
  • Pointer difference: two compatible cdata pointers/arrays +can be subtracted. The result is the difference between their +addresses, divided by the element size in bytes. An error is raised if +the element size is undefined or zero.
  • + +
  • 64 bit integer arithmetic: the standard arithmetic +operators (+ - * / % ^ and unary +minus) can be applied to two cdata numbers, or a cdata number and a +Lua number. If one of them is an uint64_t, the other side is +converted to an uint64_t and an unsigned arithmetic operation +is performed. Otherwise both sides are converted to an +int64_t and a signed arithmetic operation is performed. The +result is a boxed 64 bit cdata object.
    + +If one of the operands is an enum and the other operand is a +string, the string is converted to the value of a matching enum +constant before the above conversion.
    + +These rules ensure that 64 bit integers are "sticky". Any +expression involving at least one 64 bit integer operand results +in another one. The undefined cases for the division, modulo and power +operators return 2LL ^ 63 or +2ULL ^ 63.
    + +You'll have to explicitly convert a 64 bit integer to a Lua +number (e.g. for regular floating-point calculations) with +tonumber(). But note this may incur a precision loss.
  • + +
  • 64 bit bitwise operations: the rules for 64 bit +arithmetic operators apply analogously.
    + +Unlike the other bit.* operations, bit.tobit() +converts a cdata number via int64_t to int32_t and +returns a Lua number.
    + +For bit.band(), bit.bor() and bit.bxor(), the +conversion to int64_t or uint64_t applies to +all arguments, if any argument is a cdata number.
    + +For all other operations, only the first argument is used to determine +the output type. This implies that a cdata number as a shift count for +shifts and rotates is accepted, but that alone does not cause +a cdata number output. + +
+ +
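    +
    +A short sketch of pointer arithmetic and the "sticky" 64 bit integer
    +rules described above: +
    +
    +local ffi = require("ffi")
    +local a = ffi.new("int[4]", {10, 20, 30, 40})
    +local p = ffi.cast("int *", a)
    +print(p[1], (p + 2)[0])   --> 20  30
    +
    +local x = 2LL^40 + 1      -- stays a boxed 64 bit cdata
    +print(x)                  --> 1099511627777LL
    +print(x % 7)              --> 3LL
    +print(tonumber(x) * 0.5)  -- back to an ordinary Lua number
    +
    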

Comparisons of cdata objects

+
    + +
  • Pointer comparison: two compatible cdata pointers/arrays +can be compared. The result is the same as an unsigned comparison of +their addresses. nil is treated like a NULL pointer, +which is compatible with any other pointer type.
  • + +
  • 64 bit integer comparison: two cdata numbers, or a +cdata number and a Lua number can be compared with each other. If one +of them is an uint64_t, the other side is converted to an +uint64_t and an unsigned comparison is performed. Otherwise +both sides are converted to an int64_t and a signed +comparison is performed.
    + +If one of the operands is an enum and the other operand is a +string, the string is converted to the value of a matching enum +constant before the above conversion.
    + +
  • Comparisons for equality/inequality never raise an error. +Even incompatible pointers can be compared for equality by address. Any +other incompatible comparison (also with non-cdata objects) treats the +two sides as unequal.
  • + +
+ +

cdata objects as table keys

+

+Lua tables may be indexed by cdata objects, but this doesn't provide +any useful semantics — cdata objects are unsuitable as table +keys! +

+

+A cdata object is treated like any other garbage-collected object and +is hashed and compared by its address for table indexing. Since +there's no interning for cdata value types, the same value may be +boxed in different cdata objects with different addresses. Thus +t[1LL+1LL] and t[2LL] usually do not point to +the same hash slot and they certainly do not point to the same +hash slot as t[2]. +

+

+It would seriously drive up implementation complexity and slow down +the common case, if one were to add extra handling for by-value +hashing and comparisons to Lua tables. Given the ubiquity of their use +inside the VM, this is not acceptable. +

+

+There are three viable alternatives, if you really need to use cdata +objects as keys: +

+
    + +
  • If you can get by with the precision of Lua numbers +(52 bits), then use tonumber() on a cdata number or +combine multiple fields of a cdata aggregate to a Lua number. Then use +the resulting Lua number as a key when indexing tables.
    +One obvious benefit: t[tonumber(2LL)] does point to +the same slot as t[2].
  • + +
  • Otherwise use either tostring() on 64 bit integers +or complex numbers or combine multiple fields of a cdata aggregate to +a Lua string (e.g. with +ffi.string()). Then +use the resulting Lua string as a key when indexing tables.
  • + +
  • Create your own specialized hash table implementation using the +C types provided by the FFI library, just like you would in +C code. Ultimately this may give much better performance than the +other alternatives or what a generic by-value hash table could +possibly provide.
  • + +
+ +

Parameterized Types

+

+To facilitate some abstractions, the two functions +ffi.typeof and +ffi.cdef support +parameterized types in C declarations. Note: none of the other API +functions taking a cdecl allow this. +

+

+Any place you can write a typedef name, an +identifier or a number in a declaration, you can write +$ (the dollar sign) instead. These placeholders are replaced in +order of appearance with the arguments following the cdecl string: +

+
+-- Declare a struct with a parameterized field type and name:
+ffi.cdef([[
+typedef struct { $ $; } foo_t;
+]], type1, name1)
+
+-- Anonymous struct with dynamic names:
+local bar_t = ffi.typeof("struct { int $, $; }", name1, name2)
+-- Derived pointer type:
+local bar_ptr_t = ffi.typeof("$ *", bar_t)
+
+-- Parameterized dimensions work even where a VLA won't work:
+local matrix_t = ffi.typeof("uint8_t[$][$]", width, height)
+
+

+Caveat: this is not simple text substitution! A passed ctype or +cdata object is treated like the underlying type, a passed string is +considered an identifier and a number is considered a number. You must +not mix this up: e.g. passing "int" as a string doesn't work in +place of a type, you'd need to use ffi.typeof("int") instead. +

+

    +The main use for parameterized types is libraries implementing abstract +data types +(» example), +similar to what can be achieved with C++ template metaprogramming. +Another use case is derived types of anonymous structs, which avoids +pollution of the global struct namespace. +
    

+

+Please note that parameterized types are a nice tool and indispensable +for certain use cases. But you'll want to use them sparingly in regular +code, e.g. when all types are actually fixed. +

+ +

Garbage Collection of cdata Objects

+

+All explicitly (ffi.new(), ffi.cast() etc.) or +implicitly (accessors) created cdata objects are garbage collected. +You need to ensure to retain valid references to cdata objects +somewhere on a Lua stack, an upvalue or in a Lua table while they are +still in use. Once the last reference to a cdata object is gone, the +garbage collector will automatically free the memory used by it (at +the end of the next GC cycle). +

+

+Please note that pointers themselves are cdata objects, however they +are not followed by the garbage collector. So e.g. if you +assign a cdata array to a pointer, you must keep the cdata object +holding the array alive as long as the pointer is still in use: +

+
+ffi.cdef[[
+typedef struct { int *a; } foo_t;
+]]
+
+local s = ffi.new("foo_t", ffi.new("int[10]")) -- WRONG!
+
+local a = ffi.new("int[10]") -- OK
+local s = ffi.new("foo_t", a)
+-- Now do something with 's', but keep 'a' alive until you're done.
+
+

    +Similar rules apply for Lua strings which are implicitly converted to +"const char *": the string object itself must be +referenced somewhere or it'll be garbage collected eventually. The +pointer will then point to stale data, which may have already been +overwritten. Note that string literals are automatically kept +alive as long as the function containing them (actually its prototype) +is not garbage collected. +
    

+

+Objects which are passed as an argument to an external C function +are kept alive until the call returns. So it's generally safe to +create temporary cdata objects in argument lists. This is a common +idiom for passing specific C types to +vararg functions. +

+

+Memory areas returned by C functions (e.g. from malloc()) +must be manually managed, of course (or use +ffi.gc()). Pointers to +cdata objects are indistinguishable from pointers returned by C +functions (which is one of the reasons why the GC cannot follow them). +

+ +

Callbacks

+

+The LuaJIT FFI automatically generates special callback functions +whenever a Lua function is converted to a C function pointer. This +associates the generated callback function pointer with the C type +of the function pointer and the Lua function object (closure). +

+

+This can happen implicitly due to the usual conversions, e.g. when +passing a Lua function to a function pointer argument. Or you can use +ffi.cast() to explicitly cast a Lua function to a +C function pointer. +

+

+Currently only certain C function types can be used as callback +functions. Neither C vararg functions nor functions with +pass-by-value aggregate argument or result types are supported. There +are no restrictions for the kind of Lua functions that can be called +from the callback — no checks for the proper number of arguments +are made. The return value of the Lua function will be converted to the +result type and an error will be thrown for invalid conversions. +

+

    +It's allowed to throw errors across a callback invocation, but it's not +advisable in general. Do this only if you know that the C function +which called the callback copes with the forced stack unwinding and doesn't +leak resources. +
    

+

+One thing that's not allowed, is to let an FFI call into a C function +get JIT-compiled, which in turn calls a callback, calling into Lua again. +Usually this attempt is caught by the interpreter first and the +C function is blacklisted for compilation. +

+

+However, this heuristic may fail under specific circumstances: e.g. a +message polling function might not run Lua callbacks right away and the call +gets JIT-compiled. If it later happens to call back into Lua (e.g. a rarely +invoked error callback), you'll get a VM PANIC with the message +"bad callback". Then you'll need to manually turn off +JIT-compilation with +jit.off() for the +surrounding Lua function that invokes such a message polling function (or +similar). +

+ +

Callback resource handling

+

+Callbacks take up resources — you can only have a limited number +of them at the same time (500 - 1000, depending on the +architecture). The associated Lua functions are anchored to prevent +garbage collection, too. +

+

+Callbacks due to implicit conversions are permanent! There is no +way to guess their lifetime, since the C side might store the +function pointer for later use (typical for GUI toolkits). The associated +resources cannot be reclaimed until termination: +

+
+ffi.cdef[[
+typedef int (__stdcall *WNDENUMPROC)(void *hwnd, intptr_t l);
+int EnumWindows(WNDENUMPROC func, intptr_t l);
+]]
+
+-- Implicit conversion to a callback via function pointer argument.
+local count = 0
+ffi.C.EnumWindows(function(hwnd, l)
+  count = count + 1
+  return true
+end, 0)
+-- The callback is permanent and its resources cannot be reclaimed!
+-- Ok, so this may not be a problem, if you do this only once.
+
+

+Note: this example shows that you must properly declare +__stdcall callbacks on Windows/x86 systems. The calling +convention cannot be automatically detected, unlike for +__stdcall calls to Windows functions. +

+

+For some use cases it's necessary to free up the resources or to +dynamically redirect callbacks. Use an explicit cast to a +C function pointer and keep the resulting cdata object. Then use +the cb:free() +or cb:set() methods +on the cdata object: +

+
+-- Explicitly convert to a callback via cast.
+local count = 0
+local cb = ffi.cast("WNDENUMPROC", function(hwnd, l)
+  count = count + 1
+  return true
+end)
+
+-- Pass it to a C function.
+ffi.C.EnumWindows(cb, 0)
+-- EnumWindows doesn't need the callback after it returns, so free it.
+
+cb:free()
+-- The callback function pointer is no longer valid and its resources
+-- will be reclaimed. The created Lua closure will be garbage collected.
+
+ +

Callback performance

+

+Callbacks are slow! First, the C to Lua transition itself +has an unavoidable cost, similar to a lua_call() or +lua_pcall(). Argument and result marshalling add to that cost. +And finally, neither the C compiler nor LuaJIT can inline or +optimize across the language barrier and hoist repeated computations out +of a callback function. +

+

+Do not use callbacks for performance-sensitive work: e.g. consider a +numerical integration routine which takes a user-defined function to +integrate over. It's a bad idea to call a user-defined Lua function from +C code millions of times. The callback overhead will be absolutely +detrimental for performance. +

+

+It's considerably faster to write the numerical integration routine +itself in Lua — the JIT compiler will be able to inline the +user-defined function and optimize it together with its calling context, +with very competitive performance. +

+

+As a general guideline: use callbacks only when you must, because +of existing C APIs. E.g. callback performance is irrelevant for a +GUI application, which waits for user input most of the time, anyway. +

+

+For new designs avoid push-style APIs: a C function repeatedly +calling a callback for each result. Instead use pull-style APIs: +call a C function repeatedly to get a new result. Calls from Lua +to C via the FFI are much faster than the other way round. Most well-designed +libraries already use pull-style APIs (read/write, get/put). +

+ +

C Library Namespaces

+

+A C library namespace is a special kind of object which allows +access to the symbols contained in shared libraries or the default +symbol namespace. The default +ffi.C namespace is +automatically created when the FFI library is loaded. C library +namespaces for specific shared libraries may be created with the +ffi.load() API +function. +

+

+Indexing a C library namespace object with a symbol name (a Lua +string) automatically binds it to the library. First the symbol type +is resolved — it must have been declared with +ffi.cdef. Then the +symbol address is resolved by searching for the symbol name in the +associated shared libraries or the default symbol namespace. Finally, +the resulting binding between the symbol name, the symbol type and its +address is cached. Missing symbol declarations or nonexistent symbol +names cause an error. +

+

+This is what happens on a read access for the different kinds of +symbols: +

+
    + +
  • External functions: a cdata object with the type of the function +and its address is returned.
  • + +
  • External variables: the symbol address is dereferenced and the +loaded value is converted to a Lua object +and returned.
  • + +
  • Constant values (static const or enum +constants): the constant is converted to a +Lua object and returned.
  • + +
+

+This is what happens on a write access: +

+
    + +
  • External variables: the value to be written is +converted to the C type of the +variable and then stored at the symbol address.
  • + +
  • Writing to constant variables or to any other symbol type causes +an error, like any other attempted write to a constant location.
  • + +
+

+C library namespaces themselves are garbage collected objects. If +the last reference to the namespace object is gone, the garbage +collector will eventually release the shared library reference and +remove all memory associated with the namespace. Since this may +trigger the removal of the shared library from the memory of the +running process, it's generally not safe to use function +cdata objects obtained from a library if the namespace object may be +unreferenced. +

+

+Performance notice: the JIT compiler specializes to the identity of +namespace objects and to the strings used to index it. This +effectively turns function cdata objects into constants. It's not +useful and actually counter-productive to explicitly cache these +function objects, e.g. local strlen = ffi.C.strlen. OTOH it +is useful to cache the namespace itself, e.g. local C = +ffi.C. +
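    +
    +A sketch of the recommended caching pattern: +
    +
    +local ffi = require("ffi")
    +ffi.cdef[[
    +size_t strlen(const char *s);
    +]]
    +local C = ffi.C                     -- caching the namespace is useful
    +-- local strlen = ffi.C.strlen      -- caching the function is not
    +print(tonumber(C.strlen("hello")))  --> 5
    +
    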

+ +

No Hand-holding!

+

+The FFI library has been designed as a low-level library. The +goal is to interface with C code and C data types with a +minimum of overhead. This means you can do anything you can do +from C: access all memory, overwrite anything in memory, call +machine code at any memory address and so on. +

+

+The FFI library provides no memory safety, unlike regular Lua +code. It will happily allow you to dereference a NULL +pointer, to access arrays out of bounds or to misdeclare +C functions. If you make a mistake, your application might crash, +just like equivalent C code would. +

+

+This behavior is inevitable, since the goal is to provide full +interoperability with C code. Adding extra safety measures, like +bounds checks, would be futile. There's no way to detect +misdeclarations of C functions, since shared libraries only +provide symbol names, but no type information. Likewise there's no way +to infer the valid range of indexes for a returned pointer. +

+

    +Again: the FFI library is a low-level library. This implies it needs +to be used with care, but its flexibility and performance often +outweigh this concern. If you're a C or C++ developer, it'll be easy +to apply your existing knowledge. OTOH writing code for the FFI +library is not for the faint of heart and probably shouldn't be the +first exercise for someone with little experience in Lua, C or C++. +
    

+

+As a corollary of the above, the FFI library is not safe for use by +untrusted Lua code. If you're sandboxing untrusted Lua code, you +definitely don't want to give this code access to the FFI library or +to any cdata object (except 64 bit integers or complex +numbers). Any properly engineered Lua sandbox needs to provide safety +wrappers for many of the standard Lua library functions — +similar wrappers need to be written for high-level operations on FFI +data types, too. +

+ +

Current Status

+

+The initial release of the FFI library has some limitations and is +missing some features. Most of these will be fixed in future releases. +

+

+C language support is +currently incomplete: +

+
    +
  • C declarations are not passed through a C pre-processor, +yet.
  • +
  • The C parser is able to evaluate most constant expressions +commonly found in C header files. However it doesn't handle the +full range of C expression semantics and may fail for some +obscure constructs.
  • +
  • static const declarations only work for integer types +up to 32 bits. Neither declaring string constants nor +floating-point constants is supported.
  • +
  • Packed struct bitfields that cross container boundaries +are not implemented.
  • +
  • Native vector types may be defined with the GCC mode or +vector_size attribute. But no operations other than loading, +storing and initializing them are supported, yet.
  • +
  • The volatile type qualifier is currently ignored by +compiled code.
  • +
  • ffi.cdef silently +ignores most re-declarations. Note: avoid re-declarations which do not +conform to C99. The implementation will eventually be changed to +perform strict checks.
  • +
+

+The JIT compiler already handles a large subset of all FFI operations. +It automatically falls back to the interpreter for unimplemented +operations (you can check for this with the +-jv command line option). +The following operations are currently not compiled and may exhibit +suboptimal performance, especially when used in inner loops: +

+
    +
  • Bitfield accesses and initializations.
  • +
  • Vector operations.
  • +
  • Table initializers.
  • +
  • Initialization of nested struct/union types.
  • +
      • Non-default initialization of VLA/VLS or large C types +(> 128 bytes or > 16 array elements).
    
  • +
  • Conversions from lightuserdata to void *.
  • +
  • Pointer differences for element sizes that are not a power of +two.
  • +
  • Calls to C functions with aggregates passed or returned by +value.
  • +
  • Calls to ctype metamethods which are not plain functions.
  • +
  • ctype __newindex tables and non-string lookups in ctype +__index tables.
  • +
  • tostring() for cdata types.
  • +
  • Calls to ffi.cdef(), ffi.load() and +ffi.metatype().
  • +
+

+Other missing features: +

+
    +
  • Arithmetic for complex numbers.
  • +
  • Passing structs by value to vararg C functions.
  • +
  • C++ exception interoperability +does not extend to C functions called via the FFI, if the call is +compiled.
  • +
+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_tutorial.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_tutorial.html new file mode 100644 index 00000000000..a624e6fd6e6 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_ffi_tutorial.html @@ -0,0 +1,603 @@ + + + +FFI Tutorial + + + + + + + + + +
+Lua +
+ + +
+

+This page is intended to give you an overview of the features of the FFI +library by presenting a few use cases and guidelines. +

+

+This page makes no attempt to explain all of the FFI library, though. +You'll want to have a look at the ffi.* API +function reference and the FFI +semantics to learn more. +

+ +

Loading the FFI Library

+

+The FFI library is built into LuaJIT by default, but it's not loaded +and initialized by default. The suggested way to use the FFI library +is to add the following to the start of every Lua file that needs one +of its functions: +

+
+local ffi = require("ffi")
+
+

+Please note this doesn't define an ffi variable in the table +of globals — you really need to use the local variable. The +require function ensures the library is only loaded once. +

+

+Note: If you want to experiment with the FFI from the interactive prompt +of the command line executable, omit the local, as it doesn't +preserve local variables across lines. +

+ +

Accessing Standard System Functions

+

+The following code explains how to access standard system functions. +We slowly print two lines of dots by sleeping for 10 milliseconds +after each dot: +

+
    +local ffi = require("ffi")
    
+ffi.cdef[[
+void Sleep(int ms);
+int poll(struct pollfd *fds, unsigned long nfds, int timeout);
+]]
+
+local sleep
+if ffi.os == "Windows" then
+  function sleep(s)
+    ffi.C.Sleep(s*1000)
+  end
+else
+  function sleep(s)
+    ffi.C.poll(nil, 0, s*1000)
+  end
+end
+
+for i=1,160 do
+  io.write("."); io.flush()
+  sleep(0.01)
+end
+io.write("\n")
+
+

+Here's the step-by-step explanation: +

+

+ This defines the +C library functions we're going to use. The part inside the +double-brackets (in green) is just standard C syntax. You can +usually get this info from the C header files or the +documentation provided by each C library or C compiler. +

+

+ The difficulty we're +facing here, is that there are different standards to choose from. +Windows has a simple Sleep() function. On other systems there +are a variety of functions available to achieve sub-second sleeps, but +with no clear consensus. Thankfully poll() can be used for +this task, too, and it's present on most non-Windows systems. The +check for ffi.os makes sure we use the Windows-specific +function only on Windows systems. +

+

+ Here we're wrapping the +call to the C function in a Lua function. This isn't strictly +necessary, but it's helpful to deal with system-specific issues only +in one part of the code. The way we're wrapping it ensures the check +for the OS is only done during initialization and not for every call. +

+

+ A more subtle point is +that we defined our sleep() function (for the sake of this +example) as taking the number of seconds, but accepting fractional +seconds. Multiplying this by 1000 gets us milliseconds, but that still +leaves it a Lua number, which is a floating-point value. Alas, the +Sleep() function only accepts an integer value. Luckily for +us, the FFI library automatically performs the conversion when calling +the function (truncating the FP value towards zero, like in C). +

+

+Some readers will notice that Sleep() is part of +KERNEL32.DLL and is also a stdcall function. So how +can this possibly work? The FFI library provides the ffi.C +default C library namespace, which allows calling functions from +the default set of libraries, like a C compiler would. Also, the +FFI library automatically detects stdcall functions, so you +don't need to declare them as such. +

+

+ The poll() +function takes a couple more arguments we're not going to use. You can +simply use nil to pass a NULL pointer and 0 +for the nfds parameter. Please note that the +number 0 does not convert to a pointer value, +unlike in C++. You really have to pass pointers to pointer arguments +and numbers to number arguments. +

+

+The page on FFI semantics has all +of the gory details about +conversions between Lua +objects and C types. For the most part you don't have to deal +with this, as it's performed automatically and it's carefully designed +to bridge the semantic differences between Lua and C. +

+

+ Now that we have defined +our own sleep() function, we can just call it from plain Lua +code. That wasn't so bad, huh? Turning these boring animated dots into +a fascinating best-selling game is left as an exercise for the reader. +:-) +

+ +

Accessing the zlib Compression Library

+

+The following code shows how to access the zlib compression library from Lua code. +We'll define two convenience wrapper functions that take a string and +compress or uncompress it to another string: +

+
+ 
+local ffi = require("ffi")
+ffi.cdef[[
+unsigned long compressBound(unsigned long sourceLen);
+int compress2(uint8_t *dest, unsigned long *destLen,
+	      const uint8_t *source, unsigned long sourceLen, int level);
+int uncompress(uint8_t *dest, unsigned long *destLen,
+	       const uint8_t *source, unsigned long sourceLen);
+]]
+local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")
+
+local function compress(txt)
+  local n = zlib.compressBound(#txt)
+  local buf = ffi.new("uint8_t[?]", n)
+  local buflen = ffi.new("unsigned long[1]", n)
+  local res = zlib.compress2(buf, buflen, txt, #txt, 9)
+  assert(res == 0)
+  return ffi.string(buf, buflen[0])
+end
+
+local function uncompress(comp, n)
+  local buf = ffi.new("uint8_t[?]", n)
+  local buflen = ffi.new("unsigned long[1]", n)
+  local res = zlib.uncompress(buf, buflen, comp, #comp)
+  assert(res == 0)
+  return ffi.string(buf, buflen[0])
+end
+
+-- Simple test code.
+local txt = string.rep("abcd", 1000)
+print("Uncompressed size: ", #txt)
+local c = compress(txt)
+print("Compressed size: ", #c)
+local txt2 = uncompress(c, #txt)
+assert(txt2 == txt)
+
+

+Here's the step-by-step explanation: +

+

+ This defines some of the +C functions provided by zlib. For the sake of this example, some +type indirections have been reduced and it uses the pre-defined +fixed-size integer types, while still adhering to the zlib API/ABI. +

+

+ This loads the zlib shared +library. On POSIX systems it's named libz.so and usually +comes pre-installed. Since ffi.load() automatically adds any +missing standard prefixes/suffixes, we can simply load the +"z" library. On Windows it's named zlib1.dll and +you'll have to download it first from the +» zlib site. The check for +ffi.os makes sure we pass the right name to +ffi.load(). +

+

+ First, the maximum size of +the compression buffer is obtained by calling the +zlib.compressBound function with the length of the +uncompressed string. The next line allocates a byte buffer of this +size. The [?] in the type specification indicates a +variable-length array (VLA). The actual number of elements of this +array is given as the 2nd argument to ffi.new(). +

+

+ This may look strange at +first, but have a look at the declaration of the compress2 +function from zlib: the destination length is defined as a pointer! +This is because you pass in the maximum buffer size and get back the +actual length that was used. +

+

+In C you'd pass in the address of a local variable +(&buflen). But since there's no address-of operator in +Lua, we'll just pass in a one-element array. Conveniently it can be +initialized with the maximum buffer size in one step. Calling the +actual zlib.compress2 function is then straightforward. +

+

+ We want to return the +compressed data as a Lua string, so we'll use ffi.string(). +It needs a pointer to the start of the data and the actual length. The +length has been returned in the buflen array, so we'll just +get it from there. +

+

+Note that since the function returns now, the buf and +buflen variables will eventually be garbage collected. This +is fine, because ffi.string() has copied the contents to a +newly created (interned) Lua string. If you plan to call this function +lots of times, consider reusing the buffers and/or handing back the +results in buffers instead of strings. This will reduce the overhead +for garbage collection and string interning. +
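+As an aside, here's a minimal sketch (not part of the original listing) of
+the buffer-reuse idea; it assumes the ffi.cdef declarations and the zlib
+namespace loaded above:
+
+local ffi = require("ffi")
+-- Reuse one growing output buffer instead of allocating a fresh VLA per call.
+local bufsize, buf = 0, nil
+local buflen = ffi.new("unsigned long[1]")
+
+local function compress_reuse(txt)
+  local n = tonumber(zlib.compressBound(#txt))
+  if n > bufsize then                     -- grow the buffer only when needed
+    buf, bufsize = ffi.new("uint8_t[?]", n), n
+  end
+  buflen[0] = n                           -- pass in the available buffer size
+  assert(zlib.compress2(buf, buflen, txt, #txt, 9) == 0)
+  return ffi.string(buf, buflen[0])       -- still copies into a Lua string
+end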

+

+ The uncompress +function does the exact opposite of the compress function. +The compressed data doesn't include the size of the original string, +so this needs to be passed in. Otherwise no surprises here. +

+

+ The code that makes use +of the functions we just defined is just plain Lua code. It doesn't +need to know anything about the LuaJIT FFI — the convenience +wrapper functions completely hide it. +

+

+One major advantage of the LuaJIT FFI is that you are now able to +write those wrappers in Lua. And at a fraction of the time it +would cost you to create an extra C module using the Lua/C API. +Many of the simpler C functions can probably be used directly +from your Lua code, without any wrappers. +

+

+Side note: the zlib API uses the long type for passing +lengths and sizes around. But all those zlib functions actually only +deal with 32 bit values. This is an unfortunate choice for a +public API, but may be explained by zlib's history — we'll just +have to deal with it. +

+

+First, you should know that a long is a 64 bit type e.g. +on POSIX/x64 systems, but a 32 bit type on Windows/x64 and on +32 bit systems. Thus a long result can be either a plain +Lua number or a boxed 64 bit integer cdata object, depending on +the target system. +

+

+Ok, so the ffi.* functions generally accept cdata objects +wherever you'd want to use a number. That's why we get away with +passing n to ffi.string() above. But other Lua +library functions or modules don't know how to deal with this. So for +maximum portability one needs to use tonumber() on returned +long results before passing them on. Otherwise the +application might work on some systems, but would fail in a POSIX/x64 +environment. +
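+A short, hedged example of that rule, reusing the declarations from above:
+
+-- compressBound() returns an "unsigned long": a plain number on 32 bit
+-- targets and on Windows/x64, but a boxed 64 bit cdata on POSIX/x64.
+-- Convert it before handing it to code that expects a plain Lua number.
+local bound = tonumber(zlib.compressBound(#txt))
+print(string.format("Worst-case compressed size: %d bytes", bound))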

+ +

Defining Metamethods for a C Type

+

+The following code explains how to define metamethods for a C type. +We define a simple point type and add some operations to it: +

+
+ 
+local ffi = require("ffi")
+ffi.cdef[[
+typedef struct { double x, y; } point_t;
+]]
+
+local point
+local mt = {
+  __add = function(a, b) return point(a.x+b.x, a.y+b.y) end,
+  __len = function(a) return math.sqrt(a.x*a.x + a.y*a.y) end,
+  __index = {
+    area = function(a) return a.x*a.x + a.y*a.y end,
+  },
+}
+point = ffi.metatype("point_t", mt)
+
+local a = point(3, 4)
+print(a.x, a.y)  --> 3  4
+print(#a)        --> 5
+print(a:area())  --> 25
+local b = a + point(0.5, 8)
+print(#b)        --> 12.5
+
+

+Here's the step-by-step explanation: +

+

+ This defines the C type for a +two-dimensional point object. +

+

+ We have to declare the variable +holding the point constructor first, because it's used inside of a +metamethod. +

+

+ Let's define an __add +metamethod which adds the coordinates of two points and creates a new +point object. For simplicity, this function assumes that both arguments +are points. But it could be any mix of objects, if at least one operand +is of the required type (e.g. adding a point plus a number or vice +versa). Our __len metamethod returns the distance of a point to +the origin. +

+

+ If we run out of operators, we can +define named methods, too. Here the __index table defines an +area function. For custom indexing needs, one might want to +define __index and __newindex functions instead. +

+

+ This associates the metamethods with +our C type. This only needs to be done once. For convenience, a +constructor is returned by +ffi.metatype(). +We're not required to use it, though. The original C type can still +be used e.g. to create an array of points. The metamethods automatically +apply to any and all uses of this type. +
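+For example (continuing the listing above), the metamethods also apply to
+the elements of an array created from the original C type:
+
+local arr = ffi.new("point_t[2]", {{1, 2}, {3, 4}})
+print(arr[0].x, arr[0].y)  --> 1  2
+print(#arr[1])             --> 5  (the __len metamethod is used)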

+

+Please note that the association with a metatable is permanent and +the metatable must not be modified afterwards! Ditto for the +__index table. +

+

+ Here are some simple usage examples +for the point type and their expected results. The pre-defined +operations (such as a.x) can be freely mixed with the newly +defined metamethods. Note that area is a method and must be +called with the Lua syntax for methods: a:area(), not +a.area(). +

+

+The C type metamethod mechanism is most useful when used in +conjunction with C libraries that are written in an object-oriented +style. Creators return a pointer to a new instance and methods take an +instance pointer as the first argument. Sometimes you can just point +__index to the library namespace and __gc to the +destructor and you're done. But often enough you'll want to add +convenience wrappers, e.g. to return actual Lua strings or when +returning multiple values. +

+

+Some C libraries only declare instance pointers as an opaque +void * type. In this case you can use a fake type for all +declarations, e.g. a pointer to a named (incomplete) struct will do: +typedef struct foo_type *foo_handle. The C side doesn't +know what you declare with the LuaJIT FFI, but as long as the underlying +types are compatible, everything still works. +
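+Here's a hedged sketch of both ideas combined, using a purely hypothetical
+library "foo" (the declarations and the library name are made up for
+illustration); the destructor is attached with ffi.gc() in a small
+constructor wrapper:
+
+local ffi = require("ffi")
+ffi.cdef[[
+typedef struct foo_type *foo_handle;  /* opaque handle, fake struct tag */
+foo_handle foo_new(void);
+void foo_free(foo_handle f);
+int foo_count(foo_handle f);
+]]
+local lib = ffi.load("foo")            -- hypothetical library name
+
+-- Route method calls on the handle to the library namespace.
+ffi.metatype("struct foo_type", { __index = lib })
+
+local function new_foo()
+  -- Run the destructor when the handle becomes garbage.
+  return ffi.gc(lib.foo_new(), lib.foo_free)
+end
+
+local obj = new_foo()
+print(obj:foo_count())                 -- same as lib.foo_count(obj)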

+ +

Translating C Idioms

+

+Here's a list of common C idioms and their translation to the +LuaJIT FFI: +

+Idiom                       | C code                        | Lua code
+----------------------------+-------------------------------+----------------------------------------
+Pointer dereference         | int *p;                       |
+                            | x = *p;                       | x = p[0]
+                            | *p = y;                       | p[0] = y
+Pointer indexing            | int i, *p;                    |
+                            | x = p[i];                     | x = p[i]
+                            | p[i+1] = y;                   | p[i+1] = y
+Array indexing              | int i, a[];                   |
+                            | x = a[i];                     | x = a[i]
+                            | a[i+1] = y;                   | a[i+1] = y
+struct/union dereference    | struct foo s;                 |
+                            | x = s.field;                  | x = s.field
+                            | s.field = y;                  | s.field = y
+struct/union pointer deref. | struct foo *sp;               |
+                            | x = sp->field;                | x = sp.field
+                            | sp->field = y;                | sp.field = y
+Pointer arithmetic          | int i, *p;                    |
+                            | x = p + i;                    | x = p + i
+                            | y = p - i;                    | y = p - i
+Pointer difference          | int *p1, *p2;                 |
+                            | x = p1 - p2;                  | x = p1 - p2
+Array element pointer       | int i, a[];                   |
+                            | x = &a[i];                    | x = a+i
+Cast pointer to address     | int *p;                       |
+                            | x = (intptr_t)p;              | x = tonumber(ffi.cast("intptr_t", p))
+Functions with outargs      | void foo(int *inoutlen);      |
+                            | int len = x;                  | local len = ffi.new("int[1]", x)
+                            | foo(&len);                    | foo(len)
+                            | y = len;                      | y = len[0]
+Vararg conversions          | int printf(char *fmt, ...);   |
+                            | printf("%g", 1.0);            | printf("%g", 1)
+                            | printf("%d", 1);              | printf("%d", ffi.new("int", 1))
+

To Cache or Not to Cache

+

+It's a common Lua idiom to cache library functions in local variables +or upvalues, e.g.: +

+
+local byte, char = string.byte, string.char
+local function foo(x)
+  return char(byte(x)+1)
+end
+
+

+This replaces several hash-table lookups with a (faster) direct use of +a local or an upvalue. This is less important with LuaJIT, since the +JIT compiler optimizes hash-table lookups a lot and is even able to +hoist most of them out of the inner loops. It can't eliminate +all of them, though, and it saves some typing for often-used +functions. So there's still a place for this, even with LuaJIT. +

+

+The situation is a bit different with C function calls via the +FFI library. The JIT compiler has special logic to eliminate all +of the lookup overhead for functions resolved from a +C library namespace! +Thus it's not helpful and actually counter-productive to cache +individual C functions like this: +

+
+local funca, funcb = ffi.C.funca, ffi.C.funcb -- Not helpful!
+local function foo(x, n)
+  for i=1,n do funcb(funca(x, i), 1) end
+end
+
+

+This turns them into indirect calls and generates bigger and slower +machine code. Instead you'll want to cache the namespace itself and +rely on the JIT compiler to eliminate the lookups: +

+
+local C = ffi.C          -- Instead use this!
+local function foo(x, n)
+  for i=1,n do C.funcb(C.funca(x, i), 1) end
+end
+
+

+This generates both shorter and faster code. So don't cache +C functions, but do cache namespaces! Most often the +namespace is already in a local variable at an outer scope, e.g. from +local lib = ffi.load(...). Note that copying +it to a local variable in the function scope is unnecessary. +

+
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_jit.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_jit.html
new file mode 100644
index 00000000000..79c9f1f0085
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_jit.html
@@ -0,0 +1,201 @@
+jit.* Library

+The functions in this built-in module control the behavior of the JIT +compiler engine. Note that JIT-compilation is fully automatic — +you probably won't need to use any of the following functions unless +you have special needs. +

+ +

jit.on()
+jit.off()

+

+Turns the whole JIT compiler on (default) or off. +

+

+These functions are typically used with the command line options +-j on or -j off. +

+ +

jit.flush()

+

+Flushes the whole cache of compiled code. +

+ +

jit.on(func|true [,true|false])
+jit.off(func|true [,true|false])
+jit.flush(func|true [,true|false])

+

+jit.on enables JIT compilation for a Lua function (this is +the default). +

+

+jit.off disables JIT compilation for a Lua function and +flushes any already compiled code from the code cache. +

+

+jit.flush flushes the code, but doesn't affect the +enable/disable status. +

+

+The current function, i.e. the Lua function calling this library +function, can also be specified by passing true as the first +argument. +

+

+If the second argument is true, JIT compilation is also +enabled, disabled or flushed recursively for all sub-functions of a +function. With false only the sub-functions are affected. +

+

+The jit.on and jit.off functions only set a flag +which is checked when the function is about to be compiled. They do +not trigger immediate compilation. +

+

+Typical usage is jit.off(true, true) in the main chunk +of a module to turn off JIT compilation for the whole module for +debugging purposes. +
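+A minimal sketch of that pattern at the top of a module's main chunk:
+
+-- Run this whole module (and its sub-functions) interpreted while debugging.
+local jit = require("jit")
+jit.off(true, true)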

+ +

jit.flush(tr)

+

+Flushes the root trace, specified by its number, and all of its side +traces from the cache. The code for the trace will be retained as long +as there are any other traces which link to it. +

+ +

status, ... = jit.status()

+

+Returns the current status of the JIT compiler. The first result is +either true or false if the JIT compiler is turned +on or off. The remaining results are strings for CPU-specific features +and enabled optimizations. +
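+For example (the exact strings vary by CPU and build):
+
+local jit = require("jit")
+print(jit.status())  -- e.g. true followed by CPU feature and optimization names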

+ +

jit.version

+

+Contains the LuaJIT version string. +

+ +

jit.version_num

+

+Contains the version number of the LuaJIT core. Version xx.yy.zz +is represented by the decimal number xxyyzz. +

+ +

jit.os

+

+Contains the target OS name: +"Windows", "Linux", "OSX", "BSD", "POSIX" or "Other". +

+ +

jit.arch

+

+Contains the target architecture name: +"x86", "x64", "arm", "ppc", or "mips". +

+ +

jit.opt.* — JIT compiler optimization control

+

+This sub-module provides the backend for the -O command line +option. +

+

+You can also use it programmatically, e.g.: +

+
+jit.opt.start(2) -- same as -O2
+jit.opt.start("-dce")
+jit.opt.start("hotloop=10", "hotexit=2")
+
+

+Unlike in LuaJIT 1.x, the module is built-in and +optimization is turned on by default! +It's no longer necessary to run require("jit.opt").start(), +which was one of the ways to enable optimization. +

+ +

jit.util.* — JIT compiler introspection

+

+This sub-module holds functions to introspect the bytecode, generated +traces, the IR and the generated machine code. The functionality +provided by this module is still in flux and therefore undocumented. +

+

+The debug modules -jbc, -jv and -jdump make +extensive use of these functions. Please check out their source code, +if you want to know more. +

+
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/ext_profiler.html b/Build/source/libs/luajit/LuaJIT-src/doc/ext_profiler.html
new file mode 100644
index 00000000000..c435bb3090a
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/doc/ext_profiler.html
@@ -0,0 +1,365 @@
+Profiler

+LuaJIT has an integrated statistical profiler with very low overhead. It +allows sampling the currently executing stack and other parameters in +regular intervals. +

+

+The integrated profiler can be accessed from three levels: +

+ + +

High-Level Profiler

+

+The bundled high-level profiler offers basic profiling functionality. It +generates simple textual summaries or source code annotations. It can be +accessed with the -jp command line option +or from Lua code by loading the underlying jit.p module. +

+

+To cut to the chase — run this to get a CPU usage profile by +function name: +

+
+luajit -jp myapp.lua
+
+

+It's not a stated goal of the bundled profiler to add every +possible option or to cater for special profiling needs. The low-level +profiler APIs are documented below. They may be used by third-party +authors to implement advanced functionality, e.g. IDE integration or +graphical profilers. +

+

+Note: Sampling works for both interpreted and JIT-compiled code. The +results for JIT-compiled code may sometimes be surprising. LuaJIT +heavily optimizes and inlines Lua code — there's no simple +one-to-one correspondence between source code lines and the sampled +machine code. +

+ +

-jp=[options[,output]]

+

+The -jp command line option starts the high-level profiler. +When the application run by the command line terminates, the profiler +stops and writes the results to stdout or to the specified +output file. +

+

+The options argument specifies how the profiling is to be +performed: +

+
    +
  • f — Stack dump: function name, otherwise module:line. +This is the default mode.
  • +
  • F — Stack dump: ditto, but dump module:name.
  • +
  • l — Stack dump: module:line.
  • +
  • <number> — stack dump depth (callee ← +caller). Default: 1.
  • +
  • -<number> — Inverse stack dump depth (caller +→ callee).
  • +
  • s — Split stack dump after first stack level. Implies +depth ≥ 2 or depth ≤ -2.
  • +
  • p — Show full path for module names.
  • +
  • v — Show VM states.
  • +
  • z — Show zones.
  • +
  • r — Show raw sample counts. Default: show percentages.
  • +
  • a — Annotate excerpts from source code files.
  • +
  • A — Annotate complete source code files.
  • +
  • G — Produce raw output suitable for graphical tools.
  • +
  • m<number> — Minimum sample percentage to be shown. +Default: 3%.
  • +
  • i<number> — Sampling interval in milliseconds. +Default: 10ms.
    +Note: The actual sampling precision is OS-dependent.
  • +
+

+The default output for -jp is a list of the most CPU consuming +spots in the application. Increasing the stack dump depth with (say) +-jp=2 may help to point out the main callers or callees of +hotspots. But sample aggregation is still flat per unique stack dump. +

+

+To get a two-level view (split view) of callers/callees, use +-jp=s or -jp=-s. The percentages shown for the second +level are relative to the first level. +

+

+To see how much time is spent in each line relative to a function, use +-jp=fl. +

+

+To see how much time is spent in different VM states or +zones, use -jp=v or -jp=z. +

+

+Combinations of v/z with f/F/l produce two-level +views, e.g. -jp=vf or -jp=fv. This shows the time +spent in a VM state or zone vs. hotspots. This can be used to answer +questions like "Which time consuming functions are only interpreted?" or +"What's the garbage collector overhead for a specific function?". +

+

+Multiple options can be combined — but not all combinations make +sense, see above. E.g. -jp=3si4m1 samples three stack levels +deep in 4ms intervals and shows a split view of the CPU consuming +functions and their callers with a 1% threshold. +

+

+Source code annotations produced by -jp=a or -jp=A are +always flat and at the line level. Obviously, the source code files need +to be readable by the profiler script. +

+

+The high-level profiler can also be started and stopped from Lua code with: +

+
+require("jit.p").start(options, output)
+...
+require("jit.p").stop()
+
+ +

jit.zone — Zones

+

+Zones can be used to provide information about different parts of an +application to the high-level profiler. E.g. a game could make use of an +"AI" zone, a "PHYS" zone, etc. Zones are hierarchical, +organized as a stack. +

+

+The jit.zone module needs to be loaded explicitly: +

+
+local zone = require("jit.zone")
+
+
    +
  • zone("name") pushes a named zone to the zone stack.
  • +
  • zone() pops the current zone from the zone stack and +returns its name.
  • +
  • zone:get() returns the current zone name or nil.
  • +
  • zone:flush() flushes the zone stack.
  • +
+

+To show the time spent in each zone use -jp=z. To show the time +spent relative to hotspots use e.g. -jp=zf or -jp=fz. +
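+A hedged sketch of zone usage, with a made-up "AI" zone:
+
+local zone = require("jit.zone")
+zone("AI")
+-- ... run the AI code whose samples should be attributed to the "AI" zone ...
+zone()   -- pop the zone again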

+ +

Low-level Lua API

+

+The jit.profile module gives access to the low-level API of the +profiler from Lua code. This module needs to be loaded explicitly: +

+local profile = require("jit.profile")
+
+

+This module can be used to implement your own higher-level profiler. +A typical profiling run starts the profiler, captures stack dumps in +the profiler callback, adds them to a hash table to aggregate the number +of samples, stops the profiler and then analyzes all of the captured +stack dumps. Other parameters can be sampled in the profiler callback, +too. But it's important not to spend too much time in the callback, +since this may skew the statistics. +
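+Here's a minimal sketch of such a profiler: it counts samples per one-line
+stack dump and prints the aggregated counts at the end (just the bare API,
+no fancy aggregation):
+
+local profile = require("jit.profile")
+local counts = {}
+profile.start("l", function(thread, samples, vmstate)
+  local key = profile.dumpstack(thread, "l\n", 1)
+  counts[key] = (counts[key] or 0) + samples
+end)
+-- ... run the code to be profiled here ...
+profile.stop()
+for dump, n in pairs(counts) do io.write(n, "  ", dump) end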

+ +

profile.start(mode, cb) +— Start profiler

+

+This function starts the profiler. The mode argument is a +string holding options: +

+
    +
  • f — Profile with precision down to the function level.
  • +
  • l — Profile with precision down to the line level.
  • +
  • i<number> — Sampling interval in milliseconds (default +10ms).
    +Note: The actual sampling precision is OS-dependent. +
  • +
+

+The cb argument is a callback function which is called with +three arguments: (thread, samples, vmstate). The callback is +called on a separate coroutine, the thread argument is the +state that holds the stack to sample for profiling. Note: do +not modify the stack of that state or call functions on it. +

+

+samples gives the number of accumulated samples since the last +callback (usually 1). +

+

+vmstate holds the VM state at the time the profiling timer +triggered. This may or may not correspond to the state of the VM when +the profiling callback is called. The state is either 'N' +native (compiled) code, 'I' interpreted code, 'C' +C code, 'G' the garbage collector, or 'J' the JIT +compiler. +

+ +

profile.stop() +— Stop profiler

+

+This function stops the profiler. +

+ +

dump = profile.dumpstack([thread,] fmt, depth) +— Dump stack

+

+This function allows taking stack dumps in an efficient manner. It +returns a string with a stack dump for the thread (coroutine), +formatted according to the fmt argument: +

+
    +
  • p — Preserve the full path for module names. Otherwise +only the file name is used.
  • +
  • f — Dump the function name if it can be derived. Otherwise +use module:line.
  • +
  • F — Ditto, but dump module:name.
  • +
  • l — Dump module:line.
  • +
  • Z — Zap the following characters for the last dumped +frame.
  • +
  • All other characters are added verbatim to the output string.
  • +
+

+The depth argument gives the number of frames to dump, starting +at the topmost frame of the thread. A negative number dumps the frames in +inverse order. +

+

+The first example prints a list of the current module names and line +numbers of up to 10 frames in separate lines. The second example prints +semicolon-separated function names for all frames (up to 100) in inverse +order: +

+
+print(profile.dumpstack(thread, "l\n", 10))
+print(profile.dumpstack(thread, "lZ;", -100))
+
+ +

Low-level C API

+

+The profiler can be controlled directly from C code, e.g. for +use by IDEs. The declarations are in "luajit.h" (see +Lua/C API extensions). +

+ +

luaJIT_profile_start(L, mode, cb, data) +— Start profiler

+

+This function starts the profiler. See +above for a description of the mode argument. +

+

+The cb argument is a callback function with the following +declaration: +

+
+typedef void (*luaJIT_profile_callback)(void *data, lua_State *L,
+                                        int samples, int vmstate);
+
+

+data is available for use by the callback. L is the +state that holds the stack to sample for profiling. Note: do +not modify this stack or call functions on this stack — +use a separate coroutine for this purpose. See +above for a description of samples and vmstate. +

+ +

luaJIT_profile_stop(L) +— Stop profiler

+

+This function stops the profiler. +

+ +

p = luaJIT_profile_dumpstack(L, fmt, depth, len) +— Dump stack

+

+This function allows taking stack dumps in an efficient manner. +See above for a description of fmt +and depth. +

+

+This function returns a const char * pointing to a +private string buffer of the profiler. The int *len +argument returns the length of the output string. The buffer is +overwritten on the next call and deallocated when the profiler stops. +You either need to consume the content immediately or copy it for later +use. +

+
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/extensions.html b/Build/source/libs/luajit/LuaJIT-src/doc/extensions.html
new file mode 100644
index 00000000000..51d02fca89f
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/doc/extensions.html
@@ -0,0 +1,448 @@
+Extensions

+LuaJIT is fully upwards-compatible with Lua 5.1. It supports all +» standard Lua +library functions and the full set of +» Lua/C API +functions. +

+

+LuaJIT is also fully ABI-compatible to Lua 5.1 at the linker/dynamic +loader level. This means you can compile a C module against the +standard Lua headers and load the same shared library from either Lua +or LuaJIT. +

+

+LuaJIT extends the standard Lua VM with new functionality and adds +several extension modules. Please note this page is only about +functional enhancements and not about performance enhancements, +such as the optimized VM, the faster interpreter or the JIT compiler. +

+ +

Extensions Modules

+

+LuaJIT comes with several built-in extension modules: +

+ +

bit.* — Bitwise operations

+

+LuaJIT supports all bitwise operations as defined by +» Lua BitOp: +

+
+bit.tobit  bit.tohex  bit.bnot    bit.band bit.bor  bit.bxor
+bit.lshift bit.rshift bit.arshift bit.rol  bit.ror  bit.bswap
+
+

+This module is a LuaJIT built-in — you don't need to download or +install Lua BitOp. The Lua BitOp site has full documentation for all +» Lua BitOp API functions. +The FFI adds support for +64 bit bitwise operations, +using the same API functions. +

+

+Please make sure to require the module before using any of +its functions: +

+
+local bit = require("bit")
+
+

+An already installed Lua BitOp module is ignored by LuaJIT. +This way you can use bit operations from both Lua and LuaJIT on a +shared installation. +
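+A couple of quick examples of the built-in module in action:
+
+local bit = require("bit")
+print(bit.bxor(0xdead, 0xbeef))     --> 24642
+print(bit.tohex(bit.lshift(1, 8)))  --> 00000100
+print(bit.bnot(0))                  --> -1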

+ +

ffi.* — FFI library

+

+The FFI library allows calling external +C functions and the use of C data structures from pure Lua +code. +

+ +

jit.* — JIT compiler control

+

+The functions in this module +control the behavior of the JIT compiler engine. +

+ +

C API extensions

+

+LuaJIT adds some +extra functions to the Lua/C API. +

+ +

Profiler

+

+LuaJIT has an integrated profiler. +

+ +

Enhanced Standard Library Functions

+ +

xpcall(f, err [,args...]) passes arguments

+

+Unlike the standard implementation in Lua 5.1, xpcall() +passes any arguments after the error function to the function +which is called in a protected context. +
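+For example, the extra arguments 1 and 2 are passed on to the protected
+function:
+
+local ok, sum = xpcall(function(a, b) return a + b end, debug.traceback, 1, 2)
+print(ok, sum)  --> true  3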

+ +

loadfile() etc. handle UTF-8 source code

+

+Non-ASCII characters are handled transparently by the Lua source code parser. +This allows the use of UTF-8 characters in identifiers and strings. +A UTF-8 BOM is skipped at the start of the source code. +

+ +

tostring() etc. canonicalize NaN and ±Inf

+

+All number-to-string conversions consistently convert non-finite numbers +to the same strings on all platforms. NaN results in "nan", +positive infinity results in "inf" and negative infinity results +in "-inf". +
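+For example:
+
+print(0/0, 1/0, -1/0)  --> nan  inf  -inf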

+ +

tonumber() etc. use builtin string to number conversion

+

+All string-to-number conversions consistently convert integer and +floating-point inputs in decimal, hexadecimal and binary on all platforms. +strtod() is not used anymore, which avoids numerous +problems with poor C library implementations. The builtin conversion +function provides full precision according to the IEEE-754 standard, it +works independently of the current locale and it supports hex floating-point +numbers (e.g. 0x1.5p-3). +
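+For example:
+
+print(tonumber("0xff"))      --> 255
+print(tonumber("0x1.5p-3"))  --> 0.1640625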

+ +

string.dump(f [,strip]) generates portable bytecode

+

+An extra argument has been added to string.dump(). If set to +true, 'stripped' bytecode without debug information is +generated. This speeds up later bytecode loading and reduces memory +usage. See also the +-b command line option. +
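+A small example of dumping a function to stripped bytecode and loading it
+back:
+
+local function double(x) return x * 2 end
+local bc = string.dump(double, true)  -- true = strip debug info
+local f = assert(loadstring(bc))      -- load() works just as well
+print(f(21))  --> 42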

+

+The generated bytecode is portable and can be loaded on any architecture +that LuaJIT supports, independent of word size or endianness. However, the +bytecode compatibility versions must match. Bytecode stays compatible +for dot releases (x.y.0 → x.y.1), but may change with major or +minor releases (2.0 → 2.1) or between beta releases. Foreign +bytecode (e.g. from Lua 5.1) is incompatible and cannot be loaded. +

+

+Note: LJ_GC64 mode requires a different frame layout, which implies +a different, incompatible bytecode format for ports that use this mode (e.g. +ARM64). This may be rectified in the future. +

+ +

table.new(narray, nhash) allocates a pre-sized table

+

+An extra library function table.new() can be made available via +require("table.new"). This creates a pre-sized table, just like +the C API equivalent lua_createtable(). This is useful for big +tables if the final table size is known and automatic table resizing is +too expensive. +
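+For example:
+
+-- Pre-size a table for 10000 array elements and no hash part, so no
+-- incremental resizing happens while filling it.
+local new_tab = require("table.new")
+local t = new_tab(10000, 0)
+for i = 1, 10000 do t[i] = i end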

+ +

table.clear(tab) clears a table

+

+An extra library function table.clear() can be made available +via require("table.clear"). This clears all keys and values +from a table, but preserves the allocated array/hash sizes. This is +useful when a table, which is linked from multiple places, needs to be +cleared and/or when recycling a table for use by the same context. This +avoids managing backlinks, saves an allocation and the overhead of +incremental array/hash part growth. +
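+For example:
+
+-- Wipe the contents of a table but keep its allocated array/hash sizes.
+local clear_tab = require("table.clear")
+local t = { 1, 2, 3, n = 3 }
+clear_tab(t)
+print(next(t))  --> nil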

+

+Please note this function is meant for very specific situations. In most +cases it's better to replace the (usually single) link with a new table +and let the GC do its work. +

+ +

Enhanced PRNG for math.random()

+

+LuaJIT uses a Tausworthe PRNG with period 2^223 to implement +math.random() and math.randomseed(). The quality of +the PRNG results is much superior compared to the standard Lua +implementation which uses the platform-specific ANSI rand(). +

+

+The PRNG generates the same sequences from the same seeds on all +platforms and makes use of all bits in the seed argument. +math.random() without arguments generates 52 pseudo-random bits +for every call. The result is uniformly distributed between 0.0 and 1.0. +It's correctly scaled up and rounded for math.random(n [,m]) to +preserve uniformity. +

+ +

io.* functions handle 64 bit file offsets

+

+The file I/O functions in the standard io.* library handle +64 bit file offsets. In particular this means it's possible +to open files larger than 2 Gigabytes and to reposition or obtain +the current file position for offsets beyond 2 GB +(fp:seek() method). +
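+A hedged example ("huge.bin" is just a made-up name for a file larger than
+2 GB):
+
+local fp = assert(io.open("huge.bin", "rb"))
+fp:seek("set", 3 * 2^30)  -- reposition to the 3 GB mark
+print(fp:seek())          -- reports the current offset, well beyond 2 GB
+fp:close()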

+ +

debug.* functions identify metamethods

+

+debug.getinfo() and lua_getinfo() also return information +about invoked metamethods. The namewhat field is set to +"metamethod" and the name field has the name of +the corresponding metamethod (e.g. "__index"). +

+ +

Fully Resumable VM

+

+The LuaJIT VM is fully resumable. This means you can yield from a +coroutine even across contexts, where this would not be possible with +the standard Lua 5.1 VM: e.g. you can yield across pcall() +and xpcall(), across iterators and across metamethods. +

+ +

Extensions from Lua 5.2

+

+LuaJIT supports some language and library extensions from Lua 5.2. +Features that are unlikely to break existing code are unconditionally +enabled: +

+
    +
  • goto and ::labels::.
  • +
  • Hex escapes '\x3F' and '\*' escape in strings.
  • +
  • load(string|reader [, chunkname [,mode [,env]]]).
  • +
  • loadstring() is an alias for load().
  • +
  • loadfile(filename [,mode [,env]]).
  • +
  • math.log(x [,base]). +
  • string.rep(s, n [,sep]). +
  • string.format(): %q reversible. +%s checks __tostring. +%a and %A added.
  • +
  • String matching pattern %g added.
  • +
  • io.read("*L").
  • +
  • io.lines() and file:lines() process +io.read() options.
  • +
  • os.exit(status|true|false [,close]).
  • +
  • package.searchpath(name, path [, sep [, rep]]).
  • +
  • package.loadlib(name, "*").
  • +
  • debug.getinfo() returns nparams and isvararg +for option "u".
  • +
  • debug.getlocal() accepts function instead of level.
  • +
  • debug.getlocal() and debug.setlocal() accept negative +indexes for varargs.
  • +
  • debug.getupvalue() and debug.setupvalue() handle +C functions.
  • +
  • debug.upvalueid() and debug.upvaluejoin().
  • +
  • Command line option -E.
  • +
  • Command line checks __tostring for errors.
  • +
+

+Other features are only enabled, if LuaJIT is built with +-DLUAJIT_ENABLE_LUA52COMPAT: +

+
    +
  • goto is a keyword and not a valid variable name anymore.
  • +
  • break can be placed anywhere. Empty statements (;;) +are allowed.
  • +
  • __lt, __le are invoked for mixed types.
  • +
  • __len for tables. rawlen() library function.
  • +
  • pairs() and ipairs() check for __pairs and +__ipairs.
  • +
  • coroutine.running() returns two results.
  • +
  • table.pack() and table.unpack() +(same as unpack()).
  • +
  • io.write() and file:write() return file handle +instead of true.
  • +
  • os.execute() and pipe:close() return detailed +exit status.
  • +
  • debug.setmetatable() returns object.
  • +
  • debug.getuservalue() and debug.setuservalue().
  • +
  • Remove math.mod(), string.gfind(). +
+

+Note: this provides only partial compatibility with Lua 5.2 at the +language and Lua library level. LuaJIT is API+ABI-compatible with +Lua 5.1, which prevents implementing features that would otherwise +break the Lua/C API and ABI (e.g. _ENV). +

+ +

C++ Exception Interoperability

+

+LuaJIT has built-in support for interoperating with C++ exceptions. +The available range of features depends on the target platform and +the toolchain used to compile LuaJIT: +

+Platform                           | Compiler         | Interoperability
+-----------------------------------+------------------+-----------------
+POSIX/x64, DWARF2 unwinding        | GCC 4.3+         | Full
+Other platforms, DWARF2 unwinding  | GCC              | Limited
+Windows/x64                        | MSVC or WinSDK   | Full
+Windows/x86                        | Any              | No
+Other platforms                    | Other compilers  | No
+

+Full interoperability means: +

+
    +
  • C++ exceptions can be caught on the Lua side with pcall(), +lua_pcall() etc.
  • +
  • C++ exceptions will be converted to the generic Lua error +"C++ exception", unless you use the +C call wrapper feature.
  • +
  • It's safe to throw C++ exceptions across non-protected Lua frames +on the C stack. The contents of the C++ exception object +pass through unmodified.
  • +
  • Lua errors can be caught on the C++ side with catch(...). +The corresponding Lua error message can be retrieved from the Lua stack.
  • +
  • Throwing Lua errors across C++ frames is safe. C++ destructors +will be called.
  • +
+

+Limited interoperability means: +

+
    +
  • C++ exceptions can be caught on the Lua side with pcall(), +lua_pcall() etc.
  • +
  • C++ exceptions will be converted to the generic Lua error +"C++ exception", unless you use the +C call wrapper feature.
  • +
  • C++ exceptions will be caught by non-protected Lua frames and +are rethrown as a generic Lua error. The C++ exception object will +be destroyed.
  • +
  • Lua errors cannot be caught on the C++ side.
  • +
  • Throwing Lua errors across C++ frames will not call +C++ destructors.
  • +
+ +

+No interoperability means: +

+
    +
  • It's not safe to throw C++ exceptions across Lua frames.
  • +
  • C++ exceptions cannot be caught on the Lua side.
  • +
  • Lua errors cannot be caught on the C++ side.
  • +
  • Throwing Lua errors across C++ frames will not call +C++ destructors.
  • +
  • Additionally, on Windows/x86 with SEH-based C++ exceptions: +it's not safe to throw a Lua error across any frames containing +a C++ function with any try/catch construct or using variables with +(implicit) destructors. This also applies to any functions which may be +inlined in such a function. It doesn't matter whether lua_error() +is called inside or outside of a try/catch or whether any object actually +needs to be destroyed: the SEH chain is corrupted and this will eventually +lead to the termination of the process.
  • +
+
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/faq.html b/Build/source/libs/luajit/LuaJIT-src/doc/faq.html
new file mode 100644
index 00000000000..106dbd03727
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/doc/faq.html
@@ -0,0 +1,186 @@
+Frequently Asked Questions (FAQ)
+
Q: Where can I learn more about LuaJIT and Lua?
+
+ +
+ +
+
Q: Where can I learn more about the compiler technology used by LuaJIT?
+
+I'm planning to write more documentation about the internals of LuaJIT. +In the meantime, please use the following Google Scholar searches +to find relevant papers:
+Search for: » Trace Compiler
+Search for: » JIT Compiler
+Search for: » Dynamic Language Optimizations
+Search for: » SSA Form
+Search for: » Linear Scan Register Allocation
+Here is a list of the » innovative features in LuaJIT.
+And, you know, reading the source is of course the only way to enlightenment. :-) +
+
+ +
+
Q: Why do I get this error: "attempt to index global 'arg' (a nil value)"?
+Q: My vararg functions fail after switching to LuaJIT!
+
LuaJIT is compatible with the Lua 5.1 language standard. It doesn't +support the implicit arg parameter for old-style vararg +functions from Lua 5.0.
Please convert your code to the +» Lua 5.1 +vararg syntax.
+
+ +
+
Q: Why do I get this error: "bad FPU precision"?
+
Q: I get weird behavior after initializing Direct3D.
+
Q: Some FPU operations crash after I load a Delphi DLL.
+
+
+ +DirectX/Direct3D (up to version 9) sets the x87 FPU to single-precision +mode by default. This violates the Windows ABI and interferes with the +operation of many programs — LuaJIT is affected, too. Please make +sure you always use the D3DCREATE_FPU_PRESERVE flag when +initializing Direct3D.
+Direct3D version 10 or higher does not show this behavior anymore. +Consider testing your application with older versions, too.
+ +Similarly, the Borland/Delphi runtime modifies the FPU control word and +enables FP exceptions. Of course this violates the Windows ABI, too. +Please check the Delphi docs for the Set8087CW method. + +
+ +
+
Q: Sometimes Ctrl-C fails to stop my Lua program. Why?
+
The interrupt signal handler sets a Lua debug hook. But this is +currently ignored by compiled code (this will eventually be fixed). If +your program is running in a tight loop and never falls back to the +interpreter, the debug hook never runs and can't throw the +"interrupted!" error.
In the meantime you have to press Ctrl-C +twice to stop your program. That's similar to when it's stuck +running inside a C function under the Lua interpreter.
+
+ +
+
Q: Why doesn't my favorite power-patch for Lua apply against LuaJIT?
+
Because it's a completely redesigned VM and has very little code +in common with Lua anymore. Also, if the patch introduces changes to +the Lua semantics, these would need to be reflected everywhere in the +VM, from the interpreter up to all stages of the compiler.
Please +use only standard Lua language constructs. For many common needs you +can use source transformations or use wrapper or proxy functions. +The compiler will happily optimize away such indirections.
+
+ +
+
Q: Lua runs everywhere. Why doesn't LuaJIT support my CPU?
+
Because it's a compiler — it needs to generate native +machine code. This means the code generator must be ported to each +architecture. And the fast interpreter is written in assembler and +must be ported, too. This is quite an undertaking.
+The install documentation shows the supported +architectures. Other architectures will follow based on sufficient user +demand and/or sponsoring.
+
+ +
+
Q: When will feature X be added? When will the next version be released?
+
When it's ready.
+C'mon, it's open source — I'm doing it on my own time and you're +getting it for free. You can either contribute a patch or sponsor +the development of certain features, if they are important to you. +
+
+
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/img/contact.png b/Build/source/libs/luajit/LuaJIT-src/doc/img/contact.png
new file mode 100644
index 00000000000..9c73dc594ef
Binary files /dev/null and b/Build/source/libs/luajit/LuaJIT-src/doc/img/contact.png differ
diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/install.html b/Build/source/libs/luajit/LuaJIT-src/doc/install.html
new file mode 100644
index 00000000000..39539e4607d
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/doc/install.html
@@ -0,0 +1,659 @@
+Installation

+LuaJIT is only distributed as a source package. This page explains +how to build and install LuaJIT with different operating systems +and C compilers. +

+

+For the impatient (on POSIX systems): +

+
+make && sudo make install
+
+

+LuaJIT currently builds out-of-the box on most systems. +Here's the compatibility matrix for the supported combinations of +operating systems, CPUs and compilers: +

+CPU / OS        | Linux or Android  | *BSD, Other              | OSX 10.4+ or iOS 3.0+ | Windows XP/Vista/7
+----------------+-------------------+--------------------------+-----------------------+---------------------
+x86 (32 bit)    | GCC 4.x, GCC 3.4  | GCC 4.x, GCC 3.4         | GCC 4.x, GCC 3.4      | MSVC, MSVC/EE, WinSDK,
+                |                   |                          |                       | MinGW, Cygwin
+x64 (64 bit)    | GCC 4.x           | ORBIS (PS4)              | GCC 4.x               | MSVC + SDK v7.0,
+                |                   |                          |                       | WinSDK v7.0,
+                |                   |                          |                       | Durango (Xbox One)
+ARMv5+, ARM9E+  | GCC 4.2+          | GCC 4.2+, PSP2 (PS VITA) | GCC 4.2+              |
+ARM64           | GCC 4.8+          |                          | Clang 3.5+            |
+PPC             | GCC 4.3+          | GCC 4.3+, GCC 4.1 (PS3)  |                       | XEDK (Xbox 360)
+MIPS            | GCC 4.3+          | GCC 4.3+                 |                       |
+

Configuring LuaJIT

+

+The standard configuration should work fine for most installations. +Usually there is no need to tweak the settings. The following files +hold all user-configurable settings: +

+
    +
  • src/luaconf.h sets some configuration variables.
  • +
  • Makefile has settings for installing LuaJIT (POSIX +only).
  • +
  • src/Makefile has settings for compiling LuaJIT +under POSIX, MinGW or Cygwin.
  • +
  • src/msvcbuild.bat has settings for compiling LuaJIT with +MSVC or WinSDK.
  • +
+

+Please read the instructions given in these files, before changing +any settings. +

+ +

POSIX Systems (Linux, OSX, *BSD etc.)

+

Prerequisites

+

+Depending on your distribution, you may need to install a package for +GCC, the development headers and/or a complete SDK. E.g. on a current +Debian/Ubuntu, install libc6-dev with the package manager. +

+

+Download the current source package of LuaJIT (pick the .tar.gz), +if you haven't already done so. Move it to a directory of your choice, +open a terminal window and change to this directory. Now unpack the archive +and change to the newly created directory: +

+
+tar zxf LuaJIT-2.0.4.tar.gz
+cd LuaJIT-2.0.4
+

Building LuaJIT

+

+The supplied Makefiles try to auto-detect the settings needed for your +operating system and your compiler. They need to be run with GNU Make, +which is probably the default on your system, anyway. Simply run: +

+
+make
+
+

+This always builds a native x86, x64 or PPC binary, depending on the host OS +you're running this command on. Check the section on +cross-compilation for more options. +

+

+By default, modules are only searched under the prefix /usr/local. +You can add an extra prefix to the search paths by appending the +PREFIX option, e.g.: +

+
+make PREFIX=/home/myself/lj2
+
+

+Note for OSX: if the MACOSX_DEPLOYMENT_TARGET environment +variable is not set, then it's forced to 10.4. +

+

Installing LuaJIT

+

+The top-level Makefile installs LuaJIT by default under +/usr/local, i.e. the executable ends up in +/usr/local/bin and so on. You need root privileges +to write to this path. So, assuming sudo is installed on your system, +run the following command and enter your sudo password: +

+
+sudo make install
+
+

+Otherwise specify the directory prefix as an absolute path, e.g.: +

+
+make install PREFIX=/home/myself/lj2
+
+

+Obviously the prefixes given during build and installation need to be the same. +

+ +

Windows Systems

+

Prerequisites

+

+Either install one of the open source SDKs +(» MinGW or +» Cygwin), which come with a modified +GCC plus the required development headers. +

+

+Or install Microsoft's Visual C++ (MSVC). The freely downloadable +» Express Edition +works just fine, but only contains an x86 compiler. +

+

+The freely downloadable +» Windows SDK +only comes with command line tools, but this is all you need to build LuaJIT. +It contains x86 and x64 compilers. +

+

+Next, download the source package and unpack it using an archive manager +(e.g. the Windows Explorer) to a directory of your choice. +

+

Building with MSVC

+

+Open a "Visual Studio .NET Command Prompt", cd to the +directory where you've unpacked the sources and run these commands: +

+
+cd src
+msvcbuild
+
+

+Then follow the installation instructions below. +

+

Building with the Windows SDK

+

+Open a "Windows SDK Command Shell" and select the x86 compiler: +

+
+setenv /release /x86
+
+

+Or select the x64 compiler: +

+
+setenv /release /x64
+
+

+Then cd to the directory where you've unpacked the sources +and run these commands: +

+
+cd src
+msvcbuild
+
+

+Then follow the installation instructions below. +

+

Building with MinGW or Cygwin

+

+Open a command prompt window and make sure the MinGW or Cygwin programs +are in your path. Then cd to the directory where +you've unpacked the sources and run this command for MinGW: +

+
+mingw32-make
+
+

+Or this command for Cygwin: +

+
+make
+
+

+Then follow the installation instructions below. +

+

Installing LuaJIT

+

+Copy luajit.exe and lua51.dll (built in the src +directory) to a newly created directory (any location is ok). +Add lua and lua\jit directories below it and copy +all Lua files from the src\jit directory of the distribution +to the latter directory. +

+

+There are no hardcoded +absolute path names — all modules are loaded relative to the +directory where luajit.exe is installed +(see src/luaconf.h). +

+ +

Cross-compiling LuaJIT

+

+The GNU Makefile-based build system allows cross-compiling on any host +for any supported target, as long as both architectures have the same +pointer size. If you want to cross-compile to any 32 bit target on an +x64 OS, you need to install the multilib development package (e.g. +libc6-dev-i386 on Debian/Ubuntu) and build a 32 bit host part +(HOST_CC="gcc -m32"). +

+

+You need to specify TARGET_SYS whenever the host OS and the +target OS differ, or you'll get assembler or linker errors. E.g. if +you're compiling on a Windows or OSX host for embedded Linux or Android, +you need to add TARGET_SYS=Linux to the examples below. For a +minimal target OS, you may need to disable the built-in allocator in +src/Makefile and use TARGET_SYS=Other. The examples +below only show some popular targets — please check the comments +in src/Makefile for more details. +

+
+# Cross-compile to a 32 bit binary on a multilib x64 OS
+make CC="gcc -m32"
+
+# Cross-compile on Debian/Ubuntu for Windows (mingw32 package)
+make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows
+
+

+The CROSS prefix allows specifying a standard GNU cross-compile +toolchain (Binutils, GCC and a matching libc). The prefix may vary +depending on the --target the toolchain was built for (note the +CROSS prefix has a trailing "-"). The examples below +use the canonical toolchain triplets for Linux. +

+

+Since there's often no easy way to detect CPU features at runtime, it's +important to compile with the proper CPU or architecture settings. You +can specify these when building the toolchain yourself. Or add +-mcpu=... or -march=... to TARGET_CFLAGS. For +ARM it's important to have the correct -mfloat-abi=... setting, +too. Otherwise LuaJIT may not run at the full performance of your target +CPU. +

+
+# ARM soft-float
+make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi- \
+     TARGET_CFLAGS="-mfloat-abi=soft"
+
+# ARM soft-float ABI with VFP (example for Cortex-A8)
+make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi- \
+     TARGET_CFLAGS="-mcpu=cortex-a8 -mfloat-abi=softfp"
+
+# ARM hard-float ABI with VFP (armhf, requires recent toolchain)
+make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabihf-
+
+# ARM64 (requires x64 host)
+make CROSS=aarch64-linux-
+
+# PPC
+make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu-
+
+# MIPS big-endian
+make HOST_CC="gcc -m32" CROSS=mips-linux-
+# MIPS little-endian
+make HOST_CC="gcc -m32" CROSS=mipsel-linux-
+
+

+You can cross-compile for Android using the » Android NDK. +The environment variables need to match the install locations and the +desired target platform. E.g. Android 4.0 corresponds to ABI level 14. +For details check the folder docs in the NDK directory. +

+

+Only a few common variations for the different CPUs, ABIs and platforms +are listed. Please use your own judgement for which combination you want +to build/deploy or which lowest common denominator you want to pick: +

+
+# Android/ARM, armeabi (ARMv5TE soft-float), Android 2.2+ (Froyo)
+NDK=/opt/android/ndk
+NDKABI=8
+NDKVER=$NDK/toolchains/arm-linux-androideabi-4.6
+NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
+NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
+make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
+
+# Android/ARM, armeabi-v7a (ARMv7 VFP), Android 4.0+ (ICS)
+NDK=/opt/android/ndk
+NDKABI=14
+NDKVER=$NDK/toolchains/arm-linux-androideabi-4.6
+NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
+NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
+NDKARCH="-march=armv7-a -mfloat-abi=softfp -Wl,--fix-cortex-a8"
+make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF $NDKARCH"
+
+# Android/MIPS, mips (MIPS32R1 hard-float), Android 4.0+ (ICS)
+NDK=/opt/android/ndk
+NDKABI=14
+NDKVER=$NDK/toolchains/mipsel-linux-android-4.6
+NDKP=$NDKVER/prebuilt/linux-x86/bin/mipsel-linux-android-
+NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-mips"
+make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
+
+# Android/x86, x86 (i686 SSE3), Android 4.0+ (ICS)
+NDK=/opt/android/ndk
+NDKABI=14
+NDKVER=$NDK/toolchains/x86-4.6
+NDKP=$NDKVER/prebuilt/linux-x86/bin/i686-linux-android-
+NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-x86"
+make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
+
+

+You can cross-compile for iOS 3.0+ (iPhone/iPad) using the » iOS SDK. +The environment variables need to match the iOS SDK version: +

+

+Note: the JIT compiler is disabled for iOS, because regular iOS Apps +are not allowed to generate code at runtime. You'll only get the performance +of the LuaJIT interpreter on iOS. This is still faster than plain Lua, but +much slower than the JIT compiler. Please complain to Apple, not me. +Or use Android. :-p +

+
+IXCODE=`xcode-select -print-path`
+ISDK=$IXCODE/Platforms/iPhoneOS.platform/Developer
+ISDKVER=iPhoneOS6.0.sdk
+ISDKP=$ISDK/usr/bin/
+ISDKF="-arch armv7 -isysroot $ISDK/SDKs/$ISDKVER"
+make HOST_CC="gcc -m32 -arch i386" CROSS=$ISDKP TARGET_FLAGS="$ISDKF" \
+     TARGET_SYS=iOS
+
+ +

Cross-compiling for consoles

+

+Building LuaJIT for consoles requires both a supported host compiler +(x86 or x64) and a cross-compiler (to PPC or ARM) from the official +console SDK. +

+

+Due to restrictions on consoles, the JIT compiler is disabled and only +the fast interpreter is built. This is still faster than plain Lua, +but much slower than the JIT compiler. The FFI is disabled, too, since +it's not very useful in such an environment. +

+

+The following commands build a static library libluajit.a, +which can be linked against your game, just like the Lua library. +

+

+To cross-compile for PS3 from a Linux host (requires +32 bit GCC, i.e. multilib Linux/x64) or a Windows host (requires +32 bit MinGW), run this command: +

+
+make HOST_CC="gcc -m32" CROSS=ppu-lv2-
+
+

+To cross-compile for PS4 from a Windows host, +open a "Visual Studio .NET Command Prompt" (64 bit host compiler), +cd to the directory where you've unpacked the sources and +run the following commands: +

+
+cd src
+ps4build
+
+

+To cross-compile for PS Vita from a Windows host, +open a "Visual Studio .NET Command Prompt" (32 bit host compiler), +cd to the directory where you've unpacked the sources and +run the following commands: +

+
+cd src
+psvitabuild
+
+

+To cross-compile for Xbox 360 from a Windows host, +open a "Visual Studio .NET Command Prompt" (32 bit host compiler), +cd to the directory where you've unpacked the sources and run +the following commands: +

+
+cd src
+xedkbuild
+
+

+To cross-compile for Xbox One from a Windows host, +open a "Visual Studio .NET Command Prompt" (64 bit host compiler), +cd to the directory where you've unpacked the sources and run +the following commands: +

+
+cd src
+xb1build
+
+ +

Embedding LuaJIT

+

+LuaJIT is API-compatible with Lua 5.1. If you've already embedded Lua +into your application, you probably don't need to do anything to switch +to LuaJIT, except link with a different library: +

+
    +
  • It's strongly suggested to build LuaJIT separately using the supplied +build system. Please do not attempt to integrate the individual +source files into your build tree. You'll most likely get the internal build +dependencies wrong or mess up the compiler flags. Treat LuaJIT like any +other external library and link your application with either the dynamic +or static library, depending on your needs.
  • +
  • If you want to load C modules compiled for plain Lua +with require(), you need to make sure the public symbols +(e.g. lua_pushnumber) are exported, too: +
    • On POSIX systems you can either link to the shared library +or link the static library into your application. In the latter case +you'll need to export all public symbols from your main executable +(e.g. -Wl,-E on Linux) and add the external dependencies +(e.g. -lm -ldl on Linux).
    • +
    • Since Windows symbols are bound to a specific DLL name, you need to +link to the lua51.dll created by the LuaJIT build (do not rename +the DLL). You may link LuaJIT statically on Windows only if you don't +intend to load Lua/C modules at runtime. +
    +
  • +
  • +If you're building a 64 bit application on OSX which links directly or +indirectly against LuaJIT, you need to link your main executable +with these flags: +
    +-pagezero_size 10000 -image_base 100000000
    +
    +Also, it's recommended to rebase all (self-compiled) shared libraries +which are loaded at runtime on OSX/x64 (e.g. C extension modules for Lua). +See: man rebase +
  • +
+

Additional hints for initializing LuaJIT using the C API functions:

+
    +
  • Here's a +» simple example +for embedding Lua or LuaJIT into your application.
  • +
  • Make sure you use luaL_newstate. Avoid using +lua_newstate, since this uses the (slower) default memory +allocator from your system (no support for this on x64).
  • +
  • Make sure you use luaL_openlibs and not the old Lua 5.0 style +of calling luaopen_base etc. directly.
  • +
  • To change or extend the list of standard libraries to load, copy +src/lib_init.c to your project and modify it accordingly. +Make sure the jit library is loaded or the JIT compiler +will not be activated.
  • +
  • The bit.* module for bitwise operations +is already built-in. There's no need to statically link +» Lua BitOp to your application.
  • +
+ +

Hints for Distribution Maintainers

+

+The LuaJIT build system has extra provisions for the needs of most +POSIX-based distributions. If you're a package maintainer for +a distribution, please make use of these features and +avoid patching, subverting, autotoolizing or messing up the build system +in unspeakable ways. +

+

+There should be absolutely no need to patch luaconf.h or any +of the Makefiles. And please do not hand-pick files for your packages — +simply use whatever make install creates. There's a reason +for all of the files and directories it creates. +

+

+The build system uses GNU make and auto-detects most settings based on +the host you're building it on. This should work fine for native builds, +even when sandboxed. You may need to pass some of the following flags to +both the make and the make install command lines +for a regular distribution build: +

+
    +
  • PREFIX overrides the installation path and should usually +be set to /usr. Setting this also changes the module paths and +the paths needed to locate the shared library.
  • +
  • DESTDIR is an absolute path which allows you to install +to a shadow tree instead of the root tree of the build system.
  • +
  • MULTILIB sets the architecture-specific library path component +for multilib systems. The default is lib.
  • +
  • Have a look at the top-level Makefile and src/Makefile +for additional variables to tweak. The following variables may be +overridden, but it's not recommended, except for special needs +like cross-builds: +BUILDMODE, CC, HOST_CC, STATIC_CC, DYNAMIC_CC, CFLAGS, HOST_CFLAGS, +TARGET_CFLAGS, LDFLAGS, HOST_LDFLAGS, TARGET_LDFLAGS, TARGET_SHLDFLAGS, +TARGET_FLAGS, LIBS, HOST_LIBS, TARGET_LIBS, CROSS, HOST_SYS, TARGET_SYS +
  • +
+

+The build system has a special target for an amalgamated build, i.e. make amalg. This compiles the LuaJIT core as one huge C file and allows GCC to generate faster and shorter code. Alas, this requires lots of memory during the build. This may be a problem for some users; that's why it's not enabled by default. But it shouldn't be a problem for most build farms. It's recommended that binary distributions use this target for their LuaJIT builds.

+

+The tl;dr version of the above: +

+
+make amalg PREFIX=/usr && \
+make install PREFIX=/usr DESTDIR=/tmp/buildroot
+
+

+Finally, if you encounter any difficulties, please contact me first, instead of releasing a broken package onto unsuspecting users. They're usually going to complain to me (the upstream) and not to you (the package maintainer), anyway.

+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/luajit.html b/Build/source/libs/luajit/LuaJIT-src/doc/luajit.html new file mode 100644 index 00000000000..bee0f62a1d0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/luajit.html @@ -0,0 +1,236 @@ + + + +LuaJIT + + + + + + + + + + +
+Lua +
+ + +
+

+LuaJIT is a Just-In-Time Compiler (JIT) for the +» Lua programming language. +Lua is a powerful, dynamic and light-weight programming language. +It may be embedded or used as a general-purpose, stand-alone language. +

+

+LuaJIT is Copyright © 2005-2015 Mike Pall, released under the +» MIT open source license. +

+

+

+ +

Compatibility

+ + +
+Windows, Linux, BSD, OSX, POSIX
+Embedded, Android, iOS
+PS3, PS4, PS Vita, Xbox 360, Xbox One
+GCC, Clang/LLVM, MSVC
+x86, x64, ARM, ARM64, PPC, MIPS
+Lua 5.1 API+ABI, +JIT, +BitOp, +FFI, Drop-in DLL/.so
+ +

Overview

+ + + + + + + + + +
+3x - 100x
+115 KB VM, 90 KB JIT
+63 KLOC C, 24 KLOC ASM, 11 KLOC Lua
+

+LuaJIT has been successfully used as a scripting middleware in +games, appliances, network and graphics apps, numerical simulations, +trading platforms and many other specialty applications. It scales from +embedded devices, smartphones, desktops up to server farms. It combines +high flexibility with » high performance +and an unmatched low memory footprint. +

+

+LuaJIT has been in continuous development since 2005. It's widely +considered to be one of the fastest dynamic language +implementations. It has outperformed other dynamic languages on many +cross-language benchmarks since its first release — often by a +substantial margin. +

+

+For LuaJIT 2.0, the whole VM has been rewritten from the ground up +and relentlessly optimized for performance. It combines a high-speed +interpreter, written in assembler, with a state-of-the-art JIT +compiler. +

+

+An innovative trace compiler is integrated with advanced, +SSA-based optimizations and highly tuned code generation backends. +A substantial reduction of the overhead associated with dynamic languages +allows it to break into the performance range traditionally reserved for +offline, static language compilers. +

+ +

More ...

+

+Please select a sub-topic in the navigation bar to learn more about LuaJIT. +

+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/running.html b/Build/source/libs/luajit/LuaJIT-src/doc/running.html new file mode 100644 index 00000000000..353b542b6c2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/running.html @@ -0,0 +1,309 @@ + + + +Running LuaJIT + + + + + + + + + +
+Lua +
+ + +
+

+LuaJIT has only a single stand-alone executable, called luajit on +POSIX systems or luajit.exe on Windows. It can be used to run simple +Lua statements or whole Lua applications from the command line. It has an +interactive mode, too. +

+ +

Command Line Options

+

+The luajit stand-alone executable is just a slightly modified +version of the regular lua stand-alone executable. +It supports the same basic options, too. luajit -h +prints a short list of the available options. Please have a look at the +» Lua manual +for details. +

+

+LuaJIT has some additional options: +

+ +

-b[options] input output

+

+This option saves or lists bytecode. The following additional options +are accepted: +

+
    +
  • -l — Only list bytecode.
  • +
  • -s — Strip debug info (this is the default).
  • +
  • -g — Keep debug info.
  • +
  • -n name — Set module name (default: auto-detect from input name)
  • +
  • -t type — Set output file type (default: auto-detect from output name).
  • +
  • -a arch — Override architecture for object files (default: native).
  • +
  • -o os — Override OS for object files (default: native).
  • +
  • -e chunk — Use chunk string as input.
  • +
  • - (a single minus sign) — Use stdin as input and/or stdout as output.
  • +
+

+The output file type is auto-detected from the extension of the output +file name: +

+
    +
  • c — C source file, exported bytecode data.
  • +
  • h — C header file, static bytecode data.
  • +
  • obj or o — Object file, exported bytecode data +(OS- and architecture-specific).
  • +
  • raw or any other extension — Raw bytecode file (portable). +
+

+Notes: +

+
    +
  • See also string.dump() +for information on bytecode portability and compatibility.
  • +
  • A file in raw bytecode format is auto-detected and can be loaded like any Lua source file, e.g. directly from the command line or with loadfile(), dofile(), etc.
  • +
  • To statically embed the bytecode of a module in your application, +generate an object file and just link it with your application.
  • +
  • On most ELF-based systems (e.g. Linux) you need to explicitly export the +global symbols when linking your application, e.g. with: -Wl,-E
  • +
  • require() tries to load embedded bytecode data from exported +symbols (in *.exe or lua51.dll on Windows) and from +shared libraries in package.cpath.
  • +
+

+Typical usage examples: +

+
+luajit -b test.lua test.out                 # Save bytecode to test.out
+luajit -bg test.lua test.out                # Keep debug info
+luajit -be "print('hello world')" test.out  # Save cmdline script
+
+luajit -bl test.lua                         # List to stdout
+luajit -bl test.lua test.txt                # List to test.txt
+luajit -ble "print('hello world')"          # List cmdline script
+
+luajit -b test.lua test.obj                 # Generate object file
+# Link test.obj with your application and load it with require("test")
+
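+As a hypothetical C-side counterpart to the last example above (not part of the original documentation), a host executable that has test.obj linked in can pull the embedded module in with an ordinary require():
+
+/* Sketch only: assumes test.obj from the example above was linked into
+** this executable and, on ELF systems, that the global symbols were
+** exported (e.g. with -Wl,-E), so require() can find the embedded bytecode.
+*/
+#include <stdio.h>
+#include <lua.h>
+
+static int load_embedded_test(lua_State *L)
+{
+  lua_getglobal(L, "require");
+  lua_pushstring(L, "test");
+  if (lua_pcall(L, 1, 1, 0) != 0) {      /* Calls require("test"). */
+    fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
+    lua_pop(L, 1);
+    return 0;
+  }
+  lua_setglobal(L, "test");              /* Keep the returned module table. */
+  return 1;
+}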
+ +

-j cmd[=arg[,arg...]]

+

+This option performs a LuaJIT control command or activates one of the +loadable extension modules. The command is first looked up in the +jit.* library. If no matching function is found, a module +named jit.<cmd> is loaded and the start() +function of the module is called with the specified arguments (if +any). The space between -j and cmd is optional. +

+

+Here are the available LuaJIT control commands: +

+
    +
  • -jon — Turns the JIT compiler on (default).
  • +
  • -joff — Turns the JIT compiler off (only use the interpreter).
  • +
  • -jflush — Flushes the whole cache of compiled code.
  • +
  • -jv — Shows verbose information about the progress of the JIT compiler.
  • +
  • -jdump — Dumps the code and structures used in various compiler stages.
  • +
  • -jp — Start the integrated profiler.
  • +
+

+The -jv and -jdump commands are extension modules +written in Lua. They are mainly used for debugging the JIT compiler +itself. For a description of their options and output format, please +read the comment block at the start of their source. +They can be found in the lib directory of the source +distribution or installed under the jit directory. By default +this is /usr/local/share/luajit-2.0.4/jit on POSIX +systems. +

+ +

-O[level]
+-O[+]flag   -O-flag
+-Oparam=value

+

+This option allows fine-tuned control of the optimizations used by the JIT compiler. This is mainly intended for debugging LuaJIT itself. Please note that the JIT compiler is extremely fast (we are talking about the microsecond to millisecond range). Disabling optimizations doesn't have any visible impact on its overhead, but usually generates code that runs slower.

+

+The first form sets an optimization level — this enables a +specific mix of optimization flags. -O0 turns off all +optimizations and higher numbers enable more optimizations. Omitting +the level (i.e. just -O) sets the default optimization level, +which is -O3 in the current version. +

+

+The second form adds or removes individual optimization flags. +The third form sets a parameter for the VM or the JIT compiler +to a specific value. +

+

+You can either use this option multiple times (like -Ocse +-O-dce -Ohotloop=10) or separate several settings with a comma +(like -O+cse,-dce,hotloop=10). The settings are applied from +left to right and later settings override earlier ones. You can freely +mix the three forms, but note that setting an optimization level +overrides all earlier flags. +

+

+Here are the available flags and at what optimization levels they +are enabled: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Flag      -O1  -O2  -O3
+fold       •    •    •   Constant Folding, Simplifications and Reassociation
+cse        •    •    •   Common-Subexpression Elimination
+dce        •    •    •   Dead-Code Elimination
+narrow          •    •   Narrowing of numbers to integers
+loop            •    •   Loop Optimizations (code hoisting)
+fwd                  •   Load Forwarding (L2L) and Store Forwarding (S2L)
+dse                  •   Dead-Store Elimination
+abc                  •   Array Bounds Check Elimination
+sink                 •   Allocation/Store Sinking
+fuse                 •   Fusion of operands into instructions
+

+Here are the parameters and their default settings: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Parameter    Default   Description
+maxtrace     1000      Max. number of traces in the cache
+maxrecord    4000      Max. number of recorded IR instructions
+maxirconst   500       Max. number of IR constants of a trace
+maxside      100       Max. number of side traces of a root trace
+maxsnap      500       Max. number of snapshots for a trace
+hotloop      56        Number of iterations to detect a hot loop or hot call
+hotexit      10        Number of taken exits to start a side trace
+tryside      4         Number of attempts to compile a side trace
+instunroll   4         Max. unroll factor for unstable loops
+loopunroll   15        Max. unroll factor for loop ops in side traces
+callunroll   3         Max. unroll factor for pseudo-recursive calls
+recunroll    2         Min. unroll factor for true recursion
+sizemcode    32        Size of each machine code area in KBytes (Windows: 64K)
+maxmcode     512       Max. total size of all machine code areas in KBytes
+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/doc/status.html b/Build/source/libs/luajit/LuaJIT-src/doc/status.html new file mode 100644 index 00000000000..9ffa65fefad --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/doc/status.html @@ -0,0 +1,118 @@ + + + +Status + + + + + + + + + +
+Lua +
+ + +
+

+LuaJIT 2.0 is the current +stable branch. This branch is in +feature-freeze — new features will only be added to LuaJIT 2.1. +

+ +

Current Status

+

+LuaJIT ought to run all Lua 5.1-compatible source code just fine. +It's considered a serious bug if the VM crashes or produces unexpected +results — please report this. +

+

+Known incompatibilities and issues in LuaJIT 2.0: +

+
    +
  • +There are some differences in implementation-defined behavior. +These either have a good reason, are arbitrary design choices +or are due to quirks in the VM. The latter cases may get fixed if a +demonstrable need is shown. +
  • +
  • +The Lua debug API is missing a couple of features (return +hooks for non-Lua functions) and shows slightly different behavior +in LuaJIT (no per-coroutine hooks, no tail call counting). +
  • +
  • +Some checks are missing in the JIT-compiled code for obscure situations with open upvalues aliasing one of the SSA slots later on (or vice versa). Bonus points if you can find a real-world test case for this.
  • +
  • +Currently some out-of-memory errors from on-trace code are not +handled correctly. The error may fall through an on-trace +pcall or it may be passed on to the function set with +lua_atpanic on x64. This issue will be fixed with the new +garbage collector. +
  • +
+
+
+ + + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.h b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.h new file mode 100644 index 00000000000..57e0116f5ac --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.h @@ -0,0 +1,456 @@ +/* +** DynASM ARM encoding engine. +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** Released under the MIT license. See dynasm.lua for full copyright notice. +*/ + +#include +#include +#include +#include + +#define DASM_ARCH "arm" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. */ +enum { + DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, + /* The following actions need a buffer position. */ + DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, + /* The following actions also have an argument. */ + DASM_REL_PC, DASM_LABEL_PC, + DASM_IMM, DASM_IMM12, DASM_IMM16, DASM_IMML8, DASM_IMML12, DASM_IMMV8, + DASM__MAX +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_REL 0x15000000 +#define DASM_S_UNDEF_LG 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned int *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. 
*/ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. */ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +static int dasm_imm12(unsigned int n) +{ + int i; + for (i = 0; i < 16; i++, n = (n << 2) | (n >> 30)) + if (n <= 255) return (int)(n + (i << 8)); + return -1; +} + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) +{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + if (action >= DASM__MAX) { + ofs += 4; + } else { + int *pl, n = action >= DASM_REL_PC ? 
va_arg(ap, int) : 0; + switch (action) { + case DASM_STOP: goto stop; + case DASM_SECTION: + n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); + D->section = &D->sections[n]; goto stop; + case DASM_ESC: p++; ofs += 4; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; + case DASM_REL_LG: + n = (ins & 2047) - 10; pl = D->lglabels + n; + /* Bkwd rel or global. */ + if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } + pl += 10; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. */ + *pl = pos; + } + pos++; + break; + case DASM_LABEL_LG: + pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; + } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_IMM: + case DASM_IMM16: +#ifdef DASM_CHECKS + CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); + if ((ins & 0x8000)) + CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); + else + CK((n>>((ins>>5)&31)) == 0, RANGE_I); +#endif + b[pos++] = n; + break; + case DASM_IMMV8: + CK((n & 3) == 0, RANGE_I); + n >>= 2; + case DASM_IMML8: + case DASM_IMML12: + CK(n >= 0 ? ((n>>((ins>>5)&31)) == 0) : + (((-n)>>((ins>>5)&31)) == 0), RANGE_I); + b[pos++] = n; + break; + case DASM_IMM12: + CK(dasm_imm12((unsigned int)n) != -1, RANGE_I); + b[pos++] = n; + break; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: p++; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; + case DASM_REL_LG: case DASM_REL_PC: pos++; break; + case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; + case DASM_IMM: case DASM_IMM12: case DASM_IMM16: + case DASM_IMML8: case DASM_IMML12: case DASM_IMMV8: pos++; break; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. 
*/ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) +#else +#define CK(x, st) ((void)0) +#endif + +/* Pass 3: Encode sections. */ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + char *base = (char *)buffer; + unsigned int *cp = (unsigned int *)buffer; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: *cp++ = *p++; break; + case DASM_REL_EXT: + n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048)); + goto patchrel; + case DASM_ALIGN: + ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000; + break; + case DASM_REL_LG: + CK(n >= 0, UNDEF_LG); + case DASM_REL_PC: + CK(n >= 0, UNDEF_PC); + n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) - 4; + patchrel: + if ((ins & 0x800) == 0) { + CK((n & 3) == 0 && ((n+0x02000000) >> 26) == 0, RANGE_REL); + cp[-1] |= ((n >> 2) & 0x00ffffff); + } else if ((ins & 0x1000)) { + CK((n & 3) == 0 && -256 <= n && n <= 256, RANGE_REL); + goto patchimml8; + } else if ((ins & 0x2000) == 0) { + CK((n & 3) == 0 && -4096 <= n && n <= 4096, RANGE_REL); + goto patchimml; + } else { + CK((n & 3) == 0 && -1020 <= n && n <= 1020, RANGE_REL); + n >>= 2; + goto patchimml; + } + break; + case DASM_LABEL_LG: + ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); + break; + case DASM_LABEL_PC: break; + case DASM_IMM: + cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31); + break; + case DASM_IMM12: + cp[-1] |= dasm_imm12((unsigned int)n); + break; + case DASM_IMM16: + cp[-1] |= ((n & 0xf000) << 4) | (n & 0x0fff); + break; + case DASM_IMML8: patchimml8: + cp[-1] |= n >= 0 ? (0x00800000 | (n & 0x0f) | ((n & 0xf0) << 4)) : + ((-n & 0x0f) | ((-n & 0xf0) << 4)); + break; + case DASM_IMML12: case DASM_IMMV8: patchimml: + cp[-1] |= n >= 0 ? (0x00800000 | n) : (-n); + break; + default: *cp++ = ins; break; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != (char *)cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} +#undef CK + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. 
*/ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(D->section-D->sections); + return D->status; +} +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.lua new file mode 100644 index 00000000000..90a259c5c33 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm.lua @@ -0,0 +1,1125 @@ +------------------------------------------------------------------------------ +-- DynASM ARM module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +-- Module information: +local _info = { + arch = "arm", + description = "DynASM ARM module", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. +local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, setmetatable, rawget = assert, setmetatable, rawget +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub +local concat, sort, insert = table.concat, table.sort, table.insert +local bit = bit or require("bit") +local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift +local ror, tohex = bit.ror, bit.tohex + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + "STOP", "SECTION", "ESC", "REL_EXT", + "ALIGN", "REL_LG", "LABEL_LG", + "REL_PC", "LABEL_PC", "IMM", "IMM12", "IMM16", "IMML8", "IMML12", "IMMV8", +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number. +local map_action = {} +for n,name in ipairs(action_names) do + map_action[name] = n-1 +end + +-- Action list buffer. +local actlist = {} + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. 
+local function writeactions(out, name) + local nn = #actlist + if nn == 0 then nn = 1; actlist[0] = map_action.STOP end + out:write("static const unsigned int ", name, "[", nn, "] = {\n") + for i = 1,nn-1 do + assert(out:write("0x", tohex(actlist[i]), ",\n")) + end + assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) +end + +------------------------------------------------------------------------------ + +-- Add word to action list. +local function wputxw(n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, val, a, num) + local w = assert(map_action[action], "bad action name `"..action.."'") + wputxw(w * 0x10000 + (val or 0)) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + if #actlist == actargs[1] then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped word. +local function wputw(n) + if n <= 0x000fffff then waction("ESC") end + wputxw(n) +end + +-- Reserve position for word. +local function wpos() + local pos = #actlist+1 + actlist[pos] = "" + return pos +end + +-- Store word to reserved position. +local function wputpos(pos, n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + if n <= 0x000fffff then + insert(actlist, pos+1, n) + n = map_action.ESC * 0x10000 + end + actlist[pos] = n +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 20 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end + local n = next_global + if n > 2047 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=20,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. +local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=20,next_global-1 do + out:write(" ", prefix, t[i], ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=20,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = 0 +local map_extern_ = {} +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. 
+ local n = next_extern + if n > 2047 then werror("too many extern labels") end + next_extern = n + 1 + t[name] = n + map_extern_[n] = name + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + out:write("Extern labels:\n") + for i=0,next_extern-1 do + out:write(format(" %s\n", map_extern_[i])) + end + out:write("\n") +end + +-- Write extern label names. +local function writeexternnames(out, name) + out:write("static const char *const ", name, "[] = {\n") + for i=0,next_extern-1 do + out:write(" \"", map_extern_[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. + +-- Ext. register name -> int. name. +local map_archdef = { sp = "r13", lr = "r14", pc = "r15", } + +-- Int. register name -> ext. name. +local map_reg_rev = { r13 = "sp", r14 = "lr", r15 = "pc", } + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for Dt... macros). + +-- Reverse defines for registers. +function _M.revdef(s) + return map_reg_rev[s] or s +end + +local map_shift = { lsl = 0, lsr = 1, asr = 2, ror = 3, } + +local map_cond = { + eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7, + hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, + hs = 2, lo = 3, +} + +------------------------------------------------------------------------------ + +-- Template strings for ARM instructions. +local map_op = { + -- Basic data processing instructions. + and_3 = "e0000000DNPs", + eor_3 = "e0200000DNPs", + sub_3 = "e0400000DNPs", + rsb_3 = "e0600000DNPs", + add_3 = "e0800000DNPs", + adc_3 = "e0a00000DNPs", + sbc_3 = "e0c00000DNPs", + rsc_3 = "e0e00000DNPs", + tst_2 = "e1100000NP", + teq_2 = "e1300000NP", + cmp_2 = "e1500000NP", + cmn_2 = "e1700000NP", + orr_3 = "e1800000DNPs", + mov_2 = "e1a00000DPs", + bic_3 = "e1c00000DNPs", + mvn_2 = "e1e00000DPs", + + and_4 = "e0000000DNMps", + eor_4 = "e0200000DNMps", + sub_4 = "e0400000DNMps", + rsb_4 = "e0600000DNMps", + add_4 = "e0800000DNMps", + adc_4 = "e0a00000DNMps", + sbc_4 = "e0c00000DNMps", + rsc_4 = "e0e00000DNMps", + tst_3 = "e1100000NMp", + teq_3 = "e1300000NMp", + cmp_3 = "e1500000NMp", + cmn_3 = "e1700000NMp", + orr_4 = "e1800000DNMps", + mov_3 = "e1a00000DMps", + bic_4 = "e1c00000DNMps", + mvn_3 = "e1e00000DMps", + + lsl_3 = "e1a00000DMws", + lsr_3 = "e1a00020DMws", + asr_3 = "e1a00040DMws", + ror_3 = "e1a00060DMws", + rrx_2 = "e1a00060DMs", + + -- Multiply and multiply-accumulate. + mul_3 = "e0000090NMSs", + mla_4 = "e0200090NMSDs", + umaal_4 = "e0400090DNMSs", -- v6 + mls_4 = "e0600090DNMSs", -- v6T2 + umull_4 = "e0800090DNMSs", + umlal_4 = "e0a00090DNMSs", + smull_4 = "e0c00090DNMSs", + smlal_4 = "e0e00090DNMSs", + + -- Halfword multiply and multiply-accumulate. + smlabb_4 = "e1000080NMSD", -- v5TE + smlatb_4 = "e10000a0NMSD", -- v5TE + smlabt_4 = "e10000c0NMSD", -- v5TE + smlatt_4 = "e10000e0NMSD", -- v5TE + smlawb_4 = "e1200080NMSD", -- v5TE + smulwb_3 = "e12000a0NMS", -- v5TE + smlawt_4 = "e12000c0NMSD", -- v5TE + smulwt_3 = "e12000e0NMS", -- v5TE + smlalbb_4 = "e1400080NMSD", -- v5TE + smlaltb_4 = "e14000a0NMSD", -- v5TE + smlalbt_4 = "e14000c0NMSD", -- v5TE + smlaltt_4 = "e14000e0NMSD", -- v5TE + smulbb_3 = "e1600080NMS", -- v5TE + smultb_3 = "e16000a0NMS", -- v5TE + smulbt_3 = "e16000c0NMS", -- v5TE + smultt_3 = "e16000e0NMS", -- v5TE + + -- Miscellaneous data processing instructions. 
+ clz_2 = "e16f0f10DM", -- v5T + rev_2 = "e6bf0f30DM", -- v6 + rev16_2 = "e6bf0fb0DM", -- v6 + revsh_2 = "e6ff0fb0DM", -- v6 + sel_3 = "e6800fb0DNM", -- v6 + usad8_3 = "e780f010NMS", -- v6 + usada8_4 = "e7800010NMSD", -- v6 + rbit_2 = "e6ff0f30DM", -- v6T2 + movw_2 = "e3000000DW", -- v6T2 + movt_2 = "e3400000DW", -- v6T2 + -- Note: the X encodes width-1, not width. + sbfx_4 = "e7a00050DMvX", -- v6T2 + ubfx_4 = "e7e00050DMvX", -- v6T2 + -- Note: the X encodes the msb field, not the width. + bfc_3 = "e7c0001fDvX", -- v6T2 + bfi_4 = "e7c00010DMvX", -- v6T2 + + -- Packing and unpacking instructions. + pkhbt_3 = "e6800010DNM", pkhbt_4 = "e6800010DNMv", -- v6 + pkhtb_3 = "e6800050DNM", pkhtb_4 = "e6800050DNMv", -- v6 + sxtab_3 = "e6a00070DNM", sxtab_4 = "e6a00070DNMv", -- v6 + sxtab16_3 = "e6800070DNM", sxtab16_4 = "e6800070DNMv", -- v6 + sxtah_3 = "e6b00070DNM", sxtah_4 = "e6b00070DNMv", -- v6 + sxtb_2 = "e6af0070DM", sxtb_3 = "e6af0070DMv", -- v6 + sxtb16_2 = "e68f0070DM", sxtb16_3 = "e68f0070DMv", -- v6 + sxth_2 = "e6bf0070DM", sxth_3 = "e6bf0070DMv", -- v6 + uxtab_3 = "e6e00070DNM", uxtab_4 = "e6e00070DNMv", -- v6 + uxtab16_3 = "e6c00070DNM", uxtab16_4 = "e6c00070DNMv", -- v6 + uxtah_3 = "e6f00070DNM", uxtah_4 = "e6f00070DNMv", -- v6 + uxtb_2 = "e6ef0070DM", uxtb_3 = "e6ef0070DMv", -- v6 + uxtb16_2 = "e6cf0070DM", uxtb16_3 = "e6cf0070DMv", -- v6 + uxth_2 = "e6ff0070DM", uxth_3 = "e6ff0070DMv", -- v6 + + -- Saturating instructions. + qadd_3 = "e1000050DMN", -- v5TE + qsub_3 = "e1200050DMN", -- v5TE + qdadd_3 = "e1400050DMN", -- v5TE + qdsub_3 = "e1600050DMN", -- v5TE + -- Note: the X for ssat* encodes sat_imm-1, not sat_imm. + ssat_3 = "e6a00010DXM", ssat_4 = "e6a00010DXMp", -- v6 + usat_3 = "e6e00010DXM", usat_4 = "e6e00010DXMp", -- v6 + ssat16_3 = "e6a00f30DXM", -- v6 + usat16_3 = "e6e00f30DXM", -- v6 + + -- Parallel addition and subtraction. + sadd16_3 = "e6100f10DNM", -- v6 + sasx_3 = "e6100f30DNM", -- v6 + ssax_3 = "e6100f50DNM", -- v6 + ssub16_3 = "e6100f70DNM", -- v6 + sadd8_3 = "e6100f90DNM", -- v6 + ssub8_3 = "e6100ff0DNM", -- v6 + qadd16_3 = "e6200f10DNM", -- v6 + qasx_3 = "e6200f30DNM", -- v6 + qsax_3 = "e6200f50DNM", -- v6 + qsub16_3 = "e6200f70DNM", -- v6 + qadd8_3 = "e6200f90DNM", -- v6 + qsub8_3 = "e6200ff0DNM", -- v6 + shadd16_3 = "e6300f10DNM", -- v6 + shasx_3 = "e6300f30DNM", -- v6 + shsax_3 = "e6300f50DNM", -- v6 + shsub16_3 = "e6300f70DNM", -- v6 + shadd8_3 = "e6300f90DNM", -- v6 + shsub8_3 = "e6300ff0DNM", -- v6 + uadd16_3 = "e6500f10DNM", -- v6 + uasx_3 = "e6500f30DNM", -- v6 + usax_3 = "e6500f50DNM", -- v6 + usub16_3 = "e6500f70DNM", -- v6 + uadd8_3 = "e6500f90DNM", -- v6 + usub8_3 = "e6500ff0DNM", -- v6 + uqadd16_3 = "e6600f10DNM", -- v6 + uqasx_3 = "e6600f30DNM", -- v6 + uqsax_3 = "e6600f50DNM", -- v6 + uqsub16_3 = "e6600f70DNM", -- v6 + uqadd8_3 = "e6600f90DNM", -- v6 + uqsub8_3 = "e6600ff0DNM", -- v6 + uhadd16_3 = "e6700f10DNM", -- v6 + uhasx_3 = "e6700f30DNM", -- v6 + uhsax_3 = "e6700f50DNM", -- v6 + uhsub16_3 = "e6700f70DNM", -- v6 + uhadd8_3 = "e6700f90DNM", -- v6 + uhsub8_3 = "e6700ff0DNM", -- v6 + + -- Load/store instructions. 
+ str_2 = "e4000000DL", str_3 = "e4000000DL", str_4 = "e4000000DL", + strb_2 = "e4400000DL", strb_3 = "e4400000DL", strb_4 = "e4400000DL", + ldr_2 = "e4100000DL", ldr_3 = "e4100000DL", ldr_4 = "e4100000DL", + ldrb_2 = "e4500000DL", ldrb_3 = "e4500000DL", ldrb_4 = "e4500000DL", + strh_2 = "e00000b0DL", strh_3 = "e00000b0DL", + ldrh_2 = "e01000b0DL", ldrh_3 = "e01000b0DL", + ldrd_2 = "e00000d0DL", ldrd_3 = "e00000d0DL", -- v5TE + ldrsb_2 = "e01000d0DL", ldrsb_3 = "e01000d0DL", + strd_2 = "e00000f0DL", strd_3 = "e00000f0DL", -- v5TE + ldrsh_2 = "e01000f0DL", ldrsh_3 = "e01000f0DL", + + ldm_2 = "e8900000oR", ldmia_2 = "e8900000oR", ldmfd_2 = "e8900000oR", + ldmda_2 = "e8100000oR", ldmfa_2 = "e8100000oR", + ldmdb_2 = "e9100000oR", ldmea_2 = "e9100000oR", + ldmib_2 = "e9900000oR", ldmed_2 = "e9900000oR", + stm_2 = "e8800000oR", stmia_2 = "e8800000oR", stmfd_2 = "e8800000oR", + stmda_2 = "e8000000oR", stmfa_2 = "e8000000oR", + stmdb_2 = "e9000000oR", stmea_2 = "e9000000oR", + stmib_2 = "e9800000oR", stmed_2 = "e9800000oR", + pop_1 = "e8bd0000R", push_1 = "e92d0000R", + + -- Branch instructions. + b_1 = "ea000000B", + bl_1 = "eb000000B", + blx_1 = "e12fff30C", + bx_1 = "e12fff10M", + + -- Miscellaneous instructions. + nop_0 = "e1a00000", + mrs_1 = "e10f0000D", + bkpt_1 = "e1200070K", -- v5T + svc_1 = "ef000000T", swi_1 = "ef000000T", + ud_0 = "e7f001f0", + + -- VFP instructions. + ["vadd.f32_3"] = "ee300a00dnm", + ["vadd.f64_3"] = "ee300b00Gdnm", + ["vsub.f32_3"] = "ee300a40dnm", + ["vsub.f64_3"] = "ee300b40Gdnm", + ["vmul.f32_3"] = "ee200a00dnm", + ["vmul.f64_3"] = "ee200b00Gdnm", + ["vnmul.f32_3"] = "ee200a40dnm", + ["vnmul.f64_3"] = "ee200b40Gdnm", + ["vmla.f32_3"] = "ee000a00dnm", + ["vmla.f64_3"] = "ee000b00Gdnm", + ["vmls.f32_3"] = "ee000a40dnm", + ["vmls.f64_3"] = "ee000b40Gdnm", + ["vnmla.f32_3"] = "ee100a40dnm", + ["vnmla.f64_3"] = "ee100b40Gdnm", + ["vnmls.f32_3"] = "ee100a00dnm", + ["vnmls.f64_3"] = "ee100b00Gdnm", + ["vdiv.f32_3"] = "ee800a00dnm", + ["vdiv.f64_3"] = "ee800b00Gdnm", + + ["vabs.f32_2"] = "eeb00ac0dm", + ["vabs.f64_2"] = "eeb00bc0Gdm", + ["vneg.f32_2"] = "eeb10a40dm", + ["vneg.f64_2"] = "eeb10b40Gdm", + ["vsqrt.f32_2"] = "eeb10ac0dm", + ["vsqrt.f64_2"] = "eeb10bc0Gdm", + ["vcmp.f32_2"] = "eeb40a40dm", + ["vcmp.f64_2"] = "eeb40b40Gdm", + ["vcmpe.f32_2"] = "eeb40ac0dm", + ["vcmpe.f64_2"] = "eeb40bc0Gdm", + ["vcmpz.f32_1"] = "eeb50a40d", + ["vcmpz.f64_1"] = "eeb50b40Gd", + ["vcmpze.f32_1"] = "eeb50ac0d", + ["vcmpze.f64_1"] = "eeb50bc0Gd", + + vldr_2 = "ed100a00dl|ed100b00Gdl", + vstr_2 = "ed000a00dl|ed000b00Gdl", + vldm_2 = "ec900a00or", + vldmia_2 = "ec900a00or", + vldmdb_2 = "ed100a00or", + vpop_1 = "ecbd0a00r", + vstm_2 = "ec800a00or", + vstmia_2 = "ec800a00or", + vstmdb_2 = "ed000a00or", + vpush_1 = "ed2d0a00r", + + ["vmov.f32_2"] = "eeb00a40dm|eeb00a00dY", -- #imm is VFPv3 only + ["vmov.f64_2"] = "eeb00b40Gdm|eeb00b00GdY", -- #imm is VFPv3 only + vmov_2 = "ee100a10Dn|ee000a10nD", + vmov_3 = "ec500a10DNm|ec400a10mDN|ec500b10GDNm|ec400b10GmDN", + + vmrs_0 = "eef1fa10", + vmrs_1 = "eef10a10D", + vmsr_1 = "eee10a10D", + + ["vcvt.s32.f32_2"] = "eebd0ac0dm", + ["vcvt.s32.f64_2"] = "eebd0bc0dGm", + ["vcvt.u32.f32_2"] = "eebc0ac0dm", + ["vcvt.u32.f64_2"] = "eebc0bc0dGm", + ["vcvtr.s32.f32_2"] = "eebd0a40dm", + ["vcvtr.s32.f64_2"] = "eebd0b40dGm", + ["vcvtr.u32.f32_2"] = "eebc0a40dm", + ["vcvtr.u32.f64_2"] = "eebc0b40dGm", + ["vcvt.f32.s32_2"] = "eeb80ac0dm", + ["vcvt.f64.s32_2"] = "eeb80bc0GdFm", + ["vcvt.f32.u32_2"] = "eeb80a40dm", + ["vcvt.f64.u32_2"] = "eeb80b40GdFm", + 
["vcvt.f32.f64_2"] = "eeb70bc0dGm", + ["vcvt.f64.f32_2"] = "eeb70ac0GdFm", + + -- VFPv4 only: + ["vfma.f32_3"] = "eea00a00dnm", + ["vfma.f64_3"] = "eea00b00Gdnm", + ["vfms.f32_3"] = "eea00a40dnm", + ["vfms.f64_3"] = "eea00b40Gdnm", + ["vfnma.f32_3"] = "ee900a40dnm", + ["vfnma.f64_3"] = "ee900b40Gdnm", + ["vfnms.f32_3"] = "ee900a00dnm", + ["vfnms.f64_3"] = "ee900b00Gdnm", + + -- NYI: Advanced SIMD instructions. + + -- NYI: I have no need for these instructions right now: + -- swp, swpb, strex, ldrex, strexd, ldrexd, strexb, ldrexb, strexh, ldrexh + -- msr, nopv6, yield, wfe, wfi, sev, dbg, bxj, smc, srs, rfe + -- cps, setend, pli, pld, pldw, clrex, dsb, dmb, isb + -- stc, ldc, mcr, mcr2, mrc, mrc2, mcrr, mcrr2, mrrc, mrrc2, cdp, cdp2 +} + +-- Add mnemonics for "s" variants. +do + local t = {} + for k,v in pairs(map_op) do + if sub(v, -1) == "s" then + local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2) + t[sub(k, 1, -3).."s"..sub(k, -2)] = v2 + end + end + for k,v in pairs(t) do + map_op[k] = v + end +end + +------------------------------------------------------------------------------ + +local function parse_gpr(expr) + local tname, ovreg = match(expr, "^([%w_]+):(r1?[0-9])$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + if not reg then + werror("type `"..(tname or expr).."' needs a register override") + end + expr = reg + end + local r = match(expr, "^r(1?[0-9])$") + if r then + r = tonumber(r) + if r <= 15 then return r, tp end + end + werror("bad register name `"..expr.."'") +end + +local function parse_gpr_pm(expr) + local pm, expr2 = match(expr, "^([+-]?)(.*)$") + return parse_gpr(expr2), (pm == "-") +end + +local function parse_vr(expr, tp) + local t, r = match(expr, "^([sd])([0-9]+)$") + if t == tp then + r = tonumber(r) + if r <= 31 then + if t == "s" then return shr(r, 1), band(r, 1) end + return band(r, 15), shr(r, 4) + end + end + werror("bad register name `"..expr.."'") +end + +local function parse_reglist(reglist) + reglist = match(reglist, "^{%s*([^}]*)}$") + if not reglist then werror("register list expected") end + local rr = 0 + for p in gmatch(reglist..",", "%s*([^,]*),") do + local rbit = shl(1, parse_gpr(gsub(p, "%s+$", ""))) + if band(rr, rbit) ~= 0 then + werror("duplicate register `"..p.."'") + end + rr = rr + rbit + end + return rr +end + +local function parse_vrlist(reglist) + local ta, ra, tb, rb = match(reglist, + "^{%s*([sd])([0-9]+)%s*%-%s*([sd])([0-9]+)%s*}$") + ra, rb = tonumber(ra), tonumber(rb) + if ta and ta == tb and ra and rb and ra <= 31 and rb <= 31 and ra <= rb then + local nr = rb+1 - ra + if ta == "s" then + return shl(shr(ra,1),12)+shl(band(ra,1),22) + nr + else + return shl(band(ra,15),12)+shl(shr(ra,4),22) + nr*2 + 0x100 + end + end + werror("register list expected") +end + +local function parse_imm(imm, bits, shift, scale, signed) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = tonumber(imm) + if n then + local m = sar(n, scale) + if shl(m, scale) == n then + if signed then + local s = sar(m, bits-1) + if s == 0 then return shl(m, shift) + elseif s == -1 then return shl(m + shl(1, bits), shift) end + else + if sar(m, bits) == 0 then return shl(m, shift) end + end + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) + return 0 + end +end + +local function parse_imm12(imm) + local n = tonumber(imm) + if n then + local m = band(n) + for i=0,-15,-1 do + if shr(m, 8) == 0 then 
return m + shl(band(i, 15), 8) end + m = ror(m, 2) + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM12", 0, imm) + return 0 + end +end + +local function parse_imm16(imm) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = tonumber(imm) + if n then + if shr(n, 16) == 0 then return band(n, 0x0fff) + shl(band(n, 0xf000), 4) end + werror("out of range immediate `"..imm.."'") + else + waction("IMM16", 32*16, imm) + return 0 + end +end + +local function parse_imm_load(imm, ext) + local n = tonumber(imm) + if n then + if ext then + if n >= -255 and n <= 255 then + local up = 0x00800000 + if n < 0 then n = -n; up = 0 end + return shl(band(n, 0xf0), 4) + band(n, 0x0f) + up + end + else + if n >= -4095 and n <= 4095 then + if n >= 0 then return n+0x00800000 end + return -n + end + end + werror("out of range immediate `"..imm.."'") + else + waction(ext and "IMML8" or "IMML12", 32768 + shl(ext and 8 or 12, 5), imm) + return 0 + end +end + +local function parse_shift(shift, gprok) + if shift == "rrx" then + return 3 * 32 + else + local s, s2 = match(shift, "^(%S+)%s*(.*)$") + s = map_shift[s] + if not s then werror("expected shift operand") end + if sub(s2, 1, 1) == "#" then + return parse_imm(s2, 5, 7, 0, false) + shl(s, 5) + else + if not gprok then werror("expected immediate shift operand") end + return shl(parse_gpr(s2), 8) + shl(s, 5) + 16 + end + end +end + +local function parse_label(label, def) + local prefix = sub(label, 1, 2) + -- =>label (pc label reference) + if prefix == "=>" then + return "PC", 0, sub(label, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "LG", map_global[sub(label, 3)] + end + if def then + -- [1-9] (local label definition) + if match(label, "^[1-9]$") then + return "LG", 10+tonumber(label) + end + else + -- [<>][1-9] (local label reference) + local dir, lnum = match(label, "^([<>])([1-9])$") + if dir then -- Fwd: 1-9, Bkwd: 11-19. + return "LG", lnum + (dir == ">" and 0 or 10) + end + -- extern label (extern label reference) + local extname = match(label, "^extern%s+(%S+)$") + if extname then + return "EXT", map_extern[extname] + end + end + werror("bad label `"..label.."'") +end + +local function parse_load(params, nparams, n, op) + local oplo = band(op, 255) + local ext, ldrd = (oplo ~= 0), (oplo == 208) + local d + if (ldrd or oplo == 240) then + d = band(shr(op, 12), 15) + if band(d, 1) ~= 0 then werror("odd destination register") end + end + local pn = params[n] + local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") + local p2 = params[n+1] + if not p1 then + if not p2 then + if match(pn, "^[<>=%-]") or match(pn, "^extern%s+") then + local mode, n, s = parse_label(pn, false) + waction("REL_"..mode, n + (ext and 0x1800 or 0x0800), s, 1) + return op + 15 * 65536 + 0x01000000 + (ext and 0x00400000 or 0) + end + local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local d, tp = parse_gpr(reg) + if tp then + waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12), + format(tp.ctypefmt, tailr)) + return op + shl(d, 16) + 0x01000000 + (ext and 0x00400000 or 0) + end + end + end + werror("expected address operand") + end + if wb == "!" then op = op + 0x00200000 end + if p2 then + if wb == "!" 
then werror("bad use of '!'") end + local p3 = params[n+2] + op = op + shl(parse_gpr(p1), 16) + local imm = match(p2, "^#(.*)$") + if imm then + local m = parse_imm_load(imm, ext) + if p3 then werror("too many parameters") end + op = op + m + (ext and 0x00400000 or 0) + else + local m, neg = parse_gpr_pm(p2) + if ldrd and (m == d or m-1 == d) then werror("register conflict") end + op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000) + if p3 then op = op + parse_shift(p3) end + end + else + local p1a, p2 = match(p1, "^([^,%s]*)%s*(.*)$") + op = op + shl(parse_gpr(p1a), 16) + 0x01000000 + if p2 ~= "" then + local imm = match(p2, "^,%s*#(.*)$") + if imm then + local m = parse_imm_load(imm, ext) + op = op + m + (ext and 0x00400000 or 0) + else + local p2a, p3 = match(p2, "^,%s*([^,%s]*)%s*,?%s*(.*)$") + local m, neg = parse_gpr_pm(p2a) + if ldrd and (m == d or m-1 == d) then werror("register conflict") end + op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000) + if p3 ~= "" then + if ext then werror("too many parameters") end + op = op + parse_shift(p3) + end + end + else + if wb == "!" then werror("bad use of '!'") end + op = op + (ext and 0x00c00000 or 0x00800000) + end + end + return op +end + +local function parse_vload(q) + local reg, imm = match(q, "^%[%s*([^,%s]*)%s*(.*)%]$") + if reg then + local d = shl(parse_gpr(reg), 16) + if imm == "" then return d end + imm = match(imm, "^,%s*#(.*)$") + if imm then + local n = tonumber(imm) + if n then + if n >= -1020 and n <= 1020 and n%4 == 0 then + return d + (n >= 0 and n/4+0x00800000 or -n/4) + end + werror("out of range immediate `"..imm.."'") + else + waction("IMMV8", 32768 + 32*8, imm) + return d + end + end + else + if match(q, "^[<>=%-]") or match(q, "^extern%s+") then + local mode, n, s = parse_label(q, false) + waction("REL_"..mode, n + 0x2800, s, 1) + return 15 * 65536 + end + local reg, tailr = match(q, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local d, tp = parse_gpr(reg) + if tp then + waction("IMMV8", 32768 + 32*8, format(tp.ctypefmt, tailr)) + return shl(d, 16) + end + end + end + werror("expected address operand") +end + +------------------------------------------------------------------------------ + +-- Handle opcodes defined with template strings. +local function parse_template(params, template, nparams, pos) + local op = tonumber(sub(template, 1, 8), 16) + local n = 1 + local vr = "s" + + -- Process each character. + for p in gmatch(sub(template, 9), ".") do + local q = params[n] + if p == "D" then + op = op + shl(parse_gpr(q), 12); n = n + 1 + elseif p == "N" then + op = op + shl(parse_gpr(q), 16); n = n + 1 + elseif p == "S" then + op = op + shl(parse_gpr(q), 8); n = n + 1 + elseif p == "M" then + op = op + parse_gpr(q); n = n + 1 + elseif p == "d" then + local r,h = parse_vr(q, vr); op = op+shl(r,12)+shl(h,22); n = n + 1 + elseif p == "n" then + local r,h = parse_vr(q, vr); op = op+shl(r,16)+shl(h,7); n = n + 1 + elseif p == "m" then + local r,h = parse_vr(q, vr); op = op+r+shl(h,5); n = n + 1 + elseif p == "P" then + local imm = match(q, "^#(.*)$") + if imm then + op = op + parse_imm12(imm) + 0x02000000 + else + op = op + parse_gpr(q) + end + n = n + 1 + elseif p == "p" then + op = op + parse_shift(q, true); n = n + 1 + elseif p == "L" then + op = parse_load(params, nparams, n, op) + elseif p == "l" then + op = op + parse_vload(q) + elseif p == "B" then + local mode, n, s = parse_label(q, false) + waction("REL_"..mode, n, s, 1) + elseif p == "C" then -- blx gpr vs. blx label. 
+ if match(q, "^([%w_]+):(r1?[0-9])$") or match(q, "^r(1?[0-9])$") then + op = op + parse_gpr(q) + else + if op < 0xe0000000 then werror("unconditional instruction") end + local mode, n, s = parse_label(q, false) + waction("REL_"..mode, n, s, 1) + op = 0xfa000000 + end + elseif p == "F" then + vr = "s" + elseif p == "G" then + vr = "d" + elseif p == "o" then + local r, wb = match(q, "^([^!]*)(!?)$") + op = op + shl(parse_gpr(r), 16) + (wb == "!" and 0x00200000 or 0) + n = n + 1 + elseif p == "R" then + op = op + parse_reglist(q); n = n + 1 + elseif p == "r" then + op = op + parse_vrlist(q); n = n + 1 + elseif p == "W" then + op = op + parse_imm16(q); n = n + 1 + elseif p == "v" then + op = op + parse_imm(q, 5, 7, 0, false); n = n + 1 + elseif p == "w" then + local imm = match(q, "^#(.*)$") + if imm then + op = op + parse_imm(q, 5, 7, 0, false); n = n + 1 + else + op = op + shl(parse_gpr(q), 8) + 16 + end + elseif p == "X" then + op = op + parse_imm(q, 5, 16, 0, false); n = n + 1 + elseif p == "Y" then + local imm = tonumber(match(q, "^#(.*)$")); n = n + 1 + if not imm or shr(imm, 8) ~= 0 then + werror("bad immediate operand") + end + op = op + shl(band(imm, 0xf0), 12) + band(imm, 0x0f) + elseif p == "K" then + local imm = tonumber(match(q, "^#(.*)$")); n = n + 1 + if not imm or shr(imm, 16) ~= 0 then + werror("bad immediate operand") + end + op = op + shl(band(imm, 0xfff0), 4) + band(imm, 0x000f) + elseif p == "T" then + op = op + parse_imm(q, 24, 0, 0, false); n = n + 1 + elseif p == "s" then + -- Ignored. + else + assert(false) + end + end + wputpos(pos, op) +end + +map_op[".template__"] = function(params, template, nparams) + if not params then return template:gsub("%x%x%x%x%x%x%x%x", "") end + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 3 positions. + if secpos+3 > maxsecpos then wflush() end + local pos = wpos() + local lpos, apos, spos = #actlist, #actargs, secpos + + local ok, err + for t in gmatch(template, "[^|]+") do + ok, err = pcall(parse_template, params, t, nparams, pos) + if ok then return end + secpos = spos + actlist[lpos+1] = nil + actlist[lpos+2] = nil + actlist[lpos+3] = nil + actargs[apos+1] = nil + actargs[apos+2] = nil + actargs[apos+3] = nil + end + error(err, 0) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. +map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. 
+ wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_1"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr" end + if secpos+1 > maxsecpos then wflush() end + local mode, n, s = parse_label(params[1], true) + if mode == "EXT" then werror("bad label definition") end + waction("LABEL_"..mode, n, s, 1) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +map_op[".long_*"] = function(params) + if not params then return "imm..." end + for _,p in ipairs(params) do + local n = tonumber(p) + if not n then werror("bad immediate `"..p.."'") end + if n < 0 then n = n + 2^32 end + wputw(n) + if secpos+2 > maxsecpos then wflush() end + end +end + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. + local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION", num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. 
+function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = function(t, k) + local v = map_coreop[k] + if v then return v end + local k1, cc, k2 = match(k, "^(.-)(..)([._].*)$") + local cv = map_cond[cc] + if cv then + local v = rawget(t, k1..k2) + if type(v) == "string" then + local scv = format("%x", cv) + return gsub(scv..sub(v, 2), "|e", "|"..scv) + end + end + end }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.h b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.h new file mode 100644 index 00000000000..d912e61dda7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.h @@ -0,0 +1,518 @@ +/* +** DynASM ARM64 encoding engine. +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** Released under the MIT license. See dynasm.lua for full copyright notice. +*/ + +#include +#include +#include +#include + +#define DASM_ARCH "arm64" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. */ +enum { + DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, + /* The following actions need a buffer position. */ + DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, + /* The following actions also have an argument. */ + DASM_REL_PC, DASM_LABEL_PC, + DASM_IMM, DASM_IMM6, DASM_IMM12, DASM_IMM13W, DASM_IMM13X, DASM_IMML, + DASM__MAX +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_REL 0x15000000 +#define DASM_S_UNDEF_LG 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned int *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. 
*/ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. 
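+**
+** Editor's note (illustrative, not upstream text): a minimal sketch of the
+** intended driver sequence, assuming the default Dst_DECL convention from
+** dasm_proto.h (dasm_State **Dst, here "&d"); `actionlist`, `globals`,
+** `MAXGLOBAL` and `buf` are placeholder names, and DASM_MAXSECTION is the
+** macro normally emitted by the preprocessor's .section directive:
+**
+**   dasm_State *d;  size_t sz;
+**   dasm_init(&d, DASM_MAXSECTION);            /* allocate state          */
+**   dasm_setupglobal(&d, globals, MAXGLOBAL);  /* before dasm_setup()     */
+**   dasm_setup(&d, actionlist);                /* start a new fragment    */
+**   ... dasm_put() calls emitted by the preprocessed .dasc file ...
+**   dasm_link(&d, &sz);                        /* pass 2: total size      */
+**   dasm_encode(&d, buf);                      /* pass 3: buf holds sz bytes */
+**   dasm_free(&d);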
*/ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +static int dasm_imm12(unsigned int n) +{ + if ((n >> 12) == 0) + return n; + else if ((n & 0xff000fff) == 0) + return (n >> 12) | 0x1000; + else + return -1; +} + +static int dasm_ffs(unsigned long long x) +{ + int n = -1; + while (x) { x >>= 1; n++; } + return n; +} + +static int dasm_imm13(int lo, int hi) +{ + int inv = 0, w = 64, s = 0xfff, xa, xb; + unsigned long long n = (((unsigned long long)hi) << 32) | (unsigned int)lo; + unsigned long long m = 1ULL, a, b, c; + if (n & 1) { n = ~n; inv = 1; } + a = n & -n; b = (n+a)&-(n+a); c = (n+a-b)&-(n+a-b); + xa = dasm_ffs(a); xb = dasm_ffs(b); + if (c) { + w = dasm_ffs(c) - xa; + if (w == 32) m = 0x0000000100000001UL; + else if (w == 16) m = 0x0001000100010001UL; + else if (w == 8) m = 0x0101010101010101UL; + else if (w == 4) m = 0x1111111111111111UL; + else if (w == 2) m = 0x5555555555555555UL; + else return -1; + s = (-2*w & 0x3f) - 1; + } else if (!a) { + return -1; + } else if (xb == -1) { + xb = 64; + } + if ((b-a) * m != n) return -1; + if (inv) { + return ((w - xb) << 6) | (s+w+xa-xb); + } else { + return ((w - xa) << 6) | (s+xb-xa); + } + return -1; +} + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) +{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + if (action >= DASM__MAX) { + ofs += 4; + } else { + int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; + switch (action) { + case DASM_STOP: goto stop; + case DASM_SECTION: + n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); + D->section = &D->sections[n]; goto stop; + case DASM_ESC: p++; ofs += 4; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; + case DASM_REL_LG: + n = (ins & 2047) - 10; pl = D->lglabels + n; + /* Bkwd rel or global. */ + if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } + pl += 10; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. 
*/ + *pl = pos; + } + pos++; + break; + case DASM_LABEL_LG: + pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; + } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_IMM: + CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); + n >>= ((ins>>10)&31); +#ifdef DASM_CHECKS + if ((ins & 0x8000)) + CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); + else + CK((n>>((ins>>5)&31)) == 0, RANGE_I); +#endif + b[pos++] = n; + break; + case DASM_IMM6: + CK((n >> 6) == 0, RANGE_I); + b[pos++] = n; + break; + case DASM_IMM12: + CK(dasm_imm12((unsigned int)n) != -1, RANGE_I); + b[pos++] = n; + break; + case DASM_IMM13W: + CK(dasm_imm13(n, n) != -1, RANGE_I); + b[pos++] = n; + break; + case DASM_IMM13X: { + int m = va_arg(ap, int); + CK(dasm_imm13(n, m) != -1, RANGE_I); + b[pos++] = n; + b[pos++] = m; + break; + } + case DASM_IMML: { +#ifdef DASM_CHECKS + int scale = (p[-2] >> 30); + CK((!(n & ((1<>scale) < 4096) || + (unsigned int)(n+256) < 512, RANGE_I); +#endif + b[pos++] = n; + break; + } + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: p++; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; + case DASM_REL_LG: case DASM_REL_PC: pos++; break; + case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; + case DASM_IMM: case DASM_IMM6: case DASM_IMM12: case DASM_IMM13W: + case DASM_IMML: pos++; break; + case DASM_IMM13X: pos += 2; break; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) +#else +#define CK(x, st) ((void)0) +#endif + +/* Pass 3: Encode sections. */ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + char *base = (char *)buffer; + unsigned int *cp = (unsigned int *)buffer; + int secnum; + + /* Encode all code sections. No support for data sections (yet). 
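+  ** Editor's note, derived from the patchrel cases below: B/BL take a 26-bit
+  ** word offset (about +-128MB), B.cond/CBZ/CBNZ/LDR-literal a 19-bit one
+  ** (+-1MB), TBZ/TBNZ a 14-bit one (+-32KB); ADR stores the low 21 bits of
+  ** the byte offset and ADRP the 4KB-page offset.  Anything out of range
+  ** fails with DASM_S_RANGE_REL.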
*/ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: *cp++ = *p++; break; + case DASM_REL_EXT: + n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048)); + goto patchrel; + case DASM_ALIGN: + ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000; + break; + case DASM_REL_LG: + CK(n >= 0, UNDEF_LG); + case DASM_REL_PC: + CK(n >= 0, UNDEF_PC); + n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) + 4; + patchrel: + if (!(ins & 0xf800)) { /* B, BL */ + CK((n & 3) == 0 && ((n+0x08000000) >> 28) == 0, RANGE_REL); + cp[-1] |= ((n >> 2) & 0x03ffffff); + } else if ((ins & 0x800)) { /* B.cond, CBZ, CBNZ, LDR* literal */ + CK((n & 3) == 0 && ((n+0x00100000) >> 21) == 0, RANGE_REL); + cp[-1] |= ((n << 3) & 0x00ffffe0); + } else if ((ins & 0x3000) == 0x2000) { /* ADR */ + CK(((n+0x00100000) >> 21) == 0, RANGE_REL); + cp[-1] |= ((n << 3) & 0x00ffffe0) | ((n & 3) << 29); + } else if ((ins & 0x3000) == 0x3000) { /* ADRP */ + cp[-1] |= ((n >> 9) & 0x00ffffe0) | (((n >> 12) & 3) << 29); + } else if ((ins & 0x1000)) { /* TBZ, TBNZ */ + CK((n & 3) == 0 && ((n+0x00008000) >> 16) == 0, RANGE_REL); + cp[-1] |= ((n << 3) & 0x0007ffe0); + } + break; + case DASM_LABEL_LG: + ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); + break; + case DASM_LABEL_PC: break; + case DASM_IMM: + cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31); + break; + case DASM_IMM6: + cp[-1] |= ((n&31) << 19) | ((n&32) << 26); + break; + case DASM_IMM12: + cp[-1] |= (dasm_imm12((unsigned int)n) << 10); + break; + case DASM_IMM13W: + cp[-1] |= (dasm_imm13(n, n) << 10); + break; + case DASM_IMM13X: + cp[-1] |= (dasm_imm13(n, *b++) << 10); + break; + case DASM_IMML: { + int scale = (p[-2] >> 30); + cp[-1] |= (!(n & ((1<>scale) < 4096) ? + ((n << (10-scale)) | 0x01000000) : ((n & 511) << 12); + break; + } + default: *cp++ = ins; break; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != (char *)cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} +#undef CK + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. 
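+** It checks that local labels 1-9 were bound and, when secmatch >= 0, that
+** the active section is the expected one.  Editor's sketch of the intended
+** use (DASM_SECTION_CODE is an assumed name for a macro normally emitted by
+** the .section directive):
+**
+**   if (dasm_checkstep(&d, DASM_SECTION_CODE) != DASM_S_OK)
+**     goto fail;   /* assumed error path */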
*/ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(D->section-D->sections); + return D->status; +} +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.lua new file mode 100644 index 00000000000..9766e475b0f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_arm64.lua @@ -0,0 +1,1166 @@ +------------------------------------------------------------------------------ +-- DynASM ARM64 module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +-- Module information: +local _info = { + arch = "arm", + description = "DynASM ARM64 module", + version = "1.3.0", + vernum = 10300, + release = "2014-12-03", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. +local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, setmetatable, rawget = assert, setmetatable, rawget +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub +local concat, sort, insert = table.concat, table.sort, table.insert +local bit = bit or require("bit") +local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift +local ror, tohex = bit.ror, bit.tohex + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + "STOP", "SECTION", "ESC", "REL_EXT", + "ALIGN", "REL_LG", "LABEL_LG", + "REL_PC", "LABEL_PC", "IMM", "IMM6", "IMM12", "IMM13W", "IMM13X", "IMML", +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number. +local map_action = {} +for n,name in ipairs(action_names) do + map_action[name] = n-1 +end + +-- Action list buffer. +local actlist = {} + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. 
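+-- Editor's note (illustrative): for ".actionlist actions" the emitted C is
+-- of the form
+--   static const unsigned int actions[N] = {
+--   0xd503201f,
+--   0x00000000
+--   };
+-- where 0xd503201f is a raw instruction word (nop) and 0x00000000 is an
+-- action word (action number in the upper 16 bits, here STOP).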
+local function writeactions(out, name) + local nn = #actlist + if nn == 0 then nn = 1; actlist[0] = map_action.STOP end + out:write("static const unsigned int ", name, "[", nn, "] = {\n") + for i = 1,nn-1 do + assert(out:write("0x", tohex(actlist[i]), ",\n")) + end + assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) +end + +------------------------------------------------------------------------------ + +-- Add word to action list. +local function wputxw(n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, val, a, num) + local w = assert(map_action[action], "bad action name `"..action.."'") + wputxw(w * 0x10000 + (val or 0)) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + if #actlist == actargs[1] then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped word. +local function wputw(n) + if n <= 0x000fffff then waction("ESC") end + wputxw(n) +end + +-- Reserve position for word. +local function wpos() + local pos = #actlist+1 + actlist[pos] = "" + return pos +end + +-- Store word to reserved position. +local function wputpos(pos, n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + if n <= 0x000fffff then + insert(actlist, pos+1, n) + n = map_action.ESC * 0x10000 + end + actlist[pos] = n +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 20 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end + local n = next_global + if n > 2047 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=20,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. +local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=20,next_global-1 do + out:write(" ", prefix, t[i], ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=20,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = 0 +local map_extern_ = {} +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. 
+ local n = next_extern + if n > 2047 then werror("too many extern labels") end + next_extern = n + 1 + t[name] = n + map_extern_[n] = name + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + out:write("Extern labels:\n") + for i=0,next_extern-1 do + out:write(format(" %s\n", map_extern_[i])) + end + out:write("\n") +end + +-- Write extern label names. +local function writeexternnames(out, name) + out:write("static const char *const ", name, "[] = {\n") + for i=0,next_extern-1 do + out:write(" \"", map_extern_[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. + +-- Ext. register name -> int. name. +local map_archdef = { xzr = "@x31", wzr = "@w31", lr = "x30", } + +-- Int. register name -> ext. name. +local map_reg_rev = { ["@x31"] = "xzr", ["@w31"] = "wzr", x30 = "lr", } + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for Dt... macros). + +-- Reverse defines for registers. +function _M.revdef(s) + return map_reg_rev[s] or s +end + +local map_shift = { lsl = 0, lsr = 1, asr = 2, } + +local map_extend = { + uxtb = 0, uxth = 1, uxtw = 2, uxtx = 3, + sxtb = 4, sxth = 5, sxtw = 6, sxtx = 7, +} + +local map_cond = { + eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7, + hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, + hs = 2, lo = 3, +} + +------------------------------------------------------------------------------ + +local parse_reg_type + +local function parse_reg(expr) + if not expr then werror("expected register name") end + local tname, ovreg = match(expr, "^([%w_]+):(@?%l%d+)$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + if not reg then + werror("type `"..(tname or expr).."' needs a register override") + end + expr = reg + end + local ok31, rt, r = match(expr, "^(@?)([xwqdshb])([123]?[0-9])$") + if r then + r = tonumber(r) + if r <= 30 or (r == 31 and ok31 ~= "" or (rt ~= "w" and rt ~= "x")) then + if not parse_reg_type then + parse_reg_type = rt + elseif parse_reg_type ~= rt then + werror("register size mismatch") + end + return r, tp + end + end + werror("bad register name `"..expr.."'") +end + +local function parse_reg_base(expr) + if expr == "sp" then return 0x3e0 end + local base, tp = parse_reg(expr) + if parse_reg_type ~= "x" then werror("bad register type") end + parse_reg_type = false + return shl(base, 5), tp +end + +local parse_ctx = {} + +local loadenv = setfenv and function(s) + local code = loadstring(s, "") + if code then setfenv(code, parse_ctx) end + return code +end or function(s) + return load(s, "", nil, parse_ctx) +end + +-- Try to parse simple arithmetic, too, since some basic ops are aliases. 
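+-- Editor's note: op_alias() rewrites operands into expressions such as
+-- "#(8)+(4)-1" or "#-(12)%32", so plain tonumber() is not enough; the text
+-- after "#" is evaluated in the sandboxed parse_ctx environment instead.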
+local function parse_number(n) + local x = tonumber(n) + if x then return x end + local code = loadenv("return "..n) + if code then + local ok, y = pcall(code) + if ok then return y end + end + return nil +end + +local function parse_imm(imm, bits, shift, scale, signed) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = parse_number(imm) + if n then + local m = sar(n, scale) + if shl(m, scale) == n then + if signed then + local s = sar(m, bits-1) + if s == 0 then return shl(m, shift) + elseif s == -1 then return shl(m + shl(1, bits), shift) end + else + if sar(m, bits) == 0 then return shl(m, shift) end + end + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) + return 0 + end +end + +local function parse_imm12(imm) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = parse_number(imm) + if n then + if shr(n, 12) == 0 then + return shl(n, 10) + elseif band(n, 0xff000fff) == 0 then + return shr(n, 2) + 0x00400000 + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM12", 0, imm) + return 0 + end +end + +local function parse_imm13(imm) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = parse_number(imm) + local r64 = parse_reg_type == "x" + if n and n % 1 == 0 and n >= 0 and n <= 0xffffffff then + local inv = false + if band(n, 1) == 1 then n = bit.bnot(n); inv = true end + local t = {} + for i=1,32 do t[i] = band(n, 1); n = shr(n, 1) end + local b = table.concat(t) + b = b..(r64 and (inv and "1" or "0"):rep(32) or b) + local p0, p1, p0a, p1a = b:match("^(0+)(1+)(0*)(1*)") + if p0 then + local w = p1a == "" and (r64 and 64 or 32) or #p1+#p0a + if band(w, w-1) == 0 and b == b:sub(1, w):rep(64/w) then + local s = band(-2*w, 0x3f) - 1 + if w == 64 then s = s + 0x1000 end + if inv then + return shl(w-#p1-#p0, 16) + shl(s+w-#p1, 10) + else + return shl(w-#p0, 16) + shl(s+#p1, 10) + end + end + end + werror("out of range immediate `"..imm.."'") + elseif r64 then + waction("IMM13X", 0, format("(unsigned int)(%s)", imm)) + actargs[#actargs+1] = format("(unsigned int)((unsigned long long)(%s)>>32)", imm) + return 0 + else + waction("IMM13W", 0, imm) + return 0 + end +end + +local function parse_imm6(imm) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = parse_number(imm) + if n then + if n >= 0 and n <= 63 then + return shl(band(n, 0x1f), 19) + (n >= 32 and 0x80000000 or 0) + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM6", 0, imm) + return 0 + end +end + +local function parse_imm_load(imm, scale) + local n = parse_number(imm) + if n then + local m = sar(n, scale) + if shl(m, scale) == n and m >= 0 and m < 0x1000 then + return shl(m, 10) + 0x01000000 -- Scaled, unsigned 12 bit offset. + elseif n >= -256 and n < 256 then + return shl(band(n, 511), 12) -- Unscaled, signed 9 bit offset. 
+ end + werror("out of range immediate `"..imm.."'") + else + waction("IMML", 0, imm) + return 0 + end +end + +local function parse_fpimm(imm) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = parse_number(imm) + if n then + local m, e = math.frexp(n) + local s, e2 = 0, band(e-2, 7) + if m < 0 then m = -m; s = 0x00100000 end + m = m*32-16 + if m % 1 == 0 and m >= 0 and m <= 15 and sar(shl(e2, 29), 29)+2 == e then + return s + shl(e2, 17) + shl(m, 13) + end + werror("out of range immediate `"..imm.."'") + else + werror("NYI fpimm action") + end +end + +local function parse_shift(expr) + local s, s2 = match(expr, "^(%S+)%s*(.*)$") + s = map_shift[s] + if not s then werror("expected shift operand") end + return parse_imm(s2, 6, 10, 0, false) + shl(s, 22) +end + +local function parse_lslx16(expr) + local n = match(expr, "^lsl%s*#(%d+)$") + n = tonumber(n) + if not n then werror("expected shift operand") end + if band(n, parse_reg_type == "x" and 0xffffffcf or 0xffffffef) ~= 0 then + werror("bad shift amount") + end + return shl(n, 17) +end + +local function parse_extend(expr) + local s, s2 = match(expr, "^(%S+)%s*(.*)$") + if s == "lsl" then + s = parse_reg_type == "x" and 3 or 2 + else + s = map_extend[s] + end + if not s then werror("expected extend operand") end + return (s2 == "" and 0 or parse_imm(s2, 3, 10, 0, false)) + shl(s, 13) +end + +local function parse_cond(expr, inv) + local c = map_cond[expr] + if not c then werror("expected condition operand") end + return shl(bit.bxor(c, inv), 12) +end + +local function parse_load(params, nparams, n, op) + if params[n+2] then werror("too many operands") end + local pn, p2 = params[n], params[n+1] + local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") + if not p1 then + if not p2 then + local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local base, tp = parse_reg_base(reg) + if tp then + waction("IMML", 0, format(tp.ctypefmt, tailr)) + return op + base + end + end + end + werror("expected address operand") + end + local scale = shr(op, 30) + if p2 then + if wb == "!" then werror("bad use of '!'") end + op = op + parse_reg_base(p1) + parse_imm(p2, 9, 12, 0, true) + 0x400 + elseif wb == "!" then + local p1a, p2a = match(p1, "^([^,%s]*)%s*,%s*(.*)$") + if not p1a then werror("bad use of '!'") end + op = op + parse_reg_base(p1a) + parse_imm(p2a, 9, 12, 0, true) + 0xc00 + else + local p1a, p2a = match(p1, "^([^,%s]*)%s*(.*)$") + op = op + parse_reg_base(p1a) + if p2a ~= "" then + local imm = match(p2a, "^,%s*#(.*)$") + if imm then + op = op + parse_imm_load(imm, scale) + else + local p2b, p3b, p3s = match(p2a, "^,%s*([^,%s]*)%s*,?%s*(%S*)%s*(.*)$") + op = op + shl(parse_reg(p2b), 16) + 0x00200800 + if parse_reg_type ~= "x" and parse_reg_type ~= "w" then + werror("bad index register type") + end + if p3b == "" then + if parse_reg_type ~= "x" then werror("bad index register type") end + op = op + 0x6000 + else + if p3s == "" or p3s == "#0" then + elseif p3s == "#"..scale then + op = op + 0x1000 + else + werror("bad scale") + end + if parse_reg_type == "x" then + if p3b == "lsl" and p3s ~= "" then op = op + 0x6000 + elseif p3b == "sxtx" then op = op + 0xe000 + else + werror("bad extend/shift specifier") + end + else + if p3b == "uxtw" then op = op + 0x4000 + elseif p3b == "sxtw" then op = op + 0xc000 + else + werror("bad extend/shift specifier") + end + end + end + end + else + if wb == "!" 
then werror("bad use of '!'") end + op = op + 0x01000000 + end + end + return op +end + +local function parse_load_pair(params, nparams, n, op) + if params[n+2] then werror("too many operands") end + local pn, p2 = params[n], params[n+1] + local scale = shr(op, 30) == 0 and 2 or 3 + local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") + if not p1 then + if not p2 then + local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local base, tp = parse_reg_base(reg) + if tp then + waction("IMM", 32768+7*32+15+scale*1024, format(tp.ctypefmt, tailr)) + return op + base + 0x01000000 + end + end + end + werror("expected address operand") + end + if p2 then + if wb == "!" then werror("bad use of '!'") end + op = op + 0x00800000 + else + local p1a, p2a = match(p1, "^([^,%s]*)%s*,%s*(.*)$") + if p1a then p1, p2 = p1a, p2a else p2 = "#0" end + op = op + (wb == "!" and 0x01800000 or 0x01000000) + end + return op + parse_reg_base(p1) + parse_imm(p2, 7, 15, scale, true) +end + +local function parse_label(label, def) + local prefix = sub(label, 1, 2) + -- =>label (pc label reference) + if prefix == "=>" then + return "PC", 0, sub(label, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "LG", map_global[sub(label, 3)] + end + if def then + -- [1-9] (local label definition) + if match(label, "^[1-9]$") then + return "LG", 10+tonumber(label) + end + else + -- [<>][1-9] (local label reference) + local dir, lnum = match(label, "^([<>])([1-9])$") + if dir then -- Fwd: 1-9, Bkwd: 11-19. + return "LG", lnum + (dir == ">" and 0 or 10) + end + -- extern label (extern label reference) + local extname = match(label, "^extern%s+(%S+)$") + if extname then + return "EXT", map_extern[extname] + end + end + werror("bad label `"..label.."'") +end + +local function branch_type(op) + if band(op, 0x7c000000) == 0x14000000 then return 0 -- B, BL + elseif shr(op, 24) == 0x54 or band(op, 0x7e000000) == 0x34000000 or + band(op, 0x3b000000) == 0x18000000 then + return 0x800 -- B.cond, CBZ, CBNZ, LDR* literal + elseif band(op, 0x7e000000) == 0x36000000 then return 0x1000 -- TBZ, TBNZ + elseif band(op, 0x9f000000) == 0x10000000 then return 0x2000 -- ADR + elseif band(op, 0x9f000000) == band(0x90000000) then return 0x3000 -- ADRP + else + assert(false, "unknown branch type") + end +end + +------------------------------------------------------------------------------ + +local map_op, op_template + +local function op_alias(opname, f) + return function(params, nparams) + if not params then return "-> "..opname:sub(1, -3) end + f(params, nparams) + op_template(params, map_op[opname], nparams) + end +end + +local function alias_bfx(p) + p[4] = "#("..p[3]:sub(2)..")+("..p[4]:sub(2)..")-1" +end + +local function alias_bfiz(p) + parse_reg(p[1]) + if parse_reg_type == "w" then + p[3] = "#-("..p[3]:sub(2)..")%32" + p[4] = "#("..p[4]:sub(2)..")-1" + else + p[3] = "#-("..p[3]:sub(2)..")%64" + p[4] = "#("..p[4]:sub(2)..")-1" + end +end + +local alias_lslimm = op_alias("ubfm_4", function(p) + parse_reg(p[1]) + local sh = p[3]:sub(2) + if parse_reg_type == "w" then + p[3] = "#-("..sh..")%32" + p[4] = "#31-("..sh..")" + else + p[3] = "#-("..sh..")%64" + p[4] = "#63-("..sh..")" + end +end) + +-- Template strings for ARM instructions. +map_op = { + -- Basic data processing instructions. 
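+  -- Editor's note on the template format (derived from parse_template
+  -- below): each "|"-separated alternative is 8 hex digits of base opcode
+  -- followed by one letter per operand.  For example "0b000000DNMg" means
+  -- base 0x0b000000 with D=Rd (bit 0), N=Rn (bit 5), M=Rm (bit 16), and
+  -- g = pick the 32/64-bit form from the register names (sets bit 31 for
+  -- x registers).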
+ add_3 = "0b000000DNMg|11000000pDpNIg|8b206000pDpNMx", + add_4 = "0b000000DNMSg|0b200000DNMXg|8b200000pDpNMXx|8b200000pDpNxMwX", + adds_3 = "2b000000DNMg|31000000DpNIg|ab206000DpNMx", + adds_4 = "2b000000DNMSg|2b200000DNMXg|ab200000DpNMXx|ab200000DpNxMwX", + cmn_2 = "2b00001fNMg|3100001fpNIg|ab20601fpNMx", + cmn_3 = "2b00001fNMSg|2b20001fNMXg|ab20001fpNMXx|ab20001fpNxMwX", + + sub_3 = "4b000000DNMg|51000000pDpNIg|cb206000pDpNMx", + sub_4 = "4b000000DNMSg|4b200000DNMXg|cb200000pDpNMXx|cb200000pDpNxMwX", + subs_3 = "6b000000DNMg|71000000DpNIg|eb206000DpNMx", + subs_4 = "6b000000DNMSg|6b200000DNMXg|eb200000DpNMXx|eb200000DpNxMwX", + cmp_2 = "6b00001fNMg|7100001fpNIg|eb20601fpNMx", + cmp_3 = "6b00001fNMSg|6b20001fNMXg|eb20001fpNMXx|eb20001fpNxMwX", + + neg_2 = "4b0003e0DMg", + neg_3 = "4b0003e0DMSg", + negs_2 = "6b0003e0DMg", + negs_3 = "6b0003e0DMSg", + + adc_3 = "1a000000DNMg", + adcs_3 = "3a000000DNMg", + sbc_3 = "5a000000DNMg", + sbcs_3 = "7a000000DNMg", + ngc_2 = "5a0003e0DMg", + ngcs_2 = "7a0003e0DMg", + + and_3 = "0a000000DNMg|12000000pDNig", + and_4 = "0a000000DNMSg", + orr_3 = "2a000000DNMg|32000000pDNig", + orr_4 = "2a000000DNMSg", + eor_3 = "4a000000DNMg|52000000pDNig", + eor_4 = "4a000000DNMSg", + ands_3 = "6a000000DNMg|72000000DNig", + ands_4 = "6a000000DNMSg", + tst_2 = "6a00001fNMg|7200001fNig", + tst_3 = "6a00001fNMSg", + + bic_3 = "0a200000DNMg", + bic_4 = "0a200000DNMSg", + orn_3 = "2a200000DNMg", + orn_4 = "2a200000DNMSg", + eon_3 = "4a200000DNMg", + eon_4 = "4a200000DNMSg", + bics_3 = "6a200000DNMg", + bics_4 = "6a200000DNMSg", + + movn_2 = "12800000DWg", + movn_3 = "12800000DWRg", + movz_2 = "52800000DWg", + movz_3 = "52800000DWRg", + movk_2 = "72800000DWg", + movk_3 = "72800000DWRg", + + -- TODO: this doesn't cover all valid immediates for mov reg, #imm. + mov_2 = "2a0003e0DMg|52800000DW|320003e0pDig|11000000pDpNg", + mov_3 = "2a0003e0DMSg", + mvn_2 = "2a2003e0DMg", + mvn_3 = "2a2003e0DMSg", + + adr_2 = "10000000DBx", + adrp_2 = "90000000DBx", + + csel_4 = "1a800000DNMCg", + csinc_4 = "1a800400DNMCg", + csinv_4 = "5a800000DNMCg", + csneg_4 = "5a800400DNMCg", + cset_2 = "1a9f07e0Dcg", + csetm_2 = "5a9f03e0Dcg", + cinc_3 = "1a800400DNmcg", + cinv_3 = "5a800000DNmcg", + cneg_3 = "5a800400DNmcg", + + ccmn_4 = "3a400000NMVCg|3a400800N5VCg", + ccmp_4 = "7a400000NMVCg|7a400800N5VCg", + + madd_4 = "1b000000DNMAg", + msub_4 = "1b008000DNMAg", + mul_3 = "1b007c00DNMg", + mneg_3 = "1b00fc00DNMg", + + smaddl_4 = "9b200000DxNMwAx", + smsubl_4 = "9b208000DxNMwAx", + smull_3 = "9b207c00DxNMw", + smnegl_3 = "9b20fc00DxNMw", + smulh_3 = "9b407c00DNMx", + umaddl_4 = "9ba00000DxNMwAx", + umsubl_4 = "9ba08000DxNMwAx", + umull_3 = "9ba07c00DxNMw", + umnegl_3 = "9ba0fc00DxNMw", + umulh_3 = "9bc07c00DNMx", + + udiv_3 = "1ac00800DNMg", + sdiv_3 = "1ac00c00DNMg", + + -- Bit operations. 
+ sbfm_4 = "13000000DN12w|93400000DN12x", + bfm_4 = "33000000DN12w|b3400000DN12x", + ubfm_4 = "53000000DN12w|d3400000DN12x", + extr_4 = "13800000DNM2w|93c00000DNM2x", + + sxtb_2 = "13001c00DNw|93401c00DNx", + sxth_2 = "13003c00DNw|93403c00DNx", + sxtw_2 = "93407c00DxNw", + uxtb_2 = "53001c00DNw", + uxth_2 = "53003c00DNw", + + sbfx_4 = op_alias("sbfm_4", alias_bfx), + bfxil_4 = op_alias("bfm_4", alias_bfx), + ubfx_4 = op_alias("ubfm_4", alias_bfx), + sbfiz_4 = op_alias("sbfm_4", alias_bfiz), + bfi_4 = op_alias("bfm_4", alias_bfiz), + ubfiz_4 = op_alias("ubfm_4", alias_bfiz), + + lsl_3 = function(params, nparams) + if params and params[3]:byte() == 35 then + return alias_lslimm(params, nparams) + else + return op_template(params, "1ac02000DNMg", nparams) + end + end, + lsr_3 = "1ac02400DNMg|53007c00DN1w|d340fc00DN1x", + asr_3 = "1ac02800DNMg|13007c00DN1w|9340fc00DN1x", + ror_3 = "1ac02c00DNMg|13800000DNm2w|93c00000DNm2x", + + clz_2 = "5ac01000DNg", + cls_2 = "5ac01400DNg", + rbit_2 = "5ac00000DNg", + rev_2 = "5ac00800DNw|dac00c00DNx", + rev16_2 = "5ac00400DNg", + rev32_2 = "dac00800DNx", + + -- Loads and stores. + ["strb_*"] = "38000000DwL", + ["ldrb_*"] = "38400000DwL", + ["ldrsb_*"] = "38c00000DwL|38800000DxL", + ["strh_*"] = "78000000DwL", + ["ldrh_*"] = "78400000DwL", + ["ldrsh_*"] = "78c00000DwL|78800000DxL", + ["str_*"] = "b8000000DwL|f8000000DxL|bc000000DsL|fc000000DdL", + ["ldr_*"] = "18000000DwB|58000000DxB|1c000000DsB|5c000000DdB|b8400000DwL|f8400000DxL|bc400000DsL|fc400000DdL", + ["ldrsw_*"] = "98000000DxB|b8800000DxL", + -- NOTE: ldur etc. are handled by ldr et al. + + ["stp_*"] = "28000000DAwP|a8000000DAxP|2c000000DAsP|6c000000DAdP", + ["ldp_*"] = "28400000DAwP|a8400000DAxP|2c400000DAsP|6c400000DAdP", + ["ldpsw_*"] = "68400000DAxP", + + -- Branches. + b_1 = "14000000B", + bl_1 = "94000000B", + blr_1 = "d63f0000Nx", + br_1 = "d61f0000Nx", + ret_0 = "d65f03c0", + ret_1 = "d65f0000Nx", + -- b.cond is added below. + cbz_2 = "34000000DBg", + cbnz_2 = "35000000DBg", + tbz_3 = "36000000DTBw|36000000DTBx", + tbnz_3 = "37000000DTBw|37000000DTBx", + + -- Miscellaneous instructions. + -- TODO: hlt, hvc, smc, svc, eret, dcps[123], drps, mrs, msr + -- TODO: sys, sysl, ic, dc, at, tlbi + -- TODO: hint, yield, wfe, wfi, sev, sevl + -- TODO: clrex, dsb, dmb, isb + nop_0 = "d503201f", + brk_0 = "d4200000", + brk_1 = "d4200000W", + + -- Floating point instructions. + fmov_2 = "1e204000DNf|1e260000DwNs|1e270000DsNw|9e660000DxNd|9e670000DdNx|1e201000DFf", + fabs_2 = "1e20c000DNf", + fneg_2 = "1e214000DNf", + fsqrt_2 = "1e21c000DNf", + + fcvt_2 = "1e22c000DdNs|1e624000DsNd", + + -- TODO: half-precision and fixed-point conversions. 
+ fcvtas_2 = "1e240000DwNs|9e240000DxNs|1e640000DwNd|9e640000DxNd", + fcvtau_2 = "1e250000DwNs|9e250000DxNs|1e650000DwNd|9e650000DxNd", + fcvtms_2 = "1e300000DwNs|9e300000DxNs|1e700000DwNd|9e700000DxNd", + fcvtmu_2 = "1e310000DwNs|9e310000DxNs|1e710000DwNd|9e710000DxNd", + fcvtns_2 = "1e200000DwNs|9e200000DxNs|1e600000DwNd|9e600000DxNd", + fcvtnu_2 = "1e210000DwNs|9e210000DxNs|1e610000DwNd|9e610000DxNd", + fcvtps_2 = "1e280000DwNs|9e280000DxNs|1e680000DwNd|9e680000DxNd", + fcvtpu_2 = "1e290000DwNs|9e290000DxNs|1e690000DwNd|9e690000DxNd", + fcvtzs_2 = "1e380000DwNs|9e380000DxNs|1e780000DwNd|9e780000DxNd", + fcvtzu_2 = "1e390000DwNs|9e390000DxNs|1e790000DwNd|9e790000DxNd", + + scvtf_2 = "1e220000DsNw|9e220000DsNx|1e620000DdNw|9e620000DdNx", + ucvtf_2 = "1e230000DsNw|9e230000DsNx|1e630000DdNw|9e630000DdNx", + + frintn_2 = "1e244000DNf", + frintp_2 = "1e24c000DNf", + frintm_2 = "1e254000DNf", + frintz_2 = "1e25c000DNf", + frinta_2 = "1e264000DNf", + frintx_2 = "1e274000DNf", + frinti_2 = "1e27c000DNf", + + fadd_3 = "1e202800DNMf", + fsub_3 = "1e203800DNMf", + fmul_3 = "1e200800DNMf", + fnmul_3 = "1e208800DNMf", + fdiv_3 = "1e201800DNMf", + + fmadd_4 = "1f000000DNMAf", + fmsub_4 = "1f008000DNMAf", + fnmadd_4 = "1f200000DNMAf", + fnmsub_4 = "1f208000DNMAf", + + fmax_3 = "1e204800DNMf", + fmaxnm_3 = "1e206800DNMf", + fmin_3 = "1e205800DNMf", + fminnm_3 = "1e207800DNMf", + + fcmp_2 = "1e202000NMf|1e202008NZf", + fcmpe_2 = "1e202010NMf|1e202018NZf", + + fccmp_4 = "1e200400NMVCf", + fccmpe_4 = "1e200410NMVCf", + + fcsel_4 = "1e200c00DNMCf", + + -- TODO: crc32*, aes*, sha*, pmull + -- TODO: SIMD instructions. +} + +for cond,c in pairs(map_cond) do + map_op["b"..cond.."_1"] = tohex(0x54000000+c).."B" +end + +------------------------------------------------------------------------------ + +-- Handle opcodes defined with template strings. +local function parse_template(params, template, nparams, pos) + local op = tonumber(sub(template, 1, 8), 16) + local n = 1 + local rtt = {} + + parse_reg_type = false + + -- Process each character. 
+ for p in gmatch(sub(template, 9), ".") do + local q = params[n] + if p == "D" then + op = op + parse_reg(q); n = n + 1 + elseif p == "N" then + op = op + shl(parse_reg(q), 5); n = n + 1 + elseif p == "M" then + op = op + shl(parse_reg(q), 16); n = n + 1 + elseif p == "A" then + op = op + shl(parse_reg(q), 10); n = n + 1 + elseif p == "m" then + op = op + shl(parse_reg(params[n-1]), 16) + + elseif p == "p" then + if q == "sp" then params[n] = "@x31" end + elseif p == "g" then + if parse_reg_type == "x" then + op = op + 0x80000000 + elseif parse_reg_type ~= "w" then + werror("bad register type") + end + parse_reg_type = false + elseif p == "f" then + if parse_reg_type == "d" then + op = op + 0x00400000 + elseif parse_reg_type ~= "s" then + werror("bad register type") + end + parse_reg_type = false + elseif p == "x" or p == "w" or p == "d" or p == "s" then + if parse_reg_type ~= p then + werror("register size mismatch") + end + parse_reg_type = false + + elseif p == "L" then + op = parse_load(params, nparams, n, op) + elseif p == "P" then + op = parse_load_pair(params, nparams, n, op) + + elseif p == "B" then + local mode, v, s = parse_label(q, false); n = n + 1 + local m = branch_type(op) + waction("REL_"..mode, v+m, s, 1) + + elseif p == "I" then + op = op + parse_imm12(q); n = n + 1 + elseif p == "i" then + op = op + parse_imm13(q); n = n + 1 + elseif p == "W" then + op = op + parse_imm(q, 16, 5, 0, false); n = n + 1 + elseif p == "T" then + op = op + parse_imm6(q); n = n + 1 + elseif p == "1" then + op = op + parse_imm(q, 6, 16, 0, false); n = n + 1 + elseif p == "2" then + op = op + parse_imm(q, 6, 10, 0, false); n = n + 1 + elseif p == "5" then + op = op + parse_imm(q, 5, 16, 0, false); n = n + 1 + elseif p == "V" then + op = op + parse_imm(q, 4, 0, 0, false); n = n + 1 + elseif p == "F" then + op = op + parse_fpimm(q); n = n + 1 + elseif p == "Z" then + if q ~= "#0" and q ~= "#0.0" then werror("expected zero immediate") end + n = n + 1 + + elseif p == "S" then + op = op + parse_shift(q); n = n + 1 + elseif p == "X" then + op = op + parse_extend(q); n = n + 1 + elseif p == "R" then + op = op + parse_lslx16(q); n = n + 1 + elseif p == "C" then + op = op + parse_cond(q, 0); n = n + 1 + elseif p == "c" then + op = op + parse_cond(q, 1); n = n + 1 + + else + assert(false) + end + end + wputpos(pos, op) +end + +function op_template(params, template, nparams) + if not params then return template:gsub("%x%x%x%x%x%x%x%x", "") end + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 3 positions. + if secpos+3 > maxsecpos then wflush() end + local pos = wpos() + local lpos, apos, spos = #actlist, #actargs, secpos + + local ok, err + for t in gmatch(template, "[^|]+") do + ok, err = pcall(parse_template, params, t, nparams, pos) + if ok then return end + secpos = spos + actlist[lpos+1] = nil + actlist[lpos+2] = nil + actlist[lpos+3] = nil + actargs[apos+1] = nil + actargs[apos+2] = nil + actargs[apos+3] = nil + end + error(err, 0) +end + +map_op[".template__"] = op_template + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. 
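+-- Editor's note (illustrative): with ".globals lbl_" and hypothetical labels
+-- ->func1 / ->func2 the emitted enum looks like
+--   enum {
+--     lbl_func1,
+--     lbl_func2,
+--     lbl__MAX
+--   };
+-- in the order the globals were first used.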
+map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. +map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_1"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr" end + if secpos+1 > maxsecpos then wflush() end + local mode, n, s = parse_label(params[1], true) + if mode == "EXT" then werror("bad label definition") end + waction("LABEL_"..mode, n, s, 1) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +map_op[".long_*"] = function(params) + if not params then return "imm..." end + for _,p in ipairs(params) do + local n = tonumber(p) + if not n then werror("bad immediate `"..p.."'") end + if n < 0 then n = n + 2^32 end + wputw(n) + if secpos+2 > maxsecpos then wflush() end + end +end + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. + local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. 
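+-- Editor's note: invoked by the DynASM core when switching sections; since
+-- SECTION already terminates the action list, wflush(true) skips the usual
+-- trailing STOP action.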
+function _M.section(num) + waction("SECTION", num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. +function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = map_coreop }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.h b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.h new file mode 100644 index 00000000000..2f4c2d2220a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.h @@ -0,0 +1,416 @@ +/* +** DynASM MIPS encoding engine. +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** Released under the MIT license. See dynasm.lua for full copyright notice. +*/ + +#include +#include +#include +#include + +#define DASM_ARCH "mips" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. */ +enum { + DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, + /* The following actions need a buffer position. */ + DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, + /* The following actions also have an argument. */ + DASM_REL_PC, DASM_LABEL_PC, DASM_IMM, + DASM__MAX +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_REL 0x15000000 +#define DASM_S_UNDEF_LG 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned int *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. 
*/ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. */ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) 
+{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16) - 0xff00; + if (action >= DASM__MAX) { + ofs += 4; + } else { + int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; + switch (action) { + case DASM_STOP: goto stop; + case DASM_SECTION: + n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); + D->section = &D->sections[n]; goto stop; + case DASM_ESC: p++; ofs += 4; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; + case DASM_REL_LG: + n = (ins & 2047) - 10; pl = D->lglabels + n; + /* Bkwd rel or global. */ + if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } + pl += 10; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. */ + *pl = pos; + } + pos++; + break; + case DASM_LABEL_LG: + pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; + } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_IMM: +#ifdef DASM_CHECKS + CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); +#endif + n >>= ((ins>>10)&31); +#ifdef DASM_CHECKS + if (ins & 0x8000) + CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); + else + CK((n>>((ins>>5)&31)) == 0, RANGE_I); +#endif + b[pos++] = n; + break; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). 
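+  ** Editor's note: this pass also shrinks alignment padding.  Pass 1
+  ** reserved the worst case (2^n-1 bytes) per .align; with the final byte
+  ** offsets known, the surplus is subtracted again by the DASM_ALIGN case
+  ** below.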
*/ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16) - 0xff00; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: p++; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; + case DASM_REL_LG: case DASM_REL_PC: pos++; break; + case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; + case DASM_IMM: pos++; break; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) +#else +#define CK(x, st) ((void)0) +#endif + +/* Pass 3: Encode sections. */ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + char *base = (char *)buffer; + unsigned int *cp = (unsigned int *)buffer; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16) - 0xff00; + int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: *cp++ = *p++; break; + case DASM_REL_EXT: + n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1); + goto patchrel; + case DASM_ALIGN: + ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000; + break; + case DASM_REL_LG: + CK(n >= 0, UNDEF_LG); + case DASM_REL_PC: + CK(n >= 0, UNDEF_PC); + n = *DASM_POS2PTR(D, n); + if (ins & 2048) + n = n - (int)((char *)cp - base); + else + n = (n + (int)base) & 0x0fffffff; + patchrel: + CK((n & 3) == 0 && + ((n + ((ins & 2048) ? 0x00020000 : 0)) >> + ((ins & 2048) ? 18 : 28)) == 0, RANGE_REL); + cp[-1] |= ((n>>2) & ((ins & 2048) ? 0x0000ffff: 0x03ffffff)); + break; + case DASM_LABEL_LG: + ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); + break; + case DASM_LABEL_PC: break; + case DASM_IMM: + cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31); + break; + default: *cp++ = ins; break; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != (char *)cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} +#undef CK + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. 
*/ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(D->section-D->sections); + return D->status; +} +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.lua new file mode 100644 index 00000000000..ae0dbd7a9bc --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_mips.lua @@ -0,0 +1,953 @@ +------------------------------------------------------------------------------ +-- DynASM MIPS module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +-- Module information: +local _info = { + arch = "mips", + description = "DynASM MIPS module", + version = "1.3.0", + vernum = 10300, + release = "2012-01-23", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. +local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, setmetatable = assert, setmetatable +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local match, gmatch = _s.match, _s.gmatch +local concat, sort = table.concat, table.sort +local bit = bit or require("bit") +local band, shl, sar, tohex = bit.band, bit.lshift, bit.arshift, bit.tohex + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + "STOP", "SECTION", "ESC", "REL_EXT", + "ALIGN", "REL_LG", "LABEL_LG", + "REL_PC", "LABEL_PC", "IMM", +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number. +local map_action = {} +for n,name in ipairs(action_names) do + map_action[name] = n-1 +end + +-- Action list buffer. +local actlist = {} + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. +local function writeactions(out, name) + local nn = #actlist + if nn == 0 then nn = 1; actlist[0] = map_action.STOP end + out:write("static const unsigned int ", name, "[", nn, "] = {\n") + for i = 1,nn-1 do + assert(out:write("0x", tohex(actlist[i]), ",\n")) + end + assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) +end + +------------------------------------------------------------------------------ + +-- Add word to action list. 
+local function wputxw(n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, val, a, num) + local w = assert(map_action[action], "bad action name `"..action.."'") + wputxw(0xff000000 + w * 0x10000 + (val or 0)) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + if #actlist == actargs[1] then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped word. +local function wputw(n) + if n >= 0xff000000 then waction("ESC") end + wputxw(n) +end + +-- Reserve position for word. +local function wpos() + local pos = #actlist+1 + actlist[pos] = "" + return pos +end + +-- Store word to reserved position. +local function wputpos(pos, n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[pos] = n +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 20 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end + local n = next_global + if n > 2047 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=20,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. +local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=20,next_global-1 do + out:write(" ", prefix, t[i], ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=20,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = 0 +local map_extern_ = {} +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. + local n = next_extern + if n > 2047 then werror("too many extern labels") end + next_extern = n + 1 + t[name] = n + map_extern_[n] = name + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + out:write("Extern labels:\n") + for i=0,next_extern-1 do + out:write(format(" %s\n", map_extern_[i])) + end + out:write("\n") +end + +-- Write extern label names. 
+local function writeexternnames(out, name) + out:write("static const char *const ", name, "[] = {\n") + for i=0,next_extern-1 do + out:write(" \"", map_extern_[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. +local map_archdef = { sp="r29", ra="r31" } -- Ext. register name -> int. name. + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for Dt... macros). + +-- Reverse defines for registers. +function _M.revdef(s) + if s == "r29" then return "sp" + elseif s == "r31" then return "ra" end + return s +end + +------------------------------------------------------------------------------ + +-- Template strings for MIPS instructions. +local map_op = { + -- First-level opcodes. + j_1 = "08000000J", + jal_1 = "0c000000J", + b_1 = "10000000B", + beqz_2 = "10000000SB", + beq_3 = "10000000STB", + bnez_2 = "14000000SB", + bne_3 = "14000000STB", + blez_2 = "18000000SB", + bgtz_2 = "1c000000SB", + addi_3 = "20000000TSI", + li_2 = "24000000TI", + addiu_3 = "24000000TSI", + slti_3 = "28000000TSI", + sltiu_3 = "2c000000TSI", + andi_3 = "30000000TSU", + lu_2 = "34000000TU", + ori_3 = "34000000TSU", + xori_3 = "38000000TSU", + lui_2 = "3c000000TU", + beqzl_2 = "50000000SB", + beql_3 = "50000000STB", + bnezl_2 = "54000000SB", + bnel_3 = "54000000STB", + blezl_2 = "58000000SB", + bgtzl_2 = "5c000000SB", + lb_2 = "80000000TO", + lh_2 = "84000000TO", + lwl_2 = "88000000TO", + lw_2 = "8c000000TO", + lbu_2 = "90000000TO", + lhu_2 = "94000000TO", + lwr_2 = "98000000TO", + sb_2 = "a0000000TO", + sh_2 = "a4000000TO", + swl_2 = "a8000000TO", + sw_2 = "ac000000TO", + swr_2 = "b8000000TO", + cache_2 = "bc000000NO", + ll_2 = "c0000000TO", + lwc1_2 = "c4000000HO", + pref_2 = "cc000000NO", + ldc1_2 = "d4000000HO", + sc_2 = "e0000000TO", + swc1_2 = "e4000000HO", + sdc1_2 = "f4000000HO", + + -- Opcode SPECIAL. + nop_0 = "00000000", + sll_3 = "00000000DTA", + movf_2 = "00000001DS", + movf_3 = "00000001DSC", + movt_2 = "00010001DS", + movt_3 = "00010001DSC", + srl_3 = "00000002DTA", + rotr_3 = "00200002DTA", + sra_3 = "00000003DTA", + sllv_3 = "00000004DTS", + srlv_3 = "00000006DTS", + rotrv_3 = "00000046DTS", + srav_3 = "00000007DTS", + jr_1 = "00000008S", + jalr_1 = "0000f809S", + jalr_2 = "00000009DS", + movz_3 = "0000000aDST", + movn_3 = "0000000bDST", + syscall_0 = "0000000c", + syscall_1 = "0000000cY", + break_0 = "0000000d", + break_1 = "0000000dY", + sync_0 = "0000000f", + mfhi_1 = "00000010D", + mthi_1 = "00000011S", + mflo_1 = "00000012D", + mtlo_1 = "00000013S", + mult_2 = "00000018ST", + multu_2 = "00000019ST", + div_2 = "0000001aST", + divu_2 = "0000001bST", + add_3 = "00000020DST", + move_2 = "00000021DS", + addu_3 = "00000021DST", + sub_3 = "00000022DST", + negu_2 = "00000023DT", + subu_3 = "00000023DST", + and_3 = "00000024DST", + or_3 = "00000025DST", + xor_3 = "00000026DST", + not_2 = "00000027DS", + nor_3 = "00000027DST", + slt_3 = "0000002aDST", + sltu_3 = "0000002bDST", + tge_2 = "00000030ST", + tge_3 = "00000030STZ", + tgeu_2 = "00000031ST", + tgeu_3 = "00000031STZ", + tlt_2 = "00000032ST", + tlt_3 = "00000032STZ", + tltu_2 = "00000033ST", + tltu_3 = "00000033STZ", + teq_2 = "00000034ST", + teq_3 = "00000034STZ", + tne_2 = "00000036ST", + tne_3 = "00000036STZ", + + -- Opcode REGIMM. 
+ bltz_2 = "04000000SB", + bgez_2 = "04010000SB", + bltzl_2 = "04020000SB", + bgezl_2 = "04030000SB", + tgei_2 = "04080000SI", + tgeiu_2 = "04090000SI", + tlti_2 = "040a0000SI", + tltiu_2 = "040b0000SI", + teqi_2 = "040c0000SI", + tnei_2 = "040e0000SI", + bltzal_2 = "04100000SB", + bal_1 = "04110000B", + bgezal_2 = "04110000SB", + bltzall_2 = "04120000SB", + bgezall_2 = "04130000SB", + synci_1 = "041f0000O", + + -- Opcode SPECIAL2. + madd_2 = "70000000ST", + maddu_2 = "70000001ST", + mul_3 = "70000002DST", + msub_2 = "70000004ST", + msubu_2 = "70000005ST", + clz_2 = "70000020DS=", + clo_2 = "70000021DS=", + sdbbp_0 = "7000003f", + sdbbp_1 = "7000003fY", + + -- Opcode SPECIAL3. + ext_4 = "7c000000TSAM", -- Note: last arg is msbd = size-1 + ins_4 = "7c000004TSAM", -- Note: last arg is msb = pos+size-1 + wsbh_2 = "7c0000a0DT", + seb_2 = "7c000420DT", + seh_2 = "7c000620DT", + rdhwr_2 = "7c00003bTD", + + -- Opcode COP0. + mfc0_2 = "40000000TD", + mfc0_3 = "40000000TDW", + mtc0_2 = "40800000TD", + mtc0_3 = "40800000TDW", + rdpgpr_2 = "41400000DT", + di_0 = "41606000", + di_1 = "41606000T", + ei_0 = "41606020", + ei_1 = "41606020T", + wrpgpr_2 = "41c00000DT", + tlbr_0 = "42000001", + tlbwi_0 = "42000002", + tlbwr_0 = "42000006", + tlbp_0 = "42000008", + eret_0 = "42000018", + deret_0 = "4200001f", + wait_0 = "42000020", + + -- Opcode COP1. + mfc1_2 = "44000000TG", + cfc1_2 = "44400000TG", + mfhc1_2 = "44600000TG", + mtc1_2 = "44800000TG", + ctc1_2 = "44c00000TG", + mthc1_2 = "44e00000TG", + + bc1f_1 = "45000000B", + bc1f_2 = "45000000CB", + bc1t_1 = "45010000B", + bc1t_2 = "45010000CB", + bc1fl_1 = "45020000B", + bc1fl_2 = "45020000CB", + bc1tl_1 = "45030000B", + bc1tl_2 = "45030000CB", + + ["add.s_3"] = "46000000FGH", + ["sub.s_3"] = "46000001FGH", + ["mul.s_3"] = "46000002FGH", + ["div.s_3"] = "46000003FGH", + ["sqrt.s_2"] = "46000004FG", + ["abs.s_2"] = "46000005FG", + ["mov.s_2"] = "46000006FG", + ["neg.s_2"] = "46000007FG", + ["round.l.s_2"] = "46000008FG", + ["trunc.l.s_2"] = "46000009FG", + ["ceil.l.s_2"] = "4600000aFG", + ["floor.l.s_2"] = "4600000bFG", + ["round.w.s_2"] = "4600000cFG", + ["trunc.w.s_2"] = "4600000dFG", + ["ceil.w.s_2"] = "4600000eFG", + ["floor.w.s_2"] = "4600000fFG", + ["movf.s_2"] = "46000011FG", + ["movf.s_3"] = "46000011FGC", + ["movt.s_2"] = "46010011FG", + ["movt.s_3"] = "46010011FGC", + ["movz.s_3"] = "46000012FGT", + ["movn.s_3"] = "46000013FGT", + ["recip.s_2"] = "46000015FG", + ["rsqrt.s_2"] = "46000016FG", + ["cvt.d.s_2"] = "46000021FG", + ["cvt.w.s_2"] = "46000024FG", + ["cvt.l.s_2"] = "46000025FG", + ["cvt.ps.s_3"] = "46000026FGH", + ["c.f.s_2"] = "46000030GH", + ["c.f.s_3"] = "46000030VGH", + ["c.un.s_2"] = "46000031GH", + ["c.un.s_3"] = "46000031VGH", + ["c.eq.s_2"] = "46000032GH", + ["c.eq.s_3"] = "46000032VGH", + ["c.ueq.s_2"] = "46000033GH", + ["c.ueq.s_3"] = "46000033VGH", + ["c.olt.s_2"] = "46000034GH", + ["c.olt.s_3"] = "46000034VGH", + ["c.ult.s_2"] = "46000035GH", + ["c.ult.s_3"] = "46000035VGH", + ["c.ole.s_2"] = "46000036GH", + ["c.ole.s_3"] = "46000036VGH", + ["c.ule.s_2"] = "46000037GH", + ["c.ule.s_3"] = "46000037VGH", + ["c.sf.s_2"] = "46000038GH", + ["c.sf.s_3"] = "46000038VGH", + ["c.ngle.s_2"] = "46000039GH", + ["c.ngle.s_3"] = "46000039VGH", + ["c.seq.s_2"] = "4600003aGH", + ["c.seq.s_3"] = "4600003aVGH", + ["c.ngl.s_2"] = "4600003bGH", + ["c.ngl.s_3"] = "4600003bVGH", + ["c.lt.s_2"] = "4600003cGH", + ["c.lt.s_3"] = "4600003cVGH", + ["c.nge.s_2"] = "4600003dGH", + ["c.nge.s_3"] = "4600003dVGH", + ["c.le.s_2"] = "4600003eGH", + 
["c.le.s_3"] = "4600003eVGH", + ["c.ngt.s_2"] = "4600003fGH", + ["c.ngt.s_3"] = "4600003fVGH", + + ["add.d_3"] = "46200000FGH", + ["sub.d_3"] = "46200001FGH", + ["mul.d_3"] = "46200002FGH", + ["div.d_3"] = "46200003FGH", + ["sqrt.d_2"] = "46200004FG", + ["abs.d_2"] = "46200005FG", + ["mov.d_2"] = "46200006FG", + ["neg.d_2"] = "46200007FG", + ["round.l.d_2"] = "46200008FG", + ["trunc.l.d_2"] = "46200009FG", + ["ceil.l.d_2"] = "4620000aFG", + ["floor.l.d_2"] = "4620000bFG", + ["round.w.d_2"] = "4620000cFG", + ["trunc.w.d_2"] = "4620000dFG", + ["ceil.w.d_2"] = "4620000eFG", + ["floor.w.d_2"] = "4620000fFG", + ["movf.d_2"] = "46200011FG", + ["movf.d_3"] = "46200011FGC", + ["movt.d_2"] = "46210011FG", + ["movt.d_3"] = "46210011FGC", + ["movz.d_3"] = "46200012FGT", + ["movn.d_3"] = "46200013FGT", + ["recip.d_2"] = "46200015FG", + ["rsqrt.d_2"] = "46200016FG", + ["cvt.s.d_2"] = "46200020FG", + ["cvt.w.d_2"] = "46200024FG", + ["cvt.l.d_2"] = "46200025FG", + ["c.f.d_2"] = "46200030GH", + ["c.f.d_3"] = "46200030VGH", + ["c.un.d_2"] = "46200031GH", + ["c.un.d_3"] = "46200031VGH", + ["c.eq.d_2"] = "46200032GH", + ["c.eq.d_3"] = "46200032VGH", + ["c.ueq.d_2"] = "46200033GH", + ["c.ueq.d_3"] = "46200033VGH", + ["c.olt.d_2"] = "46200034GH", + ["c.olt.d_3"] = "46200034VGH", + ["c.ult.d_2"] = "46200035GH", + ["c.ult.d_3"] = "46200035VGH", + ["c.ole.d_2"] = "46200036GH", + ["c.ole.d_3"] = "46200036VGH", + ["c.ule.d_2"] = "46200037GH", + ["c.ule.d_3"] = "46200037VGH", + ["c.sf.d_2"] = "46200038GH", + ["c.sf.d_3"] = "46200038VGH", + ["c.ngle.d_2"] = "46200039GH", + ["c.ngle.d_3"] = "46200039VGH", + ["c.seq.d_2"] = "4620003aGH", + ["c.seq.d_3"] = "4620003aVGH", + ["c.ngl.d_2"] = "4620003bGH", + ["c.ngl.d_3"] = "4620003bVGH", + ["c.lt.d_2"] = "4620003cGH", + ["c.lt.d_3"] = "4620003cVGH", + ["c.nge.d_2"] = "4620003dGH", + ["c.nge.d_3"] = "4620003dVGH", + ["c.le.d_2"] = "4620003eGH", + ["c.le.d_3"] = "4620003eVGH", + ["c.ngt.d_2"] = "4620003fGH", + ["c.ngt.d_3"] = "4620003fVGH", + + ["add.ps_3"] = "46c00000FGH", + ["sub.ps_3"] = "46c00001FGH", + ["mul.ps_3"] = "46c00002FGH", + ["abs.ps_2"] = "46c00005FG", + ["mov.ps_2"] = "46c00006FG", + ["neg.ps_2"] = "46c00007FG", + ["movf.ps_2"] = "46c00011FG", + ["movf.ps_3"] = "46c00011FGC", + ["movt.ps_2"] = "46c10011FG", + ["movt.ps_3"] = "46c10011FGC", + ["movz.ps_3"] = "46c00012FGT", + ["movn.ps_3"] = "46c00013FGT", + ["cvt.s.pu_2"] = "46c00020FG", + ["cvt.s.pl_2"] = "46c00028FG", + ["pll.ps_3"] = "46c0002cFGH", + ["plu.ps_3"] = "46c0002dFGH", + ["pul.ps_3"] = "46c0002eFGH", + ["puu.ps_3"] = "46c0002fFGH", + ["c.f.ps_2"] = "46c00030GH", + ["c.f.ps_3"] = "46c00030VGH", + ["c.un.ps_2"] = "46c00031GH", + ["c.un.ps_3"] = "46c00031VGH", + ["c.eq.ps_2"] = "46c00032GH", + ["c.eq.ps_3"] = "46c00032VGH", + ["c.ueq.ps_2"] = "46c00033GH", + ["c.ueq.ps_3"] = "46c00033VGH", + ["c.olt.ps_2"] = "46c00034GH", + ["c.olt.ps_3"] = "46c00034VGH", + ["c.ult.ps_2"] = "46c00035GH", + ["c.ult.ps_3"] = "46c00035VGH", + ["c.ole.ps_2"] = "46c00036GH", + ["c.ole.ps_3"] = "46c00036VGH", + ["c.ule.ps_2"] = "46c00037GH", + ["c.ule.ps_3"] = "46c00037VGH", + ["c.sf.ps_2"] = "46c00038GH", + ["c.sf.ps_3"] = "46c00038VGH", + ["c.ngle.ps_2"] = "46c00039GH", + ["c.ngle.ps_3"] = "46c00039VGH", + ["c.seq.ps_2"] = "46c0003aGH", + ["c.seq.ps_3"] = "46c0003aVGH", + ["c.ngl.ps_2"] = "46c0003bGH", + ["c.ngl.ps_3"] = "46c0003bVGH", + ["c.lt.ps_2"] = "46c0003cGH", + ["c.lt.ps_3"] = "46c0003cVGH", + ["c.nge.ps_2"] = "46c0003dGH", + ["c.nge.ps_3"] = "46c0003dVGH", + ["c.le.ps_2"] = "46c0003eGH", + ["c.le.ps_3"] = 
"46c0003eVGH", + ["c.ngt.ps_2"] = "46c0003fGH", + ["c.ngt.ps_3"] = "46c0003fVGH", + + ["cvt.s.w_2"] = "46800020FG", + ["cvt.d.w_2"] = "46800021FG", + + ["cvt.s.l_2"] = "46a00020FG", + ["cvt.d.l_2"] = "46a00021FG", + + -- Opcode COP1X. + lwxc1_2 = "4c000000FX", + ldxc1_2 = "4c000001FX", + luxc1_2 = "4c000005FX", + swxc1_2 = "4c000008FX", + sdxc1_2 = "4c000009FX", + suxc1_2 = "4c00000dFX", + prefx_2 = "4c00000fMX", + ["alnv.ps_4"] = "4c00001eFGHS", + ["madd.s_4"] = "4c000020FRGH", + ["madd.d_4"] = "4c000021FRGH", + ["madd.ps_4"] = "4c000026FRGH", + ["msub.s_4"] = "4c000028FRGH", + ["msub.d_4"] = "4c000029FRGH", + ["msub.ps_4"] = "4c00002eFRGH", + ["nmadd.s_4"] = "4c000030FRGH", + ["nmadd.d_4"] = "4c000031FRGH", + ["nmadd.ps_4"] = "4c000036FRGH", + ["nmsub.s_4"] = "4c000038FRGH", + ["nmsub.d_4"] = "4c000039FRGH", + ["nmsub.ps_4"] = "4c00003eFRGH", +} + +------------------------------------------------------------------------------ + +local function parse_gpr(expr) + local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + if not reg then + werror("type `"..(tname or expr).."' needs a register override") + end + expr = reg + end + local r = match(expr, "^r([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r, tp end + end + werror("bad register name `"..expr.."'") +end + +local function parse_fpr(expr) + local r = match(expr, "^f([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r end + end + werror("bad register name `"..expr.."'") +end + +local function parse_imm(imm, bits, shift, scale, signed) + local n = tonumber(imm) + if n then + local m = sar(n, scale) + if shl(m, scale) == n then + if signed then + local s = sar(m, bits-1) + if s == 0 then return shl(m, shift) + elseif s == -1 then return shl(m + shl(1, bits), shift) end + else + if sar(m, bits) == 0 then return shl(m, shift) end + end + end + werror("out of range immediate `"..imm.."'") + elseif match(imm, "^[rf]([1-3]?[0-9])$") or + match(imm, "^([%w_]+):([rf][1-3]?[0-9])$") then + werror("expected immediate operand, got register") + else + waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) + return 0 + end +end + +local function parse_disp(disp) + local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") + if imm then + local r = shl(parse_gpr(reg), 21) + local extname = match(imm, "^extern%s+(%S+)$") + if extname then + waction("REL_EXT", map_extern[extname], nil, 1) + return r + else + return r + parse_imm(imm, 16, 0, 0, true) + end + end + local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local r, tp = parse_gpr(reg) + if tp then + waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr)) + return shl(r, 21) + end + end + werror("bad displacement `"..disp.."'") +end + +local function parse_index(idx) + local rt, rs = match(idx, "^(.*)%(([%w_:]+)%)$") + if rt then + rt = parse_gpr(rt) + rs = parse_gpr(rs) + return shl(rt, 16) + shl(rs, 21) + end + werror("bad index `"..idx.."'") +end + +local function parse_label(label, def) + local prefix = sub(label, 1, 2) + -- =>label (pc label reference) + if prefix == "=>" then + return "PC", 0, sub(label, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "LG", map_global[sub(label, 3)] + end + if def then + -- [1-9] (local label definition) + if match(label, "^[1-9]$") then + return "LG", 10+tonumber(label) + end + else + -- [<>][1-9] (local label reference) + local dir, lnum = match(label, 
"^([<>])([1-9])$") + if dir then -- Fwd: 1-9, Bkwd: 11-19. + return "LG", lnum + (dir == ">" and 0 or 10) + end + -- extern label (extern label reference) + local extname = match(label, "^extern%s+(%S+)$") + if extname then + return "EXT", map_extern[extname] + end + end + werror("bad label `"..label.."'") +end + +------------------------------------------------------------------------------ + +-- Handle opcodes defined with template strings. +map_op[".template__"] = function(params, template, nparams) + if not params then return sub(template, 9) end + local op = tonumber(sub(template, 1, 8), 16) + local n = 1 + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 2 positions (ins/ext). + if secpos+2 > maxsecpos then wflush() end + local pos = wpos() + + -- Process each character. + for p in gmatch(sub(template, 9), ".") do + if p == "D" then + op = op + shl(parse_gpr(params[n]), 11); n = n + 1 + elseif p == "T" then + op = op + shl(parse_gpr(params[n]), 16); n = n + 1 + elseif p == "S" then + op = op + shl(parse_gpr(params[n]), 21); n = n + 1 + elseif p == "F" then + op = op + shl(parse_fpr(params[n]), 6); n = n + 1 + elseif p == "G" then + op = op + shl(parse_fpr(params[n]), 11); n = n + 1 + elseif p == "H" then + op = op + shl(parse_fpr(params[n]), 16); n = n + 1 + elseif p == "R" then + op = op + shl(parse_fpr(params[n]), 21); n = n + 1 + elseif p == "I" then + op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1 + elseif p == "U" then + op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1 + elseif p == "O" then + op = op + parse_disp(params[n]); n = n + 1 + elseif p == "X" then + op = op + parse_index(params[n]); n = n + 1 + elseif p == "B" or p == "J" then + local mode, n, s = parse_label(params[n], false) + if p == "B" then n = n + 2048 end + waction("REL_"..mode, n, s, 1) + n = n + 1 + elseif p == "A" then + op = op + parse_imm(params[n], 5, 6, 0, false); n = n + 1 + elseif p == "M" then + op = op + parse_imm(params[n], 5, 11, 0, false); n = n + 1 + elseif p == "N" then + op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1 + elseif p == "C" then + op = op + parse_imm(params[n], 3, 18, 0, false); n = n + 1 + elseif p == "V" then + op = op + parse_imm(params[n], 3, 8, 0, false); n = n + 1 + elseif p == "W" then + op = op + parse_imm(params[n], 3, 0, 0, false); n = n + 1 + elseif p == "Y" then + op = op + parse_imm(params[n], 20, 6, 0, false); n = n + 1 + elseif p == "Z" then + op = op + parse_imm(params[n], 10, 6, 0, false); n = n + 1 + elseif p == "=" then + op = op + shl(band(op, 0xf800), 5) -- Copy D to T for clz, clo. + else + assert(false) + end + end + wputpos(pos, op) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. 
+map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_1"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr" end + if secpos+1 > maxsecpos then wflush() end + local mode, n, s = parse_label(params[1], true) + if mode == "EXT" then werror("bad label definition") end + waction("LABEL_"..mode, n, s, 1) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +map_op[".long_*"] = function(params) + if not params then return "imm..." end + for _,p in ipairs(params) do + local n = tonumber(p) + if not n then werror("bad immediate `"..p.."'") end + if n < 0 then n = n + 2^32 end + wputw(n) + if secpos+2 > maxsecpos then wflush() end + end +end + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. + local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION", num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. 
+function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. +function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = map_coreop }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.h b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.h new file mode 100644 index 00000000000..332c64dc68a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.h @@ -0,0 +1,419 @@ +/* +** DynASM PPC/PPC64 encoding engine. +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** Released under the MIT license. See dynasm.lua for full copyright notice. +*/ + +#include <stddef.h> +#include <stdarg.h> +#include <string.h> +#include <stdlib.h> + +#define DASM_ARCH "ppc" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. */ +enum { + DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, + /* The following actions need a buffer position. */ + DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, + /* The following actions also have an argument. */ + DASM_REL_PC, DASM_LABEL_PC, DASM_IMM, DASM_IMMSH, + DASM__MAX +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_REL 0x15000000 +#define DASM_S_UNDEF_LG 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned int *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. 
*/ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. */ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) 
+{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + if (action >= DASM__MAX) { + ofs += 4; + } else { + int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; + switch (action) { + case DASM_STOP: goto stop; + case DASM_SECTION: + n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); + D->section = &D->sections[n]; goto stop; + case DASM_ESC: p++; ofs += 4; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; + case DASM_REL_LG: + n = (ins & 2047) - 10; pl = D->lglabels + n; + /* Bkwd rel or global. */ + if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } + pl += 10; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. */ + *pl = pos; + } + pos++; + break; + case DASM_LABEL_LG: + pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; + } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_IMM: +#ifdef DASM_CHECKS + CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); +#endif + n >>= ((ins>>10)&31); +#ifdef DASM_CHECKS + if (ins & 0x8000) + CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); + else + CK((n>>((ins>>5)&31)) == 0, RANGE_I); +#endif + b[pos++] = n; + break; + case DASM_IMMSH: + CK((n >> 6) == 0, RANGE_I); + b[pos++] = n; + break; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). 
*/ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: p++; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; + case DASM_REL_LG: case DASM_REL_PC: pos++; break; + case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; + case DASM_IMM: case DASM_IMMSH: pos++; break; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) +#else +#define CK(x, st) ((void)0) +#endif + +/* Pass 3: Encode sections. */ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + char *base = (char *)buffer; + unsigned int *cp = (unsigned int *)buffer; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: *cp++ = *p++; break; + case DASM_REL_EXT: + n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1) - 4; + goto patchrel; + case DASM_ALIGN: + ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000; + break; + case DASM_REL_LG: + CK(n >= 0, UNDEF_LG); + case DASM_REL_PC: + CK(n >= 0, UNDEF_PC); + n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base); + patchrel: + CK((n & 3) == 0 && + (((n+4) + ((ins & 2048) ? 0x00008000 : 0x02000000)) >> + ((ins & 2048) ? 16 : 26)) == 0, RANGE_REL); + cp[-1] |= ((n+4) & ((ins & 2048) ? 0x0000fffc: 0x03fffffc)); + break; + case DASM_LABEL_LG: + ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); + break; + case DASM_LABEL_PC: break; + case DASM_IMM: + cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31); + break; + case DASM_IMMSH: + cp[-1] |= (ins & 1) ? ((n&31)<<11)|((n&32)>>4) : ((n&31)<<6)|(n&32); + break; + default: *cp++ = ins; break; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != (char *)cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} +#undef CK + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. 
*/ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(D->section-D->sections); + return D->status; +} +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.lua new file mode 100644 index 00000000000..278f09526dd --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_ppc.lua @@ -0,0 +1,1919 @@ +------------------------------------------------------------------------------ +-- DynASM PPC/PPC64 module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +-- +-- Support for various extensions contributed by Caio Souza Oliveira. +------------------------------------------------------------------------------ + +-- Module information: +local _info = { + arch = "ppc", + description = "DynASM PPC module", + version = "1.3.0", + vernum = 10300, + release = "2015-01-14", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. +local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, setmetatable = assert, setmetatable +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local match, gmatch = _s.match, _s.gmatch +local concat, sort = table.concat, table.sort +local bit = bit or require("bit") +local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift +local tohex = bit.tohex + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + "STOP", "SECTION", "ESC", "REL_EXT", + "ALIGN", "REL_LG", "LABEL_LG", + "REL_PC", "LABEL_PC", "IMM", "IMMSH" +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number. +local map_action = {} +for n,name in ipairs(action_names) do + map_action[name] = n-1 +end + +-- Action list buffer. +local actlist = {} + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. +local function writeactions(out, name) + local nn = #actlist + if nn == 0 then nn = 1; actlist[0] = map_action.STOP end + out:write("static const unsigned int ", name, "[", nn, "] = {\n") + for i = 1,nn-1 do + assert(out:write("0x", tohex(actlist[i]), ",\n")) + end + assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) +end + +------------------------------------------------------------------------------ + +-- Add word to action list. 
+local function wputxw(n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, val, a, num) + local w = assert(map_action[action], "bad action name `"..action.."'") + wputxw(w * 0x10000 + (val or 0)) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + if #actlist == actargs[1] then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped word. +local function wputw(n) + if n <= 0xffffff then waction("ESC") end + wputxw(n) +end + +-- Reserve position for word. +local function wpos() + local pos = #actlist+1 + actlist[pos] = "" + return pos +end + +-- Store word to reserved position. +local function wputpos(pos, n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[pos] = n +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 20 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end + local n = next_global + if n > 2047 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=20,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. +local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=20,next_global-1 do + out:write(" ", prefix, t[i], ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=20,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = 0 +local map_extern_ = {} +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. + local n = next_extern + if n > 2047 then werror("too many extern labels") end + next_extern = n + 1 + t[name] = n + map_extern_[n] = name + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + out:write("Extern labels:\n") + for i=0,next_extern-1 do + out:write(format(" %s\n", map_extern_[i])) + end + out:write("\n") +end + +-- Write extern label names. 
+local function writeexternnames(out, name) + out:write("static const char *const ", name, "[] = {\n") + for i=0,next_extern-1 do + out:write(" \"", map_extern_[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. +local map_archdef = { sp = "r1" } -- Ext. register name -> int. name. + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for Dt... macros). + +-- Reverse defines for registers. +function _M.revdef(s) + if s == "r1" then return "sp" end + return s +end + +local map_cond = { + lt = 0, gt = 1, eq = 2, so = 3, + ge = 4, le = 5, ne = 6, ns = 7, +} + +------------------------------------------------------------------------------ + +local map_op, op_template + +local function op_alias(opname, f) + return function(params, nparams) + if not params then return "-> "..opname:sub(1, -3) end + f(params, nparams) + op_template(params, map_op[opname], nparams) + end +end + +-- Template strings for PPC instructions. +map_op = { + tdi_3 = "08000000ARI", + twi_3 = "0c000000ARI", + mulli_3 = "1c000000RRI", + subfic_3 = "20000000RRI", + cmplwi_3 = "28000000XRU", + cmplwi_2 = "28000000-RU", + cmpldi_3 = "28200000XRU", + cmpldi_2 = "28200000-RU", + cmpwi_3 = "2c000000XRI", + cmpwi_2 = "2c000000-RI", + cmpdi_3 = "2c200000XRI", + cmpdi_2 = "2c200000-RI", + addic_3 = "30000000RRI", + ["addic._3"] = "34000000RRI", + addi_3 = "38000000RR0I", + li_2 = "38000000RI", + la_2 = "38000000RD", + addis_3 = "3c000000RR0I", + lis_2 = "3c000000RI", + lus_2 = "3c000000RU", + bc_3 = "40000000AAK", + bcl_3 = "40000001AAK", + bdnz_1 = "42000000K", + bdz_1 = "42400000K", + sc_0 = "44000000", + b_1 = "48000000J", + bl_1 = "48000001J", + rlwimi_5 = "50000000RR~AAA.", + rlwinm_5 = "54000000RR~AAA.", + rlwnm_5 = "5c000000RR~RAA.", + ori_3 = "60000000RR~U", + nop_0 = "60000000", + oris_3 = "64000000RR~U", + xori_3 = "68000000RR~U", + xoris_3 = "6c000000RR~U", + ["andi._3"] = "70000000RR~U", + ["andis._3"] = "74000000RR~U", + lwz_2 = "80000000RD", + lwzu_2 = "84000000RD", + lbz_2 = "88000000RD", + lbzu_2 = "8c000000RD", + stw_2 = "90000000RD", + stwu_2 = "94000000RD", + stb_2 = "98000000RD", + stbu_2 = "9c000000RD", + lhz_2 = "a0000000RD", + lhzu_2 = "a4000000RD", + lha_2 = "a8000000RD", + lhau_2 = "ac000000RD", + sth_2 = "b0000000RD", + sthu_2 = "b4000000RD", + lmw_2 = "b8000000RD", + stmw_2 = "bc000000RD", + lfs_2 = "c0000000FD", + lfsu_2 = "c4000000FD", + lfd_2 = "c8000000FD", + lfdu_2 = "cc000000FD", + stfs_2 = "d0000000FD", + stfsu_2 = "d4000000FD", + stfd_2 = "d8000000FD", + stfdu_2 = "dc000000FD", + ld_2 = "e8000000RD", -- NYI: displacement must be divisible by 4. 
+ ldu_2 = "e8000001RD", + lwa_2 = "e8000002RD", + std_2 = "f8000000RD", + stdu_2 = "f8000001RD", + + subi_3 = op_alias("addi_3", function(p) p[3] = "-("..p[3]..")" end), + subis_3 = op_alias("addis_3", function(p) p[3] = "-("..p[3]..")" end), + subic_3 = op_alias("addic_3", function(p) p[3] = "-("..p[3]..")" end), + ["subic._3"] = op_alias("addic._3", function(p) p[3] = "-("..p[3]..")" end), + + rotlwi_3 = op_alias("rlwinm_5", function(p) + p[4] = "0"; p[5] = "31" + end), + rotrwi_3 = op_alias("rlwinm_5", function(p) + p[3] = "32-("..p[3]..")"; p[4] = "0"; p[5] = "31" + end), + rotlw_3 = op_alias("rlwnm_5", function(p) + p[4] = "0"; p[5] = "31" + end), + slwi_3 = op_alias("rlwinm_5", function(p) + p[5] = "31-("..p[3]..")"; p[4] = "0" + end), + srwi_3 = op_alias("rlwinm_5", function(p) + p[4] = p[3]; p[3] = "32-("..p[3]..")"; p[5] = "31" + end), + clrlwi_3 = op_alias("rlwinm_5", function(p) + p[4] = p[3]; p[3] = "0"; p[5] = "31" + end), + clrrwi_3 = op_alias("rlwinm_5", function(p) + p[5] = "31-("..p[3]..")"; p[3] = "0"; p[4] = "0" + end), + + -- Primary opcode 4: + mulhhwu_3 = "10000010RRR.", + machhwu_3 = "10000018RRR.", + mulhhw_3 = "10000050RRR.", + nmachhw_3 = "1000005cRRR.", + machhwsu_3 = "10000098RRR.", + machhws_3 = "100000d8RRR.", + nmachhws_3 = "100000dcRRR.", + mulchwu_3 = "10000110RRR.", + macchwu_3 = "10000118RRR.", + mulchw_3 = "10000150RRR.", + macchw_3 = "10000158RRR.", + nmacchw_3 = "1000015cRRR.", + macchwsu_3 = "10000198RRR.", + macchws_3 = "100001d8RRR.", + nmacchws_3 = "100001dcRRR.", + mullhw_3 = "10000350RRR.", + maclhw_3 = "10000358RRR.", + nmaclhw_3 = "1000035cRRR.", + maclhwsu_3 = "10000398RRR.", + maclhws_3 = "100003d8RRR.", + nmaclhws_3 = "100003dcRRR.", + machhwuo_3 = "10000418RRR.", + nmachhwo_3 = "1000045cRRR.", + machhwsuo_3 = "10000498RRR.", + machhwso_3 = "100004d8RRR.", + nmachhwso_3 = "100004dcRRR.", + macchwuo_3 = "10000518RRR.", + macchwo_3 = "10000558RRR.", + nmacchwo_3 = "1000055cRRR.", + macchwsuo_3 = "10000598RRR.", + macchwso_3 = "100005d8RRR.", + nmacchwso_3 = "100005dcRRR.", + maclhwo_3 = "10000758RRR.", + nmaclhwo_3 = "1000075cRRR.", + maclhwsuo_3 = "10000798RRR.", + maclhwso_3 = "100007d8RRR.", + nmaclhwso_3 = "100007dcRRR.", + + vaddubm_3 = "10000000VVV", + vmaxub_3 = "10000002VVV", + vrlb_3 = "10000004VVV", + vcmpequb_3 = "10000006VVV", + vmuloub_3 = "10000008VVV", + vaddfp_3 = "1000000aVVV", + vmrghb_3 = "1000000cVVV", + vpkuhum_3 = "1000000eVVV", + vmhaddshs_4 = "10000020VVVV", + vmhraddshs_4 = "10000021VVVV", + vmladduhm_4 = "10000022VVVV", + vmsumubm_4 = "10000024VVVV", + vmsummbm_4 = "10000025VVVV", + vmsumuhm_4 = "10000026VVVV", + vmsumuhs_4 = "10000027VVVV", + vmsumshm_4 = "10000028VVVV", + vmsumshs_4 = "10000029VVVV", + vsel_4 = "1000002aVVVV", + vperm_4 = "1000002bVVVV", + vsldoi_4 = "1000002cVVVP", + vpermxor_4 = "1000002dVVVV", + vmaddfp_4 = "1000002eVVVV~", + vnmsubfp_4 = "1000002fVVVV~", + vaddeuqm_4 = "1000003cVVVV", + vaddecuq_4 = "1000003dVVVV", + vsubeuqm_4 = "1000003eVVVV", + vsubecuq_4 = "1000003fVVVV", + vadduhm_3 = "10000040VVV", + vmaxuh_3 = "10000042VVV", + vrlh_3 = "10000044VVV", + vcmpequh_3 = "10000046VVV", + vmulouh_3 = "10000048VVV", + vsubfp_3 = "1000004aVVV", + vmrghh_3 = "1000004cVVV", + vpkuwum_3 = "1000004eVVV", + vadduwm_3 = "10000080VVV", + vmaxuw_3 = "10000082VVV", + vrlw_3 = "10000084VVV", + vcmpequw_3 = "10000086VVV", + vmulouw_3 = "10000088VVV", + vmuluwm_3 = "10000089VVV", + vmrghw_3 = "1000008cVVV", + vpkuhus_3 = "1000008eVVV", + vaddudm_3 = "100000c0VVV", + vmaxud_3 = "100000c2VVV", + vrld_3 = 
"100000c4VVV", + vcmpeqfp_3 = "100000c6VVV", + vcmpequd_3 = "100000c7VVV", + vpkuwus_3 = "100000ceVVV", + vadduqm_3 = "10000100VVV", + vmaxsb_3 = "10000102VVV", + vslb_3 = "10000104VVV", + vmulosb_3 = "10000108VVV", + vrefp_2 = "1000010aV-V", + vmrglb_3 = "1000010cVVV", + vpkshus_3 = "1000010eVVV", + vaddcuq_3 = "10000140VVV", + vmaxsh_3 = "10000142VVV", + vslh_3 = "10000144VVV", + vmulosh_3 = "10000148VVV", + vrsqrtefp_2 = "1000014aV-V", + vmrglh_3 = "1000014cVVV", + vpkswus_3 = "1000014eVVV", + vaddcuw_3 = "10000180VVV", + vmaxsw_3 = "10000182VVV", + vslw_3 = "10000184VVV", + vmulosw_3 = "10000188VVV", + vexptefp_2 = "1000018aV-V", + vmrglw_3 = "1000018cVVV", + vpkshss_3 = "1000018eVVV", + vmaxsd_3 = "100001c2VVV", + vsl_3 = "100001c4VVV", + vcmpgefp_3 = "100001c6VVV", + vlogefp_2 = "100001caV-V", + vpkswss_3 = "100001ceVVV", + vadduhs_3 = "10000240VVV", + vminuh_3 = "10000242VVV", + vsrh_3 = "10000244VVV", + vcmpgtuh_3 = "10000246VVV", + vmuleuh_3 = "10000248VVV", + vrfiz_2 = "1000024aV-V", + vsplth_3 = "1000024cVV3", + vupkhsh_2 = "1000024eV-V", + vminuw_3 = "10000282VVV", + vminud_3 = "100002c2VVV", + vcmpgtud_3 = "100002c7VVV", + vrfim_2 = "100002caV-V", + vcmpgtsb_3 = "10000306VVV", + vcfux_3 = "1000030aVVA~", + vaddshs_3 = "10000340VVV", + vminsh_3 = "10000342VVV", + vsrah_3 = "10000344VVV", + vcmpgtsh_3 = "10000346VVV", + vmulesh_3 = "10000348VVV", + vcfsx_3 = "1000034aVVA~", + vspltish_2 = "1000034cVS", + vupkhpx_2 = "1000034eV-V", + vaddsws_3 = "10000380VVV", + vminsw_3 = "10000382VVV", + vsraw_3 = "10000384VVV", + vcmpgtsw_3 = "10000386VVV", + vmulesw_3 = "10000388VVV", + vctuxs_3 = "1000038aVVA~", + vspltisw_2 = "1000038cVS", + vminsd_3 = "100003c2VVV", + vsrad_3 = "100003c4VVV", + vcmpbfp_3 = "100003c6VVV", + vcmpgtsd_3 = "100003c7VVV", + vctsxs_3 = "100003caVVA~", + vupklpx_2 = "100003ceV-V", + vsububm_3 = "10000400VVV", + ["bcdadd._4"] = "10000401VVVy.", + vavgub_3 = "10000402VVV", + vand_3 = "10000404VVV", + ["vcmpequb._3"] = "10000406VVV", + vmaxfp_3 = "1000040aVVV", + vsubuhm_3 = "10000440VVV", + ["bcdsub._4"] = "10000441VVVy.", + vavguh_3 = "10000442VVV", + vandc_3 = "10000444VVV", + ["vcmpequh._3"] = "10000446VVV", + vminfp_3 = "1000044aVVV", + vpkudum_3 = "1000044eVVV", + vsubuwm_3 = "10000480VVV", + vavguw_3 = "10000482VVV", + vor_3 = "10000484VVV", + ["vcmpequw._3"] = "10000486VVV", + vpmsumw_3 = "10000488VVV", + ["vcmpeqfp._3"] = "100004c6VVV", + ["vcmpequd._3"] = "100004c7VVV", + vpkudus_3 = "100004ceVVV", + vavgsb_3 = "10000502VVV", + vavgsh_3 = "10000542VVV", + vorc_3 = "10000544VVV", + vbpermq_3 = "1000054cVVV", + vpksdus_3 = "1000054eVVV", + vavgsw_3 = "10000582VVV", + vsld_3 = "100005c4VVV", + ["vcmpgefp._3"] = "100005c6VVV", + vpksdss_3 = "100005ceVVV", + vsububs_3 = "10000600VVV", + mfvscr_1 = "10000604V--", + vsum4ubs_3 = "10000608VVV", + vsubuhs_3 = "10000640VVV", + mtvscr_1 = "10000644--V", + ["vcmpgtuh._3"] = "10000646VVV", + vsum4shs_3 = "10000648VVV", + vupkhsw_2 = "1000064eV-V", + vsubuws_3 = "10000680VVV", + vshasigmaw_4 = "10000682VVYp", + veqv_3 = "10000684VVV", + vsum2sws_3 = "10000688VVV", + vmrgow_3 = "1000068cVVV", + vshasigmad_4 = "100006c2VVYp", + vsrd_3 = "100006c4VVV", + ["vcmpgtud._3"] = "100006c7VVV", + vupklsw_2 = "100006ceV-V", + vupkslw_2 = "100006ceV-V", + vsubsbs_3 = "10000700VVV", + vclzb_2 = "10000702V-V", + vpopcntb_2 = "10000703V-V", + ["vcmpgtsb._3"] = "10000706VVV", + vsum4sbs_3 = "10000708VVV", + vsubshs_3 = "10000740VVV", + vclzh_2 = "10000742V-V", + vpopcnth_2 = "10000743V-V", + ["vcmpgtsh._3"] = "10000746VVV", + vsubsws_3 
= "10000780VVV", + vclzw_2 = "10000782V-V", + vpopcntw_2 = "10000783V-V", + ["vcmpgtsw._3"] = "10000786VVV", + vsumsws_3 = "10000788VVV", + vmrgew_3 = "1000078cVVV", + vclzd_2 = "100007c2V-V", + vpopcntd_2 = "100007c3V-V", + ["vcmpbfp._3"] = "100007c6VVV", + ["vcmpgtsd._3"] = "100007c7VVV", + + -- Primary opcode 19: + mcrf_2 = "4c000000XX", + isync_0 = "4c00012c", + crnor_3 = "4c000042CCC", + crnot_2 = "4c000042CC=", + crandc_3 = "4c000102CCC", + crxor_3 = "4c000182CCC", + crclr_1 = "4c000182C==", + crnand_3 = "4c0001c2CCC", + crand_3 = "4c000202CCC", + creqv_3 = "4c000242CCC", + crset_1 = "4c000242C==", + crorc_3 = "4c000342CCC", + cror_3 = "4c000382CCC", + crmove_2 = "4c000382CC=", + bclr_2 = "4c000020AA", + bclrl_2 = "4c000021AA", + bcctr_2 = "4c000420AA", + bcctrl_2 = "4c000421AA", + bctar_2 = "4c000460AA", + bctarl_2 = "4c000461AA", + blr_0 = "4e800020", + blrl_0 = "4e800021", + bctr_0 = "4e800420", + bctrl_0 = "4e800421", + + -- Primary opcode 31: + cmpw_3 = "7c000000XRR", + cmpw_2 = "7c000000-RR", + cmpd_3 = "7c200000XRR", + cmpd_2 = "7c200000-RR", + tw_3 = "7c000008ARR", + lvsl_3 = "7c00000cVRR", + subfc_3 = "7c000010RRR.", + subc_3 = "7c000010RRR~.", + mulhdu_3 = "7c000012RRR.", + addc_3 = "7c000014RRR.", + mulhwu_3 = "7c000016RRR.", + isel_4 = "7c00001eRRRC", + isellt_3 = "7c00001eRRR", + iselgt_3 = "7c00005eRRR", + iseleq_3 = "7c00009eRRR", + mfcr_1 = "7c000026R", + mfocrf_2 = "7c100026RG", + mtcrf_2 = "7c000120GR", + mtocrf_2 = "7c100120GR", + lwarx_3 = "7c000028RR0R", + ldx_3 = "7c00002aRR0R", + lwzx_3 = "7c00002eRR0R", + slw_3 = "7c000030RR~R.", + cntlzw_2 = "7c000034RR~", + sld_3 = "7c000036RR~R.", + and_3 = "7c000038RR~R.", + cmplw_3 = "7c000040XRR", + cmplw_2 = "7c000040-RR", + cmpld_3 = "7c200040XRR", + cmpld_2 = "7c200040-RR", + lvsr_3 = "7c00004cVRR", + subf_3 = "7c000050RRR.", + sub_3 = "7c000050RRR~.", + lbarx_3 = "7c000068RR0R", + ldux_3 = "7c00006aRR0R", + dcbst_2 = "7c00006c-RR", + lwzux_3 = "7c00006eRR0R", + cntlzd_2 = "7c000074RR~", + andc_3 = "7c000078RR~R.", + td_3 = "7c000088ARR", + lvewx_3 = "7c00008eVRR", + mulhd_3 = "7c000092RRR.", + addg6s_3 = "7c000094RRR", + mulhw_3 = "7c000096RRR.", + dlmzb_3 = "7c00009cRR~R.", + ldarx_3 = "7c0000a8RR0R", + dcbf_2 = "7c0000ac-RR", + lbzx_3 = "7c0000aeRR0R", + lvx_3 = "7c0000ceVRR", + neg_2 = "7c0000d0RR.", + lharx_3 = "7c0000e8RR0R", + lbzux_3 = "7c0000eeRR0R", + popcntb_2 = "7c0000f4RR~", + not_2 = "7c0000f8RR~%.", + nor_3 = "7c0000f8RR~R.", + stvebx_3 = "7c00010eVRR", + subfe_3 = "7c000110RRR.", + sube_3 = "7c000110RRR~.", + adde_3 = "7c000114RRR.", + stdx_3 = "7c00012aRR0R", + ["stwcx._3"] = "7c00012dRR0R.", + stwx_3 = "7c00012eRR0R", + prtyw_2 = "7c000134RR~", + stvehx_3 = "7c00014eVRR", + stdux_3 = "7c00016aRR0R", + ["stqcx._3"] = "7c00016dR:R0R.", + stwux_3 = "7c00016eRR0R", + prtyd_2 = "7c000174RR~", + stvewx_3 = "7c00018eVRR", + subfze_2 = "7c000190RR.", + addze_2 = "7c000194RR.", + ["stdcx._3"] = "7c0001adRR0R.", + stbx_3 = "7c0001aeRR0R", + stvx_3 = "7c0001ceVRR", + subfme_2 = "7c0001d0RR.", + mulld_3 = "7c0001d2RRR.", + addme_2 = "7c0001d4RR.", + mullw_3 = "7c0001d6RRR.", + dcbtst_2 = "7c0001ec-RR", + stbux_3 = "7c0001eeRR0R", + bpermd_3 = "7c0001f8RR~R", + lvepxl_3 = "7c00020eVRR", + add_3 = "7c000214RRR.", + lqarx_3 = "7c000228R:R0R", + dcbt_2 = "7c00022c-RR", + lhzx_3 = "7c00022eRR0R", + cdtbcd_2 = "7c000234RR~", + eqv_3 = "7c000238RR~R.", + lvepx_3 = "7c00024eVRR", + eciwx_3 = "7c00026cRR0R", + lhzux_3 = "7c00026eRR0R", + cbcdtd_2 = "7c000274RR~", + xor_3 = "7c000278RR~R.", + mfspefscr_1 = "7c0082a6R", + 
mfxer_1 = "7c0102a6R", + mflr_1 = "7c0802a6R", + mfctr_1 = "7c0902a6R", + lwax_3 = "7c0002aaRR0R", + lhax_3 = "7c0002aeRR0R", + mftb_1 = "7c0c42e6R", + mftbu_1 = "7c0d42e6R", + lvxl_3 = "7c0002ceVRR", + lwaux_3 = "7c0002eaRR0R", + lhaux_3 = "7c0002eeRR0R", + popcntw_2 = "7c0002f4RR~", + divdeu_3 = "7c000312RRR.", + divweu_3 = "7c000316RRR.", + sthx_3 = "7c00032eRR0R", + orc_3 = "7c000338RR~R.", + ecowx_3 = "7c00036cRR0R", + sthux_3 = "7c00036eRR0R", + or_3 = "7c000378RR~R.", + mr_2 = "7c000378RR~%.", + divdu_3 = "7c000392RRR.", + divwu_3 = "7c000396RRR.", + mtspefscr_1 = "7c0083a6R", + mtxer_1 = "7c0103a6R", + mtlr_1 = "7c0803a6R", + mtctr_1 = "7c0903a6R", + dcbi_2 = "7c0003ac-RR", + nand_3 = "7c0003b8RR~R.", + dsn_2 = "7c0003c6-RR", + stvxl_3 = "7c0003ceVRR", + divd_3 = "7c0003d2RRR.", + divw_3 = "7c0003d6RRR.", + popcntd_2 = "7c0003f4RR~", + cmpb_3 = "7c0003f8RR~R.", + mcrxr_1 = "7c000400X", + lbdx_3 = "7c000406RRR", + subfco_3 = "7c000410RRR.", + subco_3 = "7c000410RRR~.", + addco_3 = "7c000414RRR.", + ldbrx_3 = "7c000428RR0R", + lswx_3 = "7c00042aRR0R", + lwbrx_3 = "7c00042cRR0R", + lfsx_3 = "7c00042eFR0R", + srw_3 = "7c000430RR~R.", + srd_3 = "7c000436RR~R.", + lhdx_3 = "7c000446RRR", + subfo_3 = "7c000450RRR.", + subo_3 = "7c000450RRR~.", + lfsux_3 = "7c00046eFR0R", + lwdx_3 = "7c000486RRR", + lswi_3 = "7c0004aaRR0A", + sync_0 = "7c0004ac", + lwsync_0 = "7c2004ac", + ptesync_0 = "7c4004ac", + lfdx_3 = "7c0004aeFR0R", + lddx_3 = "7c0004c6RRR", + nego_2 = "7c0004d0RR.", + lfdux_3 = "7c0004eeFR0R", + stbdx_3 = "7c000506RRR", + subfeo_3 = "7c000510RRR.", + subeo_3 = "7c000510RRR~.", + addeo_3 = "7c000514RRR.", + stdbrx_3 = "7c000528RR0R", + stswx_3 = "7c00052aRR0R", + stwbrx_3 = "7c00052cRR0R", + stfsx_3 = "7c00052eFR0R", + sthdx_3 = "7c000546RRR", + ["stbcx._3"] = "7c00056dRRR", + stfsux_3 = "7c00056eFR0R", + stwdx_3 = "7c000586RRR", + subfzeo_2 = "7c000590RR.", + addzeo_2 = "7c000594RR.", + stswi_3 = "7c0005aaRR0A", + ["sthcx._3"] = "7c0005adRRR", + stfdx_3 = "7c0005aeFR0R", + stddx_3 = "7c0005c6RRR", + subfmeo_2 = "7c0005d0RR.", + mulldo_3 = "7c0005d2RRR.", + addmeo_2 = "7c0005d4RR.", + mullwo_3 = "7c0005d6RRR.", + dcba_2 = "7c0005ec-RR", + stfdux_3 = "7c0005eeFR0R", + stvepxl_3 = "7c00060eVRR", + addo_3 = "7c000614RRR.", + lhbrx_3 = "7c00062cRR0R", + lfdpx_3 = "7c00062eF:RR", + sraw_3 = "7c000630RR~R.", + srad_3 = "7c000634RR~R.", + lfddx_3 = "7c000646FRR", + stvepx_3 = "7c00064eVRR", + srawi_3 = "7c000670RR~A.", + sradi_3 = "7c000674RR~H.", + eieio_0 = "7c0006ac", + lfiwax_3 = "7c0006aeFR0R", + divdeuo_3 = "7c000712RRR.", + divweuo_3 = "7c000716RRR.", + sthbrx_3 = "7c00072cRR0R", + stfdpx_3 = "7c00072eF:RR", + extsh_2 = "7c000734RR~.", + stfddx_3 = "7c000746FRR", + divdeo_3 = "7c000752RRR.", + divweo_3 = "7c000756RRR.", + extsb_2 = "7c000774RR~.", + divduo_3 = "7c000792RRR.", + divwou_3 = "7c000796RRR.", + icbi_2 = "7c0007ac-RR", + stfiwx_3 = "7c0007aeFR0R", + extsw_2 = "7c0007b4RR~.", + divdo_3 = "7c0007d2RRR.", + divwo_3 = "7c0007d6RRR.", + dcbz_2 = "7c0007ec-RR", + + ["tbegin._1"] = "7c00051d1", + ["tbegin._0"] = "7c00051d", + ["tend._1"] = "7c00055dY", + ["tend._0"] = "7c00055d", + ["tendall._0"] = "7e00055d", + tcheck_1 = "7c00059cX", + ["tsr._1"] = "7c0005dd1", + ["tsuspend._0"] = "7c0005dd", + ["tresume._0"] = "7c2005dd", + ["tabortwc._3"] = "7c00061dARR", + ["tabortdc._3"] = "7c00065dARR", + ["tabortwci._3"] = "7c00069dARS", + ["tabortdci._3"] = "7c0006ddARS", + ["tabort._1"] = "7c00071d-R-", + ["treclaim._1"] = "7c00075d-R", + ["trechkpt._0"] = "7c0007dd", + + lxsiwzx_3 = 
"7c000018QRR", + lxsiwax_3 = "7c000098QRR", + mfvsrd_2 = "7c000066-Rq", + mfvsrwz_2 = "7c0000e6-Rq", + stxsiwx_3 = "7c000118QRR", + mtvsrd_2 = "7c000166QR", + mtvsrwa_2 = "7c0001a6QR", + lxvdsx_3 = "7c000298QRR", + lxsspx_3 = "7c000418QRR", + lxsdx_3 = "7c000498QRR", + stxsspx_3 = "7c000518QRR", + stxsdx_3 = "7c000598QRR", + lxvw4x_3 = "7c000618QRR", + lxvd2x_3 = "7c000698QRR", + stxvw4x_3 = "7c000718QRR", + stxvd2x_3 = "7c000798QRR", + + -- Primary opcode 30: + rldicl_4 = "78000000RR~HM.", + rldicr_4 = "78000004RR~HM.", + rldic_4 = "78000008RR~HM.", + rldimi_4 = "7800000cRR~HM.", + rldcl_4 = "78000010RR~RM.", + rldcr_4 = "78000012RR~RM.", + + rotldi_3 = op_alias("rldicl_4", function(p) + p[4] = "0" + end), + rotrdi_3 = op_alias("rldicl_4", function(p) + p[3] = "64-("..p[3]..")"; p[4] = "0" + end), + rotld_3 = op_alias("rldcl_4", function(p) + p[4] = "0" + end), + sldi_3 = op_alias("rldicr_4", function(p) + p[4] = "63-("..p[3]..")" + end), + srdi_3 = op_alias("rldicl_4", function(p) + p[4] = p[3]; p[3] = "64-("..p[3]..")" + end), + clrldi_3 = op_alias("rldicl_4", function(p) + p[4] = p[3]; p[3] = "0" + end), + clrrdi_3 = op_alias("rldicr_4", function(p) + p[4] = "63-("..p[3]..")"; p[3] = "0" + end), + + -- Primary opcode 56: + lq_2 = "e0000000R:D", -- NYI: displacement must be divisible by 8. + + -- Primary opcode 57: + lfdp_2 = "e4000000F:D", -- NYI: displacement must be divisible by 4. + + -- Primary opcode 59: + fdivs_3 = "ec000024FFF.", + fsubs_3 = "ec000028FFF.", + fadds_3 = "ec00002aFFF.", + fsqrts_2 = "ec00002cF-F.", + fres_2 = "ec000030F-F.", + fmuls_3 = "ec000032FF-F.", + frsqrtes_2 = "ec000034F-F.", + fmsubs_4 = "ec000038FFFF~.", + fmadds_4 = "ec00003aFFFF~.", + fnmsubs_4 = "ec00003cFFFF~.", + fnmadds_4 = "ec00003eFFFF~.", + fcfids_2 = "ec00069cF-F.", + fcfidus_2 = "ec00079cF-F.", + + dadd_3 = "ec000004FFF.", + dqua_4 = "ec000006FFFZ.", + dmul_3 = "ec000044FFF.", + drrnd_4 = "ec000046FFFZ.", + dscli_3 = "ec000084FF6.", + dquai_4 = "ec000086SF~FZ.", + dscri_3 = "ec0000c4FF6.", + drintx_4 = "ec0000c61F~FZ.", + dcmpo_3 = "ec000104XFF", + dtstex_3 = "ec000144XFF", + dtstdc_3 = "ec000184XF6", + dtstdg_3 = "ec0001c4XF6", + drintn_4 = "ec0001c61F~FZ.", + dctdp_2 = "ec000204F-F.", + dctfix_2 = "ec000244F-F.", + ddedpd_3 = "ec000284ZF~F.", + dxex_2 = "ec0002c4F-F.", + dsub_3 = "ec000404FFF.", + ddiv_3 = "ec000444FFF.", + dcmpu_3 = "ec000504XFF", + dtstsf_3 = "ec000544XFF", + drsp_2 = "ec000604F-F.", + dcffix_2 = "ec000644F-F.", + denbcd_3 = "ec000684YF~F.", + diex_3 = "ec0006c4FFF.", + + -- Primary opcode 60: + xsaddsp_3 = "f0000000QQQ", + xsmaddasp_3 = "f0000008QQQ", + xxsldwi_4 = "f0000010QQQz", + xsrsqrtesp_2 = "f0000028Q-Q", + xssqrtsp_2 = "f000002cQ-Q", + xxsel_4 = "f0000030QQQQ", + xssubsp_3 = "f0000040QQQ", + xsmaddmsp_3 = "f0000048QQQ", + xxpermdi_4 = "f0000050QQQz", + xsresp_2 = "f0000068Q-Q", + xsmulsp_3 = "f0000080QQQ", + xsmsubasp_3 = "f0000088QQQ", + xxmrghw_3 = "f0000090QQQ", + xsdivsp_3 = "f00000c0QQQ", + xsmsubmsp_3 = "f00000c8QQQ", + xsadddp_3 = "f0000100QQQ", + xsmaddadp_3 = "f0000108QQQ", + xscmpudp_3 = "f0000118XQQ", + xscvdpuxws_2 = "f0000120Q-Q", + xsrdpi_2 = "f0000124Q-Q", + xsrsqrtedp_2 = "f0000128Q-Q", + xssqrtdp_2 = "f000012cQ-Q", + xssubdp_3 = "f0000140QQQ", + xsmaddmdp_3 = "f0000148QQQ", + xscmpodp_3 = "f0000158XQQ", + xscvdpsxws_2 = "f0000160Q-Q", + xsrdpiz_2 = "f0000164Q-Q", + xsredp_2 = "f0000168Q-Q", + xsmuldp_3 = "f0000180QQQ", + xsmsubadp_3 = "f0000188QQQ", + xxmrglw_3 = "f0000190QQQ", + xsrdpip_2 = "f00001a4Q-Q", + xstsqrtdp_2 = "f00001a8X-Q", + 
xsrdpic_2 = "f00001acQ-Q", + xsdivdp_3 = "f00001c0QQQ", + xsmsubmdp_3 = "f00001c8QQQ", + xsrdpim_2 = "f00001e4Q-Q", + xstdivdp_3 = "f00001e8XQQ", + xvaddsp_3 = "f0000200QQQ", + xvmaddasp_3 = "f0000208QQQ", + xvcmpeqsp_3 = "f0000218QQQ", + xvcvspuxws_2 = "f0000220Q-Q", + xvrspi_2 = "f0000224Q-Q", + xvrsqrtesp_2 = "f0000228Q-Q", + xvsqrtsp_2 = "f000022cQ-Q", + xvsubsp_3 = "f0000240QQQ", + xvmaddmsp_3 = "f0000248QQQ", + xvcmpgtsp_3 = "f0000258QQQ", + xvcvspsxws_2 = "f0000260Q-Q", + xvrspiz_2 = "f0000264Q-Q", + xvresp_2 = "f0000268Q-Q", + xvmulsp_3 = "f0000280QQQ", + xvmsubasp_3 = "f0000288QQQ", + xxspltw_3 = "f0000290QQg~", + xvcmpgesp_3 = "f0000298QQQ", + xvcvuxwsp_2 = "f00002a0Q-Q", + xvrspip_2 = "f00002a4Q-Q", + xvtsqrtsp_2 = "f00002a8X-Q", + xvrspic_2 = "f00002acQ-Q", + xvdivsp_3 = "f00002c0QQQ", + xvmsubmsp_3 = "f00002c8QQQ", + xvcvsxwsp_2 = "f00002e0Q-Q", + xvrspim_2 = "f00002e4Q-Q", + xvtdivsp_3 = "f00002e8XQQ", + xvadddp_3 = "f0000300QQQ", + xvmaddadp_3 = "f0000308QQQ", + xvcmpeqdp_3 = "f0000318QQQ", + xvcvdpuxws_2 = "f0000320Q-Q", + xvrdpi_2 = "f0000324Q-Q", + xvrsqrtedp_2 = "f0000328Q-Q", + xvsqrtdp_2 = "f000032cQ-Q", + xvsubdp_3 = "f0000340QQQ", + xvmaddmdp_3 = "f0000348QQQ", + xvcmpgtdp_3 = "f0000358QQQ", + xvcvdpsxws_2 = "f0000360Q-Q", + xvrdpiz_2 = "f0000364Q-Q", + xvredp_2 = "f0000368Q-Q", + xvmuldp_3 = "f0000380QQQ", + xvmsubadp_3 = "f0000388QQQ", + xvcmpgedp_3 = "f0000398QQQ", + xvcvuxwdp_2 = "f00003a0Q-Q", + xvrdpip_2 = "f00003a4Q-Q", + xvtsqrtdp_2 = "f00003a8X-Q", + xvrdpic_2 = "f00003acQ-Q", + xvdivdp_3 = "f00003c0QQQ", + xvmsubmdp_3 = "f00003c8QQQ", + xvcvsxwdp_2 = "f00003e0Q-Q", + xvrdpim_2 = "f00003e4Q-Q", + xvtdivdp_3 = "f00003e8XQQ", + xsnmaddasp_3 = "f0000408QQQ", + xxland_3 = "f0000410QQQ", + xscvdpsp_2 = "f0000424Q-Q", + xscvdpspn_2 = "f000042cQ-Q", + xsnmaddmsp_3 = "f0000448QQQ", + xxlandc_3 = "f0000450QQQ", + xsrsp_2 = "f0000464Q-Q", + xsnmsubasp_3 = "f0000488QQQ", + xxlor_3 = "f0000490QQQ", + xscvuxdsp_2 = "f00004a0Q-Q", + xsnmsubmsp_3 = "f00004c8QQQ", + xxlxor_3 = "f00004d0QQQ", + xscvsxdsp_2 = "f00004e0Q-Q", + xsmaxdp_3 = "f0000500QQQ", + xsnmaddadp_3 = "f0000508QQQ", + xxlnor_3 = "f0000510QQQ", + xscvdpuxds_2 = "f0000520Q-Q", + xscvspdp_2 = "f0000524Q-Q", + xscvspdpn_2 = "f000052cQ-Q", + xsmindp_3 = "f0000540QQQ", + xsnmaddmdp_3 = "f0000548QQQ", + xxlorc_3 = "f0000550QQQ", + xscvdpsxds_2 = "f0000560Q-Q", + xsabsdp_2 = "f0000564Q-Q", + xscpsgndp_3 = "f0000580QQQ", + xsnmsubadp_3 = "f0000588QQQ", + xxlnand_3 = "f0000590QQQ", + xscvuxddp_2 = "f00005a0Q-Q", + xsnabsdp_2 = "f00005a4Q-Q", + xsnmsubmdp_3 = "f00005c8QQQ", + xxleqv_3 = "f00005d0QQQ", + xscvsxddp_2 = "f00005e0Q-Q", + xsnegdp_2 = "f00005e4Q-Q", + xvmaxsp_3 = "f0000600QQQ", + xvnmaddasp_3 = "f0000608QQQ", + ["xvcmpeqsp._3"] = "f0000618QQQ", + xvcvspuxds_2 = "f0000620Q-Q", + xvcvdpsp_2 = "f0000624Q-Q", + xvminsp_3 = "f0000640QQQ", + xvnmaddmsp_3 = "f0000648QQQ", + ["xvcmpgtsp._3"] = "f0000658QQQ", + xvcvspsxds_2 = "f0000660Q-Q", + xvabssp_2 = "f0000664Q-Q", + xvcpsgnsp_3 = "f0000680QQQ", + xvnmsubasp_3 = "f0000688QQQ", + ["xvcmpgesp._3"] = "f0000698QQQ", + xvcvuxdsp_2 = "f00006a0Q-Q", + xvnabssp_2 = "f00006a4Q-Q", + xvnmsubmsp_3 = "f00006c8QQQ", + xvcvsxdsp_2 = "f00006e0Q-Q", + xvnegsp_2 = "f00006e4Q-Q", + xvmaxdp_3 = "f0000700QQQ", + xvnmaddadp_3 = "f0000708QQQ", + ["xvcmpeqdp._3"] = "f0000718QQQ", + xvcvdpuxds_2 = "f0000720Q-Q", + xvcvspdp_2 = "f0000724Q-Q", + xvmindp_3 = "f0000740QQQ", + xvnmaddmdp_3 = "f0000748QQQ", + ["xvcmpgtdp._3"] = "f0000758QQQ", + xvcvdpsxds_2 = "f0000760Q-Q", + xvabsdp_2 = 
"f0000764Q-Q", + xvcpsgndp_3 = "f0000780QQQ", + xvnmsubadp_3 = "f0000788QQQ", + ["xvcmpgedp._3"] = "f0000798QQQ", + xvcvuxddp_2 = "f00007a0Q-Q", + xvnabsdp_2 = "f00007a4Q-Q", + xvnmsubmdp_3 = "f00007c8QQQ", + xvcvsxddp_2 = "f00007e0Q-Q", + xvnegdp_2 = "f00007e4Q-Q", + + -- Primary opcode 61: + stfdp_2 = "f4000000F:D", -- NYI: displacement must be divisible by 4. + + -- Primary opcode 62: + stq_2 = "f8000002R:D", -- NYI: displacement must be divisible by 8. + + -- Primary opcode 63: + fdiv_3 = "fc000024FFF.", + fsub_3 = "fc000028FFF.", + fadd_3 = "fc00002aFFF.", + fsqrt_2 = "fc00002cF-F.", + fsel_4 = "fc00002eFFFF~.", + fre_2 = "fc000030F-F.", + fmul_3 = "fc000032FF-F.", + frsqrte_2 = "fc000034F-F.", + fmsub_4 = "fc000038FFFF~.", + fmadd_4 = "fc00003aFFFF~.", + fnmsub_4 = "fc00003cFFFF~.", + fnmadd_4 = "fc00003eFFFF~.", + fcmpu_3 = "fc000000XFF", + fcpsgn_3 = "fc000010FFF.", + fcmpo_3 = "fc000040XFF", + mtfsb1_1 = "fc00004cA", + fneg_2 = "fc000050F-F.", + mcrfs_2 = "fc000080XX", + mtfsb0_1 = "fc00008cA", + fmr_2 = "fc000090F-F.", + frsp_2 = "fc000018F-F.", + fctiw_2 = "fc00001cF-F.", + fctiwz_2 = "fc00001eF-F.", + ftdiv_2 = "fc000100X-F.", + fctiwu_2 = "fc00011cF-F.", + fctiwuz_2 = "fc00011eF-F.", + mtfsfi_2 = "fc00010cAA", -- NYI: upshift. + fnabs_2 = "fc000110F-F.", + ftsqrt_2 = "fc000140X-F.", + fabs_2 = "fc000210F-F.", + frin_2 = "fc000310F-F.", + friz_2 = "fc000350F-F.", + frip_2 = "fc000390F-F.", + frim_2 = "fc0003d0F-F.", + mffs_1 = "fc00048eF.", + -- NYI: mtfsf, mtfsb0, mtfsb1. + fctid_2 = "fc00065cF-F.", + fctidz_2 = "fc00065eF-F.", + fmrgow_3 = "fc00068cFFF", + fcfid_2 = "fc00069cF-F.", + fctidu_2 = "fc00075cF-F.", + fctiduz_2 = "fc00075eF-F.", + fmrgew_3 = "fc00078cFFF", + fcfidu_2 = "fc00079cF-F.", + + daddq_3 = "fc000004F:F:F:.", + dquaq_4 = "fc000006F:F:F:Z.", + dmulq_3 = "fc000044F:F:F:.", + drrndq_4 = "fc000046F:F:F:Z.", + dscliq_3 = "fc000084F:F:6.", + dquaiq_4 = "fc000086SF:~F:Z.", + dscriq_3 = "fc0000c4F:F:6.", + drintxq_4 = "fc0000c61F:~F:Z.", + dcmpoq_3 = "fc000104XF:F:", + dtstexq_3 = "fc000144XF:F:", + dtstdcq_3 = "fc000184XF:6", + dtstdgq_3 = "fc0001c4XF:6", + drintnq_4 = "fc0001c61F:~F:Z.", + dctqpq_2 = "fc000204F:-F:.", + dctfixq_2 = "fc000244F:-F:.", + ddedpdq_3 = "fc000284ZF:~F:.", + dxexq_2 = "fc0002c4F:-F:.", + dsubq_3 = "fc000404F:F:F:.", + ddivq_3 = "fc000444F:F:F:.", + dcmpuq_3 = "fc000504XF:F:", + dtstsfq_3 = "fc000544XF:F:", + drdpq_2 = "fc000604F:-F:.", + dcffixq_2 = "fc000644F:-F:.", + denbcdq_3 = "fc000684YF:~F:.", + diexq_3 = "fc0006c4F:FF:.", + + -- Primary opcode 4, SPE APU extension: + evaddw_3 = "10000200RRR", + evaddiw_3 = "10000202RAR~", + evsubw_3 = "10000204RRR~", + evsubiw_3 = "10000206RAR~", + evabs_2 = "10000208RR", + evneg_2 = "10000209RR", + evextsb_2 = "1000020aRR", + evextsh_2 = "1000020bRR", + evrndw_2 = "1000020cRR", + evcntlzw_2 = "1000020dRR", + evcntlsw_2 = "1000020eRR", + brinc_3 = "1000020fRRR", + evand_3 = "10000211RRR", + evandc_3 = "10000212RRR", + evxor_3 = "10000216RRR", + evor_3 = "10000217RRR", + evmr_2 = "10000217RR=", + evnor_3 = "10000218RRR", + evnot_2 = "10000218RR=", + eveqv_3 = "10000219RRR", + evorc_3 = "1000021bRRR", + evnand_3 = "1000021eRRR", + evsrwu_3 = "10000220RRR", + evsrws_3 = "10000221RRR", + evsrwiu_3 = "10000222RRA", + evsrwis_3 = "10000223RRA", + evslw_3 = "10000224RRR", + evslwi_3 = "10000226RRA", + evrlw_3 = "10000228RRR", + evsplati_2 = "10000229RS", + evrlwi_3 = "1000022aRRA", + evsplatfi_2 = "1000022bRS", + evmergehi_3 = "1000022cRRR", + evmergelo_3 = "1000022dRRR", + evcmpgtu_3 = "10000230XRR", + 
evcmpgtu_2 = "10000230-RR", + evcmpgts_3 = "10000231XRR", + evcmpgts_2 = "10000231-RR", + evcmpltu_3 = "10000232XRR", + evcmpltu_2 = "10000232-RR", + evcmplts_3 = "10000233XRR", + evcmplts_2 = "10000233-RR", + evcmpeq_3 = "10000234XRR", + evcmpeq_2 = "10000234-RR", + evsel_4 = "10000278RRRW", + evsel_3 = "10000278RRR", + evfsadd_3 = "10000280RRR", + evfssub_3 = "10000281RRR", + evfsabs_2 = "10000284RR", + evfsnabs_2 = "10000285RR", + evfsneg_2 = "10000286RR", + evfsmul_3 = "10000288RRR", + evfsdiv_3 = "10000289RRR", + evfscmpgt_3 = "1000028cXRR", + evfscmpgt_2 = "1000028c-RR", + evfscmplt_3 = "1000028dXRR", + evfscmplt_2 = "1000028d-RR", + evfscmpeq_3 = "1000028eXRR", + evfscmpeq_2 = "1000028e-RR", + evfscfui_2 = "10000290R-R", + evfscfsi_2 = "10000291R-R", + evfscfuf_2 = "10000292R-R", + evfscfsf_2 = "10000293R-R", + evfsctui_2 = "10000294R-R", + evfsctsi_2 = "10000295R-R", + evfsctuf_2 = "10000296R-R", + evfsctsf_2 = "10000297R-R", + evfsctuiz_2 = "10000298R-R", + evfsctsiz_2 = "1000029aR-R", + evfststgt_3 = "1000029cXRR", + evfststgt_2 = "1000029c-RR", + evfststlt_3 = "1000029dXRR", + evfststlt_2 = "1000029d-RR", + evfststeq_3 = "1000029eXRR", + evfststeq_2 = "1000029e-RR", + efsadd_3 = "100002c0RRR", + efssub_3 = "100002c1RRR", + efsabs_2 = "100002c4RR", + efsnabs_2 = "100002c5RR", + efsneg_2 = "100002c6RR", + efsmul_3 = "100002c8RRR", + efsdiv_3 = "100002c9RRR", + efscmpgt_3 = "100002ccXRR", + efscmpgt_2 = "100002cc-RR", + efscmplt_3 = "100002cdXRR", + efscmplt_2 = "100002cd-RR", + efscmpeq_3 = "100002ceXRR", + efscmpeq_2 = "100002ce-RR", + efscfd_2 = "100002cfR-R", + efscfui_2 = "100002d0R-R", + efscfsi_2 = "100002d1R-R", + efscfuf_2 = "100002d2R-R", + efscfsf_2 = "100002d3R-R", + efsctui_2 = "100002d4R-R", + efsctsi_2 = "100002d5R-R", + efsctuf_2 = "100002d6R-R", + efsctsf_2 = "100002d7R-R", + efsctuiz_2 = "100002d8R-R", + efsctsiz_2 = "100002daR-R", + efststgt_3 = "100002dcXRR", + efststgt_2 = "100002dc-RR", + efststlt_3 = "100002ddXRR", + efststlt_2 = "100002dd-RR", + efststeq_3 = "100002deXRR", + efststeq_2 = "100002de-RR", + efdadd_3 = "100002e0RRR", + efdsub_3 = "100002e1RRR", + efdcfuid_2 = "100002e2R-R", + efdcfsid_2 = "100002e3R-R", + efdabs_2 = "100002e4RR", + efdnabs_2 = "100002e5RR", + efdneg_2 = "100002e6RR", + efdmul_3 = "100002e8RRR", + efddiv_3 = "100002e9RRR", + efdctuidz_2 = "100002eaR-R", + efdctsidz_2 = "100002ebR-R", + efdcmpgt_3 = "100002ecXRR", + efdcmpgt_2 = "100002ec-RR", + efdcmplt_3 = "100002edXRR", + efdcmplt_2 = "100002ed-RR", + efdcmpeq_3 = "100002eeXRR", + efdcmpeq_2 = "100002ee-RR", + efdcfs_2 = "100002efR-R", + efdcfui_2 = "100002f0R-R", + efdcfsi_2 = "100002f1R-R", + efdcfuf_2 = "100002f2R-R", + efdcfsf_2 = "100002f3R-R", + efdctui_2 = "100002f4R-R", + efdctsi_2 = "100002f5R-R", + efdctuf_2 = "100002f6R-R", + efdctsf_2 = "100002f7R-R", + efdctuiz_2 = "100002f8R-R", + efdctsiz_2 = "100002faR-R", + efdtstgt_3 = "100002fcXRR", + efdtstgt_2 = "100002fc-RR", + efdtstlt_3 = "100002fdXRR", + efdtstlt_2 = "100002fd-RR", + efdtsteq_3 = "100002feXRR", + efdtsteq_2 = "100002fe-RR", + evlddx_3 = "10000300RR0R", + evldd_2 = "10000301R8", + evldwx_3 = "10000302RR0R", + evldw_2 = "10000303R8", + evldhx_3 = "10000304RR0R", + evldh_2 = "10000305R8", + evlwhex_3 = "10000310RR0R", + evlwhe_2 = "10000311R4", + evlwhoux_3 = "10000314RR0R", + evlwhou_2 = "10000315R4", + evlwhosx_3 = "10000316RR0R", + evlwhos_2 = "10000317R4", + evstddx_3 = "10000320RR0R", + evstdd_2 = "10000321R8", + evstdwx_3 = "10000322RR0R", + evstdw_2 = "10000323R8", + evstdhx_3 = "10000324RR0R", + 
evstdh_2 = "10000325R8", + evstwhex_3 = "10000330RR0R", + evstwhe_2 = "10000331R4", + evstwhox_3 = "10000334RR0R", + evstwho_2 = "10000335R4", + evstwwex_3 = "10000338RR0R", + evstwwe_2 = "10000339R4", + evstwwox_3 = "1000033cRR0R", + evstwwo_2 = "1000033dR4", + evmhessf_3 = "10000403RRR", + evmhossf_3 = "10000407RRR", + evmheumi_3 = "10000408RRR", + evmhesmi_3 = "10000409RRR", + evmhesmf_3 = "1000040bRRR", + evmhoumi_3 = "1000040cRRR", + evmhosmi_3 = "1000040dRRR", + evmhosmf_3 = "1000040fRRR", + evmhessfa_3 = "10000423RRR", + evmhossfa_3 = "10000427RRR", + evmheumia_3 = "10000428RRR", + evmhesmia_3 = "10000429RRR", + evmhesmfa_3 = "1000042bRRR", + evmhoumia_3 = "1000042cRRR", + evmhosmia_3 = "1000042dRRR", + evmhosmfa_3 = "1000042fRRR", + evmwhssf_3 = "10000447RRR", + evmwlumi_3 = "10000448RRR", + evmwhumi_3 = "1000044cRRR", + evmwhsmi_3 = "1000044dRRR", + evmwhsmf_3 = "1000044fRRR", + evmwssf_3 = "10000453RRR", + evmwumi_3 = "10000458RRR", + evmwsmi_3 = "10000459RRR", + evmwsmf_3 = "1000045bRRR", + evmwhssfa_3 = "10000467RRR", + evmwlumia_3 = "10000468RRR", + evmwhumia_3 = "1000046cRRR", + evmwhsmia_3 = "1000046dRRR", + evmwhsmfa_3 = "1000046fRRR", + evmwssfa_3 = "10000473RRR", + evmwumia_3 = "10000478RRR", + evmwsmia_3 = "10000479RRR", + evmwsmfa_3 = "1000047bRRR", + evmra_2 = "100004c4RR", + evdivws_3 = "100004c6RRR", + evdivwu_3 = "100004c7RRR", + evmwssfaa_3 = "10000553RRR", + evmwumiaa_3 = "10000558RRR", + evmwsmiaa_3 = "10000559RRR", + evmwsmfaa_3 = "1000055bRRR", + evmwssfan_3 = "100005d3RRR", + evmwumian_3 = "100005d8RRR", + evmwsmian_3 = "100005d9RRR", + evmwsmfan_3 = "100005dbRRR", + evmergehilo_3 = "1000022eRRR", + evmergelohi_3 = "1000022fRRR", + evlhhesplatx_3 = "10000308RR0R", + evlhhesplat_2 = "10000309R2", + evlhhousplatx_3 = "1000030cRR0R", + evlhhousplat_2 = "1000030dR2", + evlhhossplatx_3 = "1000030eRR0R", + evlhhossplat_2 = "1000030fR2", + evlwwsplatx_3 = "10000318RR0R", + evlwwsplat_2 = "10000319R4", + evlwhsplatx_3 = "1000031cRR0R", + evlwhsplat_2 = "1000031dR4", + evaddusiaaw_2 = "100004c0RR", + evaddssiaaw_2 = "100004c1RR", + evsubfusiaaw_2 = "100004c2RR", + evsubfssiaaw_2 = "100004c3RR", + evaddumiaaw_2 = "100004c8RR", + evaddsmiaaw_2 = "100004c9RR", + evsubfumiaaw_2 = "100004caRR", + evsubfsmiaaw_2 = "100004cbRR", + evmheusiaaw_3 = "10000500RRR", + evmhessiaaw_3 = "10000501RRR", + evmhessfaaw_3 = "10000503RRR", + evmhousiaaw_3 = "10000504RRR", + evmhossiaaw_3 = "10000505RRR", + evmhossfaaw_3 = "10000507RRR", + evmheumiaaw_3 = "10000508RRR", + evmhesmiaaw_3 = "10000509RRR", + evmhesmfaaw_3 = "1000050bRRR", + evmhoumiaaw_3 = "1000050cRRR", + evmhosmiaaw_3 = "1000050dRRR", + evmhosmfaaw_3 = "1000050fRRR", + evmhegumiaa_3 = "10000528RRR", + evmhegsmiaa_3 = "10000529RRR", + evmhegsmfaa_3 = "1000052bRRR", + evmhogumiaa_3 = "1000052cRRR", + evmhogsmiaa_3 = "1000052dRRR", + evmhogsmfaa_3 = "1000052fRRR", + evmwlusiaaw_3 = "10000540RRR", + evmwlssiaaw_3 = "10000541RRR", + evmwlumiaaw_3 = "10000548RRR", + evmwlsmiaaw_3 = "10000549RRR", + evmheusianw_3 = "10000580RRR", + evmhessianw_3 = "10000581RRR", + evmhessfanw_3 = "10000583RRR", + evmhousianw_3 = "10000584RRR", + evmhossianw_3 = "10000585RRR", + evmhossfanw_3 = "10000587RRR", + evmheumianw_3 = "10000588RRR", + evmhesmianw_3 = "10000589RRR", + evmhesmfanw_3 = "1000058bRRR", + evmhoumianw_3 = "1000058cRRR", + evmhosmianw_3 = "1000058dRRR", + evmhosmfanw_3 = "1000058fRRR", + evmhegumian_3 = "100005a8RRR", + evmhegsmian_3 = "100005a9RRR", + evmhegsmfan_3 = "100005abRRR", + evmhogumian_3 = "100005acRRR", + evmhogsmian_3 = 
"100005adRRR", + evmhogsmfan_3 = "100005afRRR", + evmwlusianw_3 = "100005c0RRR", + evmwlssianw_3 = "100005c1RRR", + evmwlumianw_3 = "100005c8RRR", + evmwlsmianw_3 = "100005c9RRR", + + -- NYI: Book E instructions. +} + +-- Add mnemonics for "." variants. +do + local t = {} + for k,v in pairs(map_op) do + if type(v) == "string" and sub(v, -1) == "." then + local v2 = sub(v, 1, 7)..char(byte(v, 8)+1)..sub(v, 9, -2) + t[sub(k, 1, -3).."."..sub(k, -2)] = v2 + end + end + for k,v in pairs(t) do + map_op[k] = v + end +end + +-- Add more branch mnemonics. +for cond,c in pairs(map_cond) do + local b1 = "b"..cond + local c1 = shl(band(c, 3), 16) + (c < 4 and 0x01000000 or 0) + -- bX[l] + map_op[b1.."_1"] = tohex(0x40800000 + c1).."K" + map_op[b1.."y_1"] = tohex(0x40a00000 + c1).."K" + map_op[b1.."l_1"] = tohex(0x40800001 + c1).."K" + map_op[b1.."_2"] = tohex(0x40800000 + c1).."-XK" + map_op[b1.."y_2"] = tohex(0x40a00000 + c1).."-XK" + map_op[b1.."l_2"] = tohex(0x40800001 + c1).."-XK" + -- bXlr[l] + map_op[b1.."lr_0"] = tohex(0x4c800020 + c1) + map_op[b1.."lrl_0"] = tohex(0x4c800021 + c1) + map_op[b1.."ctr_0"] = tohex(0x4c800420 + c1) + map_op[b1.."ctrl_0"] = tohex(0x4c800421 + c1) + -- bXctr[l] + map_op[b1.."lr_1"] = tohex(0x4c800020 + c1).."-X" + map_op[b1.."lrl_1"] = tohex(0x4c800021 + c1).."-X" + map_op[b1.."ctr_1"] = tohex(0x4c800420 + c1).."-X" + map_op[b1.."ctrl_1"] = tohex(0x4c800421 + c1).."-X" +end + +------------------------------------------------------------------------------ + +local function parse_gpr(expr) + local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + if not reg then + werror("type `"..(tname or expr).."' needs a register override") + end + expr = reg + end + local r = match(expr, "^r([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r, tp end + end + werror("bad register name `"..expr.."'") +end + +local function parse_fpr(expr) + local r = match(expr, "^f([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r end + end + werror("bad register name `"..expr.."'") +end + +local function parse_vr(expr) + local r = match(expr, "^v([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r end + end + werror("bad register name `"..expr.."'") +end + +local function parse_vs(expr) + local r = match(expr, "^vs([1-6]?[0-9])$") + if r then + r = tonumber(r) + if r <= 63 then return r end + end + werror("bad register name `"..expr.."'") +end + +local function parse_cr(expr) + local r = match(expr, "^cr([0-7])$") + if r then return tonumber(r) end + werror("bad condition register name `"..expr.."'") +end + +local function parse_cond(expr) + local r, cond = match(expr, "^4%*cr([0-7])%+(%w%w)$") + if r then + r = tonumber(r) + local c = map_cond[cond] + if c and c < 4 then return r*4+c end + end + werror("bad condition bit name `"..expr.."'") +end + +local parse_ctx = {} + +local loadenv = setfenv and function(s) + local code = loadstring(s, "") + if code then setfenv(code, parse_ctx) end + return code +end or function(s) + return load(s, "", nil, parse_ctx) +end + +-- Try to parse simple arithmetic, too, since some basic ops are aliases. 
+local function parse_number(n) + local x = tonumber(n) + if x then return x end + local code = loadenv("return "..n) + if code then + local ok, y = pcall(code) + if ok then return y end + end + return nil +end + +local function parse_imm(imm, bits, shift, scale, signed) + local n = parse_number(imm) + if n then + local m = sar(n, scale) + if shl(m, scale) == n then + if signed then + local s = sar(m, bits-1) + if s == 0 then return shl(m, shift) + elseif s == -1 then return shl(m + shl(1, bits), shift) end + else + if sar(m, bits) == 0 then return shl(m, shift) end + end + end + werror("out of range immediate `"..imm.."'") + elseif match(imm, "^[rfv]([1-3]?[0-9])$") or + match(imm, "^vs([1-6]?[0-9])$") or + match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then + werror("expected immediate operand, got register") + else + waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) + return 0 + end +end + +local function parse_shiftmask(imm, isshift) + local n = parse_number(imm) + if n then + if shr(n, 6) == 0 then + local lsb = band(n, 31) + local msb = n - lsb + return isshift and (shl(lsb, 11)+shr(msb, 4)) or (shl(lsb, 6)+msb) + end + werror("out of range immediate `"..imm.."'") + elseif match(imm, "^r([1-3]?[0-9])$") or + match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then + werror("expected immediate operand, got register") + else + waction("IMMSH", isshift and 1 or 0, imm) + return 0; + end +end + +local function parse_disp(disp) + local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") + if imm then + local r = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + return shl(r, 16) + parse_imm(imm, 16, 0, 0, true) + end + local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local r, tp = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + if tp then + waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr)) + return shl(r, 16) + end + end + werror("bad displacement `"..disp.."'") +end + +local function parse_u5disp(disp, scale) + local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") + if imm then + local r = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + return shl(r, 16) + parse_imm(imm, 5, 11, scale, false) + end + local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local r, tp = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + if tp then + waction("IMM", scale*1024+5*32+11, format(tp.ctypefmt, tailr)) + return shl(r, 16) + end + end + werror("bad displacement `"..disp.."'") +end + +local function parse_label(label, def) + local prefix = sub(label, 1, 2) + -- =>label (pc label reference) + if prefix == "=>" then + return "PC", 0, sub(label, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "LG", map_global[sub(label, 3)] + end + if def then + -- [1-9] (local label definition) + if match(label, "^[1-9]$") then + return "LG", 10+tonumber(label) + end + else + -- [<>][1-9] (local label reference) + local dir, lnum = match(label, "^([<>])([1-9])$") + if dir then -- Fwd: 1-9, Bkwd: 11-19. + return "LG", lnum + (dir == ">" and 0 or 10) + end + -- extern label (extern label reference) + local extname = match(label, "^extern%s+(%S+)$") + if extname then + return "EXT", map_extern[extname] + end + end + werror("bad label `"..label.."'") +end + +------------------------------------------------------------------------------ + +-- Handle opcodes defined with template strings. 
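Each template string in map_op packs the base opcode word into its first eight hex digits; the characters after that are operand-format codes which op_template() below consumes left to right, dropping register numbers into successively lower five-bit fields and merging immediates and displacements through the parse_* helpers above. A hand-worked sketch for addi r4, r5, 8 against its template "38000000RR0I", illustrative only and using the same bit library the DynASM modules rely on:

   local bit = bit or require("bit")
   local shl = bit.lshift

   local op = tonumber("38000000", 16)  -- base opcode word
   op = op + shl(4, 21)                 -- first "R": rD = r4 goes into the field at bit 21
   op = op + shl(5, 16)                 -- second "R": rA = r5 goes into the field at bit 16
                                        -- "0" merely checks that the field just filled is not r0
   op = op + 8                          -- "I": 16-bit signed immediate
   print(string.format("%08x", op))     --> 38850008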
+op_template = function(params, template, nparams) + if not params then return sub(template, 9) end + local op = tonumber(sub(template, 1, 8), 16) + local n, rs = 1, 26 + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 3 positions (rlwinm). + if secpos+3 > maxsecpos then wflush() end + local pos = wpos() + + -- Process each character. + for p in gmatch(sub(template, 9), ".") do + if p == "R" then + rs = rs - 5; op = op + shl(parse_gpr(params[n]), rs); n = n + 1 + elseif p == "F" then + rs = rs - 5; op = op + shl(parse_fpr(params[n]), rs); n = n + 1 + elseif p == "V" then + rs = rs - 5; op = op + shl(parse_vr(params[n]), rs); n = n + 1 + elseif p == "Q" then + local vs = parse_vs(params[n]); n = n + 1; rs = rs - 5 + local sh = rs == 6 and 2 or 3 + band(shr(rs, 1), 3) + op = op + shl(band(vs, 31), rs) + shr(band(vs, 32), sh) + elseif p == "q" then + local vs = parse_vs(params[n]); n = n + 1 + op = op + shl(band(vs, 31), 21) + shr(band(vs, 32), 5) + elseif p == "A" then + rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, false); n = n + 1 + elseif p == "S" then + rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, true); n = n + 1 + elseif p == "I" then + op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1 + elseif p == "U" then + op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1 + elseif p == "D" then + op = op + parse_disp(params[n]); n = n + 1 + elseif p == "2" then + op = op + parse_u5disp(params[n], 1); n = n + 1 + elseif p == "4" then + op = op + parse_u5disp(params[n], 2); n = n + 1 + elseif p == "8" then + op = op + parse_u5disp(params[n], 3); n = n + 1 + elseif p == "C" then + rs = rs - 5; op = op + shl(parse_cond(params[n]), rs); n = n + 1 + elseif p == "X" then + rs = rs - 5; op = op + shl(parse_cr(params[n]), rs+2); n = n + 1 + elseif p == "1" then + rs = rs - 5; op = op + parse_imm(params[n], 1, rs, 0, false); n = n + 1 + elseif p == "g" then + rs = rs - 5; op = op + parse_imm(params[n], 2, rs, 0, false); n = n + 1 + elseif p == "3" then + rs = rs - 5; op = op + parse_imm(params[n], 3, rs, 0, false); n = n + 1 + elseif p == "P" then + rs = rs - 5; op = op + parse_imm(params[n], 4, rs, 0, false); n = n + 1 + elseif p == "p" then + op = op + parse_imm(params[n], 4, rs, 0, false); n = n + 1 + elseif p == "6" then + rs = rs - 6; op = op + parse_imm(params[n], 6, rs, 0, false); n = n + 1 + elseif p == "Y" then + rs = rs - 5; op = op + parse_imm(params[n], 1, rs+4, 0, false); n = n + 1 + elseif p == "y" then + rs = rs - 5; op = op + parse_imm(params[n], 1, rs+3, 0, false); n = n + 1 + elseif p == "Z" then + rs = rs - 5; op = op + parse_imm(params[n], 2, rs+3, 0, false); n = n + 1 + elseif p == "z" then + rs = rs - 5; op = op + parse_imm(params[n], 2, rs+2, 0, false); n = n + 1 + elseif p == "W" then + op = op + parse_cr(params[n]); n = n + 1 + elseif p == "G" then + op = op + parse_imm(params[n], 8, 12, 0, false); n = n + 1 + elseif p == "H" then + op = op + parse_shiftmask(params[n], true); n = n + 1 + elseif p == "M" then + op = op + parse_shiftmask(params[n], false); n = n + 1 + elseif p == "J" or p == "K" then + local mode, n, s = parse_label(params[n], false) + if p == "K" then n = n + 2048 end + waction("REL_"..mode, n, s, 1) + n = n + 1 + elseif p == "0" then + if band(shr(op, rs), 31) == 0 then werror("cannot use r0") end + elseif p == "=" or p == "%" then + local t = band(shr(op, p == "%" and rs+5 or rs), 31) + rs = rs - 5 + op = op + shl(t, rs) + elseif p == "~" then + local mm = shl(31, 
rs) + local lo = band(op, mm) + local hi = band(op, shl(mm, 5)) + op = op - lo - hi + shl(lo, 5) + shr(hi, 5) + elseif p == ":" then + if band(shr(op, rs), 1) ~= 0 then werror("register pair expected") end + elseif p == "-" then + rs = rs - 5 + elseif p == "." then + -- Ignored. + else + assert(false) + end + end + wputpos(pos, op) +end + +map_op[".template__"] = op_template + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. +map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_1"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr" end + if secpos+1 > maxsecpos then wflush() end + local mode, n, s = parse_label(params[1], true) + if mode == "EXT" then werror("bad label definition") end + waction("LABEL_"..mode, n, s, 1) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +map_op[".long_*"] = function(params) + if not params then return "imm..." end + for _,p in ipairs(params) do + local n = tonumber(p) + if not n then werror("bad immediate `"..p.."'") end + if n < 0 then n = n + 2^32 end + wputw(n) + if secpos+2 > maxsecpos then wflush() end + end +end + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. 
+ map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. + local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION", num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. +function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = map_coreop }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_proto.h b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_proto.h new file mode 100644 index 00000000000..a8bc6fd285d --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_proto.h @@ -0,0 +1,83 @@ +/* +** DynASM encoding engine prototypes. +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** Released under the MIT license. See dynasm.lua for full copyright notice. +*/ + +#ifndef _DASM_PROTO_H +#define _DASM_PROTO_H + +#include +#include + +#define DASM_IDENT "DynASM 1.3.0" +#define DASM_VERSION 10300 /* 1.3.0 */ + +#ifndef Dst_DECL +#define Dst_DECL dasm_State **Dst +#endif + +#ifndef Dst_REF +#define Dst_REF (*Dst) +#endif + +#ifndef DASM_FDEF +#define DASM_FDEF extern +#endif + +#ifndef DASM_M_GROW +#define DASM_M_GROW(ctx, t, p, sz, need) \ + do { \ + size_t _sz = (sz), _need = (need); \ + if (_sz < _need) { \ + if (_sz < 16) _sz = 16; \ + while (_sz < _need) _sz += _sz; \ + (p) = (t *)realloc((p), _sz); \ + if ((p) == NULL) exit(1); \ + (sz) = _sz; \ + } \ + } while(0) +#endif + +#ifndef DASM_M_FREE +#define DASM_M_FREE(ctx, p, sz) free(p) +#endif + +/* Internal DynASM encoder state. */ +typedef struct dasm_State dasm_State; + + +/* Initialize and free DynASM state. */ +DASM_FDEF void dasm_init(Dst_DECL, int maxsection); +DASM_FDEF void dasm_free(Dst_DECL); + +/* Setup global array. Must be called before dasm_setup(). */ +DASM_FDEF void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl); + +/* Grow PC label array. 
Can be called after dasm_setup(), too. */ +DASM_FDEF void dasm_growpc(Dst_DECL, unsigned int maxpc); + +/* Setup encoder. */ +DASM_FDEF void dasm_setup(Dst_DECL, const void *actionlist); + +/* Feed encoder with actions. Calls are generated by pre-processor. */ +DASM_FDEF void dasm_put(Dst_DECL, int start, ...); + +/* Link sections and return the resulting size. */ +DASM_FDEF int dasm_link(Dst_DECL, size_t *szp); + +/* Encode sections into buffer. */ +DASM_FDEF int dasm_encode(Dst_DECL, void *buffer); + +/* Get PC label offset. */ +DASM_FDEF int dasm_getpclabel(Dst_DECL, unsigned int pc); + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. */ +DASM_FDEF int dasm_checkstep(Dst_DECL, int secmatch); +#else +#define dasm_checkstep(a, b) 0 +#endif + + +#endif /* _DASM_PROTO_H */ diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x64.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x64.lua new file mode 100644 index 00000000000..b1b62022f3c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x64.lua @@ -0,0 +1,12 @@ +------------------------------------------------------------------------------ +-- DynASM x64 module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ +-- This module just sets 64 bit mode for the combined x86/x64 module. +-- All the interesting stuff is there. +------------------------------------------------------------------------------ + +x64 = true -- Using a global is an ugly, but effective solution. +return require("dasm_x86") diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.h b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.h new file mode 100644 index 00000000000..652e8c99b08 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.h @@ -0,0 +1,471 @@ +/* +** DynASM x86 encoding engine. +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** Released under the MIT license. See dynasm.lua for full copyright notice. +*/ + +#include +#include +#include +#include + +#define DASM_ARCH "x86" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. DASM_STOP must be 255. */ +enum { + DASM_DISP = 233, + DASM_IMM_S, DASM_IMM_B, DASM_IMM_W, DASM_IMM_D, DASM_IMM_WB, DASM_IMM_DB, + DASM_VREG, DASM_SPACE, DASM_SETLABEL, DASM_REL_A, DASM_REL_LG, DASM_REL_PC, + DASM_IMM_LG, DASM_IMM_PC, DASM_LABEL_LG, DASM_LABEL_PC, DASM_ALIGN, + DASM_EXTERN, DASM_ESC, DASM_MARK, DASM_SECTION, DASM_STOP +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_VREG 0x15000000 +#define DASM_S_UNDEF_L 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. 
*/ +typedef const unsigned char *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. 
*/ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(int)(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status=DASM_S_RANGE_##st|(int)(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) +{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs, mrm = 4; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + int action = *p++; + if (action < DASM_DISP) { + ofs++; + } else if (action <= DASM_REL_A) { + int n = va_arg(ap, int); + b[pos++] = n; + switch (action) { + case DASM_DISP: + if (n == 0) { if ((mrm&7) == 4) mrm = p[-2]; if ((mrm&7) != 5) break; } + case DASM_IMM_DB: if (((n+128)&-256) == 0) goto ob; + case DASM_REL_A: /* Assumes ptrdiff_t is int. !x64 */ + case DASM_IMM_D: ofs += 4; break; + case DASM_IMM_S: CK(((n+128)&-256) == 0, RANGE_I); goto ob; + case DASM_IMM_B: CK((n&-256) == 0, RANGE_I); ob: ofs++; break; + case DASM_IMM_WB: if (((n+128)&-256) == 0) goto ob; + case DASM_IMM_W: CK((n&-65536) == 0, RANGE_I); ofs += 2; break; + case DASM_SPACE: p++; ofs += n; break; + case DASM_SETLABEL: b[pos-2] = -0x40000000; break; /* Neg. label ofs. */ + case DASM_VREG: CK((n&-8) == 0 && (n != 4 || (*p&1) == 0), RANGE_VREG); + if (*p++ == 1 && *p == DASM_DISP) mrm = n; continue; + } + mrm = 4; + } else { + int *pl, n; + switch (action) { + case DASM_REL_LG: + case DASM_IMM_LG: + n = *p++; pl = D->lglabels + n; + /* Bkwd rel or global. */ + if (n <= 246) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; } + pl -= 246; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + case DASM_IMM_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. */ + *pl = pos; + } + pos++; + ofs += 4; /* Maximum offset needed. */ + if (action == DASM_REL_LG || action == DASM_REL_PC) + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_LABEL_LG: pl = D->lglabels + *p++; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. 
*/ + break; + case DASM_ALIGN: + ofs += *p++; /* Maximum alignment needed (arg is 2**n-1). */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_EXTERN: p += 2; ofs += 4; break; + case DASM_ESC: p++; ofs++; break; + case DASM_MARK: mrm = p[-2]; break; + case DASM_SECTION: + n = *p; CK(n < D->maxsection, RANGE_SEC); D->section = &D->sections[n]; + case DASM_STOP: goto stop; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink branches/aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 10; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + int op, action = *p++; + switch (action) { + case DASM_REL_LG: p++; op = p[-3]; goto rel_pc; + case DASM_REL_PC: op = p[-2]; rel_pc: { + int shrink = op == 0xe9 ? 3 : ((op&0xf0) == 0x80 ? 4 : 0); + if (shrink) { /* Shrinkable branch opcode? */ + int lofs, lpos = b[pos]; + if (lpos < 0) goto noshrink; /* Ext global? */ + lofs = *DASM_POS2PTR(D, lpos); + if (lpos > pos) { /* Fwd label: add cumulative section offsets. */ + int i; + for (i = secnum; i < DASM_POS2SEC(lpos); i++) + lofs += D->sections[i].ofs; + } else { + lofs -= ofs; /* Bkwd label: unfix offset. */ + } + lofs -= b[pos+1]; /* Short branch ok? */ + if (lofs >= -128-shrink && lofs <= 127) ofs -= shrink; /* Yes. */ + else { noshrink: shrink = 0; } /* No, cannot shrink op. */ + } + b[pos+1] = shrink; + pos += 2; + break; + } + case DASM_SPACE: case DASM_IMM_LG: case DASM_VREG: p++; + case DASM_DISP: case DASM_IMM_S: case DASM_IMM_B: case DASM_IMM_W: + case DASM_IMM_D: case DASM_IMM_WB: case DASM_IMM_DB: + case DASM_SETLABEL: case DASM_REL_A: case DASM_IMM_PC: pos++; break; + case DASM_LABEL_LG: p++; + case DASM_LABEL_PC: b[pos++] += ofs; break; /* Fix label offset. */ + case DASM_ALIGN: ofs -= (b[pos++]+ofs)&*p++; break; /* Adjust ofs. */ + case DASM_EXTERN: p += 2; break; + case DASM_ESC: p++; break; + case DASM_MARK: break; + case DASM_SECTION: case DASM_STOP: goto stop; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#define dasmb(x) *cp++ = (unsigned char)(x) +#ifndef DASM_ALIGNED_WRITES +#define dasmw(x) \ + do { *((unsigned short *)cp) = (unsigned short)(x); cp+=2; } while (0) +#define dasmd(x) \ + do { *((unsigned int *)cp) = (unsigned int)(x); cp+=4; } while (0) +#else +#define dasmw(x) do { dasmb(x); dasmb((x)>>8); } while (0) +#define dasmd(x) do { dasmw(x); dasmw((x)>>16); } while (0) +#endif + +/* Pass 3: Encode sections. 
*/ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + unsigned char *base = (unsigned char *)buffer; + unsigned char *cp = base; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + unsigned char *mark = NULL; + while (1) { + int action = *p++; + int n = (action >= DASM_DISP && action <= DASM_ALIGN) ? *b++ : 0; + switch (action) { + case DASM_DISP: if (!mark) mark = cp; { + unsigned char *mm = mark; + if (*p != DASM_IMM_DB && *p != DASM_IMM_WB) mark = NULL; + if (n == 0) { int mrm = mm[-1]&7; if (mrm == 4) mrm = mm[0]&7; + if (mrm != 5) { mm[-1] -= 0x80; break; } } + if (((n+128) & -256) != 0) goto wd; else mm[-1] -= 0x40; + } + case DASM_IMM_S: case DASM_IMM_B: wb: dasmb(n); break; + case DASM_IMM_DB: if (((n+128)&-256) == 0) { + db: if (!mark) mark = cp; mark[-2] += 2; mark = NULL; goto wb; + } else mark = NULL; + case DASM_IMM_D: wd: dasmd(n); break; + case DASM_IMM_WB: if (((n+128)&-256) == 0) goto db; else mark = NULL; + case DASM_IMM_W: dasmw(n); break; + case DASM_VREG: { int t = *p++; if (t >= 2) n<<=3; cp[-1] |= n; break; } + case DASM_REL_LG: p++; if (n >= 0) goto rel_pc; + b++; n = (int)(ptrdiff_t)D->globals[-n]; + case DASM_REL_A: rel_a: n -= (int)(ptrdiff_t)(cp+4); goto wd; /* !x64 */ + case DASM_REL_PC: rel_pc: { + int shrink = *b++; + int *pb = DASM_POS2PTR(D, n); if (*pb < 0) { n = pb[1]; goto rel_a; } + n = *pb - ((int)(cp-base) + 4-shrink); + if (shrink == 0) goto wd; + if (shrink == 4) { cp--; cp[-1] = *cp-0x10; } else cp[-1] = 0xeb; + goto wb; + } + case DASM_IMM_LG: + p++; if (n < 0) { n = (int)(ptrdiff_t)D->globals[-n]; goto wd; } + case DASM_IMM_PC: { + int *pb = DASM_POS2PTR(D, n); + n = *pb < 0 ? pb[1] : (*pb + (int)(ptrdiff_t)base); + goto wd; + } + case DASM_LABEL_LG: { + int idx = *p++; + if (idx >= 10) + D->globals[idx] = (void *)(base + (*p == DASM_SETLABEL ? *b : n)); + break; + } + case DASM_LABEL_PC: case DASM_SETLABEL: break; + case DASM_SPACE: { int fill = *p++; while (n--) *cp++ = fill; break; } + case DASM_ALIGN: + n = *p++; + while (((cp-base) & n)) *cp++ = 0x90; /* nop */ + break; + case DASM_EXTERN: n = DASM_EXTERN(Dst, cp, p[1], *p); p += 2; goto wd; + case DASM_MARK: mark = cp; break; + case DASM_ESC: action = *p++; + default: *cp++ = action; break; + case DASM_SECTION: case DASM_STOP: goto stop; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. 
*/ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_L|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(int)(D->section-D->sections); + return D->status; +} +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.lua new file mode 100644 index 00000000000..7ca061d22f6 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dasm_x86.lua @@ -0,0 +1,1945 @@ +------------------------------------------------------------------------------ +-- DynASM x86/x64 module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +local x64 = x64 + +-- Module information: +local _info = { + arch = x64 and "x64" or "x86", + description = "DynASM x86/x64 module", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. +local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, unpack, setmetatable = assert, unpack or table.unpack, setmetatable +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local find, match, gmatch, gsub = _s.find, _s.match, _s.gmatch, _s.gsub +local concat, sort = table.concat, table.sort +local bit = bit or require("bit") +local band, shl, shr = bit.band, bit.lshift, bit.rshift + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + -- int arg, 1 buffer pos: + "DISP", "IMM_S", "IMM_B", "IMM_W", "IMM_D", "IMM_WB", "IMM_DB", + -- action arg (1 byte), int arg, 1 buffer pos (reg/num): + "VREG", "SPACE", -- !x64: VREG support NYI. + -- ptrdiff_t arg, 1 buffer pos (address): !x64 + "SETLABEL", "REL_A", + -- action arg (1 byte) or int arg, 2 buffer pos (link, offset): + "REL_LG", "REL_PC", + -- action arg (1 byte) or int arg, 1 buffer pos (link): + "IMM_LG", "IMM_PC", + -- action arg (1 byte) or int arg, 1 buffer pos (offset): + "LABEL_LG", "LABEL_PC", + -- action arg (1 byte), 1 buffer pos (offset): + "ALIGN", + -- action args (2 bytes), no buffer pos. + "EXTERN", + -- action arg (1 byte), no buffer pos. + "ESC", + -- no action arg, no buffer pos. + "MARK", + -- action arg (1 byte), no buffer pos, terminal action: + "SECTION", + -- no args, no buffer pos, terminal action: + "STOP" +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number (dynamically generated below). +local map_action = {} +-- First action number. Everything below does not need to be escaped. +local actfirst = 256-#action_names + +-- Action list buffer and string (only used to remove dupes). +local actlist = {} +local actstr = "" + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). 
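+-- For orientation (an illustrative sketch, not from the original source):
+-- the preprocessor turns an assembler line such as `| mov eax, imm` into a
+-- C call of roughly the form `dasm_put(Dst, 0, imm)`, where 0 is the offset
+-- of the encoded chunk inside the action list and `imm` fills one section
+-- buffer position at runtime. secpos below counts how many such positions
+-- the pending dasm_put() will need, so it can be flushed before exceeding
+-- maxsecpos.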
+local secpos = 1 + +------------------------------------------------------------------------------ + +-- Compute action numbers for action names. +for n,name in ipairs(action_names) do + local num = actfirst + n - 1 + map_action[name] = num +end + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. +local function writeactions(out, name) + local nn = #actlist + local last = actlist[nn] or 255 + actlist[nn] = nil -- Remove last byte. + if nn == 0 then nn = 1 end + out:write("static const unsigned char ", name, "[", nn, "] = {\n") + local s = " " + for n,b in ipairs(actlist) do + s = s..b.."," + if #s >= 75 then + assert(out:write(s, "\n")) + s = " " + end + end + out:write(s, last, "\n};\n\n") -- Add last byte back. +end + +------------------------------------------------------------------------------ + +-- Add byte to action list. +local function wputxb(n) + assert(n >= 0 and n <= 255 and n % 1 == 0, "byte out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, a, num) + wputxb(assert(map_action[action], "bad action name `"..action.."'")) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Add call to embedded DynASM C code. +local function wcall(func, args) + wline(format("dasm_%s(Dst, %s);", func, concat(args, ", ")), true) +end + +-- Delete duplicate action list chunks. A tad slow, but so what. +local function dedupechunk(offset) + local al, as = actlist, actstr + local chunk = char(unpack(al, offset+1, #al)) + local orig = find(as, chunk, 1, true) + if orig then + actargs[1] = orig-1 -- Replace with original offset. + for i=offset+1,#al do al[i] = nil end -- Kill dupe. + else + actstr = as..chunk + end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + local offset = actargs[1] + if #actlist == offset then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + dedupechunk(offset) + wcall("put", actargs) -- Add call to dasm_put(). + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped byte. +local function wputb(n) + if n >= actfirst then waction("ESC") end -- Need to escape byte. + wputxb(n) +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 10 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_@]*$") then werror("bad global label") end + local n = next_global + if n > 246 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=10,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. 
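+-- Illustrative sketch (hypothetical prefix `lbl_` and a single global label
+-- `->start`; not part of the original source): the function below would emit
+--   enum {
+--     lbl_start,
+--     lbl__MAX
+--   };
+-- i.e. one enumerator per global label plus a trailing <prefix>_MAX entry,
+-- typically used to size the C array that receives the label addresses.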
+local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=10,next_global-1 do + out:write(" ", prefix, gsub(t[i], "@.*", ""), ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=10,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = -1 +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. + local n = next_extern + if n < -256 then werror("too many extern labels") end + next_extern = n - 1 + t[name] = n + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + local t = {} + for name, n in pairs(map_extern) do t[-n] = name end + out:write("Extern labels:\n") + for i=1,-next_extern-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write extern label names. +local function writeexternnames(out, name) + local t = {} + for name, n in pairs(map_extern) do t[-n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=1,-next_extern-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. +local map_archdef = {} -- Ext. register name -> int. name. +local map_reg_rev = {} -- Int. register name -> ext. name. +local map_reg_num = {} -- Int. register name -> register number. +local map_reg_opsize = {} -- Int. register name -> operand size. +local map_reg_valid_base = {} -- Int. register name -> valid base register? +local map_reg_valid_index = {} -- Int. register name -> valid index register? +local map_reg_needrex = {} -- Int. register name -> need rex vs. no rex. +local reg_list = {} -- Canonical list of int. register names. + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for _PTx macros). + +local addrsize = x64 and "q" or "d" -- Size for address operands. + +-- Helper functions to fill register maps. 
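+-- (Illustrative note, not from the original source: mkrmap() below builds
+-- internal register names of the form "@<size><num>", e.g. "eax" -> "@d0",
+-- "ecx" -> "@d1", plus a size-only class name such as "Rd" -> "@d" whose
+-- register number is -1, which later marks a variable register expression
+-- like Rd(expr).)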
+local function mkrmap(sz, cl, names) + local cname = format("@%s", sz) + reg_list[#reg_list+1] = cname + map_archdef[cl] = cname + map_reg_rev[cname] = cl + map_reg_num[cname] = -1 + map_reg_opsize[cname] = sz + if sz == addrsize or sz == "d" then + map_reg_valid_base[cname] = true + map_reg_valid_index[cname] = true + end + if names then + for n,name in ipairs(names) do + local iname = format("@%s%x", sz, n-1) + reg_list[#reg_list+1] = iname + map_archdef[name] = iname + map_reg_rev[iname] = name + map_reg_num[iname] = n-1 + map_reg_opsize[iname] = sz + if sz == "b" and n > 4 then map_reg_needrex[iname] = false end + if sz == addrsize or sz == "d" then + map_reg_valid_base[iname] = true + map_reg_valid_index[iname] = true + end + end + end + for i=0,(x64 and sz ~= "f") and 15 or 7 do + local needrex = sz == "b" and i > 3 + local iname = format("@%s%x%s", sz, i, needrex and "R" or "") + if needrex then map_reg_needrex[iname] = true end + local name + if sz == "o" then name = format("xmm%d", i) + elseif sz == "f" then name = format("st%d", i) + else name = format("r%d%s", i, sz == addrsize and "" or sz) end + map_archdef[name] = iname + if not map_reg_rev[iname] then + reg_list[#reg_list+1] = iname + map_reg_rev[iname] = name + map_reg_num[iname] = i + map_reg_opsize[iname] = sz + if sz == addrsize or sz == "d" then + map_reg_valid_base[iname] = true + map_reg_valid_index[iname] = true + end + end + end + reg_list[#reg_list+1] = "" +end + +-- Integer registers (qword, dword, word and byte sized). +if x64 then + mkrmap("q", "Rq", {"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi"}) +end +mkrmap("d", "Rd", {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"}) +mkrmap("w", "Rw", {"ax", "cx", "dx", "bx", "sp", "bp", "si", "di"}) +mkrmap("b", "Rb", {"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"}) +map_reg_valid_index[map_archdef.esp] = false +if x64 then map_reg_valid_index[map_archdef.rsp] = false end +map_archdef["Ra"] = "@"..addrsize + +-- FP registers (internally tword sized, but use "f" as operand size). +mkrmap("f", "Rf") + +-- SSE registers (oword sized, but qword and dword accessible). +mkrmap("o", "xmm") + +-- Operand size prefixes to codes. +local map_opsize = { + byte = "b", word = "w", dword = "d", qword = "q", oword = "o", tword = "t", + aword = addrsize, +} + +-- Operand size code to number. +local map_opsizenum = { + b = 1, w = 2, d = 4, q = 8, o = 16, t = 10, +} + +-- Operand size code to name. +local map_opsizename = { + b = "byte", w = "word", d = "dword", q = "qword", o = "oword", t = "tword", + f = "fpword", +} + +-- Valid index register scale factors. +local map_xsc = { + ["1"] = 0, ["2"] = 1, ["4"] = 2, ["8"] = 3, +} + +-- Condition codes. +local map_cc = { + o = 0, no = 1, b = 2, nb = 3, e = 4, ne = 5, be = 6, nbe = 7, + s = 8, ns = 9, p = 10, np = 11, l = 12, nl = 13, le = 14, nle = 15, + c = 2, nae = 2, nc = 3, ae = 3, z = 4, nz = 5, na = 6, a = 7, + pe = 10, po = 11, nge = 12, ge = 13, ng = 14, g = 15, +} + + +-- Reverse defines for registers. 
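+-- (Illustrative note: _M.revdef("mov @d0, @d1") yields "mov eax, ecx";
+-- the internal "@..." names are simply mapped back through map_reg_rev so
+-- that diagnostics can show the original register names.)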
+function _M.revdef(s) + return gsub(s, "@%w+", map_reg_rev) +end + +-- Dump register names and numbers +local function dumpregs(out) + out:write("Register names, sizes and internal numbers:\n") + for _,reg in ipairs(reg_list) do + if reg == "" then + out:write("\n") + else + local name = map_reg_rev[reg] + local num = map_reg_num[reg] + local opsize = map_opsizename[map_reg_opsize[reg]] + out:write(format(" %-5s %-8s %s\n", name, opsize, + num < 0 and "(variable)" or num)) + end + end +end + +------------------------------------------------------------------------------ + +-- Put action for label arg (IMM_LG, IMM_PC, REL_LG, REL_PC). +local function wputlabel(aprefix, imm, num) + if type(imm) == "number" then + if imm < 0 then + waction("EXTERN") + wputxb(aprefix == "IMM_" and 0 or 1) + imm = -imm-1 + else + waction(aprefix.."LG", nil, num); + end + wputxb(imm) + else + waction(aprefix.."PC", imm, num) + end +end + +-- Put signed byte or arg. +local function wputsbarg(n) + if type(n) == "number" then + if n < -128 or n > 127 then + werror("signed immediate byte out of range") + end + if n < 0 then n = n + 256 end + wputb(n) + else waction("IMM_S", n) end +end + +-- Put unsigned byte or arg. +local function wputbarg(n) + if type(n) == "number" then + if n < 0 or n > 255 then + werror("unsigned immediate byte out of range") + end + wputb(n) + else waction("IMM_B", n) end +end + +-- Put unsigned word or arg. +local function wputwarg(n) + if type(n) == "number" then + if shr(n, 16) ~= 0 then + werror("unsigned immediate word out of range") + end + wputb(band(n, 255)); wputb(shr(n, 8)); + else waction("IMM_W", n) end +end + +-- Put signed or unsigned dword or arg. +local function wputdarg(n) + local tn = type(n) + if tn == "number" then + wputb(band(n, 255)) + wputb(band(shr(n, 8), 255)) + wputb(band(shr(n, 16), 255)) + wputb(shr(n, 24)) + elseif tn == "table" then + wputlabel("IMM_", n[1], 1) + else + waction("IMM_D", n) + end +end + +-- Put operand-size dependent number or arg (defaults to dword). +local function wputszarg(sz, n) + if not sz or sz == "d" or sz == "q" then wputdarg(n) + elseif sz == "w" then wputwarg(n) + elseif sz == "b" then wputbarg(n) + elseif sz == "s" then wputsbarg(n) + else werror("bad operand size") end +end + +-- Put multi-byte opcode with operand-size dependent modifications. +local function wputop(sz, op, rex) + local r + if rex ~= 0 and not x64 then werror("bad operand size") end + if sz == "w" then wputb(102) end + -- Needs >32 bit numbers, but only for crc32 eax, word [ebx] + if op >= 4294967296 then r = op%4294967296 wputb((op-r)/4294967296) op = r end + if op >= 16777216 then wputb(shr(op, 24)); op = band(op, 0xffffff) end + if op >= 65536 then + if rex ~= 0 then + local opc3 = band(op, 0xffff00) + if opc3 == 0x0f3a00 or opc3 == 0x0f3800 then + wputb(64 + band(rex, 15)); rex = 0 + end + end + wputb(shr(op, 16)); op = band(op, 0xffff) + end + if op >= 256 then + local b = shr(op, 8) + if b == 15 and rex ~= 0 then wputb(64 + band(rex, 15)); rex = 0 end + wputb(b) + op = band(op, 255) + end + if rex ~= 0 then wputb(64 + band(rex, 15)) end + if sz == "b" then op = op - 1 end + wputb(op) +end + +-- Put ModRM or SIB formatted byte. +local function wputmodrm(m, s, rm, vs, vrm) + assert(m < 4 and s < 16 and rm < 16, "bad modrm operands") + wputb(shl(m, 6) + shl(band(s, 7), 3) + band(rm, 7)) +end + +-- Put ModRM/SIB plus optional displacement. 
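+-- (Worked example, not from the original source: for an operand like
+-- [ebx+8] with spare/reg field s this emits ModRM(mod=1, reg=s, rm=3)
+-- followed by the signed 8-bit displacement 0x08; for [esp+8] an extra SIB
+-- byte (scale=0, index=none, base=esp) is emitted, because rm=4 selects the
+-- SIB form.)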
+local function wputmrmsib(t, imark, s, vsreg) + local vreg, vxreg + local reg, xreg = t.reg, t.xreg + if reg and reg < 0 then reg = 0; vreg = t.vreg end + if xreg and xreg < 0 then xreg = 0; vxreg = t.vxreg end + if s < 0 then s = 0 end + + -- Register mode. + if sub(t.mode, 1, 1) == "r" then + wputmodrm(3, s, reg) + if vsreg then waction("VREG", vsreg); wputxb(2) end + if vreg then waction("VREG", vreg); wputxb(0) end + return + end + + local disp = t.disp + local tdisp = type(disp) + -- No base register? + if not reg then + local riprel = false + if xreg then + -- Indexed mode with index register only. + -- [xreg*xsc+disp] -> (0, s, esp) (xsc, xreg, ebp) + wputmodrm(0, s, 4) + if imark == "I" then waction("MARK") end + if vsreg then waction("VREG", vsreg); wputxb(2) end + wputmodrm(t.xsc, xreg, 5) + if vxreg then waction("VREG", vxreg); wputxb(3) end + else + -- Pure 32 bit displacement. + if x64 and tdisp ~= "table" then + wputmodrm(0, s, 4) -- [disp] -> (0, s, esp) (0, esp, ebp) + if imark == "I" then waction("MARK") end + wputmodrm(0, 4, 5) + else + riprel = x64 + wputmodrm(0, s, 5) -- [disp|rip-label] -> (0, s, ebp) + if imark == "I" then waction("MARK") end + end + if vsreg then waction("VREG", vsreg); wputxb(2) end + end + if riprel then -- Emit rip-relative displacement. + if match("UWSiI", imark) then + werror("NYI: rip-relative displacement followed by immediate") + end + -- The previous byte in the action buffer cannot be 0xe9 or 0x80-0x8f. + wputlabel("REL_", disp[1], 2) + else + wputdarg(disp) + end + return + end + + local m + if tdisp == "number" then -- Check displacement size at assembly time. + if disp == 0 and band(reg, 7) ~= 5 then -- [ebp] -> [ebp+0] (in SIB, too) + if not vreg then m = 0 end -- Force DISP to allow [Rd(5)] -> [ebp+0] + elseif disp >= -128 and disp <= 127 then m = 1 + else m = 2 end + elseif tdisp == "table" then + m = 2 + end + + -- Index register present or esp as base register: need SIB encoding. + if xreg or band(reg, 7) == 4 then + wputmodrm(m or 2, s, 4) -- ModRM. + if m == nil or imark == "I" then waction("MARK") end + if vsreg then waction("VREG", vsreg); wputxb(2) end + wputmodrm(t.xsc or 0, xreg or 4, reg) -- SIB. + if vxreg then waction("VREG", vxreg); wputxb(3) end + if vreg then waction("VREG", vreg); wputxb(1) end + else + wputmodrm(m or 2, s, reg) -- ModRM. + if (imark == "I" and (m == 1 or m == 2)) or + (m == nil and (vsreg or vreg)) then waction("MARK") end + if vsreg then waction("VREG", vsreg); wputxb(2) end + if vreg then waction("VREG", vreg); wputxb(1) end + end + + -- Put displacement. + if m == 1 then wputsbarg(disp) + elseif m == 2 then wputdarg(disp) + elseif m == nil then waction("DISP", disp) end +end + +------------------------------------------------------------------------------ + +-- Return human-readable operand mode string. +local function opmodestr(op, args) + local m = {} + for i=1,#args do + local a = args[i] + m[#m+1] = sub(a.mode, 1, 1)..(a.opsize or "?") + end + return op.." "..concat(m, ",") +end + +-- Convert number to valid integer or nil. +local function toint(expr) + local n = tonumber(expr) + if n then + if n % 1 ~= 0 or n < -2147483648 or n > 4294967295 then + werror("bad integer number `"..expr.."'") + end + return n + end +end + +-- Parse immediate expression. 
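+-- (Summary of the forms handled below, added for orientation:
+--   &expr         -> "iPJ", address as a ptrdiff_t expression
+--   =>pcexpr      -> "iJ",  PC label reference
+--   ->name        -> "iJ",  global label number (10..246)
+--   <1..9 / >1..9 -> "iJ",  local label (backward 1-9, forward 247-255)
+--   extern name   -> "iJ",  negative extern label number
+--   anything else -> "iI",  plain immediate expression.)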
+local function immexpr(expr) + -- &expr (pointer) + if sub(expr, 1, 1) == "&" then + return "iPJ", format("(ptrdiff_t)(%s)", sub(expr,2)) + end + + local prefix = sub(expr, 1, 2) + -- =>expr (pc label reference) + if prefix == "=>" then + return "iJ", sub(expr, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "iJ", map_global[sub(expr, 3)] + end + + -- [<>][1-9] (local label reference) + local dir, lnum = match(expr, "^([<>])([1-9])$") + if dir then -- Fwd: 247-255, Bkwd: 1-9. + return "iJ", lnum + (dir == ">" and 246 or 0) + end + + local extname = match(expr, "^extern%s+(%S+)$") + if extname then + return "iJ", map_extern[extname] + end + + -- expr (interpreted as immediate) + return "iI", expr +end + +-- Parse displacement expression: +-num, +-expr, +-opsize*num +local function dispexpr(expr) + local disp = expr == "" and 0 or toint(expr) + if disp then return disp end + local c, dispt = match(expr, "^([+-])%s*(.+)$") + if c == "+" then + expr = dispt + elseif not c then + werror("bad displacement expression `"..expr.."'") + end + local opsize, tailops = match(dispt, "^(%w+)%s*%*%s*(.+)$") + local ops, imm = map_opsize[opsize], toint(tailops) + if ops and imm then + if c == "-" then imm = -imm end + return imm*map_opsizenum[ops] + end + local mode, iexpr = immexpr(dispt) + if mode == "iJ" then + if c == "-" then werror("cannot invert label reference") end + return { iexpr } + end + return expr -- Need to return original signed expression. +end + +-- Parse register or type expression. +local function rtexpr(expr) + if not expr then return end + local tname, ovreg = match(expr, "^([%w_]+):(@[%w_]+)$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + local rnum = map_reg_num[reg] + if not rnum then + werror("type `"..(tname or expr).."' needs a register override") + end + if not map_reg_valid_base[reg] then + werror("bad base register override `"..(map_reg_rev[reg] or reg).."'") + end + return reg, rnum, tp + end + return expr, map_reg_num[expr] +end + +-- Parse operand and return { mode, opsize, reg, xreg, xsc, disp, imm }. +local function parseoperand(param) + local t = {} + + local expr = param + local opsize, tailops = match(param, "^(%w+)%s*(.+)$") + if opsize then + t.opsize = map_opsize[opsize] + if t.opsize then expr = tailops end + end + + local br = match(expr, "^%[%s*(.-)%s*%]$") + repeat + if br then + t.mode = "xm" + + -- [disp] + t.disp = toint(br) + if t.disp then + t.mode = x64 and "xm" or "xmO" + break + end + + -- [reg...] + local tp + local reg, tailr = match(br, "^([@%w_:]+)%s*(.*)$") + reg, t.reg, tp = rtexpr(reg) + if not t.reg then + -- [expr] + t.mode = x64 and "xm" or "xmO" + t.disp = dispexpr("+"..br) + break + end + + if t.reg == -1 then + t.vreg, tailr = match(tailr, "^(%b())(.*)$") + if not t.vreg then werror("bad variable register expression") end + end + + -- [xreg*xsc] or [xreg*xsc+-disp] or [xreg*xsc+-expr] + local xsc, tailsc = match(tailr, "^%*%s*([1248])%s*(.*)$") + if xsc then + if not map_reg_valid_index[reg] then + werror("bad index register `"..map_reg_rev[reg].."'") + end + t.xsc = map_xsc[xsc] + t.xreg = t.reg + t.vxreg = t.vreg + t.reg = nil + t.vreg = nil + t.disp = dispexpr(tailsc) + break + end + if not map_reg_valid_base[reg] then + werror("bad base register `"..map_reg_rev[reg].."'") + end + + -- [reg] or [reg+-disp] + t.disp = toint(tailr) or (tailr == "" and 0) + if t.disp then break end + + -- [reg+xreg...] 
+ local xreg, tailx = match(tailr, "^+%s*([@%w_:]+)%s*(.*)$") + xreg, t.xreg, tp = rtexpr(xreg) + if not t.xreg then + -- [reg+-expr] + t.disp = dispexpr(tailr) + break + end + if not map_reg_valid_index[xreg] then + werror("bad index register `"..map_reg_rev[xreg].."'") + end + + if t.xreg == -1 then + t.vxreg, tailx = match(tailx, "^(%b())(.*)$") + if not t.vxreg then werror("bad variable register expression") end + end + + -- [reg+xreg*xsc...] + local xsc, tailsc = match(tailx, "^%*%s*([1248])%s*(.*)$") + if xsc then + t.xsc = map_xsc[xsc] + tailx = tailsc + end + + -- [...] or [...+-disp] or [...+-expr] + t.disp = dispexpr(tailx) + else + -- imm or opsize*imm + local imm = toint(expr) + if not imm and sub(expr, 1, 1) == "*" and t.opsize then + imm = toint(sub(expr, 2)) + if imm then + imm = imm * map_opsizenum[t.opsize] + t.opsize = nil + end + end + if imm then + if t.opsize then werror("bad operand size override") end + local m = "i" + if imm == 1 then m = m.."1" end + if imm >= 4294967168 and imm <= 4294967295 then imm = imm-4294967296 end + if imm >= -128 and imm <= 127 then m = m.."S" end + t.imm = imm + t.mode = m + break + end + + local tp + local reg, tailr = match(expr, "^([@%w_:]+)%s*(.*)$") + reg, t.reg, tp = rtexpr(reg) + if t.reg then + if t.reg == -1 then + t.vreg, tailr = match(tailr, "^(%b())(.*)$") + if not t.vreg then werror("bad variable register expression") end + end + -- reg + if tailr == "" then + if t.opsize then werror("bad operand size override") end + t.opsize = map_reg_opsize[reg] + if t.opsize == "f" then + t.mode = t.reg == 0 and "fF" or "f" + else + if reg == "@w4" or (x64 and reg == "@d4") then + wwarn("bad idea, try again with `"..(x64 and "rsp'" or "esp'")) + end + t.mode = t.reg == 0 and "rmR" or (reg == "@b1" and "rmC" or "rm") + end + t.needrex = map_reg_needrex[reg] + break + end + + -- type[idx], type[idx].field, type->field -> [reg+offset_expr] + if not tp then werror("bad operand `"..param.."'") end + t.mode = "xm" + t.disp = format(tp.ctypefmt, tailr) + else + t.mode, t.imm = immexpr(expr) + if sub(t.mode, -1) == "J" then + if t.opsize and t.opsize ~= addrsize then + werror("bad operand size override") + end + t.opsize = addrsize + end + end + end + until true + return t +end + +------------------------------------------------------------------------------ +-- x86 Template String Description +-- =============================== +-- +-- Each template string is a list of [match:]pattern pairs, +-- separated by "|". The first match wins. No match means a +-- bad or unsupported combination of operand modes or sizes. +-- +-- The match part and the ":" is omitted if the operation has +-- no operands. Otherwise the first N characters are matched +-- against the mode strings of each of the N operands. +-- +-- The mode string for each operand type is (see parseoperand()): +-- Integer register: "rm", +"R" for eax, ax, al, +"C" for cl +-- FP register: "f", +"F" for st0 +-- Index operand: "xm", +"O" for [disp] (pure offset) +-- Immediate: "i", +"S" for signed 8 bit, +"1" for 1, +-- +"I" for arg, +"P" for pointer +-- Any: +"J" for valid jump targets +-- +-- So a match character "m" (mixed) matches both an integer register +-- and an index operand (to be encoded with the ModRM/SIB scheme). +-- But "r" matches only a register and "x" only an index operand +-- (e.g. for FP memory access operations). +-- +-- The operand size match string starts right after the mode match +-- characters and ends before the ":". "dwb" or "qdwb" is assumed, if empty. 
+-- The effective data size of the operation is matched against this list. +-- +-- If only the regular "b", "w", "d", "q", "t" operand sizes are +-- present, then all operands must be the same size. Unspecified sizes +-- are ignored, but at least one operand must have a size or the pattern +-- won't match (use the "byte", "word", "dword", "qword", "tword" +-- operand size overrides. E.g.: mov dword [eax], 1). +-- +-- If the list has a "1" or "2" prefix, the operand size is taken +-- from the respective operand and any other operand sizes are ignored. +-- If the list contains only ".", all operand sizes are ignored. +-- If the list has a "/" prefix, the concatenated (mixed) operand sizes +-- are compared to the match. +-- +-- E.g. "rrdw" matches for either two dword registers or two word +-- registers. "Fx2dq" matches an st0 operand plus an index operand +-- pointing to a dword (float) or qword (double). +-- +-- Every character after the ":" is part of the pattern string: +-- Hex chars are accumulated to form the opcode (left to right). +-- "n" disables the standard opcode mods +-- (otherwise: -1 for "b", o16 prefix for "w", rex.w for "q") +-- "X" Force REX.W. +-- "r"/"R" adds the reg. number from the 1st/2nd operand to the opcode. +-- "m"/"M" generates ModRM/SIB from the 1st/2nd operand. +-- The spare 3 bits are either filled with the last hex digit or +-- the result from a previous "r"/"R". The opcode is restored. +-- +-- All of the following characters force a flush of the opcode: +-- "o"/"O" stores a pure 32 bit disp (offset) from the 1st/2nd operand. +-- "S" stores a signed 8 bit immediate from the last operand. +-- "U" stores an unsigned 8 bit immediate from the last operand. +-- "W" stores an unsigned 16 bit immediate from the last operand. +-- "i" stores an operand sized immediate from the last operand. +-- "I" dito, but generates an action code to optionally modify +-- the opcode (+2) for a signed 8 bit immediate. +-- "J" generates one of the REL action codes from the last operand. +-- +------------------------------------------------------------------------------ + +-- Template strings for x86 instructions. Ordered by first opcode byte. +-- Unimplemented opcodes (deliberate omissions) are marked with *. +local map_op = { + -- 00-05: add... + -- 06: *push es + -- 07: *pop es + -- 08-0D: or... + -- 0E: *push cs + -- 0F: two byte opcode prefix + -- 10-15: adc... + -- 16: *push ss + -- 17: *pop ss + -- 18-1D: sbb... + -- 1E: *push ds + -- 1F: *pop ds + -- 20-25: and... + es_0 = "26", + -- 27: *daa + -- 28-2D: sub... + cs_0 = "2E", + -- 2F: *das + -- 30-35: xor... + ss_0 = "36", + -- 37: *aaa + -- 38-3D: cmp... + ds_0 = "3E", + -- 3F: *aas + inc_1 = x64 and "m:FF0m" or "rdw:40r|m:FF0m", + dec_1 = x64 and "m:FF1m" or "rdw:48r|m:FF1m", + push_1 = (x64 and "rq:n50r|rw:50r|mq:nFF6m|mw:FF6m" or + "rdw:50r|mdw:FF6m").."|S.:6AS|ib:n6Ai|i.:68i", + pop_1 = x64 and "rq:n58r|rw:58r|mq:n8F0m|mw:8F0m" or "rdw:58r|mdw:8F0m", + -- 60: *pusha, *pushad, *pushaw + -- 61: *popa, *popad, *popaw + -- 62: *bound rdw,x + -- 63: x86: *arpl mw,rw + movsxd_2 = x64 and "rm/qd:63rM", + fs_0 = "64", + gs_0 = "65", + o16_0 = "66", + a16_0 = not x64 and "67" or nil, + a32_0 = x64 and "67", + -- 68: push idw + -- 69: imul rdw,mdw,idw + -- 6A: push ib + -- 6B: imul rdw,mdw,S + -- 6C: *insb + -- 6D: *insd, *insw + -- 6E: *outsb + -- 6F: *outsd, *outsw + -- 70-7F: jcc lb + -- 80: add... mb,i + -- 81: add... mdw,i + -- 82: *undefined + -- 83: add... 
mdw,S + test_2 = "mr:85Rm|rm:85rM|Ri:A9ri|mi:F70mi", + -- 86: xchg rb,mb + -- 87: xchg rdw,mdw + -- 88: mov mb,r + -- 89: mov mdw,r + -- 8A: mov r,mb + -- 8B: mov r,mdw + -- 8C: *mov mdw,seg + lea_2 = "rx1dq:8DrM", + -- 8E: *mov seg,mdw + -- 8F: pop mdw + nop_0 = "90", + xchg_2 = "Rrqdw:90R|rRqdw:90r|rm:87rM|mr:87Rm", + cbw_0 = "6698", + cwde_0 = "98", + cdqe_0 = "4898", + cwd_0 = "6699", + cdq_0 = "99", + cqo_0 = "4899", + -- 9A: *call iw:idw + wait_0 = "9B", + fwait_0 = "9B", + pushf_0 = "9C", + pushfd_0 = not x64 and "9C", + pushfq_0 = x64 and "9C", + popf_0 = "9D", + popfd_0 = not x64 and "9D", + popfq_0 = x64 and "9D", + sahf_0 = "9E", + lahf_0 = "9F", + mov_2 = "OR:A3o|RO:A1O|mr:89Rm|rm:8BrM|rib:nB0ri|ridw:B8ri|mi:C70mi", + movsb_0 = "A4", + movsw_0 = "66A5", + movsd_0 = "A5", + cmpsb_0 = "A6", + cmpsw_0 = "66A7", + cmpsd_0 = "A7", + -- A8: test Rb,i + -- A9: test Rdw,i + stosb_0 = "AA", + stosw_0 = "66AB", + stosd_0 = "AB", + lodsb_0 = "AC", + lodsw_0 = "66AD", + lodsd_0 = "AD", + scasb_0 = "AE", + scasw_0 = "66AF", + scasd_0 = "AF", + -- B0-B7: mov rb,i + -- B8-BF: mov rdw,i + -- C0: rol... mb,i + -- C1: rol... mdw,i + ret_1 = "i.:nC2W", + ret_0 = "C3", + -- C4: *les rdw,mq + -- C5: *lds rdw,mq + -- C6: mov mb,i + -- C7: mov mdw,i + -- C8: *enter iw,ib + leave_0 = "C9", + -- CA: *retf iw + -- CB: *retf + int3_0 = "CC", + int_1 = "i.:nCDU", + into_0 = "CE", + -- CF: *iret + -- D0: rol... mb,1 + -- D1: rol... mdw,1 + -- D2: rol... mb,cl + -- D3: rol... mb,cl + -- D4: *aam ib + -- D5: *aad ib + -- D6: *salc + -- D7: *xlat + -- D8-DF: floating point ops + -- E0: *loopne + -- E1: *loope + -- E2: *loop + -- E3: *jcxz, *jecxz + -- E4: *in Rb,ib + -- E5: *in Rdw,ib + -- E6: *out ib,Rb + -- E7: *out ib,Rdw + call_1 = x64 and "mq:nFF2m|J.:E8nJ" or "md:FF2m|J.:E8J", + jmp_1 = x64 and "mq:nFF4m|J.:E9nJ" or "md:FF4m|J.:E9J", -- short: EB + -- EA: *jmp iw:idw + -- EB: jmp ib + -- EC: *in Rb,dx + -- ED: *in Rdw,dx + -- EE: *out dx,Rb + -- EF: *out dx,Rdw + lock_0 = "F0", + int1_0 = "F1", + repne_0 = "F2", + repnz_0 = "F2", + rep_0 = "F3", + repe_0 = "F3", + repz_0 = "F3", + -- F4: *hlt + cmc_0 = "F5", + -- F6: test... mb,i; div... mb + -- F7: test... mdw,i; div... mdw + clc_0 = "F8", + stc_0 = "F9", + -- FA: *cli + cld_0 = "FC", + std_0 = "FD", + -- FE: inc... mb + -- FF: inc... mdw + + -- misc ops + not_1 = "m:F72m", + neg_1 = "m:F73m", + mul_1 = "m:F74m", + imul_1 = "m:F75m", + div_1 = "m:F76m", + idiv_1 = "m:F77m", + + imul_2 = "rmqdw:0FAFrM|rIqdw:69rmI|rSqdw:6BrmS|riqdw:69rmi", + imul_3 = "rmIqdw:69rMI|rmSqdw:6BrMS|rmiqdw:69rMi", + + movzx_2 = "rm/db:0FB6rM|rm/qb:|rm/wb:0FB6rM|rm/dw:0FB7rM|rm/qw:", + movsx_2 = "rm/db:0FBErM|rm/qb:|rm/wb:0FBErM|rm/dw:0FBFrM|rm/qw:", + + bswap_1 = "rqd:0FC8r", + bsf_2 = "rmqdw:0FBCrM", + bsr_2 = "rmqdw:0FBDrM", + bt_2 = "mrqdw:0FA3Rm|miqdw:0FBA4mU", + btc_2 = "mrqdw:0FBBRm|miqdw:0FBA7mU", + btr_2 = "mrqdw:0FB3Rm|miqdw:0FBA6mU", + bts_2 = "mrqdw:0FABRm|miqdw:0FBA5mU", + + shld_3 = "mriqdw:0FA4RmU|mrCqdw:0FA5Rm", + shrd_3 = "mriqdw:0FACRmU|mrCqdw:0FADRm", + + rdtsc_0 = "0F31", -- P1+ + cpuid_0 = "0FA2", -- P1+ + + -- floating point ops + fst_1 = "ff:DDD0r|xd:D92m|xq:nDD2m", + fstp_1 = "ff:DDD8r|xd:D93m|xq:nDD3m|xt:DB7m", + fld_1 = "ff:D9C0r|xd:D90m|xq:nDD0m|xt:DB5m", + + fpop_0 = "DDD8", -- Alias for fstp st0. 
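+  -- (Illustrative reading of one template above, cf. the description before
+  -- map_op: in mov_2, the pair "rm:8BrM" matches a register destination and
+  -- a reg-or-memory source; 8B is the opcode byte, "r" takes the register
+  -- number from the 1st operand and "M" encodes the 2nd operand as ModRM/SIB
+  -- with that register in the reg field -- i.e. mov r32, r/m32.)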
+ + fist_1 = "xw:nDF2m|xd:DB2m", + fistp_1 = "xw:nDF3m|xd:DB3m|xq:nDF7m", + fild_1 = "xw:nDF0m|xd:DB0m|xq:nDF5m", + + fxch_0 = "D9C9", + fxch_1 = "ff:D9C8r", + fxch_2 = "fFf:D9C8r|Fff:D9C8R", + + fucom_1 = "ff:DDE0r", + fucom_2 = "Fff:DDE0R", + fucomp_1 = "ff:DDE8r", + fucomp_2 = "Fff:DDE8R", + fucomi_1 = "ff:DBE8r", -- P6+ + fucomi_2 = "Fff:DBE8R", -- P6+ + fucomip_1 = "ff:DFE8r", -- P6+ + fucomip_2 = "Fff:DFE8R", -- P6+ + fcomi_1 = "ff:DBF0r", -- P6+ + fcomi_2 = "Fff:DBF0R", -- P6+ + fcomip_1 = "ff:DFF0r", -- P6+ + fcomip_2 = "Fff:DFF0R", -- P6+ + fucompp_0 = "DAE9", + fcompp_0 = "DED9", + + fldenv_1 = "x.:D94m", + fnstenv_1 = "x.:D96m", + fstenv_1 = "x.:9BD96m", + fldcw_1 = "xw:nD95m", + fstcw_1 = "xw:n9BD97m", + fnstcw_1 = "xw:nD97m", + fstsw_1 = "Rw:n9BDFE0|xw:n9BDD7m", + fnstsw_1 = "Rw:nDFE0|xw:nDD7m", + fclex_0 = "9BDBE2", + fnclex_0 = "DBE2", + + fnop_0 = "D9D0", + -- D9D1-D9DF: unassigned + + fchs_0 = "D9E0", + fabs_0 = "D9E1", + -- D9E2: unassigned + -- D9E3: unassigned + ftst_0 = "D9E4", + fxam_0 = "D9E5", + -- D9E6: unassigned + -- D9E7: unassigned + fld1_0 = "D9E8", + fldl2t_0 = "D9E9", + fldl2e_0 = "D9EA", + fldpi_0 = "D9EB", + fldlg2_0 = "D9EC", + fldln2_0 = "D9ED", + fldz_0 = "D9EE", + -- D9EF: unassigned + + f2xm1_0 = "D9F0", + fyl2x_0 = "D9F1", + fptan_0 = "D9F2", + fpatan_0 = "D9F3", + fxtract_0 = "D9F4", + fprem1_0 = "D9F5", + fdecstp_0 = "D9F6", + fincstp_0 = "D9F7", + fprem_0 = "D9F8", + fyl2xp1_0 = "D9F9", + fsqrt_0 = "D9FA", + fsincos_0 = "D9FB", + frndint_0 = "D9FC", + fscale_0 = "D9FD", + fsin_0 = "D9FE", + fcos_0 = "D9FF", + + -- SSE, SSE2 + andnpd_2 = "rmo:660F55rM", + andnps_2 = "rmo:0F55rM", + andpd_2 = "rmo:660F54rM", + andps_2 = "rmo:0F54rM", + clflush_1 = "x.:0FAE7m", + cmppd_3 = "rmio:660FC2rMU", + cmpps_3 = "rmio:0FC2rMU", + cmpsd_3 = "rrio:F20FC2rMU|rxi/oq:", + cmpss_3 = "rrio:F30FC2rMU|rxi/od:", + comisd_2 = "rro:660F2FrM|rx/oq:", + comiss_2 = "rro:0F2FrM|rx/od:", + cvtdq2pd_2 = "rro:F30FE6rM|rx/oq:", + cvtdq2ps_2 = "rmo:0F5BrM", + cvtpd2dq_2 = "rmo:F20FE6rM", + cvtpd2ps_2 = "rmo:660F5ArM", + cvtpi2pd_2 = "rx/oq:660F2ArM", + cvtpi2ps_2 = "rx/oq:0F2ArM", + cvtps2dq_2 = "rmo:660F5BrM", + cvtps2pd_2 = "rro:0F5ArM|rx/oq:", + cvtsd2si_2 = "rr/do:F20F2DrM|rr/qo:|rx/dq:|rxq:", + cvtsd2ss_2 = "rro:F20F5ArM|rx/oq:", + cvtsi2sd_2 = "rm/od:F20F2ArM|rm/oq:F20F2ArXM", + cvtsi2ss_2 = "rm/od:F30F2ArM|rm/oq:F30F2ArXM", + cvtss2sd_2 = "rro:F30F5ArM|rx/od:", + cvtss2si_2 = "rr/do:F20F2CrM|rr/qo:|rxd:|rx/qd:", + cvttpd2dq_2 = "rmo:660FE6rM", + cvttps2dq_2 = "rmo:F30F5BrM", + cvttsd2si_2 = "rr/do:F20F2CrM|rr/qo:|rx/dq:|rxq:", + cvttss2si_2 = "rr/do:F30F2CrM|rr/qo:|rxd:|rx/qd:", + fxsave_1 = "x.:0FAE0m", + fxrstor_1 = "x.:0FAE1m", + ldmxcsr_1 = "xd:0FAE2m", + lfence_0 = "0FAEE8", + maskmovdqu_2 = "rro:660FF7rM", + mfence_0 = "0FAEF0", + movapd_2 = "rmo:660F28rM|mro:660F29Rm", + movaps_2 = "rmo:0F28rM|mro:0F29Rm", + movd_2 = "rm/od:660F6ErM|rm/oq:660F6ErXM|mr/do:660F7ERm|mr/qo:", + movdqa_2 = "rmo:660F6FrM|mro:660F7FRm", + movdqu_2 = "rmo:F30F6FrM|mro:F30F7FRm", + movhlps_2 = "rro:0F12rM", + movhpd_2 = "rx/oq:660F16rM|xr/qo:n660F17Rm", + movhps_2 = "rx/oq:0F16rM|xr/qo:n0F17Rm", + movlhps_2 = "rro:0F16rM", + movlpd_2 = "rx/oq:660F12rM|xr/qo:n660F13Rm", + movlps_2 = "rx/oq:0F12rM|xr/qo:n0F13Rm", + movmskpd_2 = "rr/do:660F50rM", + movmskps_2 = "rr/do:0F50rM", + movntdq_2 = "xro:660FE7Rm", + movnti_2 = "xrqd:0FC3Rm", + movntpd_2 = "xro:660F2BRm", + movntps_2 = "xro:0F2BRm", + movq_2 = "rro:F30F7ErM|rx/oq:|xr/qo:n660FD6Rm", + movsd_2 = "rro:F20F10rM|rx/oq:|xr/qo:nF20F11Rm", + 
movss_2 = "rro:F30F10rM|rx/od:|xr/do:F30F11Rm", + movupd_2 = "rmo:660F10rM|mro:660F11Rm", + movups_2 = "rmo:0F10rM|mro:0F11Rm", + orpd_2 = "rmo:660F56rM", + orps_2 = "rmo:0F56rM", + packssdw_2 = "rmo:660F6BrM", + packsswb_2 = "rmo:660F63rM", + packuswb_2 = "rmo:660F67rM", + paddb_2 = "rmo:660FFCrM", + paddd_2 = "rmo:660FFErM", + paddq_2 = "rmo:660FD4rM", + paddsb_2 = "rmo:660FECrM", + paddsw_2 = "rmo:660FEDrM", + paddusb_2 = "rmo:660FDCrM", + paddusw_2 = "rmo:660FDDrM", + paddw_2 = "rmo:660FFDrM", + pand_2 = "rmo:660FDBrM", + pandn_2 = "rmo:660FDFrM", + pause_0 = "F390", + pavgb_2 = "rmo:660FE0rM", + pavgw_2 = "rmo:660FE3rM", + pcmpeqb_2 = "rmo:660F74rM", + pcmpeqd_2 = "rmo:660F76rM", + pcmpeqw_2 = "rmo:660F75rM", + pcmpgtb_2 = "rmo:660F64rM", + pcmpgtd_2 = "rmo:660F66rM", + pcmpgtw_2 = "rmo:660F65rM", + pextrw_3 = "rri/do:660FC5rMU|xri/wo:660F3A15nrMU", -- Mem op: SSE4.1 only. + pinsrw_3 = "rri/od:660FC4rMU|rxi/ow:", + pmaddwd_2 = "rmo:660FF5rM", + pmaxsw_2 = "rmo:660FEErM", + pmaxub_2 = "rmo:660FDErM", + pminsw_2 = "rmo:660FEArM", + pminub_2 = "rmo:660FDArM", + pmovmskb_2 = "rr/do:660FD7rM", + pmulhuw_2 = "rmo:660FE4rM", + pmulhw_2 = "rmo:660FE5rM", + pmullw_2 = "rmo:660FD5rM", + pmuludq_2 = "rmo:660FF4rM", + por_2 = "rmo:660FEBrM", + prefetchnta_1 = "xb:n0F180m", + prefetcht0_1 = "xb:n0F181m", + prefetcht1_1 = "xb:n0F182m", + prefetcht2_1 = "xb:n0F183m", + psadbw_2 = "rmo:660FF6rM", + pshufd_3 = "rmio:660F70rMU", + pshufhw_3 = "rmio:F30F70rMU", + pshuflw_3 = "rmio:F20F70rMU", + pslld_2 = "rmo:660FF2rM|rio:660F726mU", + pslldq_2 = "rio:660F737mU", + psllq_2 = "rmo:660FF3rM|rio:660F736mU", + psllw_2 = "rmo:660FF1rM|rio:660F716mU", + psrad_2 = "rmo:660FE2rM|rio:660F724mU", + psraw_2 = "rmo:660FE1rM|rio:660F714mU", + psrld_2 = "rmo:660FD2rM|rio:660F722mU", + psrldq_2 = "rio:660F733mU", + psrlq_2 = "rmo:660FD3rM|rio:660F732mU", + psrlw_2 = "rmo:660FD1rM|rio:660F712mU", + psubb_2 = "rmo:660FF8rM", + psubd_2 = "rmo:660FFArM", + psubq_2 = "rmo:660FFBrM", + psubsb_2 = "rmo:660FE8rM", + psubsw_2 = "rmo:660FE9rM", + psubusb_2 = "rmo:660FD8rM", + psubusw_2 = "rmo:660FD9rM", + psubw_2 = "rmo:660FF9rM", + punpckhbw_2 = "rmo:660F68rM", + punpckhdq_2 = "rmo:660F6ArM", + punpckhqdq_2 = "rmo:660F6DrM", + punpckhwd_2 = "rmo:660F69rM", + punpcklbw_2 = "rmo:660F60rM", + punpckldq_2 = "rmo:660F62rM", + punpcklqdq_2 = "rmo:660F6CrM", + punpcklwd_2 = "rmo:660F61rM", + pxor_2 = "rmo:660FEFrM", + rcpps_2 = "rmo:0F53rM", + rcpss_2 = "rro:F30F53rM|rx/od:", + rsqrtps_2 = "rmo:0F52rM", + rsqrtss_2 = "rmo:F30F52rM", + sfence_0 = "0FAEF8", + shufpd_3 = "rmio:660FC6rMU", + shufps_3 = "rmio:0FC6rMU", + stmxcsr_1 = "xd:0FAE3m", + ucomisd_2 = "rro:660F2ErM|rx/oq:", + ucomiss_2 = "rro:0F2ErM|rx/od:", + unpckhpd_2 = "rmo:660F15rM", + unpckhps_2 = "rmo:0F15rM", + unpcklpd_2 = "rmo:660F14rM", + unpcklps_2 = "rmo:0F14rM", + xorpd_2 = "rmo:660F57rM", + xorps_2 = "rmo:0F57rM", + + -- SSE3 ops + fisttp_1 = "xw:nDF1m|xd:DB1m|xq:nDD1m", + addsubpd_2 = "rmo:660FD0rM", + addsubps_2 = "rmo:F20FD0rM", + haddpd_2 = "rmo:660F7CrM", + haddps_2 = "rmo:F20F7CrM", + hsubpd_2 = "rmo:660F7DrM", + hsubps_2 = "rmo:F20F7DrM", + lddqu_2 = "rxo:F20FF0rM", + movddup_2 = "rmo:F20F12rM", + movshdup_2 = "rmo:F30F16rM", + movsldup_2 = "rmo:F30F12rM", + + -- SSSE3 ops + pabsb_2 = "rmo:660F381CrM", + pabsd_2 = "rmo:660F381ErM", + pabsw_2 = "rmo:660F381DrM", + palignr_3 = "rmio:660F3A0FrMU", + phaddd_2 = "rmo:660F3802rM", + phaddsw_2 = "rmo:660F3803rM", + phaddw_2 = "rmo:660F3801rM", + phsubd_2 = "rmo:660F3806rM", + phsubsw_2 = "rmo:660F3807rM", + phsubw_2 
= "rmo:660F3805rM", + pmaddubsw_2 = "rmo:660F3804rM", + pmulhrsw_2 = "rmo:660F380BrM", + pshufb_2 = "rmo:660F3800rM", + psignb_2 = "rmo:660F3808rM", + psignd_2 = "rmo:660F380ArM", + psignw_2 = "rmo:660F3809rM", + + -- SSE4.1 ops + blendpd_3 = "rmio:660F3A0DrMU", + blendps_3 = "rmio:660F3A0CrMU", + blendvpd_3 = "rmRo:660F3815rM", + blendvps_3 = "rmRo:660F3814rM", + dppd_3 = "rmio:660F3A41rMU", + dpps_3 = "rmio:660F3A40rMU", + extractps_3 = "mri/do:660F3A17RmU|rri/qo:660F3A17RXmU", + insertps_3 = "rrio:660F3A41rMU|rxi/od:", + movntdqa_2 = "rmo:660F382ArM", + mpsadbw_3 = "rmio:660F3A42rMU", + packusdw_2 = "rmo:660F382BrM", + pblendvb_3 = "rmRo:660F3810rM", + pblendw_3 = "rmio:660F3A0ErMU", + pcmpeqq_2 = "rmo:660F3829rM", + pextrb_3 = "rri/do:660F3A14nRmU|rri/qo:|xri/bo:", + pextrd_3 = "mri/do:660F3A16RmU", + pextrq_3 = "mri/qo:660F3A16RmU", + -- pextrw is SSE2, mem operand is SSE4.1 only + phminposuw_2 = "rmo:660F3841rM", + pinsrb_3 = "rri/od:660F3A20nrMU|rxi/ob:", + pinsrd_3 = "rmi/od:660F3A22rMU", + pinsrq_3 = "rmi/oq:660F3A22rXMU", + pmaxsb_2 = "rmo:660F383CrM", + pmaxsd_2 = "rmo:660F383DrM", + pmaxud_2 = "rmo:660F383FrM", + pmaxuw_2 = "rmo:660F383ErM", + pminsb_2 = "rmo:660F3838rM", + pminsd_2 = "rmo:660F3839rM", + pminud_2 = "rmo:660F383BrM", + pminuw_2 = "rmo:660F383ArM", + pmovsxbd_2 = "rro:660F3821rM|rx/od:", + pmovsxbq_2 = "rro:660F3822rM|rx/ow:", + pmovsxbw_2 = "rro:660F3820rM|rx/oq:", + pmovsxdq_2 = "rro:660F3825rM|rx/oq:", + pmovsxwd_2 = "rro:660F3823rM|rx/oq:", + pmovsxwq_2 = "rro:660F3824rM|rx/od:", + pmovzxbd_2 = "rro:660F3831rM|rx/od:", + pmovzxbq_2 = "rro:660F3832rM|rx/ow:", + pmovzxbw_2 = "rro:660F3830rM|rx/oq:", + pmovzxdq_2 = "rro:660F3835rM|rx/oq:", + pmovzxwd_2 = "rro:660F3833rM|rx/oq:", + pmovzxwq_2 = "rro:660F3834rM|rx/od:", + pmuldq_2 = "rmo:660F3828rM", + pmulld_2 = "rmo:660F3840rM", + ptest_2 = "rmo:660F3817rM", + roundpd_3 = "rmio:660F3A09rMU", + roundps_3 = "rmio:660F3A08rMU", + roundsd_3 = "rrio:660F3A0BrMU|rxi/oq:", + roundss_3 = "rrio:660F3A0ArMU|rxi/od:", + + -- SSE4.2 ops + crc32_2 = "rmqd:F20F38F1rM|rm/dw:66F20F38F1rM|rm/db:F20F38F0rM|rm/qb:", + pcmpestri_3 = "rmio:660F3A61rMU", + pcmpestrm_3 = "rmio:660F3A60rMU", + pcmpgtq_2 = "rmo:660F3837rM", + pcmpistri_3 = "rmio:660F3A63rMU", + pcmpistrm_3 = "rmio:660F3A62rMU", + popcnt_2 = "rmqdw:F30FB8rM", + + -- SSE4a + extrq_2 = "rro:660F79rM", + extrq_3 = "riio:660F780mUU", + insertq_2 = "rro:F20F79rM", + insertq_4 = "rriio:F20F78rMUU", + lzcnt_2 = "rmqdw:F30FBDrM", + movntsd_2 = "xr/qo:nF20F2BRm", + movntss_2 = "xr/do:F30F2BRm", + -- popcnt is also in SSE4.2 +} + +------------------------------------------------------------------------------ + +-- Arithmetic ops. +for name,n in pairs{ add = 0, ["or"] = 1, adc = 2, sbb = 3, + ["and"] = 4, sub = 5, xor = 6, cmp = 7 } do + local n8 = shl(n, 3) + map_op[name.."_2"] = format( + "mr:%02XRm|rm:%02XrM|mI1qdw:81%XmI|mS1qdw:83%XmS|Ri1qdwb:%02Xri|mi1qdwb:81%Xmi", + 1+n8, 3+n8, n, n, 5+n8, n) +end + +-- Shift ops. +for name,n in pairs{ rol = 0, ror = 1, rcl = 2, rcr = 3, + shl = 4, shr = 5, sar = 7, sal = 4 } do + map_op[name.."_2"] = format("m1:D1%Xm|mC1qdwb:D3%Xm|mi:C1%XmU", n, n, n) +end + +-- Conditional ops. +for cc,n in pairs(map_cc) do + map_op["j"..cc.."_1"] = format("J.:n0F8%XJ", n) -- short: 7%X + map_op["set"..cc.."_1"] = format("mb:n0F9%X2m", n) + map_op["cmov"..cc.."_2"] = format("rmqdw:0F4%XrM", n) -- P6+ +end + +-- FP arithmetic ops. 
+for name,n in pairs{ add = 0, mul = 1, com = 2, comp = 3, + sub = 4, subr = 5, div = 6, divr = 7 } do + local nc = 0xc0 + shl(n, 3) + local nr = nc + (n < 4 and 0 or (n % 2 == 0 and 8 or -8)) + local fn = "f"..name + map_op[fn.."_1"] = format("ff:D8%02Xr|xd:D8%Xm|xq:nDC%Xm", nc, n, n) + if n == 2 or n == 3 then + map_op[fn.."_2"] = format("Fff:D8%02XR|Fx2d:D8%XM|Fx2q:nDC%XM", nc, n, n) + else + map_op[fn.."_2"] = format("Fff:D8%02XR|fFf:DC%02Xr|Fx2d:D8%XM|Fx2q:nDC%XM", nc, nr, n, n) + map_op[fn.."p_1"] = format("ff:DE%02Xr", nr) + map_op[fn.."p_2"] = format("fFf:DE%02Xr", nr) + end + map_op["fi"..name.."_1"] = format("xd:DA%Xm|xw:nDE%Xm", n, n) +end + +-- FP conditional moves. +for cc,n in pairs{ b=0, e=1, be=2, u=3, nb=4, ne=5, nbe=6, nu=7 } do + local nc = 0xdac0 + shl(band(n, 3), 3) + shl(band(n, 4), 6) + map_op["fcmov"..cc.."_1"] = format("ff:%04Xr", nc) -- P6+ + map_op["fcmov"..cc.."_2"] = format("Fff:%04XR", nc) -- P6+ +end + +-- SSE FP arithmetic ops. +for name,n in pairs{ sqrt = 1, add = 8, mul = 9, + sub = 12, min = 13, div = 14, max = 15 } do + map_op[name.."ps_2"] = format("rmo:0F5%XrM", n) + map_op[name.."ss_2"] = format("rro:F30F5%XrM|rx/od:", n) + map_op[name.."pd_2"] = format("rmo:660F5%XrM", n) + map_op[name.."sd_2"] = format("rro:F20F5%XrM|rx/oq:", n) +end + +------------------------------------------------------------------------------ + +-- Process pattern string. +local function dopattern(pat, args, sz, op, needrex) + local digit, addin + local opcode = 0 + local szov = sz + local narg = 1 + local rex = 0 + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 5 positions. + if secpos+5 > maxsecpos then wflush() end + + -- Process each character. + for c in gmatch(pat.."|", ".") do + if match(c, "%x") then -- Hex digit. + digit = byte(c) - 48 + if digit > 48 then digit = digit - 39 + elseif digit > 16 then digit = digit - 7 end + opcode = opcode*16 + digit + addin = nil + elseif c == "n" then -- Disable operand size mods for opcode. + szov = nil + elseif c == "X" then -- Force REX.W. + rex = 8 + elseif c == "r" then -- Merge 1st operand regno. into opcode. + addin = args[1]; opcode = opcode + (addin.reg % 8) + if narg < 2 then narg = 2 end + elseif c == "R" then -- Merge 2nd operand regno. into opcode. + addin = args[2]; opcode = opcode + (addin.reg % 8) + narg = 3 + elseif c == "m" or c == "M" then -- Encode ModRM/SIB. + local s + if addin then + s = addin.reg + opcode = opcode - band(s, 7) -- Undo regno opcode merge. + else + s = band(opcode, 15) -- Undo last digit. + opcode = shr(opcode, 4) + end + local nn = c == "m" and 1 or 2 + local t = args[nn] + if narg <= nn then narg = nn + 1 end + if szov == "q" and rex == 0 then rex = rex + 8 end + if t.reg and t.reg > 7 then rex = rex + 1 end + if t.xreg and t.xreg > 7 then rex = rex + 2 end + if s > 7 then rex = rex + 4 end + if needrex then rex = rex + 16 end + wputop(szov, opcode, rex); opcode = nil + local imark = sub(pat, -1) -- Force a mark (ugly). + -- Put ModRM/SIB with regno/last digit as spare. + wputmrmsib(t, imark, s, addin and addin.vreg) + addin = nil + else + if opcode then -- Flush opcode. 
+ if szov == "q" and rex == 0 then rex = rex + 8 end + if needrex then rex = rex + 16 end + if addin and addin.reg == -1 then + wputop(szov, opcode - 7, rex) + waction("VREG", addin.vreg); wputxb(0) + else + if addin and addin.reg > 7 then rex = rex + 1 end + wputop(szov, opcode, rex) + end + opcode = nil + end + if c == "|" then break end + if c == "o" then -- Offset (pure 32 bit displacement). + wputdarg(args[1].disp); if narg < 2 then narg = 2 end + elseif c == "O" then + wputdarg(args[2].disp); narg = 3 + else + -- Anything else is an immediate operand. + local a = args[narg] + narg = narg + 1 + local mode, imm = a.mode, a.imm + if mode == "iJ" and not match("iIJ", c) then + werror("bad operand size for label") + end + if c == "S" then + wputsbarg(imm) + elseif c == "U" then + wputbarg(imm) + elseif c == "W" then + wputwarg(imm) + elseif c == "i" or c == "I" then + if mode == "iJ" then + wputlabel("IMM_", imm, 1) + elseif mode == "iI" and c == "I" then + waction(sz == "w" and "IMM_WB" or "IMM_DB", imm) + else + wputszarg(sz, imm) + end + elseif c == "J" then + if mode == "iPJ" then + waction("REL_A", imm) -- !x64 (secpos) + else + wputlabel("REL_", imm, 2) + end + else + werror("bad char `"..c.."' in pattern `"..pat.."' for `"..op.."'") + end + end + end + end +end + +------------------------------------------------------------------------------ + +-- Mapping of operand modes to short names. Suppress output with '#'. +local map_modename = { + r = "reg", R = "eax", C = "cl", x = "mem", m = "mrm", i = "imm", + f = "stx", F = "st0", J = "lbl", ["1"] = "1", + I = "#", S = "#", O = "#", +} + +-- Return a table/string showing all possible operand modes. +local function templatehelp(template, nparams) + if nparams == 0 then return "" end + local t = {} + for tm in gmatch(template, "[^%|]+") do + local s = map_modename[sub(tm, 1, 1)] + s = s..gsub(sub(tm, 2, nparams), ".", function(c) + return ", "..map_modename[c] + end) + if not match(s, "#") then t[#t+1] = s end + end + return t +end + +-- Match operand modes against mode match part of template. +local function matchtm(tm, args) + for i=1,#args do + if not match(args[i].mode, sub(tm, i, i)) then return end + end + return true +end + +-- Handle opcodes defined with template strings. +map_op[".template__"] = function(params, template, nparams) + if not params then return templatehelp(template, nparams) end + local args = {} + + -- Zero-operand opcodes have no match part. + if #params == 0 then + dopattern(template, args, "d", params.op, nil) + return + end + + -- Determine common operand size (coerce undefined size) or flag as mixed. + local sz, szmix, needrex + for i,p in ipairs(params) do + args[i] = parseoperand(p) + local nsz = args[i].opsize + if nsz then + if sz and sz ~= nsz then szmix = true else sz = nsz end + end + local nrex = args[i].needrex + if nrex ~= nil then + if needrex == nil then + needrex = nrex + elseif needrex ~= nrex then + werror("bad mix of byte-addressable registers") + end + end + end + + -- Try all match:pattern pairs (separated by '|'). + local gotmatch, lastpat + for tm in gmatch(template, "[^%|]+") do + -- Split off size match (starts after mode match) and pattern string. + local szm, pat = match(tm, "^(.-):(.*)$", #args+1) + if pat == "" then pat = lastpat else lastpat = pat end + if matchtm(tm, args) then + local prefix = sub(szm, 1, 1) + if prefix == "/" then -- Match both operand sizes. 
+ if args[1].opsize == sub(szm, 2, 2) and + args[2].opsize == sub(szm, 3, 3) then + dopattern(pat, args, sz, params.op, needrex) -- Process pattern. + return + end + else -- Match common operand size. + local szp = sz + if szm == "" then szm = x64 and "qdwb" or "dwb" end -- Default sizes. + if prefix == "1" then szp = args[1].opsize; szmix = nil + elseif prefix == "2" then szp = args[2].opsize; szmix = nil end + if not szmix and (prefix == "." or match(szm, szp or "#")) then + dopattern(pat, args, szp, params.op, needrex) -- Process pattern. + return + end + end + gotmatch = true + end + end + + local msg = "bad operand mode" + if gotmatch then + if szmix then + msg = "mixed operand size" + else + msg = sz and "bad operand size" or "missing operand size" + end + end + + werror(msg.." in `"..opmodestr(params.op, args).."'") +end + +------------------------------------------------------------------------------ + +-- x64-specific opcode for 64 bit immediates and displacements. +if x64 then + function map_op.mov64_2(params) + if not params then return { "reg, imm", "reg, [disp]", "[disp], reg" } end + if secpos+2 > maxsecpos then wflush() end + local opcode, op64, sz, rex, vreg + local op64 = match(params[1], "^%[%s*(.-)%s*%]$") + if op64 then + local a = parseoperand(params[2]) + if a.mode ~= "rmR" then werror("bad operand mode") end + sz = a.opsize + rex = sz == "q" and 8 or 0 + opcode = 0xa3 + else + op64 = match(params[2], "^%[%s*(.-)%s*%]$") + local a = parseoperand(params[1]) + if op64 then + if a.mode ~= "rmR" then werror("bad operand mode") end + sz = a.opsize + rex = sz == "q" and 8 or 0 + opcode = 0xa1 + else + if sub(a.mode, 1, 1) ~= "r" or a.opsize ~= "q" then + werror("bad operand mode") + end + op64 = params[2] + if a.reg == -1 then + vreg = a.vreg + opcode = 0xb8 + else + opcode = 0xb8 + band(a.reg, 7) + end + rex = a.reg > 7 and 9 or 8 + end + end + wputop(sz, opcode, rex) + if vreg then waction("VREG", vreg); wputxb(0) end + waction("IMM_D", format("(unsigned int)(%s)", op64)) + waction("IMM_D", format("(unsigned int)((%s)>>32)", op64)) + end +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +local function op_data(params) + if not params then return "imm..." end + local sz = sub(params.op, 2, 2) + if sz == "a" then sz = addrsize end + for _,p in ipairs(params) do + local a = parseoperand(p) + if sub(a.mode, 1, 1) ~= "i" or (a.opsize and a.opsize ~= sz) then + werror("bad mode or size in `"..p.."'") + end + if a.mode == "iJ" then + wputlabel("IMM_", a.imm, 1) + else + wputszarg(sz, a.imm) + end + if secpos+2 > maxsecpos then wflush() end + end +end + +map_op[".byte_*"] = op_data +map_op[".sbyte_*"] = op_data +map_op[".word_*"] = op_data +map_op[".dword_*"] = op_data +map_op[".aword_*"] = op_data + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. 
+ wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. +map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_2"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr [, addr]" end + if secpos+2 > maxsecpos then wflush() end + local a = parseoperand(params[1]) + local mode, imm = a.mode, a.imm + if type(imm) == "number" and (mode == "iJ" or (imm >= 1 and imm <= 9)) then + -- Local label (1: ... 9:) or global label (->global:). + waction("LABEL_LG", nil, 1) + wputxb(imm) + elseif mode == "iJ" then + -- PC label (=>pcexpr:). + waction("LABEL_PC", imm) + else + werror("bad label definition") + end + -- SETLABEL must immediately follow LABEL_LG/LABEL_PC. + local addr = params[2] + if addr then + local a = parseoperand(addr) + if a.mode == "iPJ" then + waction("SETLABEL", a.imm) + else + werror("bad label assignment") + end + end +end +map_op[".label_1"] = map_op[".label_2"] + +------------------------------------------------------------------------------ + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) or map_opsizenum[map_opsize[params[1]]] + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", nil, 1) + wputxb(align-1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +-- Spacing pseudo-opcode. +map_op[".space_2"] = function(params) + if not params then return "num [, filler]" end + if secpos+1 > maxsecpos then wflush() end + waction("SPACE", params[1]) + local fill = params[2] + if fill then + fill = tonumber(fill) + if not fill or fill < 0 or fill > 255 then werror("bad filler") end + end + wputxb(fill or 0) +end +map_op[".space_1"] = map_op[".space_2"] + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + if reg and not map_reg_valid_base[reg] then + werror("bad base register `"..(map_reg_rev[reg] or reg).."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. 
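+  -- (Illustrative, with hypothetical names: after `.type FOO, foo_t, ebx`
+  -- the first type gets number 1, so the emitted line is
+  --   #define Dt1(_V) (int)(ptrdiff_t)&(((foo_t *)0)_V)
+  -- and an operand such as FOO->field is encoded as [ebx + Dt1(->field)],
+  -- i.e. an offsetof-style displacement from the overridden base register.)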
+ local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg and map_reg_rev[tp.reg] or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION") + wputxb(num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpregs(out) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. +function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = map_coreop }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/Build/source/libs/luajit/LuaJIT-src/dynasm/dynasm.lua b/Build/source/libs/luajit/LuaJIT-src/dynasm/dynasm.lua new file mode 100644 index 00000000000..fffda7513c8 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/dynasm/dynasm.lua @@ -0,0 +1,1094 @@ +------------------------------------------------------------------------------ +-- DynASM. A dynamic assembler for code generation engines. +-- Originally designed and implemented for LuaJIT. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- See below for full copyright notice. +------------------------------------------------------------------------------ + +-- Application information. +local _info = { + name = "DynASM", + description = "A dynamic assembler for code generation engines", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + url = "http://luajit.org/dynasm.html", + license = "MIT", + copyright = [[ +Copyright (C) 2005-2015 Mike Pall. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[ MIT license: http://www.opensource.org/licenses/mit-license.php ] +]], +} + +-- Cache library functions. +local type, pairs, ipairs = type, pairs, ipairs +local pcall, error, assert = pcall, error, assert +local _s = string +local sub, match, gmatch, gsub = _s.sub, _s.match, _s.gmatch, _s.gsub +local format, rep, upper = _s.format, _s.rep, _s.upper +local _t = table +local insert, remove, concat, sort = _t.insert, _t.remove, _t.concat, _t.sort +local exit = os.exit +local io = io +local stdin, stdout, stderr = io.stdin, io.stdout, io.stderr + +------------------------------------------------------------------------------ + +-- Program options. +local g_opt = {} + +-- Global state for current file. +local g_fname, g_curline, g_indent, g_lineno, g_synclineno, g_arch +local g_errcount = 0 + +-- Write buffer for output file. +local g_wbuffer, g_capbuffer + +------------------------------------------------------------------------------ + +-- Write an output line (or callback function) to the buffer. +local function wline(line, needindent) + local buf = g_capbuffer or g_wbuffer + buf[#buf+1] = needindent and g_indent..line or line + g_synclineno = g_synclineno + 1 +end + +-- Write assembler line as a comment, if requestd. +local function wcomment(aline) + if g_opt.comment then + wline(g_opt.comment..aline..g_opt.endcomment, true) + end +end + +-- Resync CPP line numbers. +local function wsync() + if g_synclineno ~= g_lineno and g_opt.cpp then + wline("#line "..g_lineno..' "'..g_fname..'"') + g_synclineno = g_lineno + end +end + +-- Dummy action flush function. Replaced with arch-specific function later. +local function wflush(term) +end + +-- Dump all buffered output lines. +local function wdumplines(out, buf) + for _,line in ipairs(buf) do + if type(line) == "string" then + assert(out:write(line, "\n")) + else + -- Special callback to dynamically insert lines after end of processing. + line(out) + end + end +end + +------------------------------------------------------------------------------ + +-- Emit an error. Processing continues with next statement. +local function werror(msg) + error(format("%s:%s: error: %s:\n%s", g_fname, g_lineno, msg, g_curline), 0) +end + +-- Emit a fatal error. Processing stops. +local function wfatal(msg) + g_errcount = "fatal" + werror(msg) +end + +-- Print a warning. Processing continues. +local function wwarn(msg) + stderr:write(format("%s:%s: warning: %s:\n%s\n", + g_fname, g_lineno, msg, g_curline)) +end + +-- Print caught error message. But suppress excessive errors. +local function wprinterr(...) + if type(g_errcount) == "number" then + -- Regular error. + g_errcount = g_errcount + 1 + if g_errcount < 21 then -- Seems to be a reasonable limit. + stderr:write(...) + elseif g_errcount == 21 then + stderr:write(g_fname, + ":*: warning: too many errors (suppressed further messages).\n") + end + else + -- Fatal error. + stderr:write(...) + return true -- Stop processing. 
+ end +end + +------------------------------------------------------------------------------ + +-- Map holding all option handlers. +local opt_map = {} +local opt_current + +-- Print error and exit with error status. +local function opterror(...) + stderr:write("dynasm.lua: ERROR: ", ...) + stderr:write("\n") + exit(1) +end + +-- Get option parameter. +local function optparam(args) + local argn = args.argn + local p = args[argn] + if not p then + opterror("missing parameter for option `", opt_current, "'.") + end + args.argn = argn + 1 + return p +end + +------------------------------------------------------------------------------ + +-- Core pseudo-opcodes. +local map_coreop = {} +-- Dummy opcode map. Replaced by arch-specific map. +local map_op = {} + +-- Forward declarations. +local dostmt +local readfile + +------------------------------------------------------------------------------ + +-- Map for defines (initially empty, chains to arch-specific map). +local map_def = {} + +-- Pseudo-opcode to define a substitution. +map_coreop[".define_2"] = function(params, nparams) + if not params then return nparams == 1 and "name" or "name, subst" end + local name, def = params[1], params[2] or "1" + if not match(name, "^[%a_][%w_]*$") then werror("bad or duplicate define") end + map_def[name] = def +end +map_coreop[".define_1"] = map_coreop[".define_2"] + +-- Define a substitution on the command line. +function opt_map.D(args) + local namesubst = optparam(args) + local name, subst = match(namesubst, "^([%a_][%w_]*)=(.*)$") + if name then + map_def[name] = subst + elseif match(namesubst, "^[%a_][%w_]*$") then + map_def[namesubst] = "1" + else + opterror("bad define") + end +end + +-- Undefine a substitution on the command line. +function opt_map.U(args) + local name = optparam(args) + if match(name, "^[%a_][%w_]*$") then + map_def[name] = nil + else + opterror("bad define") + end +end + +-- Helper for definesubst. +local gotsubst + +local function definesubst_one(word) + local subst = map_def[word] + if subst then gotsubst = word; return subst else return word end +end + +-- Iteratively substitute defines. +local function definesubst(stmt) + -- Limit number of iterations. + for i=1,100 do + gotsubst = false + stmt = gsub(stmt, "#?[%w_]+", definesubst_one) + if not gotsubst then break end + end + if gotsubst then wfatal("recursive define involving `"..gotsubst.."'") end + return stmt +end + +-- Dump all defines. +local function dumpdefines(out, lvl) + local t = {} + for name in pairs(map_def) do + t[#t+1] = name + end + sort(t) + out:write("Defines:\n") + for _,name in ipairs(t) do + local subst = map_def[name] + if g_arch then subst = g_arch.revdef(subst) end + out:write(format(" %-20s %s\n", name, subst)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Support variables for conditional assembly. +local condlevel = 0 +local condstack = {} + +-- Evaluate condition with a Lua expression. Substitutions already performed. +local function cond_eval(cond) + local func, err + if setfenv then + func, err = loadstring("return "..cond, "=expr") + else + -- No globals. All unknown identifiers evaluate to nil. + func, err = load("return "..cond, "=expr", "t", {}) + end + if func then + if setfenv then + setfenv(func, {}) -- No globals. All unknown identifiers evaluate to nil. + end + local ok, res = pcall(func) + if ok then + if res == 0 then return false end -- Oh well. 
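+      -- 0 is truthy in Lua, so it was mapped to false above to give the
+      -- conditions C-like semantics; any other result is coerced to a
+      -- plain boolean here.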
+ return not not res + end + err = res + end + wfatal("bad condition: "..err) +end + +-- Skip statements until next conditional pseudo-opcode at the same level. +local function stmtskip() + local dostmt_save = dostmt + local lvl = 0 + dostmt = function(stmt) + local op = match(stmt, "^%s*(%S+)") + if op == ".if" then + lvl = lvl + 1 + elseif lvl ~= 0 then + if op == ".endif" then lvl = lvl - 1 end + elseif op == ".elif" or op == ".else" or op == ".endif" then + dostmt = dostmt_save + dostmt(stmt) + end + end +end + +-- Pseudo-opcodes for conditional assembly. +map_coreop[".if_1"] = function(params) + if not params then return "condition" end + local lvl = condlevel + 1 + local res = cond_eval(params[1]) + condlevel = lvl + condstack[lvl] = res + if not res then stmtskip() end +end + +map_coreop[".elif_1"] = function(params) + if not params then return "condition" end + if condlevel == 0 then wfatal(".elif without .if") end + local lvl = condlevel + local res = condstack[lvl] + if res then + if res == "else" then wfatal(".elif after .else") end + else + res = cond_eval(params[1]) + if res then + condstack[lvl] = res + return + end + end + stmtskip() +end + +map_coreop[".else_0"] = function(params) + if condlevel == 0 then wfatal(".else without .if") end + local lvl = condlevel + local res = condstack[lvl] + condstack[lvl] = "else" + if res then + if res == "else" then wfatal(".else after .else") end + stmtskip() + end +end + +map_coreop[".endif_0"] = function(params) + local lvl = condlevel + if lvl == 0 then wfatal(".endif without .if") end + condlevel = lvl - 1 +end + +-- Check for unfinished conditionals. +local function checkconds() + if g_errcount ~= "fatal" and condlevel ~= 0 then + wprinterr(g_fname, ":*: error: unbalanced conditional\n") + end +end + +------------------------------------------------------------------------------ + +-- Search for a file in the given path and open it for reading. +local function pathopen(path, name) + local dirsep = package and match(package.path, "\\") and "\\" or "/" + for _,p in ipairs(path) do + local fullname = p == "" and name or p..dirsep..name + local fin = io.open(fullname, "r") + if fin then + g_fname = fullname + return fin + end + end +end + +-- Include a file. +map_coreop[".include_1"] = function(params) + if not params then return "filename" end + local name = params[1] + -- Save state. Ugly, I know. but upvalues are fast. + local gf, gl, gcl, gi = g_fname, g_lineno, g_curline, g_indent + -- Read the included file. + local fatal = readfile(pathopen(g_opt.include, name) or + wfatal("include file `"..name.."' not found")) + -- Restore state. + g_synclineno = -1 + g_fname, g_lineno, g_curline, g_indent = gf, gl, gcl, gi + if fatal then wfatal("in include file") end +end + +-- Make .include and conditionals initially available, too. +map_op[".include_1"] = map_coreop[".include_1"] +map_op[".if_1"] = map_coreop[".if_1"] +map_op[".elif_1"] = map_coreop[".elif_1"] +map_op[".else_0"] = map_coreop[".else_0"] +map_op[".endif_0"] = map_coreop[".endif_0"] + +------------------------------------------------------------------------------ + +-- Support variables for macros. +local mac_capture, mac_lineno, mac_name +local mac_active = {} +local mac_list = {} + +-- Pseudo-opcode to define a macro. +map_coreop[".macro_*"] = function(mparams) + if not mparams then return "name [, params...]" end + -- Split off and validate macro name. 
+ local name = remove(mparams, 1) + if not name then werror("missing macro name") end + if not (match(name, "^[%a_][%w_%.]*$") or match(name, "^%.[%w_%.]*$")) then + wfatal("bad macro name `"..name.."'") + end + -- Validate macro parameter names. + local mdup = {} + for _,mp in ipairs(mparams) do + if not match(mp, "^[%a_][%w_]*$") then + wfatal("bad macro parameter name `"..mp.."'") + end + if mdup[mp] then wfatal("duplicate macro parameter name `"..mp.."'") end + mdup[mp] = true + end + -- Check for duplicate or recursive macro definitions. + local opname = name.."_"..#mparams + if map_op[opname] or map_op[name.."_*"] then + wfatal("duplicate macro `"..name.."' ("..#mparams.." parameters)") + end + if mac_capture then wfatal("recursive macro definition") end + + -- Enable statement capture. + local lines = {} + mac_lineno = g_lineno + mac_name = name + mac_capture = function(stmt) -- Statement capture function. + -- Stop macro definition with .endmacro pseudo-opcode. + if not match(stmt, "^%s*.endmacro%s*$") then + lines[#lines+1] = stmt + return + end + mac_capture = nil + mac_lineno = nil + mac_name = nil + mac_list[#mac_list+1] = opname + -- Add macro-op definition. + map_op[opname] = function(params) + if not params then return mparams, lines end + -- Protect against recursive macro invocation. + if mac_active[opname] then wfatal("recursive macro invocation") end + mac_active[opname] = true + -- Setup substitution map. + local subst = {} + for i,mp in ipairs(mparams) do subst[mp] = params[i] end + local mcom + if g_opt.maccomment and g_opt.comment then + mcom = " MACRO "..name.." ("..#mparams..")" + wcomment("{"..mcom) + end + -- Loop through all captured statements + for _,stmt in ipairs(lines) do + -- Substitute macro parameters. + local st = gsub(stmt, "[%w_]+", subst) + st = definesubst(st) + st = gsub(st, "%s*%.%.%s*", "") -- Token paste a..b. + if mcom and sub(st, 1, 1) ~= "|" then wcomment(st) end + -- Emit statement. Use a protected call for better diagnostics. + local ok, err = pcall(dostmt, st) + if not ok then + -- Add the captured statement to the error. + wprinterr(err, "\n", g_indent, "| ", stmt, + "\t[MACRO ", name, " (", #mparams, ")]\n") + end + end + if mcom then wcomment("}"..mcom) end + mac_active[opname] = nil + end + end +end + +-- An .endmacro pseudo-opcode outside of a macro definition is an error. +map_coreop[".endmacro_0"] = function(params) + wfatal(".endmacro without .macro") +end + +-- Dump all macros and their contents (with -PP only). +local function dumpmacros(out, lvl) + sort(mac_list) + out:write("Macros:\n") + for _,opname in ipairs(mac_list) do + local name = sub(opname, 1, -3) + local params, lines = map_op[opname]() + out:write(format(" %-20s %s\n", name, concat(params, ", "))) + if lvl > 1 then + for _,line in ipairs(lines) do + out:write(" |", line, "\n") + end + out:write("\n") + end + end + out:write("\n") +end + +-- Check for unfinished macro definitions. +local function checkmacros() + if mac_capture then + wprinterr(g_fname, ":", mac_lineno, + ": error: unfinished .macro `", mac_name ,"'\n") + end +end + +------------------------------------------------------------------------------ + +-- Support variables for captures. +local cap_lineno, cap_name +local cap_buffers = {} +local cap_used = {} + +-- Start a capture. 
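+-- Illustration (not from the original source): in a .dasc file this looks like
+--   |.capture NAME
+--   | ...lines whose C output should be buffered...
+--   |.endcapture
+-- and the buffered lines are emitted wherever |.dumpcapture NAME appears.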
+map_coreop[".capture_1"] = function(params) + if not params then return "name" end + wflush() + local name = params[1] + if not match(name, "^[%a_][%w_]*$") then + wfatal("bad capture name `"..name.."'") + end + if cap_name then + wfatal("already capturing to `"..cap_name.."' since line "..cap_lineno) + end + cap_name = name + cap_lineno = g_lineno + -- Create or continue a capture buffer and start the output line capture. + local buf = cap_buffers[name] + if not buf then buf = {}; cap_buffers[name] = buf end + g_capbuffer = buf + g_synclineno = 0 +end + +-- Stop a capture. +map_coreop[".endcapture_0"] = function(params) + wflush() + if not cap_name then wfatal(".endcapture without a valid .capture") end + cap_name = nil + cap_lineno = nil + g_capbuffer = nil + g_synclineno = 0 +end + +-- Dump a capture buffer. +map_coreop[".dumpcapture_1"] = function(params) + if not params then return "name" end + wflush() + local name = params[1] + if not match(name, "^[%a_][%w_]*$") then + wfatal("bad capture name `"..name.."'") + end + cap_used[name] = true + wline(function(out) + local buf = cap_buffers[name] + if buf then wdumplines(out, buf) end + end) + g_synclineno = 0 +end + +-- Dump all captures and their buffers (with -PP only). +local function dumpcaptures(out, lvl) + out:write("Captures:\n") + for name,buf in pairs(cap_buffers) do + out:write(format(" %-20s %4s)\n", name, "("..#buf)) + if lvl > 1 then + local bar = rep("=", 76) + out:write(" ", bar, "\n") + for _,line in ipairs(buf) do + out:write(" ", line, "\n") + end + out:write(" ", bar, "\n\n") + end + end + out:write("\n") +end + +-- Check for unfinished or unused captures. +local function checkcaptures() + if cap_name then + wprinterr(g_fname, ":", cap_lineno, + ": error: unfinished .capture `", cap_name,"'\n") + return + end + for name in pairs(cap_buffers) do + if not cap_used[name] then + wprinterr(g_fname, ":*: error: missing .dumpcapture ", name ,"\n") + end + end +end + +------------------------------------------------------------------------------ + +-- Sections names. +local map_sections = {} + +-- Pseudo-opcode to define code sections. +-- TODO: Data sections, BSS sections. Needs extra C code and API. +map_coreop[".section_*"] = function(params) + if not params then return "name..." end + if #map_sections > 0 then werror("duplicate section definition") end + wflush() + for sn,name in ipairs(params) do + local opname = "."..name.."_0" + if not match(name, "^[%a][%w_]*$") or + map_op[opname] or map_op["."..name.."_*"] then + werror("bad section name `"..name.."'") + end + map_sections[#map_sections+1] = name + wline(format("#define DASM_SECTION_%s\t%d", upper(name), sn-1)) + map_op[opname] = function(params) g_arch.section(sn-1) end + end + wline(format("#define DASM_MAXSECTION\t\t%d", #map_sections)) +end + +-- Dump all sections. +local function dumpsections(out, lvl) + out:write("Sections:\n") + for _,name in ipairs(map_sections) do + out:write(format(" %s\n", name)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Replacement for customized Lua, which lacks the package library. +local prefix = "" +if not require then + function require(name) + local fp = assert(io.open(prefix..name..".lua")) + local s = fp:read("*a") + assert(fp:close()) + return assert(loadstring(s, "@"..name..".lua"))() + end +end + +-- Load architecture-specific module. 
+local function loadarch(arch) + if not match(arch, "^[%w_]+$") then return "bad arch name" end + local ok, m_arch = pcall(require, "dasm_"..arch) + if not ok then return "cannot load module: "..m_arch end + g_arch = m_arch + wflush = m_arch.passcb(wline, werror, wfatal, wwarn) + m_arch.setup(arch, g_opt) + map_op, map_def = m_arch.mergemaps(map_coreop, map_def) +end + +-- Dump architecture description. +function opt_map.dumparch(args) + local name = optparam(args) + if not g_arch then + local err = loadarch(name) + if err then opterror(err) end + end + + local t = {} + for name in pairs(map_coreop) do t[#t+1] = name end + for name in pairs(map_op) do t[#t+1] = name end + sort(t) + + local out = stdout + local _arch = g_arch._info + out:write(format("%s version %s, released %s, %s\n", + _info.name, _info.version, _info.release, _info.url)) + g_arch.dumparch(out) + + local pseudo = true + out:write("Pseudo-Opcodes:\n") + for _,sname in ipairs(t) do + local name, nparam = match(sname, "^(.+)_([0-9%*])$") + if name then + if pseudo and sub(name, 1, 1) ~= "." then + out:write("\nOpcodes:\n") + pseudo = false + end + local f = map_op[sname] + local s + if nparam ~= "*" then nparam = nparam + 0 end + if nparam == 0 then + s = "" + elseif type(f) == "string" then + s = map_op[".template__"](nil, f, nparam) + else + s = f(nil, nparam) + end + if type(s) == "table" then + for _,s2 in ipairs(s) do + out:write(format(" %-12s %s\n", name, s2)) + end + else + out:write(format(" %-12s %s\n", name, s)) + end + end + end + out:write("\n") + exit(0) +end + +-- Pseudo-opcode to set the architecture. +-- Only initially available (map_op is replaced when called). +map_op[".arch_1"] = function(params) + if not params then return "name" end + local err = loadarch(params[1]) + if err then wfatal(err) end + wline(format("#if DASM_VERSION != %d", _info.vernum)) + wline('#error "Version mismatch between DynASM and included encoding engine"') + wline("#endif") +end + +-- Dummy .arch pseudo-opcode to improve the error report. +map_coreop[".arch_1"] = function(params) + if not params then return "name" end + wfatal("duplicate .arch statement") +end + +------------------------------------------------------------------------------ + +-- Dummy pseudo-opcode. Don't confuse '.nop' with 'nop'. +map_coreop[".nop_*"] = function(params) + if not params then return "[ignored...]" end +end + +-- Pseudo-opcodes to raise errors. +map_coreop[".error_1"] = function(params) + if not params then return "message" end + werror(params[1]) +end + +map_coreop[".fatal_1"] = function(params) + if not params then return "message" end + wfatal(params[1]) +end + +-- Dump all user defined elements. +local function dumpdef(out) + local lvl = g_opt.dumpdef + if lvl == 0 then return end + dumpsections(out, lvl) + dumpdefines(out, lvl) + if g_arch then g_arch.dumpdef(out, lvl) end + dumpmacros(out, lvl) + dumpcaptures(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Helper for splitstmt. +local splitlvl + +local function splitstmt_one(c) + if c == "(" then + splitlvl = ")"..splitlvl + elseif c == "[" then + splitlvl = "]"..splitlvl + elseif c == "{" then + splitlvl = "}"..splitlvl + elseif c == ")" or c == "]" or c == "}" then + if sub(splitlvl, 1, 1) ~= c then werror("unbalanced (), [] or {}") end + splitlvl = sub(splitlvl, 2) + elseif splitlvl == "" then + return " \0 " + end + return c +end + +-- Split statement into (pseudo-)opcode and params. 
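+-- For example, "mov aword [eax], ebx" splits into op "mov" with params
+-- { "aword [eax]", "ebx" } and is then dispatched via map_op["mov_2"];
+-- a trailing-colon line such as "->name:" is first rewritten into a
+-- .label statement.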
+local function splitstmt(stmt) + -- Convert label with trailing-colon into .label statement. + local label = match(stmt, "^%s*(.+):%s*$") + if label then return ".label", {label} end + + -- Split at commas and equal signs, but obey parentheses and brackets. + splitlvl = "" + stmt = gsub(stmt, "[,%(%)%[%]{}]", splitstmt_one) + if splitlvl ~= "" then werror("unbalanced () or []") end + + -- Split off opcode. + local op, other = match(stmt, "^%s*([^%s%z]+)%s*(.*)$") + if not op then werror("bad statement syntax") end + + -- Split parameters. + local params = {} + for p in gmatch(other, "%s*(%Z+)%z?") do + params[#params+1] = gsub(p, "%s+$", "") + end + if #params > 16 then werror("too many parameters") end + + params.op = op + return op, params +end + +-- Process a single statement. +dostmt = function(stmt) + -- Ignore empty statements. + if match(stmt, "^%s*$") then return end + + -- Capture macro defs before substitution. + if mac_capture then return mac_capture(stmt) end + stmt = definesubst(stmt) + + -- Emit C code without parsing the line. + if sub(stmt, 1, 1) == "|" then + local tail = sub(stmt, 2) + wflush() + if sub(tail, 1, 2) == "//" then wcomment(tail) else wline(tail, true) end + return + end + + -- Split into (pseudo-)opcode and params. + local op, params = splitstmt(stmt) + + -- Get opcode handler (matching # of parameters or generic handler). + local f = map_op[op.."_"..#params] or map_op[op.."_*"] + if not f then + if not g_arch then wfatal("first statement must be .arch") end + -- Improve error report. + for i=0,9 do + if map_op[op.."_"..i] then + werror("wrong number of parameters for `"..op.."'") + end + end + werror("unknown statement `"..op.."'") + end + + -- Call opcode handler or special handler for template strings. + if type(f) == "string" then + map_op[".template__"](params, f) + else + f(params) + end +end + +-- Process a single line. +local function doline(line) + if g_opt.flushline then wflush() end + + -- Assembler line? + local indent, aline = match(line, "^(%s*)%|(.*)$") + if not aline then + -- No, plain C code line, need to flush first. + wflush() + wsync() + wline(line, false) + return + end + + g_indent = indent -- Remember current line indentation. + + -- Emit C code (even from macros). Avoids echo and line parsing. + if sub(aline, 1, 1) == "|" then + if not mac_capture then + wsync() + elseif g_opt.comment then + wsync() + wcomment(aline) + end + dostmt(aline) + return + end + + -- Echo assembler line as a comment. + if g_opt.comment then + wsync() + wcomment(aline) + end + + -- Strip assembler comments. + aline = gsub(aline, "//.*$", "") + + -- Split line into statements at semicolons. + if match(aline, ";") then + for stmt in gmatch(aline, "[^;]+") do dostmt(stmt) end + else + dostmt(aline) + end +end + +------------------------------------------------------------------------------ + +-- Write DynASM header. +local function dasmhead(out) + out:write(format([[ +/* +** This file has been pre-processed with DynASM. +** %s +** DynASM version %s, DynASM %s version %s +** DO NOT EDIT! The original file is in "%s". +*/ + +]], _info.url, + _info.version, g_arch._info.arch, g_arch._info.version, + g_fname)) +end + +-- Read input file. +readfile = function(fin) + g_indent = "" + g_lineno = 0 + g_synclineno = -1 + + -- Process all lines. + for line in fin:lines() do + g_lineno = g_lineno + 1 + g_curline = line + local ok, err = pcall(doline, line) + if not ok and wprinterr(err, "\n") then return true end + end + wflush() + + -- Close input file. 
+ assert(fin == stdin or fin:close()) +end + +-- Write output file. +local function writefile(outfile) + local fout + + -- Open output file. + if outfile == nil or outfile == "-" then + fout = stdout + else + fout = assert(io.open(outfile, "w")) + end + + -- Write all buffered lines + wdumplines(fout, g_wbuffer) + + -- Close output file. + assert(fout == stdout or fout:close()) + + -- Optionally dump definitions. + dumpdef(fout == stdout and stderr or stdout) +end + +-- Translate an input file to an output file. +local function translate(infile, outfile) + g_wbuffer = {} + g_indent = "" + g_lineno = 0 + g_synclineno = -1 + + -- Put header. + wline(dasmhead) + + -- Read input file. + local fin + if infile == "-" then + g_fname = "(stdin)" + fin = stdin + else + g_fname = infile + fin = assert(io.open(infile, "r")) + end + readfile(fin) + + -- Check for errors. + if not g_arch then + wprinterr(g_fname, ":*: error: missing .arch directive\n") + end + checkconds() + checkmacros() + checkcaptures() + + if g_errcount ~= 0 then + stderr:write(g_fname, ":*: info: ", g_errcount, " error", + (type(g_errcount) == "number" and g_errcount > 1) and "s" or "", + " in input file -- no output file generated.\n") + dumpdef(stderr) + exit(1) + end + + -- Write output file. + writefile(outfile) +end + +------------------------------------------------------------------------------ + +-- Print help text. +function opt_map.help() + stdout:write("DynASM -- ", _info.description, ".\n") + stdout:write("DynASM ", _info.version, " ", _info.release, " ", _info.url, "\n") + stdout:write[[ + +Usage: dynasm [OPTION]... INFILE.dasc|- + + -h, --help Display this help text. + -V, --version Display version and copyright information. + + -o, --outfile FILE Output file name (default is stdout). + -I, --include DIR Add directory to the include search path. + + -c, --ccomment Use /* */ comments for assembler lines. + -C, --cppcomment Use // comments for assembler lines (default). + -N, --nocomment Suppress assembler lines in output. + -M, --maccomment Show macro expansions as comments (default off). + + -L, --nolineno Suppress CPP line number information in output. + -F, --flushline Flush action list for every line. + + -D NAME[=SUBST] Define a substitution. + -U NAME Undefine a substitution. + + -P, --dumpdef Dump defines, macros, etc. Repeat for more output. + -A, --dumparch ARCH Load architecture ARCH and dump description. +]] + exit(0) +end + +-- Print version information. +function opt_map.version() + stdout:write(format("%s version %s, released %s\n%s\n\n%s", + _info.name, _info.version, _info.release, _info.url, _info.copyright)) + exit(0) +end + +-- Misc. options. +function opt_map.outfile(args) g_opt.outfile = optparam(args) end +function opt_map.include(args) insert(g_opt.include, 1, optparam(args)) end +function opt_map.ccomment() g_opt.comment = "/*|"; g_opt.endcomment = " */" end +function opt_map.cppcomment() g_opt.comment = "//|"; g_opt.endcomment = "" end +function opt_map.nocomment() g_opt.comment = false end +function opt_map.maccomment() g_opt.maccomment = true end +function opt_map.nolineno() g_opt.cpp = false end +function opt_map.flushline() g_opt.flushline = true end +function opt_map.dumpdef() g_opt.dumpdef = g_opt.dumpdef + 1 end + +------------------------------------------------------------------------------ + +-- Short aliases for long options. 
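+-- Illustration (not from the original source): the LuaJIT build (see
+-- src/Makefile in this patch) runs this script via host/minilua roughly as
+--   minilua ../dynasm/dynasm.lua -D JIT -D FFI -o host/buildvm_arch.h vm_<arch>.dasc
+-- with the exact -D list derived from the detected target.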
+local opt_alias = { + h = "help", ["?"] = "help", V = "version", + o = "outfile", I = "include", + c = "ccomment", C = "cppcomment", N = "nocomment", M = "maccomment", + L = "nolineno", F = "flushline", + P = "dumpdef", A = "dumparch", +} + +-- Parse single option. +local function parseopt(opt, args) + opt_current = #opt == 1 and "-"..opt or "--"..opt + local f = opt_map[opt] or opt_map[opt_alias[opt]] + if not f then + opterror("unrecognized option `", opt_current, "'. Try `--help'.\n") + end + f(args) +end + +-- Parse arguments. +local function parseargs(args) + -- Default options. + g_opt.comment = "//|" + g_opt.endcomment = "" + g_opt.cpp = true + g_opt.dumpdef = 0 + g_opt.include = { "" } + + -- Process all option arguments. + args.argn = 1 + repeat + local a = args[args.argn] + if not a then break end + local lopt, opt = match(a, "^%-(%-?)(.+)") + if not opt then break end + args.argn = args.argn + 1 + if lopt == "" then + -- Loop through short options. + for o in gmatch(opt, ".") do parseopt(o, args) end + else + -- Long option. + parseopt(opt, args) + end + until false + + -- Check for proper number of arguments. + local nargs = #args - args.argn + 1 + if nargs ~= 1 then + if nargs == 0 then + if g_opt.dumpdef > 0 then return dumpdef(stdout) end + end + opt_map.help() + end + + -- Translate a single input file to a single output file + -- TODO: Handle multiple files? + translate(args[args.argn], g_opt.outfile) +end + +------------------------------------------------------------------------------ + +-- Add the directory dynasm.lua resides in to the Lua module search path. +local arg = arg +if arg and arg[0] then + prefix = match(arg[0], "^(.*[/\\])") + if package and prefix then package.path = prefix.."?.lua;"..package.path end +end + +-- Start DynASM. +parseargs{...} + +------------------------------------------------------------------------------ + diff --git a/Build/source/libs/luajit/LuaJIT-src/etc/luajit.1 b/Build/source/libs/luajit/LuaJIT-src/etc/luajit.1 new file mode 100644 index 00000000000..223a0283755 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/etc/luajit.1 @@ -0,0 +1,88 @@ +.TH luajit 1 "" "" "LuaJIT documentation" +.SH NAME +luajit \- Just-In-Time Compiler for the Lua Language +\fB +.SH SYNOPSIS +.B luajit +[\fIoptions\fR]... [\fIscript\fR [\fIargs\fR]...] +.SH "WEB SITE" +.IR http://luajit.org +.SH DESCRIPTION +.PP +This is the command-line program to run Lua programs with \fBLuaJIT\fR. +.PP +\fBLuaJIT\fR is a just-in-time (JIT) compiler for the Lua language. +The virtual machine (VM) is based on a fast interpreter combined with +a trace compiler. It can significantly improve the performance of Lua programs. +.PP +\fBLuaJIT\fR is API\- and ABI-compatible with the VM of the standard +Lua\ 5.1 interpreter. When embedding the VM into an application, +the built library can be used as a drop-in replacement. +.SH OPTIONS +.TP +.BI "\-e " chunk +Run the given chunk of Lua code. +.TP +.BI "\-l " library +Load the named library, just like \fBrequire("\fR\fIlibrary\fR\fB")\fR. +.TP +.BI "\-b " ... +Save or list bytecode. Run without arguments to get help on options. +.TP +.BI "\-j " command +Perform LuaJIT control command (optional space after \fB\-j\fR). +.TP +.BI "\-O" [opt] +Control LuaJIT optimizations. +.TP +.B "\-i" +Run in interactive mode. +.TP +.B "\-v" +Show \fBLuaJIT\fR version. +.TP +.B "\-E" +Ignore environment variables. +.TP +.B "\-\-" +Stop processing options. +.TP +.B "\-" +Read script from stdin instead. 
+.PP +After all options are processed, the given \fIscript\fR is run. +The arguments are passed in the global \fIarg\fR table. +.PP +Interactive mode is only entered, if no \fIscript\fR and no \fB\-e\fR +option is given. Interactive mode can be left with EOF (\fICtrl\-Z\fB). +.SH EXAMPLES +.TP +luajit hello.lua world + +Prints "Hello world", assuming \fIhello.lua\fR contains: +.br + print("Hello", arg[1]) +.TP +luajit \-e "local x=0; for i=1,1e9 do x=x+i end; print(x)" + +Calculates the sum of the numbers from 1 to 1000000000. +.br +And finishes in a reasonable amount of time, too. +.TP +luajit \-jv \-e "for i=1,10 do for j=1,10 do for k=1,100 do end end end" + +Runs some nested loops and shows the resulting traces. +.SH COPYRIGHT +.PP +\fBLuaJIT\fR is Copyright \(co 2005-2015 Mike Pall. +.br +\fBLuaJIT\fR is open source software, released under the MIT license. +.SH SEE ALSO +.PP +More details in the provided HTML docs or at: +.IR http://luajit.org +.br +More about the Lua language can be found at: +.IR http://lua.org/docs.html +.PP +lua(1) diff --git a/Build/source/libs/luajit/LuaJIT-src/etc/luajit.pc b/Build/source/libs/luajit/LuaJIT-src/etc/luajit.pc new file mode 100644 index 00000000000..b9ccdf59ea0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/etc/luajit.pc @@ -0,0 +1,25 @@ +# Package information for LuaJIT to be used by pkg-config. +majver=2 +minver=1 +relver=0 +version=${majver}.${minver}.${relver}-beta1 +abiver=5.1 + +prefix=/usr/local +multilib=lib +exec_prefix=${prefix} +libdir=${exec_prefix}/${multilib} +libname=luajit-${abiver} +includedir=${prefix}/include/luajit-${majver}.${minver} + +INSTALL_LMOD=${prefix}/share/lua/${abiver} +INSTALL_CMOD=${prefix}/${multilib}/lua/${abiver} + +Name: LuaJIT +Description: Just-in-time compiler for Lua +URL: http://luajit.org +Version: ${version} +Requires: +Libs: -L${libdir} -l${libname} +Libs.private: -Wl,-E -lm -ldl +Cflags: -I${includedir} diff --git a/Build/source/libs/luajit/LuaJIT-src/src/Makefile b/Build/source/libs/luajit/LuaJIT-src/src/Makefile new file mode 100644 index 00000000000..7f86e98ec03 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/Makefile @@ -0,0 +1,697 @@ +############################################################################## +# LuaJIT Makefile. Requires GNU Make. +# +# Please read doc/install.html before changing any variables! +# +# Suitable for POSIX platforms (Linux, *BSD, OSX etc.). +# Also works with MinGW and Cygwin on Windows. +# Please check msvcbuild.bat for building with MSVC on Windows. +# +# Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +############################################################################## + +MAJVER= 2 +MINVER= 1 +RELVER= 0 +ABIVER= 5.1 +NODOTABIVER= 51 + +############################################################################## +############################# COMPILER OPTIONS ############################# +############################################################################## +# These options mainly affect the speed of the JIT compiler itself, not the +# speed of the JIT-compiled code. Turn any of the optional settings on by +# removing the '#' in front of them. Make sure you force a full recompile +# with "make clean", followed by "make" if you change any options. +# +# LuaJIT builds as a native 32 or 64 bit binary by default. +CC= gcc +# +# Use this if you want to force a 32 bit build on a 64 bit multilib OS. 
+#CC= gcc -m32 +# +# Since the assembler part does NOT maintain a frame pointer, it's pointless +# to slow down the C part by not omitting it. Debugging, tracebacks and +# unwinding are not affected -- the assembler part has frame unwind +# information and GCC emits it where needed (x64) or with -g (see CCDEBUG). +CCOPT= -O2 -fomit-frame-pointer +# Use this if you want to generate a smaller binary (but it's slower): +#CCOPT= -Os -fomit-frame-pointer +# Note: it's no longer recommended to use -O3 with GCC 4.x. +# The I-Cache bloat usually outweighs the benefits from aggressive inlining. +# +# Target-specific compiler options: +# +# x86/x64 only: For GCC 4.2 or higher and if you don't intend to distribute +# the binaries to a different machine you could also use: -march=native +# +CCOPT_x86= -march=i686 -msse -msse2 -mfpmath=sse +CCOPT_x64= +CCOPT_arm= +CCOPT_arm64= +CCOPT_ppc= +CCOPT_mips= +# +CCDEBUG= +# Uncomment the next line to generate debug information: +#CCDEBUG= -g +# +CCWARN= -Wall +# Uncomment the next line to enable more warnings: +#CCWARN+= -Wextra -Wdeclaration-after-statement -Wredundant-decls -Wshadow -Wpointer-arith +# +############################################################################## + +############################################################################## +################################ BUILD MODE ################################ +############################################################################## +# The default build mode is mixed mode on POSIX. On Windows this is the same +# as dynamic mode. +# +# Mixed mode creates a static + dynamic library and a statically linked luajit. +BUILDMODE= mixed +# +# Static mode creates a static library and a statically linked luajit. +#BUILDMODE= static +# +# Dynamic mode creates a dynamic library and a dynamically linked luajit. +# Note: this executable will only run when the library is installed! +#BUILDMODE= dynamic +# +############################################################################## + +############################################################################## +################################# FEATURES ################################# +############################################################################## +# Enable/disable these features as needed, but make sure you force a full +# recompile with "make clean", followed by "make". +XCFLAGS= +# +# Permanently disable the FFI extension to reduce the size of the LuaJIT +# executable. But please consider that the FFI library is compiled-in, +# but NOT loaded by default. It only allocates any memory, if you actually +# make use of it. +#XCFLAGS+= -DLUAJIT_DISABLE_FFI +# +# Features from Lua 5.2 that are unlikely to break existing code are +# enabled by default. Some other features that *might* break some existing +# code (e.g. __pairs or os.execute() return values) can be enabled here. +# Note: this does not provide full compatibility with Lua 5.2 at this time. +XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT +# +# Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. +#XCFLAGS+= -DLUAJIT_DISABLE_JIT +# +# Some architectures (e.g. PPC) can use either single-number (1) or +# dual-number (2) mode. Uncomment one of these lines to override the +# default mode. Please see LJ_ARCH_NUMMODE in lj_arch.h for details. 
+#XCFLAGS+= -DLUAJIT_NUMMODE=1 +#XCFLAGS+= -DLUAJIT_NUMMODE=2 +# +############################################################################## + +############################################################################## +############################ DEBUGGING SUPPORT ############################# +############################################################################## +# Enable these options as needed, but make sure you force a full recompile +# with "make clean", followed by "make". +# Note that most of these are NOT suitable for benchmarking or release mode! +# +# Use the system provided memory allocator (realloc) instead of the +# bundled memory allocator. This is slower, but sometimes helpful for +# debugging. This option cannot be enabled on x64, since realloc usually +# doesn't return addresses in the right address range. +# OTOH this option is mandatory for Valgrind's memcheck tool on x64 and +# the only way to get useful results from it for all other architectures. +#XCFLAGS+= -DLUAJIT_USE_SYSMALLOC +# +# This define is required to run LuaJIT under Valgrind. The Valgrind +# header files must be installed. You should enable debug information, too. +# Use --suppressions=lj.supp to avoid some false positives. +#XCFLAGS+= -DLUAJIT_USE_VALGRIND +# +# This is the client for the GDB JIT API. GDB 7.0 or higher is required +# to make use of it. See lj_gdbjit.c for details. Enabling this causes +# a non-negligible overhead, even when not running under GDB. +#XCFLAGS+= -DLUAJIT_USE_GDBJIT +# +# Turn on assertions for the Lua/C API to debug problems with lua_* calls. +# This is rather slow -- use only while developing C libraries/embeddings. +#XCFLAGS+= -DLUA_USE_APICHECK +# +# Turn on assertions for the whole LuaJIT VM. This significantly slows down +# everything. Use only if you suspect a problem with LuaJIT itself. +#XCFLAGS+= -DLUA_USE_ASSERT +# +############################################################################## +# You probably don't need to change anything below this line! +############################################################################## + +############################################################################## +# Flags and options for host and target. +############################################################################## + +# You can override the following variables at the make command line: +# CC HOST_CC STATIC_CC DYNAMIC_CC +# CFLAGS HOST_CFLAGS TARGET_CFLAGS +# LDFLAGS HOST_LDFLAGS TARGET_LDFLAGS TARGET_SHLDFLAGS +# LIBS HOST_LIBS TARGET_LIBS +# CROSS HOST_SYS TARGET_SYS TARGET_FLAGS +# +# Cross-compilation examples: +# make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows +# make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu- + +ASOPTIONS= $(CCOPT) $(CCWARN) $(XCFLAGS) $(CFLAGS) +CCOPTIONS= $(CCDEBUG) $(ASOPTIONS) +LDOPTIONS= $(CCDEBUG) $(LDFLAGS) + +HOST_CC= $(CC) +HOST_RM= rm -f +# If left blank, minilua is built and used. You can supply an installed +# copy of (plain) Lua 5.1 or 5.2, plus Lua BitOp. E.g. with: HOST_LUA=lua +HOST_LUA= + +HOST_XCFLAGS= -I. 
+HOST_XLDFLAGS= +HOST_XLIBS= +HOST_ACFLAGS= $(CCOPTIONS) $(HOST_XCFLAGS) $(TARGET_ARCH) $(HOST_CFLAGS) +HOST_ALDFLAGS= $(LDOPTIONS) $(HOST_XLDFLAGS) $(HOST_LDFLAGS) +HOST_ALIBS= $(HOST_XLIBS) $(LIBS) $(HOST_LIBS) + +STATIC_CC = $(CROSS)$(CC) +DYNAMIC_CC = $(CROSS)$(CC) -fPIC +TARGET_CC= $(STATIC_CC) +TARGET_STCC= $(STATIC_CC) +TARGET_DYNCC= $(DYNAMIC_CC) +TARGET_LD= $(CROSS)$(CC) +TARGET_AR= $(CROSS)ar rcus +TARGET_STRIP= $(CROSS)strip + +TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib) +TARGET_SONAME= libluajit-$(ABIVER).so.$(MAJVER) +TARGET_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).dylib +TARGET_DYLIBPATH= $(TARGET_LIBPATH)/$(TARGET_DYLIBNAME) +TARGET_DLLNAME= lua$(NODOTABIVER).dll +TARGET_XSHLDFLAGS= -shared -fPIC -Wl,-soname,$(TARGET_SONAME) +TARGET_DYNXLDOPTS= + +TARGET_LFSFLAGS= -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE +TARGET_XCFLAGS= $(TARGET_LFSFLAGS) -U_FORTIFY_SOURCE +TARGET_XLDFLAGS= +TARGET_XLIBS= -lm +TARGET_TCFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) +TARGET_ACFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) +TARGET_ASFLAGS= $(ASOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) +TARGET_ALDFLAGS= $(LDOPTIONS) $(TARGET_XLDFLAGS) $(TARGET_FLAGS) $(TARGET_LDFLAGS) +TARGET_ASHLDFLAGS= $(LDOPTIONS) $(TARGET_XSHLDFLAGS) $(TARGET_FLAGS) $(TARGET_SHLDFLAGS) +TARGET_ALIBS= $(TARGET_XLIBS) $(LIBS) $(TARGET_LIBS) + +TARGET_TESTARCH=$(shell $(TARGET_CC) $(TARGET_TCFLAGS) -E lj_arch.h -dM) +ifneq (,$(findstring LJ_TARGET_X64 ,$(TARGET_TESTARCH))) + TARGET_LJARCH= x64 +else +ifneq (,$(findstring LJ_TARGET_X86 ,$(TARGET_TESTARCH))) + TARGET_LJARCH= x86 +else +ifneq (,$(findstring LJ_TARGET_ARM ,$(TARGET_TESTARCH))) + TARGET_LJARCH= arm +else +ifneq (,$(findstring LJ_TARGET_ARM64 ,$(TARGET_TESTARCH))) + TARGET_LJARCH= arm64 +else +ifneq (,$(findstring LJ_TARGET_PPC ,$(TARGET_TESTARCH))) + ifneq (,$(findstring LJ_LE 1,$(TARGET_TESTARCH))) + TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_LE + else + TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_BE + endif + TARGET_LJARCH= ppc +else +ifneq (,$(findstring LJ_TARGET_MIPS ,$(TARGET_TESTARCH))) + ifneq (,$(findstring MIPSEL ,$(TARGET_TESTARCH))) + TARGET_ARCH= -D__MIPSEL__=1 + endif + TARGET_LJARCH= mips +else + $(error Unsupported target architecture) +endif +endif +endif +endif +endif +endif + +ifneq (,$(findstring LJ_TARGET_PS3 1,$(TARGET_TESTARCH))) + TARGET_SYS= PS3 + TARGET_ARCH+= -D__CELLOS_LV2__ + TARGET_XCFLAGS+= -DLUAJIT_USE_SYSMALLOC + TARGET_XLIBS+= -lpthread +endif + +TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH)) +TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH)) + +ifneq (,$(PREFIX)) +ifneq (/usr/local,$(PREFIX)) + TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\" + ifneq (/usr,$(PREFIX)) + TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) + endif +endif +endif +ifneq (,$(MULTILIB)) + TARGET_XCFLAGS+= -DLUA_MULTILIB=\"$(MULTILIB)\" +endif +ifneq (,$(LMULTILIB)) + TARGET_XCFLAGS+= -DLUA_LMULTILIB=\"$(LMULTILIB)\" +endif + +############################################################################## +# System detection. 
+############################################################################## + +ifeq (Windows,$(findstring Windows,$(OS))$(MSYSTEM)$(TERM)) + HOST_SYS= Windows + HOST_RM= del +else + HOST_SYS:= $(shell uname -s) + ifneq (,$(findstring MINGW,$(HOST_SYS))) + HOST_SYS= Windows + HOST_MSYS= mingw + endif + ifneq (,$(findstring CYGWIN,$(HOST_SYS))) + HOST_SYS= Windows + HOST_MSYS= cygwin + endif +endif + +TARGET_SYS?= $(HOST_SYS) +ifeq (Windows,$(TARGET_SYS)) + TARGET_STRIP+= --strip-unneeded + TARGET_XSHLDFLAGS= -shared + TARGET_DYNXLDOPTS= +else +ifeq (,$(shell $(TARGET_CC) -o /dev/null -c -x c /dev/null -fno-stack-protector 2>/dev/null || echo 1)) + TARGET_XCFLAGS+= -fno-stack-protector +endif +ifeq (Darwin,$(TARGET_SYS)) + ifeq (,$(MACOSX_DEPLOYMENT_TARGET)) + export MACOSX_DEPLOYMENT_TARGET=10.4 + endif + TARGET_STRIP+= -x + TARGET_AR+= 2>/dev/null + TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC + TARGET_DYNXLDOPTS= + TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER) + ifeq (x64,$(TARGET_LJARCH)) + TARGET_XLDFLAGS+= -pagezero_size 10000 -image_base 100000000 + TARGET_XSHLDFLAGS+= -image_base 7fff04c4a000 + endif +else +ifeq (iOS,$(TARGET_SYS)) + TARGET_STRIP+= -x + TARGET_AR+= 2>/dev/null + TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC + TARGET_DYNXLDOPTS= + TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER) + ifeq (arm64,$(TARGET_LJARCH)) + TARGET_XCFLAGS+= -fno-omit-frame-pointer + endif +else + ifneq (SunOS,$(TARGET_SYS)) + ifneq (PS3,$(TARGET_SYS)) + TARGET_XLDFLAGS+= -Wl,-E + endif + endif + ifeq (Linux,$(TARGET_SYS)) + TARGET_XLIBS+= -ldl + endif + ifeq (GNU/kFreeBSD,$(TARGET_SYS)) + TARGET_XLIBS+= -ldl + endif +endif +endif +endif + +ifneq ($(HOST_SYS),$(TARGET_SYS)) + ifeq (Windows,$(TARGET_SYS)) + HOST_XCFLAGS+= -malign-double -DLUAJIT_OS=LUAJIT_OS_WINDOWS + else + ifeq (Linux,$(TARGET_SYS)) + HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_LINUX + else + ifeq (Darwin,$(TARGET_SYS)) + HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX + else + ifeq (iOS,$(TARGET_SYS)) + HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX + else + HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OTHER + endif + endif + endif + endif +endif + +ifneq (,$(CCDEBUG)) + TARGET_STRIP= @: +endif + +############################################################################## +# Files and pathnames. 
+############################################################################## + +MINILUA_O= host/minilua.o +MINILUA_LIBS= -lm +MINILUA_T= host/minilua +MINILUA_X= $(MINILUA_T) + +ifeq (,$(HOST_LUA)) + HOST_LUA= $(MINILUA_X) + DASM_DEP= $(MINILUA_T) +endif + +DASM_DIR= ../dynasm +DASM= $(HOST_LUA) $(DASM_DIR)/dynasm.lua +DASM_XFLAGS= +DASM_AFLAGS= +DASM_ARCH= $(TARGET_LJARCH) + +ifneq (,$(findstring LJ_ARCH_BITS 64,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D P64 +endif +ifneq (,$(findstring LJ_HASJIT 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D JIT +endif +ifneq (,$(findstring LJ_HASFFI 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D FFI +endif +ifneq (,$(findstring LJ_DUALNUM 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D DUALNUM +endif +ifneq (,$(findstring LJ_ARCH_HASFPU 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D FPU + TARGET_ARCH+= -DLJ_ARCH_HASFPU=1 +else + TARGET_ARCH+= -DLJ_ARCH_HASFPU=0 +endif +ifeq (,$(findstring LJ_ABI_SOFTFP 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D HFABI + TARGET_ARCH+= -DLJ_ABI_SOFTFP=0 +else + TARGET_ARCH+= -DLJ_ABI_SOFTFP=1 +endif +ifneq (,$(findstring LJ_NO_UNWIND 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D NO_UNWIND + TARGET_ARCH+= -DLUAJIT_NO_UNWIND +endif +DASM_AFLAGS+= -D VER=$(subst LJ_ARCH_VERSION_,,$(filter LJ_ARCH_VERSION_%,$(subst LJ_ARCH_VERSION ,LJ_ARCH_VERSION_,$(TARGET_TESTARCH)))) +ifeq (Windows,$(TARGET_SYS)) + DASM_AFLAGS+= -D WIN +endif +ifeq (x64,$(TARGET_LJARCH)) + ifeq (,$(findstring LJ_FR2 1,$(TARGET_TESTARCH))) + DASM_ARCH= x86 + endif +else +ifeq (arm,$(TARGET_LJARCH)) + ifeq (iOS,$(TARGET_SYS)) + DASM_AFLAGS+= -D IOS + endif +else +ifeq (ppc,$(TARGET_LJARCH)) + ifneq (,$(findstring LJ_ARCH_SQRT 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D SQRT + endif + ifneq (,$(findstring LJ_ARCH_ROUND 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D ROUND + endif + ifneq (,$(findstring LJ_ARCH_PPC32ON64 1,$(TARGET_TESTARCH))) + DASM_AFLAGS+= -D GPR64 + endif + ifeq (PS3,$(TARGET_SYS)) + DASM_AFLAGS+= -D PPE -D TOC + endif + ifneq (,$(findstring LJ_ARCH_PPC64 ,$(TARGET_TESTARCH))) + DASM_ARCH= ppc64 + endif +endif +endif +endif + +DASM_FLAGS= $(DASM_XFLAGS) $(DASM_AFLAGS) +DASM_DASC= vm_$(DASM_ARCH).dasc + +BUILDVM_O= host/buildvm.o host/buildvm_asm.o host/buildvm_peobj.o \ + host/buildvm_lib.o host/buildvm_fold.o +BUILDVM_T= host/buildvm +BUILDVM_X= $(BUILDVM_T) + +HOST_O= $(MINILUA_O) $(BUILDVM_O) +HOST_T= $(MINILUA_T) $(BUILDVM_T) + +LJVM_S= lj_vm.S +LJVM_O= lj_vm.o +LJVM_BOUT= $(LJVM_S) +LJVM_MODE= elfasm + +LJLIB_O= lib_base.o lib_math.o lbitlib.o lib_bit.o lib_string.o lib_table.o \ + lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o +LJLIB_C= $(LJLIB_O:.o=.c) + +LJCORE_O= lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o lj_buf.o \ + lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \ + lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o lj_strscan.o \ + lj_strfmt.o lj_api.o lj_profile.o \ + lj_lex.o lj_parse.o lj_bcread.o lj_bcwrite.o lj_load.o \ + lj_ir.o lj_opt_mem.o lj_opt_fold.o lj_opt_narrow.o \ + lj_opt_dce.o lj_opt_loop.o lj_opt_split.o lj_opt_sink.o \ + lj_mcode.o lj_snap.o lj_record.o lj_crecord.o lj_ffrecord.o \ + lj_asm.o lj_trace.o lj_gdbjit.o \ + lj_ctype.o lj_cdata.o lj_cconv.o lj_ccall.o lj_ccallback.o \ + lj_carith.o lj_clib.o lj_cparse.o \ + lj_lib.o lj_alloc.o lib_aux.o \ + $(LJLIB_O) lib_init.o + +LJVMCORE_O= $(LJVM_O) $(LJCORE_O) +LJVMCORE_DYNO= $(LJVMCORE_O:.o=_dyn.o) + +LIB_VMDEF= jit/vmdef.lua +LIB_VMDEFP= $(LIB_VMDEF) + +LUAJIT_O= luajit.o +LUAJIT_A= libluajit.a +LUAJIT_SO= libluajit.so +LUAJIT_T= luajit + +ALL_T= $(LUAJIT_T) 
$(LUAJIT_A) $(LUAJIT_SO) $(HOST_T) +ALL_HDRGEN= lj_bcdef.h lj_ffdef.h lj_libdef.h lj_recdef.h lj_folddef.h \ + host/buildvm_arch.h +ALL_GEN= $(LJVM_S) $(ALL_HDRGEN) $(LIB_VMDEFP) +WIN_RM= *.obj *.lib *.exp *.dll *.exe *.manifest *.pdb *.ilk +ALL_RM= $(ALL_T) $(ALL_GEN) *.o host/*.o $(WIN_RM) + +############################################################################## +# Build mode handling. +############################################################################## + +# Mixed mode defaults. +TARGET_O= $(LUAJIT_A) +TARGET_T= $(LUAJIT_T) $(LUAJIT_SO) +TARGET_DEP= $(LIB_VMDEF) $(LUAJIT_SO) + +ifeq (Windows,$(TARGET_SYS)) + TARGET_DYNCC= $(STATIC_CC) + LJVM_MODE= peobj + LJVM_BOUT= $(LJVM_O) + LUAJIT_T= luajit.exe + ifeq (cygwin,$(HOST_MSYS)) + LUAJIT_SO= cyg$(TARGET_DLLNAME) + else + LUAJIT_SO= $(TARGET_DLLNAME) + endif + # Mixed mode is not supported on Windows. And static mode doesn't work well. + # C modules cannot be loaded, because they bind to lua51.dll. + ifneq (static,$(BUILDMODE)) + BUILDMODE= dynamic + TARGET_XCFLAGS+= -DLUA_BUILD_AS_DLL + endif +endif +ifeq (Darwin,$(TARGET_SYS)) + LJVM_MODE= machasm +endif +ifeq (iOS,$(TARGET_SYS)) + LJVM_MODE= machasm +endif +ifeq (SunOS,$(TARGET_SYS)) + BUILDMODE= static +endif +ifeq (PS3,$(TARGET_SYS)) + BUILDMODE= static +endif + +ifeq (Windows,$(HOST_SYS)) + MINILUA_T= host/minilua.exe + BUILDVM_T= host/buildvm.exe + ifeq (,$(HOST_MSYS)) + MINILUA_X= host\minilua + BUILDVM_X= host\buildvm + ALL_RM:= $(subst /,\,$(ALL_RM)) + endif +endif + +ifeq (static,$(BUILDMODE)) + TARGET_DYNCC= @: + TARGET_T= $(LUAJIT_T) + TARGET_DEP= $(LIB_VMDEF) +else +ifeq (dynamic,$(BUILDMODE)) + ifneq (Windows,$(TARGET_SYS)) + TARGET_CC= $(DYNAMIC_CC) + endif + TARGET_DYNCC= @: + LJVMCORE_DYNO= $(LJVMCORE_O) + TARGET_O= $(LUAJIT_SO) + TARGET_XLDFLAGS+= $(TARGET_DYNXLDOPTS) +else +ifeq (Darwin,$(TARGET_SYS)) + TARGET_DYNCC= @: + LJVMCORE_DYNO= $(LJVMCORE_O) +endif +ifeq (iOS,$(TARGET_SYS)) + TARGET_DYNCC= @: + LJVMCORE_DYNO= $(LJVMCORE_O) +endif +endif +endif + +Q= @ +E= @echo +#Q= +#E= @: + +############################################################################## +# Make targets. +############################################################################## + +default all: $(TARGET_T) + +amalg: + @grep "^[+|]" ljamalg.c + $(MAKE) all "LJCORE_O=ljamalg.o" + +clean: + $(HOST_RM) $(ALL_RM) + +libbc: + ./$(LUAJIT_T) host/genlibbc.lua -o host/buildvm_libbc.h $(LJLIB_C) + $(MAKE) all + +depend: + @for file in $(ALL_HDRGEN); do \ + test -f $$file || touch $$file; \ + done + @$(HOST_CC) $(HOST_ACFLAGS) -MM *.c host/*.c | \ + sed -e "s| [^ ]*/dasm_\S*\.h||g" \ + -e "s|^\([^l ]\)|host/\1|" \ + -e "s| lj_target_\S*\.h| lj_target_*.h|g" \ + -e "s| lj_emit_\S*\.h| lj_emit_*.h|g" \ + -e "s| lj_asm_\S*\.h| lj_asm_*.h|g" >Makefile.dep + @for file in $(ALL_HDRGEN); do \ + test -s $$file || $(HOST_RM) $$file; \ + done + +.PHONY: default all amalg clean libbc depend + +############################################################################## +# Rules for generated files. 
+############################################################################## + +$(MINILUA_T): $(MINILUA_O) + $(E) "HOSTLINK $@" + $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(MINILUA_O) $(MINILUA_LIBS) $(HOST_ALIBS) + +host/buildvm_arch.h: $(DASM_DASC) $(DASM_DEP) + $(E) "DYNASM $@" + $(Q)$(DASM) $(DASM_FLAGS) -o $@ $(DASM_DASC) + +host/buildvm.o: $(DASM_DIR)/dasm_*.h + +$(BUILDVM_T): $(BUILDVM_O) + $(E) "HOSTLINK $@" + $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(BUILDVM_O) $(HOST_ALIBS) + +$(LJVM_BOUT): $(BUILDVM_T) + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m $(LJVM_MODE) -o $@ + +lj_bcdef.h: $(BUILDVM_T) $(LJLIB_C) + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m bcdef -o $@ $(LJLIB_C) + +lj_ffdef.h: $(BUILDVM_T) $(LJLIB_C) + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m ffdef -o $@ $(LJLIB_C) + +lj_libdef.h: $(BUILDVM_T) $(LJLIB_C) + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m libdef -o $@ $(LJLIB_C) + +lj_recdef.h: $(BUILDVM_T) $(LJLIB_C) + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m recdef -o $@ $(LJLIB_C) + +$(LIB_VMDEF): $(BUILDVM_T) $(LJLIB_C) + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m vmdef -o $(LIB_VMDEFP) $(LJLIB_C) + +lj_folddef.h: $(BUILDVM_T) lj_opt_fold.c + $(E) "BUILDVM $@" + $(Q)$(BUILDVM_X) -m folddef -o $@ lj_opt_fold.c + +############################################################################## +# Object file rules. +############################################################################## + +%.o: %.c + $(E) "CC $@" + $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $< + $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $< + +%.o: %.S + $(E) "ASM $@" + $(Q)$(TARGET_DYNCC) $(TARGET_ASFLAGS) -c -o $(@:.o=_dyn.o) $< + $(Q)$(TARGET_CC) $(TARGET_ASFLAGS) -c -o $@ $< + +$(LUAJIT_O): + $(E) "CC $@" + $(Q)$(TARGET_STCC) $(TARGET_ACFLAGS) -c -o $@ $< + +$(HOST_O): %.o: %.c + $(E) "HOSTCC $@" + $(Q)$(HOST_CC) $(HOST_ACFLAGS) -c -o $@ $< + +include Makefile.dep + +############################################################################## +# Target file rules. +############################################################################## + +$(LUAJIT_A): $(LJVMCORE_O) + $(E) "AR $@" + $(Q)$(TARGET_AR) $@ $(LJVMCORE_O) + +# The dependency on _O, but linking with _DYNO is intentional. 
+$(LUAJIT_SO): $(LJVMCORE_O) + $(E) "DYNLINK $@" + $(Q)$(TARGET_LD) $(TARGET_ASHLDFLAGS) -o $@ $(LJVMCORE_DYNO) $(TARGET_ALIBS) + $(Q)$(TARGET_STRIP) $@ + +$(LUAJIT_T): $(TARGET_O) $(LUAJIT_O) $(TARGET_DEP) + $(E) "LINK $@" + $(Q)$(TARGET_LD) $(TARGET_ALDFLAGS) -o $@ $(LUAJIT_O) $(TARGET_O) $(TARGET_ALIBS) + $(Q)$(TARGET_STRIP) $@ + $(E) "OK Successfully built LuaJIT" + +############################################################################## diff --git a/Build/source/libs/luajit/LuaJIT-src/src/Makefile.dep b/Build/source/libs/luajit/LuaJIT-src/src/Makefile.dep new file mode 100644 index 00000000000..34cb53be2ed --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/Makefile.dep @@ -0,0 +1,245 @@ +lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \ + lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \ + lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h lj_alloc.h +lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ + lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \ + lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \ + lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \ + lj_strfmt.h lj_lib.h lj_libdef.h +lbitlib.o: lbitlib.c lua.h luaconf.h lauxlib.h lualib.h +lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ + lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_strscan.h \ + lj_strfmt.h lj_ctype.h lj_cdata.h lj_cconv.h lj_carith.h lj_ff.h \ + lj_ffdef.h lj_lib.h lj_libdef.h +lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ + lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \ + lj_libdef.h +lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \ + lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \ + lj_ccallback.h lj_clib.h lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h \ + lj_libdef.h +lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h +lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_state.h \ + lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h +lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \ + lj_state.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \ + lj_target.h lj_target_*.h lj_trace.h lj_dispatch.h lj_traceerr.h \ + lj_vm.h lj_vmevent.h lj_lib.h luajit.h lj_libdef.h +lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ + lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_libdef.h +lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_lib.h \ + lj_libdef.h +lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ + lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h +lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ + lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \ + lj_tab.h lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h \ + lj_char.h lj_strfmt.h lj_lib.h lj_libdef.h +lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ + lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \ + lj_tab.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h +lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h 
+lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \ + lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \ + lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h lj_strfmt.h +lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \ + lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \ + lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \ + lj_asm_*.h +lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \ + lj_bcdef.h +lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_bc.h \ + lj_ctype.h lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h \ + lj_strfmt.h +lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_buf.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h \ + lj_ir.h lj_strfmt.h lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h +lj_buf.o: lj_buf.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_strfmt.h +lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ir.h lj_ctype.h \ + lj_cconv.h lj_cdata.h lj_carith.h lj_strscan.h +lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h \ + lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \ + lj_traceerr.h +lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \ + lj_bc.h lj_ctype.h lj_cconv.h lj_ccall.h lj_ccallback.h lj_target.h \ + lj_target_*.h lj_mcode.h lj_jit.h lj_ir.h lj_trace.h lj_dispatch.h \ + lj_traceerr.h lj_vm.h +lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \ + lj_ccallback.h +lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h +lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h +lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \ + lj_cdata.h lj_clib.h lj_strfmt.h +lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_ctype.h lj_cparse.h \ + lj_frame.h lj_bc.h lj_vm.h lj_char.h lj_strscan.h lj_strfmt.h +lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_err.h lj_errmsg.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_gc.h \ + lj_cdata.h lj_cparse.h lj_cconv.h lj_carith.h lj_clib.h lj_ccall.h \ + lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ + lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \ + lj_crecord.h lj_strfmt.h +lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_strfmt.h lj_ctype.h \ + lj_ccallback.h +lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_gc.h lj_str.h lj_tab.h \ + lj_state.h lj_frame.h lj_bc.h lj_strfmt.h lj_jit.h lj_ir.h +lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h 
\ + lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_func.h lj_tab.h \ + lj_meta.h lj_debug.h lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h \ + lj_strfmt.h lj_jit.h lj_ir.h lj_ccallback.h lj_ctype.h lj_trace.h \ + lj_dispatch.h lj_traceerr.h lj_profile.h lj_vm.h luajit.h +lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \ + lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \ + lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \ + lj_traceerr.h lj_vm.h lj_strfmt.h +lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \ + lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ + lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \ + lj_vm.h lj_strscan.h lj_strfmt.h lj_recdef.h +lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \ + lj_traceerr.h lj_vm.h +lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h lj_udata.h \ + lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h \ + lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h +lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_buf.h \ + lj_str.h lj_strfmt.h lj_jit.h lj_ir.h lj_dispatch.h +lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_buf.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \ + lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h \ + lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lib.h +lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h \ + lualib.h lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h \ + lj_strfmt.h +lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \ + lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lex.h \ + lj_bcdump.h lj_lib.h +lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_func.h \ + lj_frame.h lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h +lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h \ + lj_dispatch.h lj_bc.h lj_traceerr.h lj_vm.h +lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_meta.h lj_frame.h \ + lj_bc.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lib.h +lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h +lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_ir.h lj_jit.h lj_iropt.h +lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_buf.h lj_gc.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h \ + lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h \ + lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_folddef.h +lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h lj_jit.h \ + lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h \ + lj_vm.h +lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h 
luaconf.h lj_def.h lj_arch.h \ + lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_ircall.h +lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \ + lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \ + lj_traceerr.h lj_vm.h lj_strscan.h +lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h +lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \ + lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h \ + lj_jit.h lj_ircall.h lj_iropt.h lj_vm.h +lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_str.h lj_tab.h \ + lj_func.h lj_state.h lj_bc.h lj_ctype.h lj_strfmt.h lj_lex.h lj_parse.h \ + lj_vm.h lj_vmevent.h +lj_profile.o: lj_profile.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_buf.h lj_gc.h lj_str.h lj_frame.h lj_bc.h lj_debug.h lj_dispatch.h \ + lj_jit.h lj_ir.h lj_trace.h lj_traceerr.h lj_profile.h luajit.h +lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \ + lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_debug.h lj_ir.h lj_jit.h \ + lj_ircall.h lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h \ + lj_record.h lj_ffrecord.h lj_snap.h lj_vm.h +lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \ + lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \ + lj_target_*.h lj_ctype.h lj_cdata.h +lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h \ + lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h \ + lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_alloc.h luajit.h +lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_str.h lj_char.h +lj_strfmt.o: lj_strfmt.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_buf.h lj_gc.h lj_str.h lj_state.h lj_char.h lj_strfmt.h +lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_char.h lj_strscan.h +lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ + lj_err.h lj_errmsg.h lj_tab.h +lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \ + lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \ + lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \ + lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h +lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_gc.h lj_udata.h +lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_str.h lj_tab.h lj_state.h lj_dispatch.h lj_bc.h lj_jit.h lj_ir.h \ + lj_vm.h lj_vmevent.h +lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ + lj_ir.h lj_vm.h +ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \ + lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h \ + lj_func.h lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h \ + lj_cdata.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h \ + lj_vm.h lj_err.c lj_debug.h lj_ff.h lj_ffdef.h lj_strfmt.h lj_char.c \ + lj_char.h lj_bc.c lj_bcdef.h lj_obj.c lj_buf.c lj_str.c lj_tab.c \ + lj_func.c lj_udata.c lj_meta.c lj_strscan.h lj_lib.h lj_debug.c \ + lj_state.c lj_lex.h 
lj_alloc.h luajit.h lj_dispatch.c lj_ccallback.h \ + lj_profile.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c \ + lj_strfmt.c lj_api.c lj_profile.c lj_lex.c lualib.h lj_parse.h \ + lj_parse.c lj_bcread.c lj_bcdump.h lj_bcwrite.c lj_load.c lj_ctype.c \ + lj_cdata.c lj_cconv.h lj_cconv.c lj_ccall.c lj_ccall.h lj_ccallback.c \ + lj_target.h lj_target_*.h lj_mcode.h lj_carith.c lj_carith.h lj_clib.c \ + lj_clib.h lj_cparse.c lj_cparse.h lj_lib.c lj_ir.c lj_ircall.h \ + lj_iropt.h lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c \ + lj_opt_dce.c lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c \ + lj_mcode.c lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \ + lj_crecord.h lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h lj_emit_*.h \ + lj_asm_*.h lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c \ + lib_base.c lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c \ + lib_os.c lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c \ + lib_init.c +luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h +host/buildvm.o: host/buildvm.c host/buildvm.h lj_def.h lua.h luaconf.h \ + lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_gc.h lj_obj.h lj_bc.h lj_ir.h \ + lj_ircall.h lj_ir.h lj_jit.h lj_frame.h lj_bc.h lj_dispatch.h lj_ctype.h \ + lj_gc.h lj_ccall.h lj_ctype.h luajit.h \ + host/buildvm_arch.h lj_traceerr.h +host/buildvm_asm.o: host/buildvm_asm.c host/buildvm.h lj_def.h lua.h luaconf.h \ + lj_arch.h lj_bc.h lj_def.h lj_arch.h +host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \ + luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h +host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \ + lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_bc.h lj_lib.h lj_obj.h \ + host/buildvm_libbc.h +host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \ + luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h +host/minilua.o: host/minilua.c diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/README b/Build/source/libs/luajit/LuaJIT-src/src/host/README new file mode 100644 index 00000000000..78f51a57051 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/README @@ -0,0 +1,4 @@ +The files in this directory are only used during the build process of LuaJIT. +For cross-compilation, they must be executed on the host, not on the target. + +These files should NOT be installed! diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.c b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.c new file mode 100644 index 00000000000..8584e614d09 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.c @@ -0,0 +1,518 @@ +/* +** LuaJIT VM builder. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** This is a tool to build the hand-tuned assembler code required for +** LuaJIT's bytecode interpreter. It supports a variety of output formats +** to feed different toolchains (see usage() below). +** +** This tool is not particularly optimized because it's only used while +** _building_ LuaJIT. There's no point in distributing or installing it. +** Only the object code generated by this tool is linked into LuaJIT. +** +** Caveat: some memory is not free'd, error handling is lazy. +** It's a one-shot tool -- any effort fixing this would be wasted. 
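+**
+** The src/ Makefile runs it once per generated artifact, e.g.
+**   buildvm -m $(LJVM_MODE) -o $(LJVM_BOUT)      for the VM code, and
+**   buildvm -m bcdef -o lj_bcdef.h $(LJLIB_C)    for the generated headers
+** (likewise ffdef, libdef, recdef and vmdef; folddef reads lj_opt_fold.c).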
+*/ + +#include "buildvm.h" +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_ircall.h" +#include "lj_frame.h" +#include "lj_dispatch.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_ccall.h" +#endif +#include "luajit.h" + +#if defined(_WIN32) +#include +#include +#endif + +/* ------------------------------------------------------------------------ */ + +/* DynASM glue definitions. */ +#define Dst ctx +#define Dst_DECL BuildCtx *ctx +#define Dst_REF (ctx->D) +#define DASM_CHECKS 1 + +#include "../dynasm/dasm_proto.h" + +/* Glue macros for DynASM. */ +static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type); + +#define DASM_EXTERN(ctx, addr, idx, type) \ + collect_reloc(ctx, addr, idx, type) + +/* ------------------------------------------------------------------------ */ + +/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */ +#define DASM_ALIGNED_WRITES 1 + +/* Embed architecture-specific DynASM encoder. */ +#if LJ_TARGET_X86ORX64 +#include "../dynasm/dasm_x86.h" +#elif LJ_TARGET_ARM +#include "../dynasm/dasm_arm.h" +#elif LJ_TARGET_ARM64 +#include "../dynasm/dasm_arm64.h" +#elif LJ_TARGET_PPC +#include "../dynasm/dasm_ppc.h" +#elif LJ_TARGET_MIPS +#include "../dynasm/dasm_mips.h" +#else +#error "No support for this architecture (yet)" +#endif + +/* Embed generated architecture-specific backend. */ +#include "buildvm_arch.h" + +/* ------------------------------------------------------------------------ */ + +void owrite(BuildCtx *ctx, const void *ptr, size_t sz) +{ + if (fwrite(ptr, 1, sz, ctx->fp) != sz) { + fprintf(stderr, "Error: cannot write to output file: %s\n", + strerror(errno)); + exit(1); + } +} + +/* ------------------------------------------------------------------------ */ + +/* Emit code as raw bytes. Only used for DynASM debugging. */ +static void emit_raw(BuildCtx *ctx) +{ + owrite(ctx, ctx->code, ctx->codesz); +} + +/* -- Build machine code -------------------------------------------------- */ + +static const char *sym_decorate(BuildCtx *ctx, + const char *prefix, const char *suffix) +{ + char name[256]; + char *p; +#if LJ_64 + const char *symprefix = ctx->mode == BUILD_machasm ? "_" : ""; +#elif LJ_TARGET_XBOX360 + const char *symprefix = ""; +#else + const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : ""; +#endif + sprintf(name, "%s%s%s", symprefix, prefix, suffix); + p = strchr(name, '@'); + if (p) { +#if LJ_TARGET_X86ORX64 + if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj)) + name[0] = '@'; + else + *p = '\0'; +#elif LJ_TARGET_PPC && !LJ_TARGET_OSX && !LJ_TARGET_CONSOLE + /* Keep @plt etc. */ +#else + *p = '\0'; +#endif + } + p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */ + strcpy(p, name); + return p; +} + +#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1) + +static int relocmap[NRELOCSYM]; + +/* Collect external relocations. 
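+** Called through the DASM_EXTERN hook during encoding: it records the offset
+** and relocation type in ctx->reloc[] and interns each referenced external
+** symbol once (via relocmap[] and sym_decorate()) so the emitters can refer
+** to it by index.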
*/ +static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type) +{ + if (ctx->nreloc >= BUILD_MAX_RELOC) { + fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n"); + exit(1); + } + if (relocmap[idx] < 0) { + relocmap[idx] = ctx->nrelocsym; + ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]); + ctx->nrelocsym++; + } + ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code); + ctx->reloc[ctx->nreloc].sym = relocmap[idx]; + ctx->reloc[ctx->nreloc].type = type; + ctx->nreloc++; +#if LJ_TARGET_XBOX360 + return (int)(ctx->code - addr) + 4; /* Encode symbol offset of .text. */ +#else + return 0; /* Encode symbol offset of 0. */ +#endif +} + +/* Naive insertion sort. Performance doesn't matter here. */ +static void sym_insert(BuildCtx *ctx, int32_t ofs, + const char *prefix, const char *suffix) +{ + ptrdiff_t i = ctx->nsym++; + while (i > 0) { + if (ctx->sym[i-1].ofs <= ofs) + break; + ctx->sym[i] = ctx->sym[i-1]; + i--; + } + ctx->sym[i].ofs = ofs; + ctx->sym[i].name = sym_decorate(ctx, prefix, suffix); +} + +/* Build the machine code. */ +static int build_code(BuildCtx *ctx) +{ + int status; + int i; + + /* Initialize DynASM structures. */ + ctx->nglob = GLOB__MAX; + ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *)); + memset(ctx->glob, 0, ctx->nglob*sizeof(void *)); + ctx->nreloc = 0; + + ctx->globnames = globnames; + ctx->extnames = extnames; + ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *)); + ctx->nrelocsym = 0; + for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1; + + ctx->dasm_ident = DASM_IDENT; + ctx->dasm_arch = DASM_ARCH; + + dasm_init(Dst, DASM_MAXSECTION); + dasm_setupglobal(Dst, ctx->glob, ctx->nglob); + dasm_setup(Dst, build_actionlist); + + /* Call arch-specific backend to emit the code. */ + ctx->npc = build_backend(ctx); + + /* Finalize the code. */ + (void)dasm_checkstep(Dst, -1); + if ((status = dasm_link(Dst, &ctx->codesz))) return status; + ctx->code = (uint8_t *)malloc(ctx->codesz); + if ((status = dasm_encode(Dst, (void *)ctx->code))) return status; + + /* Allocate symbol table and bytecode offsets. */ + ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin"); + ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym)); + ctx->nsym = 0; + ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t)); + + /* Collect the opcodes (PC labels). */ + for (i = 0; i < ctx->npc; i++) { + int32_t ofs = dasm_getpclabel(Dst, i); + if (ofs < 0) return 0x22000000|i; + ctx->bc_ofs[i] = ofs; + if ((LJ_HASJIT || + !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP || + i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) && + (LJ_HASFFI || i != BC_KCDATA)) + sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]); + } + + /* Collect the globals (named labels). */ + for (i = 0; i < ctx->nglob; i++) { + const char *gl = globnames[i]; + int len = (int)strlen(gl); + if (!ctx->glob[i]) { + fprintf(stderr, "Error: undefined global %s\n", gl); + exit(2); + } + /* Skip the _Z symbols. */ + if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z')) + sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code), + LABEL_PREFIX, globnames[i]); + } + + /* Close the address range. 
*/ + sym_insert(ctx, (int32_t)ctx->codesz, "", ""); + ctx->nsym--; + + dasm_free(Dst); + + return 0; +} + +/* -- Generate VM enums --------------------------------------------------- */ + +const char *const bc_names[] = { +#define BCNAME(name, ma, mb, mc, mt) #name, +BCDEF(BCNAME) +#undef BCNAME + NULL +}; + +const char *const ir_names[] = { +#define IRNAME(name, m, m1, m2) #name, +IRDEF(IRNAME) +#undef IRNAME + NULL +}; + +const char *const irt_names[] = { +#define IRTNAME(name, size) #name, +IRTDEF(IRTNAME) +#undef IRTNAME + NULL +}; + +const char *const irfpm_names[] = { +#define FPMNAME(name) #name, +IRFPMDEF(FPMNAME) +#undef FPMNAME + NULL +}; + +const char *const irfield_names[] = { +#define FLNAME(name, ofs) #name, +IRFLDEF(FLNAME) +#undef FLNAME + NULL +}; + +const char *const ircall_names[] = { +#define IRCALLNAME(cond, name, nargs, kind, type, flags) #name, +IRCALLDEF(IRCALLNAME) +#undef IRCALLNAME + NULL +}; + +static const char *const trace_errors[] = { +#define TREDEF(name, msg) msg, +#include "lj_traceerr.h" + NULL +}; + +static const char *lower(char *buf, const char *s) +{ + char *p = buf; + while (*s) { + *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s; + s++; + } + *p = '\0'; + return buf; +} + +/* Emit C source code for bytecode-related definitions. */ +static void emit_bcdef(BuildCtx *ctx) +{ + int i; + fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); + fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n"); + for (i = 0; i < ctx->npc; i++) { + if (i != 0) + fprintf(ctx->fp, ",\n"); + fprintf(ctx->fp, "%d", ctx->bc_ofs[i]); + } +} + +/* Emit VM definitions as Lua code for debug modules. */ +static void emit_vmdef(BuildCtx *ctx) +{ + char buf[80]; + int i; + fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n"); + fprintf(ctx->fp, "return {\n\n"); + + fprintf(ctx->fp, "bcnames = \""); + for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]); + fprintf(ctx->fp, "\",\n\n"); + + fprintf(ctx->fp, "irnames = \""); + for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]); + fprintf(ctx->fp, "\",\n\n"); + + fprintf(ctx->fp, "irfpm = { [0]="); + for (i = 0; irfpm_names[i]; i++) + fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i])); + fprintf(ctx->fp, "},\n\n"); + + fprintf(ctx->fp, "irfield = { [0]="); + for (i = 0; irfield_names[i]; i++) { + char *p; + lower(buf, irfield_names[i]); + p = strchr(buf, '_'); + if (p) *p = '.'; + fprintf(ctx->fp, "\"%s\", ", buf); + } + fprintf(ctx->fp, "},\n\n"); + + fprintf(ctx->fp, "ircall = {\n[0]="); + for (i = 0; ircall_names[i]; i++) + fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]); + fprintf(ctx->fp, "},\n\n"); + + fprintf(ctx->fp, "traceerr = {\n[0]="); + for (i = 0; trace_errors[i]; i++) + fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]); + fprintf(ctx->fp, "},\n\n"); +} + +/* -- Argument parsing ---------------------------------------------------- */ + +/* Build mode names. */ +static const char *const modenames[] = { +#define BUILDNAME(name) #name, +BUILDDEF(BUILDNAME) +#undef BUILDNAME + NULL +}; + +/* Print usage information and exit. 
*/ +static void usage(void) +{ + int i; + fprintf(stderr, LUAJIT_VERSION " VM builder.\n"); + fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n"); + fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n"); + fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n"); + fprintf(stderr, "Available modes:\n"); + for (i = 0; i < BUILD__MAX; i++) + fprintf(stderr, " %s\n", modenames[i]); + exit(1); +} + +/* Parse the output mode name. */ +static BuildMode parsemode(const char *mode) +{ + int i; + for (i = 0; modenames[i]; i++) + if (!strcmp(mode, modenames[i])) + return (BuildMode)i; + usage(); + return (BuildMode)-1; +} + +/* Parse arguments. */ +static void parseargs(BuildCtx *ctx, char **argv) +{ + const char *a; + int i; + ctx->mode = (BuildMode)-1; + ctx->outname = "-"; + for (i = 1; (a = argv[i]) != NULL; i++) { + if (a[0] != '-') + break; + switch (a[1]) { + case '-': + if (a[2]) goto err; + i++; + goto ok; + case '\0': + goto ok; + case 'm': + i++; + if (a[2] || argv[i] == NULL) goto err; + ctx->mode = parsemode(argv[i]); + break; + case 'o': + i++; + if (a[2] || argv[i] == NULL) goto err; + ctx->outname = argv[i]; + break; + default: err: + usage(); + break; + } + } +ok: + ctx->args = argv+i; + if (ctx->mode == (BuildMode)-1) goto err; +} + +int main(int argc, char **argv) +{ + BuildCtx ctx_; + BuildCtx *ctx = &ctx_; + int status, binmode; + + if (sizeof(void *) != 4*LJ_32+8*LJ_64) { + fprintf(stderr,"Error: pointer size mismatch in cross-build.\n"); + fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=...\n\n"); + return 1; + } + + UNUSED(argc); + parseargs(ctx, argv); + + if ((status = build_code(ctx))) { + fprintf(stderr,"Error: DASM error %08x\n", status); + return 1; + } + + switch (ctx->mode) { + case BUILD_peobj: + case BUILD_raw: + binmode = 1; + break; + default: + binmode = 0; + break; + } + + if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') { + ctx->fp = stdout; +#if defined(_WIN32) + if (binmode) + _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */ +#endif + } else if (!(ctx->fp = fopen(ctx->outname, binmode ? "wb" : "w"))) { + fprintf(stderr, "Error: cannot open output file '%s': %s\n", + ctx->outname, strerror(errno)); + exit(1); + } + + switch (ctx->mode) { + case BUILD_elfasm: + case BUILD_coffasm: + case BUILD_machasm: + emit_asm(ctx); + emit_asm_debug(ctx); + break; + case BUILD_peobj: + emit_peobj(ctx); + break; + case BUILD_raw: + emit_raw(ctx); + break; + case BUILD_bcdef: + emit_bcdef(ctx); + emit_lib(ctx); + break; + case BUILD_vmdef: + emit_vmdef(ctx); + emit_lib(ctx); + fprintf(ctx->fp, "}\n\n"); + break; + case BUILD_ffdef: + case BUILD_libdef: + case BUILD_recdef: + emit_lib(ctx); + break; + case BUILD_folddef: + emit_fold(ctx); + break; + default: + break; + } + + fflush(ctx->fp); + if (ferror(ctx->fp)) { + fprintf(stderr, "Error: cannot write to output file: %s\n", + strerror(errno)); + exit(1); + } + fclose(ctx->fp); + + return 0; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.h b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.h new file mode 100644 index 00000000000..5588555307e --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm.h @@ -0,0 +1,105 @@ +/* +** LuaJIT VM builder. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _BUILDVM_H +#define _BUILDVM_H + +#include +#include +#include +#include +#include + +#include "lj_def.h" +#include "lj_arch.h" + +/* Hardcoded limits. Increase as needed. */ +#define BUILD_MAX_RELOC 200 /* Max. 
number of relocations. */ +#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */ + +/* Prefix for scanned library definitions. */ +#define LIBDEF_PREFIX "LJLIB_" + +/* Prefix for scanned fold definitions. */ +#define FOLDDEF_PREFIX "LJFOLD" + +/* Prefixes for generated labels. */ +#define LABEL_PREFIX "lj_" +#define LABEL_PREFIX_BC LABEL_PREFIX "BC_" +#define LABEL_PREFIX_FF LABEL_PREFIX "ff_" +#define LABEL_PREFIX_CF LABEL_PREFIX "cf_" +#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_" +#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_" +#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_" + +/* Forward declaration. */ +struct dasm_State; + +/* Build modes. */ +#define BUILDDEF(_) \ + _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \ + _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \ + _(folddef) + +typedef enum { +#define BUILDENUM(name) BUILD_##name, +BUILDDEF(BUILDENUM) +#undef BUILDENUM + BUILD__MAX +} BuildMode; + +/* Code relocation. */ +typedef struct BuildReloc { + int32_t ofs; + int sym; + int type; +} BuildReloc; + +typedef struct BuildSym { + const char *name; + int32_t ofs; +} BuildSym; + +/* Build context structure. */ +typedef struct BuildCtx { + /* DynASM state pointer. Should be first member. */ + struct dasm_State *D; + /* Parsed command line. */ + BuildMode mode; + FILE *fp; + const char *outname; + char **args; + /* Code and symbols generated by DynASM. */ + uint8_t *code; + size_t codesz; + int npc, nglob, nsym, nreloc, nrelocsym; + void **glob; + BuildSym *sym; + const char **relocsym; + int32_t *bc_ofs; + const char *beginsym; + /* Strings generated by DynASM. */ + const char *const *globnames; + const char *const *extnames; + const char *dasm_ident; + const char *dasm_arch; + /* Relocations. */ + BuildReloc reloc[BUILD_MAX_RELOC]; +} BuildCtx; + +extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz); +extern void emit_asm(BuildCtx *ctx); +extern void emit_peobj(BuildCtx *ctx); +extern void emit_lib(BuildCtx *ctx); +extern void emit_fold(BuildCtx *ctx); + +extern const char *const bc_names[]; +extern const char *const ir_names[]; +extern const char *const irt_names[]; +extern const char *const irfpm_names[]; +extern const char *const irfield_names[]; +extern const char *const ircall_names[]; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_asm.c b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_asm.c new file mode 100644 index 00000000000..9b7ae53a26a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_asm.c @@ -0,0 +1,345 @@ +/* +** LuaJIT VM builder: Assembler source code emitter. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "buildvm.h" +#include "lj_bc.h" + +/* ------------------------------------------------------------------------ */ + +#if LJ_TARGET_X86ORX64 +/* Emit bytes piecewise as assembler text. 
*/ +static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n) +{ + int i; + for (i = 0; i < n; i++) { + if ((i & 15) == 0) + fprintf(ctx->fp, "\t.byte %d", p[i]); + else + fprintf(ctx->fp, ",%d", p[i]); + if ((i & 15) == 15) putc('\n', ctx->fp); + } + if ((n & 15) != 0) putc('\n', ctx->fp); +} + +/* Emit relocation */ +static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym) +{ + switch (ctx->mode) { + case BUILD_elfasm: + if (type) + fprintf(ctx->fp, "\t.long %s-.-4\n", sym); + else + fprintf(ctx->fp, "\t.long %s\n", sym); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym); + if (type) + fprintf(ctx->fp, "\t.long %s-.-4\n", sym); + else + fprintf(ctx->fp, "\t.long %s\n", sym); + break; + default: /* BUILD_machasm for relative relocations handled below. */ + fprintf(ctx->fp, "\t.long %s\n", sym); + break; + } +} + +static const char *const jccnames[] = { + "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja", + "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg" +}; + +/* Emit x86/x64 text relocations. */ +static void emit_asm_reloc_text(BuildCtx *ctx, uint8_t *cp, int n, + const char *sym) +{ + const char *opname = NULL; + if (--n < 0) goto err; + if (cp[n] == 0xe8) { + opname = "call"; + } else if (cp[n] == 0xe9) { + opname = "jmp"; + } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) { + opname = jccnames[cp[n]-0x80]; + n--; + } else { +err: + fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n", + sym); + exit(1); + } + emit_asm_bytes(ctx, cp, n); + if (strncmp(sym+(*sym == '_'), LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) { + /* Various fixups for external symbols outside of our binary. */ + if (ctx->mode == BUILD_elfasm) { + if (LJ_32) + fprintf(ctx->fp, "#if __PIC__\n\t%s lj_wrap_%s\n#else\n", opname, sym); + fprintf(ctx->fp, "\t%s %s@PLT\n", opname, sym); + if (LJ_32) + fprintf(ctx->fp, "#endif\n"); + return; + } else if (LJ_32 && ctx->mode == BUILD_machasm) { + fprintf(ctx->fp, "\t%s L%s$stub\n", opname, sym); + return; + } + } + fprintf(ctx->fp, "\t%s %s\n", opname, sym); +} +#else +/* Emit words piecewise as assembler text. */ +static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n) +{ + int i; + for (i = 0; i < n; i += 4) { + if ((i & 15) == 0) + fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i)); + else + fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i)); + if ((i & 15) == 12) putc('\n', ctx->fp); + } + if ((n & 15) != 0) putc('\n', ctx->fp); +} + +/* Emit relocation as part of an instruction. */ +static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n, + const char *sym) +{ + uint32_t ins; + emit_asm_words(ctx, p, n-4); + ins = *(uint32_t *)(p+n-4); +#if LJ_TARGET_ARM + if ((ins & 0xff000000u) == 0xfa000000u) { + fprintf(ctx->fp, "\tblx %s\n", sym); + } else if ((ins & 0x0e000000u) == 0x0a000000u) { + fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b", + &"eqnecsccmiplvsvchilsgeltgtle"[2*(ins >> 28)], sym); + } else { + fprintf(stderr, + "Error: unsupported opcode %08x for %s symbol relocation.\n", + ins, sym); + exit(1); + } +#elif LJ_TARGET_ARM64 + if ((ins >> 26) == 0x25u) { + fprintf(ctx->fp, "\tbl %s\n", sym); + } else { + fprintf(stderr, + "Error: unsupported opcode %08x for %s symbol relocation.\n", + ins, sym); + exit(1); + } +#elif LJ_TARGET_PPC +#if LJ_TARGET_PS3 +#define TOCPREFIX "." +#else +#define TOCPREFIX "" +#endif + if ((ins >> 26) == 16) { + fprintf(ctx->fp, "\t%s %d, %d, " TOCPREFIX "%s\n", + (ins & 1) ? 
"bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym); + } else if ((ins >> 26) == 18) { +#if LJ_ARCH_PPC64 + const char *suffix = strchr(sym, '@'); + if (suffix && suffix[1] == 'h') { + fprintf(ctx->fp, "\taddis 11, 2, %s\n", sym); + } else if (suffix && suffix[1] == 'l') { + fprintf(ctx->fp, "\tld 12, %s\n", sym); + } else +#endif + fprintf(ctx->fp, "\t%s " TOCPREFIX "%s\n", (ins & 1) ? "bl" : "b", sym); + } else { + fprintf(stderr, + "Error: unsupported opcode %08x for %s symbol relocation.\n", + ins, sym); + exit(1); + } +#elif LJ_TARGET_MIPS + fprintf(stderr, + "Error: unsupported opcode %08x for %s symbol relocation.\n", + ins, sym); + exit(1); +#else +#error "missing relocation support for this architecture" +#endif +} +#endif + +#if LJ_TARGET_ARM +#define ELFASM_PX "%%" +#else +#define ELFASM_PX "@" +#endif + +/* Emit an assembler label. */ +static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc) +{ + switch (ctx->mode) { + case BUILD_elfasm: +#if LJ_TARGET_PS3 + if (!strncmp(name, "lj_vm_", 6) && + strcmp(name, ctx->beginsym) && + !strstr(name, "hook")) { + fprintf(ctx->fp, + "\n\t.globl %s\n" + "\t.section \".opd\",\"aw\"\n" + "%s:\n" + "\t.long .%s,.TOC.@tocbase32\n" + "\t.size %s,8\n" + "\t.previous\n" + "\t.globl .%s\n" + "\t.hidden .%s\n" + "\t.type .%s, " ELFASM_PX "function\n" + "\t.size .%s, %d\n" + ".%s:\n", + name, name, name, name, name, name, name, name, size, name); + break; + } +#endif + fprintf(ctx->fp, + "\n\t.globl %s\n" + "\t.hidden %s\n" + "\t.type %s, " ELFASM_PX "%s\n" + "\t.size %s, %d\n" + "%s:\n", + name, name, name, isfunc ? "function" : "object", name, size, name); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\n\t.globl %s\n", name); + if (isfunc) + fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name); + fprintf(ctx->fp, "%s:\n", name); + break; + case BUILD_machasm: + fprintf(ctx->fp, + "\n\t.private_extern %s\n" + "%s:\n", name, name); + break; + default: + break; + } +} + +/* Emit alignment. */ +static void emit_asm_align(BuildCtx *ctx, int bits) +{ + switch (ctx->mode) { + case BUILD_elfasm: + case BUILD_coffasm: + fprintf(ctx->fp, "\t.p2align %d\n", bits); + break; + case BUILD_machasm: + fprintf(ctx->fp, "\t.align %d\n", bits); + break; + default: + break; + } +} + +/* ------------------------------------------------------------------------ */ + +/* Emit assembler source code. */ +void emit_asm(BuildCtx *ctx) +{ + int i, rel; + + fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch); +#if LJ_ARCH_PPC64 + fprintf(ctx->fp, "\t.abiversion 2\n"); +#endif + fprintf(ctx->fp, "\t.text\n"); + emit_asm_align(ctx, 4); + +#if LJ_TARGET_PS3 + emit_asm_label(ctx, ctx->beginsym, ctx->codesz, 0); +#else + emit_asm_label(ctx, ctx->beginsym, 0, 0); +#endif + if (ctx->mode != BUILD_machasm) + fprintf(ctx->fp, ".Lbegin:\n"); + +#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND + /* This should really be moved into buildvm_arm.dasc. 
*/ + fprintf(ctx->fp, + ".fnstart\n" + ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n" + ".pad #28\n"); +#endif +#if LJ_TARGET_MIPS + fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n"); +#endif + + for (i = rel = 0; i < ctx->nsym; i++) { + int32_t ofs = ctx->sym[i].ofs; + int32_t next = ctx->sym[i+1].ofs; +#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND && LJ_HASFFI + if (!strcmp(ctx->sym[i].name, "lj_vm_ffi_call")) + fprintf(ctx->fp, + ".globl lj_err_unwind_arm\n" + ".personality lj_err_unwind_arm\n" + ".fnend\n" + ".fnstart\n" + ".save {r4, r5, r11, lr}\n" + ".setfp r11, sp\n"); +#endif + emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1); + while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) { + BuildReloc *r = &ctx->reloc[rel]; + int n = r->ofs - ofs; +#if LJ_TARGET_X86ORX64 + if (r->type != 0 && + (ctx->mode == BUILD_elfasm || ctx->mode == BUILD_machasm)) { + emit_asm_reloc_text(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); + } else { + emit_asm_bytes(ctx, ctx->code+ofs, n); + emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]); + } + ofs += n+4; +#else + emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); + ofs += n; +#endif + rel++; + } +#if LJ_TARGET_X86ORX64 + emit_asm_bytes(ctx, ctx->code+ofs, next-ofs); +#else + emit_asm_words(ctx, ctx->code+ofs, next-ofs); +#endif + } + +#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND + fprintf(ctx->fp, +#if !LJ_HASFFI + ".globl lj_err_unwind_arm\n" + ".personality lj_err_unwind_arm\n" +#endif + ".fnend\n"); +#endif + + fprintf(ctx->fp, "\n"); + switch (ctx->mode) { + case BUILD_elfasm: +#if !(LJ_TARGET_PS3 || LJ_TARGET_PSVITA) + fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n"); +#endif +#if LJ_TARGET_PPC && !LJ_TARGET_PS3 + /* Hard-float ABI. */ + fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n"); +#endif + /* fallthrough */ + case BUILD_coffasm: + fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident); + break; + case BUILD_machasm: + fprintf(ctx->fp, + "\t.cstring\n" + "\t.ascii \"%s\\0\"\n", ctx->dasm_ident); + break; + default: + break; + } + fprintf(ctx->fp, "\n"); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_fold.c b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_fold.c new file mode 100644 index 00000000000..daed7eccb22 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_fold.c @@ -0,0 +1,229 @@ +/* +** LuaJIT VM builder: IR folding hash table generator. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "buildvm.h" +#include "lj_obj.h" +#include "lj_ir.h" + +/* Context for the folding hash table generator. */ +static int lineno; +static int funcidx; +static uint32_t foldkeys[BUILD_MAX_FOLD]; +static uint32_t nkeys; + +/* Try to fill the hash table with keys using the hash parameters. */ +static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol) +{ + uint32_t i; + if (dorol && ((r & 31) == 0 || (r>>5) == 0)) + return 0; /* Avoid zero rotates. */ + memset(htab, 0xff, (sz+1)*sizeof(uint32_t)); + for (i = 0; i < nkeys; i++) { + uint32_t key = foldkeys[i]; + uint32_t k = key & 0xffffff; + uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) : + (((k << (r>>5)) - k) << (r&31))) % sz; + if (htab[h] != 0xffffffff) { /* Collision on primary slot. */ + if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */ + /* Try to move the colliding key, if possible. 
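+      ** Only the occupant of the secondary slot h+1 can be bumped to h+2, and
+      ** only if h+1 is that key's primary slot (so h+2 is still its secondary
+      ** slot); otherwise this (sz, r) combination is rejected.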
*/ + if (h < sz-1 && htab[h+2] == 0xffffffff) { + uint32_t k2 = htab[h+1] & 0xffffff; + uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) : + (((k2 << (r>>5)) - k2) << (r&31))) % sz; + if (h2 != h+1) return 0; /* Cannot resolve collision. */ + htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */ + } else { + return 0; /* Collision. */ + } + } + htab[h+1] = key; + } else { + htab[h] = key; + } + } + return 1; /* Success, all keys could be stored. */ +} + +/* Print the generated hash table. */ +static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz) +{ + uint32_t i; + fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x", + sz+1, htab[0]); + for (i = 1; i < sz+1; i++) + fprintf(ctx->fp, ",\n0x%08x", htab[i]); + fprintf(ctx->fp, "\n};\n\n"); +} + +/* Exhaustive search for the shortest semi-perfect hash table. */ +static void makehash(BuildCtx *ctx) +{ + uint32_t htab[BUILD_MAX_FOLD*2+1]; + uint32_t sz, r; + /* Search for the smallest hash table with an odd size. */ + for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) { + /* First try all shift hash combinations. */ + for (r = 0; r < 32*32; r++) { + if (tryhash(htab, sz, r, 0)) { + printhash(ctx, htab, sz); + fprintf(ctx->fp, + "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n", + r>>5, r&31, sz); + return; + } + } + /* Then try all rotate hash combinations. */ + for (r = 0; r < 32*32; r++) { + if (tryhash(htab, sz, r, 1)) { + printhash(ctx, htab, sz); + fprintf(ctx->fp, + "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n", + r>>5, r&31, sz); + return; + } + } + } + fprintf(stderr, "Error: search for perfect hash failed\n"); + exit(1); +} + +/* Parse one token of a fold rule. */ +static uint32_t nexttoken(char **pp, int allowlit, int allowany) +{ + char *p = *pp; + if (p) { + uint32_t i; + char *q = strchr(p, ' '); + if (q) *q++ = '\0'; + *pp = q; + if (allowlit && !strncmp(p, "IRFPM_", 6)) { + for (i = 0; irfpm_names[i]; i++) + if (!strcmp(irfpm_names[i], p+6)) + return i; + } else if (allowlit && !strncmp(p, "IRFL_", 5)) { + for (i = 0; irfield_names[i]; i++) + if (!strcmp(irfield_names[i], p+5)) + return i; + } else if (allowlit && !strncmp(p, "IRCALL_", 7)) { + for (i = 0; ircall_names[i]; i++) + if (!strcmp(ircall_names[i], p+7)) + return i; + } else if (allowlit && !strncmp(p, "IRCONV_", 7)) { + for (i = 0; irt_names[i]; i++) { + const char *r = strchr(p+7, '_'); + if (r && !strncmp(irt_names[i], p+7, r-(p+7))) { + uint32_t j; + for (j = 0; irt_names[j]; j++) + if (!strcmp(irt_names[j], r+1)) + return (i << 5) + j; + } + } + } else if (allowlit && *p >= '0' && *p <= '9') { + for (i = 0; *p >= '0' && *p <= '9'; p++) + i = i*10 + (*p - '0'); + if (*p == '\0') + return i; + } else if (allowany && !strcmp("any", p)) { + return allowany; + } else { + for (i = 0; ir_names[i]; i++) + if (!strcmp(ir_names[i], p)) + return i; + } + fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno); + exit(1); + } + return 0; +} + +/* Parse a fold rule. */ +static void foldrule(char *p) +{ + uint32_t op = nexttoken(&p, 0, 0); + uint32_t left = nexttoken(&p, 0, 0x7f); + uint32_t right = nexttoken(&p, 1, 0x3ff); + uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right; + uint32_t i; + if (nkeys >= BUILD_MAX_FOLD) { + fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n"); + exit(1); + } + /* Simple insertion sort to detect duplicates. 
*/ + for (i = nkeys; i > 0; i--) { + if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff)) + break; + if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) { + fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno); + exit(1); + } + foldkeys[i] = foldkeys[i-1]; + } + foldkeys[i] = key; + nkeys++; +} + +/* Emit C source code for IR folding hash table. */ +void emit_fold(BuildCtx *ctx) +{ + char buf[256]; /* We don't care about analyzing lines longer than that. */ + const char *fname = ctx->args[0]; + FILE *fp; + + if (fname == NULL) { + fprintf(stderr, "Error: missing input filename\n"); + exit(1); + } + + if (fname[0] == '-' && fname[1] == '\0') { + fp = stdin; + } else { + fp = fopen(fname, "r"); + if (!fp) { + fprintf(stderr, "Error: cannot open input file '%s': %s\n", + fname, strerror(errno)); + exit(1); + } + } + + fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); + fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n"); + + lineno = 0; + funcidx = 0; + nkeys = 0; + while (fgets(buf, sizeof(buf), fp) != NULL) { + lineno++; + /* The prefix must be at the start of a line, otherwise it's ignored. */ + if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) { + char *p = buf+sizeof(FOLDDEF_PREFIX)-1; + char *q = strchr(p, ')'); + if (p[0] == '(' && q) { + p++; + *q = '\0'; + foldrule(p); + } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) { + p += 2; + *q = '\0'; + if (funcidx) + fprintf(ctx->fp, ",\n"); + if (p[-2] == 'X') + fprintf(ctx->fp, " %s", p); + else + fprintf(ctx->fp, " fold_%s", p); + funcidx++; + } else { + buf[strlen(buf)-1] = '\0'; + fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n", + FOLDDEF_PREFIX, p, lineno); + exit(1); + } + } + } + fclose(fp); + fprintf(ctx->fp, "\n};\n\n"); + + makehash(ctx); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_lib.c b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_lib.c new file mode 100644 index 00000000000..e928673d4e0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_lib.c @@ -0,0 +1,457 @@ +/* +** LuaJIT VM builder: library definition compiler. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "buildvm.h" +#include "lj_obj.h" +#include "lj_bc.h" +#include "lj_lib.h" +#include "buildvm_libbc.h" + +/* Context for library definitions. */ +static uint8_t obuf[8192]; +static uint8_t *optr; +static char modname[80]; +static size_t modnamelen; +static char funcname[80]; +static int modstate, regfunc; +static int ffid, recffid, ffasmfunc; + +enum { + REGFUNC_OK, + REGFUNC_NOREG, + REGFUNC_NOREGUV +}; + +static void libdef_name(const char *p, int kind) +{ + size_t n = strlen(p); + if (kind != LIBINIT_STRING) { + if (n > modnamelen && p[modnamelen] == '_' && + !strncmp(p, modname, modnamelen)) { + p += modnamelen+1; + n -= modnamelen+1; + } + } + if (n > LIBINIT_MAXSTR) { + fprintf(stderr, "Error: string too long: '%s'\n", p); + exit(1); + } + if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. 
*/ + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = (uint8_t)(n | kind); + memcpy(optr, p, n); + optr += n; +} + +static void libdef_endmodule(BuildCtx *ctx) +{ + if (modstate != 0) { + char line[80]; + const uint8_t *p; + int n; + if (modstate == 1) + fprintf(ctx->fp, " (lua_CFunction)0"); + fprintf(ctx->fp, "\n};\n"); + fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n", + LABEL_PREFIX_LIBINIT, modname); + line[0] = '\0'; + for (n = 0, p = obuf; p < optr; p++) { + n += sprintf(line+n, "%d,", *p); + if (n >= 75) { + fprintf(ctx->fp, "%s\n", line); + n = 0; + line[0] = '\0'; + } + } + fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END); + } +} + +static void libdef_module(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + libdef_endmodule(ctx); + optr = obuf; + *optr++ = (uint8_t)ffid; + *optr++ = (uint8_t)ffasmfunc; + *optr++ = 0; /* Hash table size. */ + modstate = 1; + fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p); + fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p); + fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n", + LABEL_PREFIX_LIBCF, p); + } + modnamelen = strlen(p); + if (modnamelen > sizeof(modname)-1) { + fprintf(stderr, "Error: module name too long: '%s'\n", p); + exit(1); + } + strcpy(modname, p); +} + +static int find_ffofs(BuildCtx *ctx, const char *name) +{ + int i; + for (i = 0; i < ctx->nglob; i++) { + const char *gl = ctx->globnames[i]; + if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) { + return (int)((uint8_t *)ctx->glob[i] - ctx->code); + } + } + fprintf(stderr, "Error: undefined fast function %s%s\n", + LABEL_PREFIX_FF, name); + exit(1); +} + +static void libdef_func(BuildCtx *ctx, char *p, int arg) +{ + if (arg != LIBINIT_CF) + ffasmfunc++; + if (ctx->mode == BUILD_libdef) { + if (modstate == 0) { + fprintf(stderr, "Error: no module for function definition %s\n", p); + exit(1); + } + if (regfunc == REGFUNC_NOREG) { + if (optr+1 > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_FFID; + } else { + if (arg != LIBINIT_ASM_) { + if (modstate != 1) fprintf(ctx->fp, ",\n"); + modstate = 2; + fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p); + } + if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */ + libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg); + } + } else if (ctx->mode == BUILD_ffdef) { + fprintf(ctx->fp, "FFDEF(%s)\n", p); + } else if (ctx->mode == BUILD_recdef) { + if (strlen(p) > sizeof(funcname)-1) { + fprintf(stderr, "Error: function name too long: '%s'\n", p); + exit(1); + } + strcpy(funcname, p); + } else if (ctx->mode == BUILD_vmdef) { + int i; + for (i = 1; p[i] && modname[i-1]; i++) + if (p[i] == '_') p[i] = '.'; + fprintf(ctx->fp, "\"%s\",\n", p); + } else if (ctx->mode == BUILD_bcdef) { + if (arg != LIBINIT_CF) + fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p)); + } + ffid++; + regfunc = REGFUNC_OK; +} + +static uint8_t *libdef_uleb128(uint8_t *p, uint32_t *vv) +{ + uint32_t v = *p++; + if (v >= 0x80) { + int sh = 0; v &= 0x7f; + do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80); + } + *vv = v; + return p; +} + +static void libdef_fixupbc(uint8_t *p) +{ + uint32_t i, sizebc; + p += 4; + p = libdef_uleb128(p, &sizebc); + p = libdef_uleb128(p, &sizebc); + p = libdef_uleb128(p, &sizebc); + for (i = 0; i < sizebc; i++, p += 4) { + uint8_t op = p[libbc_endian ? 3 : 0]; + uint8_t ra = p[libbc_endian ? 
2 : 1]; + uint8_t rc = p[libbc_endian ? 1 : 2]; + uint8_t rb = p[libbc_endian ? 0 : 3]; + if (!LJ_DUALNUM && op == BC_ISTYPE && rc == ~LJ_TNUMX+1) { + op = BC_ISNUM; rc++; + } + p[LJ_ENDIAN_SELECT(0, 3)] = op; + p[LJ_ENDIAN_SELECT(1, 2)] = ra; + p[LJ_ENDIAN_SELECT(2, 1)] = rc; + p[LJ_ENDIAN_SELECT(3, 0)] = rb; + } +} + +static void libdef_lua(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + int i; + for (i = 0; libbc_map[i].name != NULL; i++) { + if (!strcmp(libbc_map[i].name, p)) { + int ofs = libbc_map[i].ofs; + int len = libbc_map[i+1].ofs - ofs; + obuf[2]++; /* Bump hash table size. */ + *optr++ = LIBINIT_LUA; + libdef_name(p, 0); + memcpy(optr, libbc_code + ofs, len); + libdef_fixupbc(optr); + optr += len; + return; + } + } + fprintf(stderr, "Error: missing libbc definition for %s\n", p); + exit(1); + } +} + +static uint32_t find_rec(char *name) +{ + char *p = (char *)obuf; + uint32_t n; + for (n = 2; *p; n++) { + if (strcmp(p, name) == 0) + return n; + p += strlen(p)+1; + } + if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + strcpy(p, name); + return n; +} + +static void libdef_rec(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_recdef) { + char *q; + uint32_t n; + for (; recffid+1 < ffid; recffid++) + fprintf(ctx->fp, ",\n0"); + recffid = ffid; + if (*p == '.') p = funcname; + q = strchr(p, ' '); + if (q) *q++ = '\0'; + n = find_rec(p); + if (q) + fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q); + else + fprintf(ctx->fp, ",\n0x%02x00", n); + } +} + +static void memcpy_endian(void *dst, void *src, size_t n) +{ + union { uint8_t b; uint32_t u; } host_endian; + host_endian.u = 1; + if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) { + memcpy(dst, src, n); + } else { + size_t i; + for (i = 0; i < n; i++) + ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1]; + } +} + +static void libdef_push(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + int len = (int)strlen(p); + if (*p == '"') { + if (len > 1 && p[len-1] == '"') { + p[len-1] = '\0'; + libdef_name(p+1, LIBINIT_STRING); + return; + } + } else if (*p >= '0' && *p <= '9') { + char *ep; + double d = strtod(p, &ep); + if (*ep == '\0') { + if (optr+1+sizeof(double) > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_NUMBER; + memcpy_endian(optr, &d, sizeof(double)); + optr += sizeof(double); + return; + } + } else if (!strcmp(p, "lastcl")) { + if (optr+1 > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_LASTCL; + return; + } else if (len > 4 && !strncmp(p, "top-", 4)) { + if (optr+2 > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_COPY; + *optr++ = (uint8_t)atoi(p+4); + return; + } + fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p); + exit(1); + } +} + +static void libdef_set(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */ + libdef_name(p, LIBINIT_STRING); + *optr++ = LIBINIT_SET; + obuf[2]++; /* Bump hash table size. 
*/ + } +} + +static void libdef_regfunc(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(ctx); UNUSED(p); + regfunc = arg; +} + +typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg); + +typedef struct LibDefHandler { + const char *suffix; + const char *stop; + const LibDefFunc func; + const int arg; +} LibDefHandler; + +static const LibDefHandler libdef_handlers[] = { + { "MODULE_", " \t\r\n", libdef_module, 0 }, + { "CF(", ")", libdef_func, LIBINIT_CF }, + { "ASM(", ")", libdef_func, LIBINIT_ASM }, + { "ASM_(", ")", libdef_func, LIBINIT_ASM_ }, + { "LUA(", ")", libdef_lua, 0 }, + { "REC(", ")", libdef_rec, 0 }, + { "PUSH(", ")", libdef_push, 0 }, + { "SET(", ")", libdef_set, 0 }, + { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV }, + { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG }, + { NULL, NULL, (LibDefFunc)0, 0 } +}; + +/* Emit C source code for library function definitions. */ +void emit_lib(BuildCtx *ctx) +{ + const char *fname; + + if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef || + ctx->mode == BUILD_recdef) + fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); + else if (ctx->mode == BUILD_vmdef) + fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n"); + if (ctx->mode == BUILD_recdef) + fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100"); + recffid = ffid = FF_C+1; + ffasmfunc = 0; + + while ((fname = *ctx->args++)) { + char buf[256]; /* We don't care about analyzing lines longer than that. */ + FILE *fp; + if (fname[0] == '-' && fname[1] == '\0') { + fp = stdin; + } else { + fp = fopen(fname, "r"); + if (!fp) { + fprintf(stderr, "Error: cannot open input file '%s': %s\n", + fname, strerror(errno)); + exit(1); + } + } + modstate = 0; + regfunc = REGFUNC_OK; + while (fgets(buf, sizeof(buf), fp) != NULL) { + char *p; + /* Simplistic pre-processor. Only handles top-level #if/#endif. */ + if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') { + int ok = 1; + if (!strcmp(buf, "#if LJ_52\n")) + ok = LJ_52; + else if (!strcmp(buf, "#if LJ_HASJIT\n")) + ok = LJ_HASJIT; + else if (!strcmp(buf, "#if LJ_HASFFI\n")) + ok = LJ_HASFFI; + if (!ok) { + int lvl = 1; + while (fgets(buf, sizeof(buf), fp) != NULL) { + if (buf[0] == '#' && buf[1] == 'e' && buf[2] == 'n') { + if (--lvl == 0) break; + } else if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') { + lvl++; + } + } + continue; + } + } + for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) { + const LibDefHandler *ldh; + p += sizeof(LIBDEF_PREFIX)-1; + for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) { + size_t n, len = strlen(ldh->suffix); + if (!strncmp(p, ldh->suffix, len)) { + p += len; + n = ldh->stop ? 
strcspn(p, ldh->stop) : 0; + if (!p[n]) break; + p[n] = '\0'; + ldh->func(ctx, p, ldh->arg); + p += n+1; + break; + } + } + if (ldh->suffix == NULL) { + buf[strlen(buf)-1] = '\0'; + fprintf(stderr, "Error: unknown library definition tag %s%s\n", + LIBDEF_PREFIX, p); + exit(1); + } + } + } + fclose(fp); + if (ctx->mode == BUILD_libdef) { + libdef_endmodule(ctx); + } + } + + if (ctx->mode == BUILD_ffdef) { + fprintf(ctx->fp, "\n#undef FFDEF\n\n"); + fprintf(ctx->fp, + "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n", + ffasmfunc); + } else if (ctx->mode == BUILD_vmdef) { + fprintf(ctx->fp, "},\n\n"); + } else if (ctx->mode == BUILD_bcdef) { + int i; + fprintf(ctx->fp, "\n};\n\n"); + fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n"); + fprintf(ctx->fp, "BCDEF(BCMODE)\n"); + for (i = ffasmfunc-1; i > 0; i--) + fprintf(ctx->fp, "BCMODE_FF,\n"); + fprintf(ctx->fp, "BCMODE_FF\n};\n\n"); + } else if (ctx->mode == BUILD_recdef) { + char *p = (char *)obuf; + fprintf(ctx->fp, "\n};\n\n"); + fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n" + "recff_nyi,\n" + "recff_c"); + while (*p) { + fprintf(ctx->fp, ",\nrecff_%s", p); + p += strlen(p)+1; + } + fprintf(ctx->fp, "\n};\n\n"); + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_libbc.h b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_libbc.h new file mode 100644 index 00000000000..45f8f8cbdf2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_libbc.h @@ -0,0 +1,45 @@ +/* This is a generated file. DO NOT EDIT! */ + +static const int libbc_endian = 0; + +static const uint8_t libbc_code[] = { +#if LJ_FR2 +0,1,2,0,0,1,2,24,1,0,0,76,1,2,0,241,135,158,166,3,220,203,178,130,4,0,1,2,0, +0,1,2,24,1,0,0,76,1,2,0,243,244,148,165,20,198,190,199,252,3,0,1,2,0,0,0,3, +16,0,5,0,21,1,0,0,76,1,2,0,0,2,10,0,0,0,15,16,0,12,0,16,1,9,0,41,2,1,0,21,3, +0,0,41,4,1,0,77,2,8,128,18,6,1,0,18,8,5,0,59,9,5,0,66,6,3,2,10,6,0,0,88,7,1, +128,76,6,2,0,79,2,248,127,75,0,1,0,0,2,11,0,0,0,16,16,0,12,0,16,1,9,0,43,2, +0,0,18,3,0,0,41,4,0,0,88,5,7,128,18,7,1,0,18,9,5,0,18,10,6,0,66,7,3,2,10,7, +0,0,88,8,1,128,76,7,2,0,70,5,3,3,82,5,247,127,75,0,1,0,0,1,2,0,0,0,3,16,0,12, +0,21,1,0,0,76,1,2,0,0,2,10,0,0,2,30,16,0,12,0,21,2,0,0,11,1,0,0,88,3,7,128, +8,2,0,0,88,3,23,128,59,3,2,0,43,4,0,0,64,4,2,0,76,3,2,0,88,3,18,128,16,1,14, +0,41,3,1,0,3,3,1,0,88,3,14,128,3,1,2,0,88,3,12,128,59,3,1,0,22,4,1,1,18,5,2, +0,41,6,1,0,77,4,4,128,23,8,1,7,59,9,7,0,64,9,8,0,79,4,252,127,43,4,0,0,64,4, +2,0,76,3,2,0,75,0,1,0,0,2,0 +#else +0,1,2,0,0,1,2,24,1,0,0,76,1,2,0,241,135,158,166,3,220,203,178,130,4,0,1,2,0, +0,1,2,24,1,0,0,76,1,2,0,243,244,148,165,20,198,190,199,252,3,0,1,2,0,0,0,3, +16,0,5,0,21,1,0,0,76,1,2,0,0,2,9,0,0,0,15,16,0,12,0,16,1,9,0,41,2,1,0,21,3, +0,0,41,4,1,0,77,2,8,128,18,6,1,0,18,7,5,0,59,8,5,0,66,6,3,2,10,6,0,0,88,7,1, +128,76,6,2,0,79,2,248,127,75,0,1,0,0,2,10,0,0,0,16,16,0,12,0,16,1,9,0,43,2, +0,0,18,3,0,0,41,4,0,0,88,5,7,128,18,7,1,0,18,8,5,0,18,9,6,0,66,7,3,2,10,7,0, +0,88,8,1,128,76,7,2,0,70,5,3,3,82,5,247,127,75,0,1,0,0,1,2,0,0,0,3,16,0,12, +0,21,1,0,0,76,1,2,0,0,2,10,0,0,2,30,16,0,12,0,21,2,0,0,11,1,0,0,88,3,7,128, +8,2,0,0,88,3,23,128,59,3,2,0,43,4,0,0,64,4,2,0,76,3,2,0,88,3,18,128,16,1,14, +0,41,3,1,0,3,3,1,0,88,3,14,128,3,1,2,0,88,3,12,128,59,3,1,0,22,4,1,1,18,5,2, +0,41,6,1,0,77,4,4,128,23,8,1,7,59,9,7,0,64,9,8,0,79,4,252,127,43,4,0,0,64,4, +2,0,76,3,2,0,75,0,1,0,0,2,0 +#endif +}; + +static const struct { const char *name; int ofs; } libbc_map[] = { +{"math_deg",0}, +{"math_rad",25}, 
+{"string_len",50}, +{"table_foreachi",69}, +{"table_foreach",136}, +{"table_getn",207}, +{"table_remove",226}, +{NULL,355} +}; + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_peobj.c b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_peobj.c new file mode 100644 index 00000000000..4279f50050b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/buildvm_peobj.c @@ -0,0 +1,368 @@ +/* +** LuaJIT VM builder: PE object emitter. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Only used for building on Windows, since we cannot assume the presence +** of a suitable assembler. The host and target byte order must match. +*/ + +#include "buildvm.h" +#include "lj_bc.h" + +#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC + +/* Context for PE object emitter. */ +static char *strtab; +static size_t strtabofs; + +/* -- PE object definitions ----------------------------------------------- */ + +/* PE header. */ +typedef struct PEheader { + uint16_t arch; + uint16_t nsects; + uint32_t time; + uint32_t symtabofs; + uint32_t nsyms; + uint16_t opthdrsz; + uint16_t flags; +} PEheader; + +/* PE section. */ +typedef struct PEsection { + char name[8]; + uint32_t vsize; + uint32_t vaddr; + uint32_t size; + uint32_t ofs; + uint32_t relocofs; + uint32_t lineofs; + uint16_t nreloc; + uint16_t nline; + uint32_t flags; +} PEsection; + +/* PE relocation. */ +typedef struct PEreloc { + uint32_t vaddr; + uint32_t symidx; + uint16_t type; +} PEreloc; + +/* Cannot use sizeof, because it pads up to the max. alignment. */ +#define PEOBJ_RELOC_SIZE (4+4+2) + +/* PE symbol table entry. */ +typedef struct PEsym { + union { + char name[8]; + uint32_t nameref[2]; + } n; + uint32_t value; + int16_t sect; + uint16_t type; + uint8_t scl; + uint8_t naux; +} PEsym; + +/* PE symbol table auxiliary entry for a section. */ +typedef struct PEsymaux { + uint32_t size; + uint16_t nreloc; + uint16_t nline; + uint32_t cksum; + uint16_t assoc; + uint8_t comdatsel; + uint8_t unused[3]; +} PEsymaux; + +/* Cannot use sizeof, because it pads up to the max. alignment. */ +#define PEOBJ_SYM_SIZE (8+4+2+2+1+1) + +/* PE object CPU specific defines. */ +#if LJ_TARGET_X86 +#define PEOBJ_ARCH_TARGET 0x014c +#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */ +#define PEOBJ_RELOC_DIR32 0x06 +#define PEOBJ_RELOC_OFS 0 +#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */ +#elif LJ_TARGET_X64 +#define PEOBJ_ARCH_TARGET 0x8664 +#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. */ +#define PEOBJ_RELOC_DIR32 0x02 +#define PEOBJ_RELOC_ADDR32NB 0x03 +#define PEOBJ_RELOC_OFS 0 +#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */ +#elif LJ_TARGET_PPC +#define PEOBJ_ARCH_TARGET 0x01f2 +#define PEOBJ_RELOC_REL32 0x06 +#define PEOBJ_RELOC_DIR32 0x02 +#define PEOBJ_RELOC_OFS (-4) +#define PEOBJ_TEXT_FLAGS 0x60400020 /* 60=r+x, 40=align8, 20=code. */ +#endif + +/* Section numbers (0-based). */ +enum { + PEOBJ_SECT_ABS = -2, + PEOBJ_SECT_UNDEF = -1, + PEOBJ_SECT_TEXT, +#if LJ_TARGET_X64 + PEOBJ_SECT_PDATA, + PEOBJ_SECT_XDATA, +#endif + PEOBJ_SECT_RDATA_Z, + PEOBJ_NSECTIONS +}; + +/* Symbol types. */ +#define PEOBJ_TYPE_NULL 0 +#define PEOBJ_TYPE_FUNC 0x20 + +/* Symbol storage class. */ +#define PEOBJ_SCL_EXTERN 2 +#define PEOBJ_SCL_STATIC 3 + +/* -- PE object emitter --------------------------------------------------- */ + +/* Emit PE object symbol. 
*/ +static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value, + int sect, int type, int scl) +{ + PEsym sym; + size_t len = strlen(name); + if (!strtab) { /* Pass 1: only calculate string table length. */ + if (len > 8) strtabofs += len+1; + return; + } + if (len <= 8) { + memcpy(sym.n.name, name, len); + memset(sym.n.name+len, 0, 8-len); + } else { + sym.n.nameref[0] = 0; + sym.n.nameref[1] = (uint32_t)strtabofs; + memcpy(strtab + strtabofs, name, len); + strtab[strtabofs+len] = 0; + strtabofs += len+1; + } + sym.value = value; + sym.sect = (int16_t)(sect+1); /* 1-based section number. */ + sym.type = (uint16_t)type; + sym.scl = (uint8_t)scl; + sym.naux = 0; + owrite(ctx, &sym, PEOBJ_SYM_SIZE); +} + +/* Emit PE object section symbol. */ +static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect) +{ + PEsym sym; + PEsymaux aux; + if (!strtab) return; /* Pass 1: no output. */ + memcpy(sym.n.name, pesect[sect].name, 8); + sym.value = 0; + sym.sect = (int16_t)(sect+1); /* 1-based section number. */ + sym.type = PEOBJ_TYPE_NULL; + sym.scl = PEOBJ_SCL_STATIC; + sym.naux = 1; + owrite(ctx, &sym, PEOBJ_SYM_SIZE); + memset(&aux, 0, sizeof(PEsymaux)); + aux.size = pesect[sect].size; + aux.nreloc = pesect[sect].nreloc; + owrite(ctx, &aux, PEOBJ_SYM_SIZE); +} + +/* Emit Windows PE object file. */ +void emit_peobj(BuildCtx *ctx) +{ + PEheader pehdr; + PEsection pesect[PEOBJ_NSECTIONS]; + uint32_t sofs; + int i, nrsym; + union { uint8_t b; uint32_t u; } host_endian; + + sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection); + + /* Fill in PE sections. */ + memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection)); + memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1); + pesect[PEOBJ_SECT_TEXT].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz); + pesect[PEOBJ_SECT_TEXT].relocofs = sofs; + sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE; + /* Flags: 60 = read+execute, 50 = align16, 20 = code. */ + pesect[PEOBJ_SECT_TEXT].flags = PEOBJ_TEXT_FLAGS; + +#if LJ_TARGET_X64 + memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1); + pesect[PEOBJ_SECT_PDATA].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_PDATA].size = 6*4); + pesect[PEOBJ_SECT_PDATA].relocofs = sofs; + sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 6) * PEOBJ_RELOC_SIZE; + /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ + pesect[PEOBJ_SECT_PDATA].flags = 0x40300040; + + memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1); + pesect[PEOBJ_SECT_XDATA].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4+6*2); /* See below. */ + pesect[PEOBJ_SECT_XDATA].relocofs = sofs; + sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE; + /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ + pesect[PEOBJ_SECT_XDATA].flags = 0x40300040; +#endif + + memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1); + pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1); + /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ + pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040; + + /* Fill in PE header. */ + pehdr.arch = PEOBJ_ARCH_TARGET; + pehdr.nsects = PEOBJ_NSECTIONS; + pehdr.time = 0; /* Timestamp is optional. 
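The memcpy_endian() helper in buildvm_lib.c above and the .text writer just below use the same probe for the host byte order: store 1 into a 32-bit union member and read back the first byte, which is 1 on a little-endian host and 0 on a big-endian one, then compare that with the target's LJ_ENDIAN_SELECT(1, 0). On a mismatch the emitter bails out, except for the PPC target where the .text words are byteswapped instead. A standalone sketch of the probe, independent of the buildvm headers:

#include <stdint.h>

/* Returns 1 on a little-endian host, 0 on a big-endian host. */
int host_is_little_endian(void)
{
  union { uint8_t b; uint32_t u; } probe;
  probe.u = 1;          /* the low-order byte lands first only on LE hosts */
  return probe.b == 1;
}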
*/ + pehdr.symtabofs = sofs; + pehdr.opthdrsz = 0; + pehdr.flags = 0; + + /* Compute the size of the symbol table: + ** @feat.00 + nsections*2 + ** + asm_start + nsym + ** + nrsym + */ + nrsym = ctx->nrelocsym; + pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym; +#if LJ_TARGET_X64 + pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */ +#endif + + /* Write PE object header and all sections. */ + owrite(ctx, &pehdr, sizeof(PEheader)); + owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS); + + /* Write .text section. */ + host_endian.u = 1; + if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) { +#if LJ_TARGET_PPC + uint32_t *p = (uint32_t *)ctx->code; + int n = (int)(ctx->codesz >> 2); + for (i = 0; i < n; i++, p++) + *p = lj_bswap(*p); /* Byteswap .text section. */ +#else + fprintf(stderr, "Error: different byte order for host and target\n"); + exit(1); +#endif + } + owrite(ctx, ctx->code, ctx->codesz); + for (i = 0; i < ctx->nreloc; i++) { + PEreloc reloc; + reloc.vaddr = (uint32_t)ctx->reloc[i].ofs + PEOBJ_RELOC_OFS; + reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */ + reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + } + +#if LJ_TARGET_X64 + { /* Write .pdata section. */ + uint32_t fcofs = (uint32_t)ctx->sym[ctx->nsym-1].ofs; + uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */ + PEreloc reloc; + pdata[0] = 0; pdata[1] = fcofs; pdata[2] = 0; + owrite(ctx, &pdata, sizeof(pdata)); + pdata[0] = fcofs; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 20; + owrite(ctx, &pdata, sizeof(pdata)); + reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 12; reloc.symidx = 1+2+nrsym+2+2+1; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 16; reloc.symidx = 1+2+nrsym+2+2+1; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 20; reloc.symidx = 1+2+nrsym+2; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + } + { /* Write .xdata section. */ + uint16_t xdata[8+2+6]; + PEreloc reloc; + xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhandler/ehandler, prolog size 0. */ + xdata[1] = 0x0005; /* Number of unwind codes, no frame pointer. */ + xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */ + xdata[3] = 0x3000; /* Push rbx. */ + xdata[4] = 0x6000; /* Push rsi. */ + xdata[5] = 0x7000; /* Push rdi. */ + xdata[6] = 0x5000; /* Push rbp. */ + xdata[7] = 0; /* Alignment. */ + xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */ + xdata[10] = 0x01; /* Ver. 1, no handler, prolog size 0. */ + xdata[11] = 0x1504; /* Number of unwind codes, fp = rbp, fpofs = 16. */ + xdata[12] = 0x0300; /* set_fpreg. */ + xdata[13] = 0x0200; /* stack offset 0*8+8 = aword*1. */ + xdata[14] = 0x3000; /* Push rbx. */ + xdata[15] = 0x5000; /* Push rbp. */ + owrite(ctx, &xdata, sizeof(xdata)); + reloc.vaddr = 2*8; reloc.symidx = 1+2+nrsym+2+2; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + } +#endif + + /* Write .rdata$Z section. */ + owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1); + + /* Write symbol table. 
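The symbol table below is emitted in two passes over the very same calls: while strtab is NULL the emitters only accumulate strtabofs (names longer than 8 characters need string-table space), then the buffer is allocated, its first four bytes receive the total size, and the second pass writes the symbols and copies the long names. A reduced sketch of this measure-then-write pattern, with a hypothetical emit_name() helper that is not part of the buildvm API:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *strtab;      /* NULL during the measuring pass */
static size_t strtabofs;

/* Measure (pass 1) or copy (pass 2) one symbol name. */
static void emit_name(const char *name)
{
  size_t len = strlen(name);
  if (len > 8) {                         /* short names are stored inline */
    if (strtab) {
      memcpy(strtab + strtabofs, name, len);
      strtab[strtabofs + len] = 0;
    }
    strtabofs += len + 1;
  }
}

/* Returns a malloc'ed string table; its first 4 bytes hold the total size. */
char *emit_all(const char **names, int n)
{
  strtab = NULL;
  for (;;) {
    int i;
    strtabofs = 4;                       /* leading 32-bit size field */
    for (i = 0; i < n; i++) emit_name(names[i]);
    if (strtab) break;                   /* second pass done */
    strtab = (char *)malloc(strtabofs);  /* size is now known */
    *(uint32_t *)strtab = (uint32_t)strtabofs;
  }
  return strtab;
}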
*/ + strtab = NULL; /* 1st pass: collect string sizes. */ + for (;;) { + strtabofs = 4; + /* Mark as SafeSEH compliant. */ + emit_peobj_sym(ctx, "@feat.00", 1, + PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC); + + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT); + for (i = 0; i < nrsym; i++) + emit_peobj_sym(ctx, ctx->relocsym[i], 0, + PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); + +#if LJ_TARGET_X64 + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA); + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA); + emit_peobj_sym(ctx, "lj_err_unwind_win64", 0, + PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); +#endif + + emit_peobj_sym(ctx, ctx->beginsym, 0, + PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN); + for (i = 0; i < ctx->nsym; i++) + emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs, + PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); + + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z); + + if (strtab) + break; + /* 2nd pass: alloc strtab, write syms and copy strings. */ + strtab = (char *)malloc(strtabofs); + *(uint32_t *)strtab = (uint32_t)strtabofs; + } + + /* Write string table. */ + owrite(ctx, strtab, strtabofs); +} + +#else + +void emit_peobj(BuildCtx *ctx) +{ + UNUSED(ctx); + fprintf(stderr, "Error: no PE object support for this target\n"); + exit(1); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/genlibbc.lua b/Build/source/libs/luajit/LuaJIT-src/src/host/genlibbc.lua new file mode 100644 index 00000000000..4398d8e74d9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/genlibbc.lua @@ -0,0 +1,197 @@ +---------------------------------------------------------------------------- +-- Lua script to dump the bytecode of the library functions written in Lua. +-- The resulting 'buildvm_libbc.h' is used for the build process of LuaJIT. +---------------------------------------------------------------------------- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- + +local ffi = require("ffi") +local bit = require("bit") +local vmdef = require("jit.vmdef") +local bcnames = vmdef.bcnames + +local format = string.format + +local isbe = (string.byte(string.dump(function() end), 5) % 2 == 1) + +local function usage(arg) + io.stderr:write("Usage: ", arg and arg[0] or "genlibbc", + " [-o buildvm_libbc.h] lib_*.c\n") + os.exit(1) +end + +local function parse_arg(arg) + local outfile = "-" + if not (arg and arg[1]) then + usage(arg) + end + if arg[1] == "-o" then + outfile = arg[2] + if not outfile then usage(arg) end + table.remove(arg, 1) + table.remove(arg, 1) + end + return outfile +end + +local function read_files(names) + local src = "" + for _,name in ipairs(names) do + local fp = assert(io.open(name)) + src = src .. 
fp:read("*a") + fp:close() + end + return src +end + +local function transform_lua(code) + local fixup = {} + local n = -30000 + code = string.gsub(code, "CHECK_(%w*)%((.-)%)", function(tp, var) + n = n + 1 + fixup[n] = { "CHECK", tp } + return format("%s=%d", var, n) + end) + code = string.gsub(code, "PAIRS%((.-)%)", function(var) + fixup.PAIRS = true + return format("nil, %s, 0", var) + end) + return "return "..code, fixup +end + +local function read_uleb128(p) + local v = p[0]; p = p + 1 + if v >= 128 then + local sh = 7; v = v - 128 + repeat + local r = p[0] + v = v + bit.lshift(bit.band(r, 127), sh) + sh = sh + 7 + p = p + 1 + until r < 128 + end + return p, v +end + +-- ORDER LJ_T +local name2itype = { + str = 5, func = 9, tab = 12, int = 14, num = 15 +} + +local BC = {} +for i=0,#bcnames/6-1 do + BC[string.gsub(string.sub(bcnames, i*6+1, i*6+6), " ", "")] = i +end +local xop, xra = isbe and 3 or 0, isbe and 2 or 1 +local xrc, xrb = isbe and 1 or 2, isbe and 0 or 3 + +local function fixup_dump(dump, fixup) + local buf = ffi.new("uint8_t[?]", #dump+1, dump) + local p = buf+5 + local n, sizebc + p, n = read_uleb128(p) + local start = p + p = p + 4 + p = read_uleb128(p) + p = read_uleb128(p) + p, sizebc = read_uleb128(p) + local rawtab = {} + for i=0,sizebc-1 do + local op = p[xop] + if op == BC.KSHORT then + local rd = p[xrc] + 256*p[xrb] + rd = bit.arshift(bit.lshift(rd, 16), 16) + local f = fixup[rd] + if f then + if f[1] == "CHECK" then + local tp = f[2] + if tp == "tab" then rawtab[p[xra]] = true end + p[xop] = tp == "num" and BC.ISNUM or BC.ISTYPE + p[xrb] = 0 + p[xrc] = name2itype[tp] + else + error("unhandled fixup type: "..f[1]) + end + end + elseif op == BC.TGETV then + if rawtab[p[xrb]] then + p[xop] = BC.TGETR + end + elseif op == BC.TSETV then + if rawtab[p[xrb]] then + p[xop] = BC.TSETR + end + elseif op == BC.ITERC then + if fixup.PAIRS then + p[xop] = BC.ITERN + end + end + p = p + 4 + end + return ffi.string(start, n) +end + +local function find_defs(src) + local defs = {} + for name, code in string.gmatch(src, "LJLIB_LUA%(([^)]*)%)%s*/%*(.-)%*/") do + local env = {} + local tcode, fixup = transform_lua(code) + local func = assert(load(tcode, "", nil, env))() + defs[name] = fixup_dump(string.dump(func, true), fixup) + defs[#defs+1] = name + end + return defs +end + +local function gen_header(defs) + local t = {} + local function w(x) t[#t+1] = x end + w("/* This is a generated file. DO NOT EDIT! */\n\n") + w("static const int libbc_endian = ") w(isbe and 1 or 0) w(";\n\n") + local s = "" + for _,name in ipairs(defs) do + s = s .. 
defs[name] + end + w("static const uint8_t libbc_code[] = {\n") + local n = 0 + for i=1,#s do + local x = string.byte(s, i) + w(x); w(",") + n = n + (x < 10 and 2 or (x < 100 and 3 or 4)) + if n >= 75 then n = 0; w("\n") end + end + w("0\n};\n\n") + w("static const struct { const char *name; int ofs; } libbc_map[] = {\n") + local m = 0 + for _,name in ipairs(defs) do + w('{"'); w(name); w('",'); w(m) w('},\n') + m = m + #defs[name] + end + w("{NULL,"); w(m); w("}\n};\n\n") + return table.concat(t) +end + +local function write_file(name, data) + if name == "-" then + assert(io.write(data)) + assert(io.flush()) + else + local fp = io.open(name) + if fp then + local old = fp:read("*a") + fp:close() + if data == old then return end + end + fp = assert(io.open(name, "w")) + assert(fp:write(data)) + assert(fp:close()) + end +end + +local outfile = parse_arg(arg) +local src = read_files(arg) +local defs = find_defs(src) +local hdr = gen_header(defs) +write_file(outfile, hdr) + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/host/genminilua.lua b/Build/source/libs/luajit/LuaJIT-src/src/host/genminilua.lua new file mode 100644 index 00000000000..cd0d946628c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/host/genminilua.lua @@ -0,0 +1,428 @@ +---------------------------------------------------------------------------- +-- Lua script to generate a customized, minified version of Lua. +-- The resulting 'minilua' is used for the build process of LuaJIT. +---------------------------------------------------------------------------- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- + +local sub, match, gsub = string.sub, string.match, string.gsub + +local LUA_VERSION = "5.1.5" +local LUA_SOURCE + +local function usage() + io.stderr:write("Usage: ", arg and arg[0] or "genminilua", + " lua-", LUA_VERSION, "-source-dir\n") + os.exit(1) +end + +local function find_sources() + LUA_SOURCE = arg and arg[1] + if not LUA_SOURCE then usage() end + if sub(LUA_SOURCE, -1) ~= "/" then LUA_SOURCE = LUA_SOURCE.."/" end + local fp = io.open(LUA_SOURCE .. "lua.h") + if not fp then + LUA_SOURCE = LUA_SOURCE.."src/" + fp = io.open(LUA_SOURCE .. 
"lua.h") + if not fp then usage() end + end + local all = fp:read("*a") + fp:close() + if not match(all, 'LUA_RELEASE%s*"Lua '..LUA_VERSION..'"') then + io.stderr:write("Error: version mismatch\n") + usage() + end +end + +local LUA_FILES = { +"lmem.c", "lobject.c", "ltm.c", "lfunc.c", "ldo.c", "lstring.c", "ltable.c", +"lgc.c", "lstate.c", "ldebug.c", "lzio.c", "lopcodes.c", +"llex.c", "lcode.c", "lparser.c", "lvm.c", "lapi.c", "lauxlib.c", +"lbaselib.c", "ltablib.c", "liolib.c", "loslib.c", "lstrlib.c", "linit.c", +} + +local REMOVE_LIB = {} +gsub([[ +collectgarbage dofile gcinfo getfenv getmetatable load print rawequal rawset +select tostring xpcall +foreach foreachi getn maxn setn +popen tmpfile seek setvbuf __tostring +clock date difftime execute getenv rename setlocale time tmpname +dump gfind len reverse +LUA_LOADLIBNAME LUA_MATHLIBNAME LUA_DBLIBNAME +]], "%S+", function(name) + REMOVE_LIB[name] = true +end) + +local REMOVE_EXTINC = { [""] = true, [""] = true, } + +local CUSTOM_MAIN = [[ +typedef unsigned int UB; +static UB barg(lua_State *L,int idx){ +union{lua_Number n;U64 b;}bn; +bn.n=lua_tonumber(L,idx)+6755399441055744.0; +if (bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number"); +return(UB)bn.b; +} +#define BRET(b) lua_pushnumber(L,(lua_Number)(int)(b));return 1; +static int tobit(lua_State *L){ +BRET(barg(L,1))} +static int bnot(lua_State *L){ +BRET(~barg(L,1))} +static int band(lua_State *L){ +int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)} +static int bor(lua_State *L){ +int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)} +static int bxor(lua_State *L){ +int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)} +static int lshift(lua_State *L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET(b<>n)} +static int arshift(lua_State *L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)} +static int rol(lua_State *L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET((b<>(32-n)))} +static int ror(lua_State *L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))} +static int bswap(lua_State *L){ +UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)} +static int tohex(lua_State *L){ +UB b=barg(L,1); +int n=lua_isnone(L,2)?8:(int)barg(L,2); +const char *hexdigits="0123456789abcdef"; +char buf[8]; +int i; +if(n<0){n=-n;hexdigits="0123456789ABCDEF";} +if(n>8)n=8; +for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;} +lua_pushlstring(L,buf,(size_t)n); +return 1; +} +static const struct luaL_Reg bitlib[] = { +{"tobit",tobit}, +{"bnot",bnot}, +{"band",band}, +{"bor",bor}, +{"bxor",bxor}, +{"lshift",lshift}, +{"rshift",rshift}, +{"arshift",arshift}, +{"rol",rol}, +{"ror",ror}, +{"bswap",bswap}, +{"tohex",tohex}, +{NULL,NULL} +}; +int main(int argc, char **argv){ + lua_State *L = luaL_newstate(); + int i; + luaL_openlibs(L); + luaL_register(L, "bit", bitlib); + if (argc < 2) return sizeof(void *); + lua_createtable(L, 0, 1); + lua_pushstring(L, argv[1]); + lua_rawseti(L, -2, 0); + lua_setglobal(L, "arg"); + if (luaL_loadfile(L, argv[1])) + goto err; + for (i = 2; i < argc; i++) + lua_pushstring(L, argv[i]); + if (lua_pcall(L, argc - 2, 0, 0)) { + err: + fprintf(stderr, "Error: %s\n", lua_tostring(L, -1)); + return 1; + } + lua_close(L); + return 0; +} +]] + +local function read_sources() + local t = {} + for i, name in ipairs(LUA_FILES) do + local fp = assert(io.open(LUA_SOURCE..name, "r")) + t[i] = fp:read("*a") + assert(fp:close()) + end + t[#t+1] = CUSTOM_MAIN + return table.concat(t) +end + +local includes = {} + +local 
function merge_includes(src) + return gsub(src, '#include%s*"([^"]*)"%s*\n', function(name) + if includes[name] then return "" end + includes[name] = true + local fp = assert(io.open(LUA_SOURCE..name, "r")) + local src = fp:read("*a") + assert(fp:close()) + src = gsub(src, "#ifndef%s+%w+_h\n#define%s+%w+_h\n", "") + src = gsub(src, "#endif%s*$", "") + return merge_includes(src) + end) +end + +local function get_license(src) + return match(src, "/%*+\n%* Copyright %(.-%*/\n") +end + +local function fold_lines(src) + return gsub(src, "\\\n", " ") +end + +local strings = {} + +local function save_str(str) + local n = #strings+1 + strings[n] = str + return "\1"..n.."\2" +end + +local function save_strings(src) + src = gsub(src, '"[^"\n]*"', save_str) + return gsub(src, "'[^'\n]*'", save_str) +end + +local function restore_strings(src) + return gsub(src, "\1(%d+)\2", function(numstr) + return strings[tonumber(numstr)] + end) +end + +local function def_istrue(def) + return def == "INT_MAX > 2147483640L" or + def == "LUAI_BITSINT >= 32" or + def == "SIZE_Bx < LUAI_BITSINT-1" or + def == "cast" or + def == "defined(LUA_CORE)" or + def == "MINSTRTABSIZE" or + def == "LUA_MINBUFFER" or + def == "HARDSTACKTESTS" or + def == "UNUSED" +end + +local head, defs = {[[ +#ifdef _MSC_VER +typedef unsigned __int64 U64; +#else +typedef unsigned long long U64; +#endif +int _CRT_glob = 0; +]]}, {} + +local function preprocess(src) + local t = { match(src, "^(.-)#") } + local lvl, on, oldon = 0, true, {} + for pp, def, txt in string.gmatch(src, "#(%w+) *([^\n]*)\n([^#]*)") do + if pp == "if" or pp == "ifdef" or pp == "ifndef" then + lvl = lvl + 1 + oldon[lvl] = on + on = def_istrue(def) + elseif pp == "else" then + if oldon[lvl] then + if on == false then on = true else on = false end + end + elseif pp == "elif" then + if oldon[lvl] then + on = def_istrue(def) + end + elseif pp == "endif" then + on = oldon[lvl] + lvl = lvl - 1 + elseif on then + if pp == "include" then + if not head[def] and not REMOVE_EXTINC[def] then + head[def] = true + head[#head+1] = "#include "..def.."\n" + end + elseif pp == "define" then + local k, sp, v = match(def, "([%w_]+)(%s*)(.*)") + if k and not (sp == "" and sub(v, 1, 1) == "(") then + defs[k] = gsub(v, "%a[%w_]*", function(tok) + return defs[tok] or tok + end) + else + t[#t+1] = "#define "..def.."\n" + end + elseif pp ~= "undef" then + error("unexpected directive: "..pp.." "..def) + end + end + if on then t[#t+1] = txt end + end + return gsub(table.concat(t), "%a[%w_]*", function(tok) + return defs[tok] or tok + end) +end + +local function merge_header(src, license) + local hdr = string.format([[ +/* This is a heavily customized and minimized copy of Lua %s. */ +/* It's only used to build LuaJIT. It does NOT have all standard functions! 
*/ +]], LUA_VERSION) + return hdr..license..table.concat(head)..src +end + +local function strip_unused1(src) + return gsub(src, '( {"?([%w_]+)"?,%s+%a[%w_]*},\n)', function(line, func) + return REMOVE_LIB[func] and "" or line + end) +end + +local function strip_unused2(src) + return gsub(src, "Symbolic Execution.-}=", "") +end + +local function strip_unused3(src) + src = gsub(src, "extern", "static") + src = gsub(src, "\nstatic([^\n]-)%(([^)]*)%)%(", "\nstatic%1 %2(") + src = gsub(src, "#define lua_assert[^\n]*\n", "") + src = gsub(src, "lua_assert%b();?", "") + src = gsub(src, "default:\n}", "default:;\n}") + src = gsub(src, "lua_lock%b();", "") + src = gsub(src, "lua_unlock%b();", "") + src = gsub(src, "luai_threadyield%b();", "") + src = gsub(src, "luai_userstateopen%b();", "{}") + src = gsub(src, "luai_userstate%w+%b();", "") + src = gsub(src, "%(%(c==.*luaY_parser%)", "luaY_parser") + src = gsub(src, "trydecpoint%(ls,seminfo%)", + "luaX_lexerror(ls,\"malformed number\",TK_NUMBER)") + src = gsub(src, "int c=luaZ_lookahead%b();", "") + src = gsub(src, "luaL_register%(L,[^,]*,co_funcs%);\nreturn 2;", + "return 1;") + src = gsub(src, "getfuncname%b():", "NULL:") + src = gsub(src, "getobjname%b():", "NULL:") + src = gsub(src, "if%([^\n]*hookmask[^\n]*%)\n[^\n]*\n", "") + src = gsub(src, "if%([^\n]*hookmask[^\n]*%)%b{}\n", "") + src = gsub(src, "if%([^\n]*hookmask[^\n]*&&\n[^\n]*%b{}\n", "") + src = gsub(src, "(twoto%b()%()", "%1(size_t)") + src = gsub(src, "i +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +typedef enum{ +TM_INDEX, +TM_NEWINDEX, +TM_GC, +TM_MODE, +TM_EQ, +TM_ADD, +TM_SUB, +TM_MUL, +TM_DIV, +TM_MOD, +TM_POW, +TM_UNM, +TM_LEN, +TM_LT, +TM_LE, +TM_CONCAT, +TM_CALL, +TM_N +}TMS; +enum OpMode{iABC,iABx,iAsBx}; +typedef enum{ +OP_MOVE, +OP_LOADK, +OP_LOADBOOL, +OP_LOADNIL, +OP_GETUPVAL, +OP_GETGLOBAL, +OP_GETTABLE, +OP_SETGLOBAL, +OP_SETUPVAL, +OP_SETTABLE, +OP_NEWTABLE, +OP_SELF, +OP_ADD, +OP_SUB, +OP_MUL, +OP_DIV, +OP_MOD, +OP_POW, +OP_UNM, +OP_NOT, +OP_LEN, +OP_CONCAT, +OP_JMP, +OP_EQ, +OP_LT, +OP_LE, +OP_TEST, +OP_TESTSET, +OP_CALL, +OP_TAILCALL, +OP_RETURN, +OP_FORLOOP, +OP_FORPREP, +OP_TFORLOOP, +OP_SETLIST, +OP_CLOSE, +OP_CLOSURE, +OP_VARARG +}OpCode; +enum OpArgMask{ +OpArgN, +OpArgU, +OpArgR, +OpArgK +}; +typedef enum{ +VVOID, +VNIL, +VTRUE, +VFALSE, +VK, +VKNUM, +VLOCAL, +VUPVAL, +VGLOBAL, +VINDEXED, +VJMP, +VRELOCABLE, +VNONRELOC, +VCALL, +VVARARG +}expkind; +enum RESERVED{ +TK_AND=257,TK_BREAK, +TK_DO,TK_ELSE,TK_ELSEIF,TK_END,TK_FALSE,TK_FOR,TK_FUNCTION, +TK_IF,TK_IN,TK_LOCAL,TK_NIL,TK_NOT,TK_OR,TK_REPEAT, +TK_RETURN,TK_THEN,TK_TRUE,TK_UNTIL,TK_WHILE, +TK_CONCAT,TK_DOTS,TK_EQ,TK_GE,TK_LE,TK_NE,TK_NUMBER, +TK_NAME,TK_STRING,TK_EOS +}; +typedef enum BinOpr{ +OPR_ADD,OPR_SUB,OPR_MUL,OPR_DIV,OPR_MOD,OPR_POW, +OPR_CONCAT, +OPR_NE,OPR_EQ, +OPR_LT,OPR_LE,OPR_GT,OPR_GE, +OPR_AND,OPR_OR, +OPR_NOBINOPR +}BinOpr; +typedef enum UnOpr{OPR_MINUS,OPR_NOT,OPR_LEN,OPR_NOUNOPR}UnOpr; +#define LUA_QL(x)"'"x"'" +#define luai_apicheck(L,o){(void)L;} +#define lua_number2str(s,n)sprintf((s),"%.14g",(n)) +#define lua_str2number(s,p)strtod((s),(p)) +#define luai_numadd(a,b)((a)+(b)) +#define luai_numsub(a,b)((a)-(b)) +#define luai_nummul(a,b)((a)*(b)) +#define luai_numdiv(a,b)((a)/(b)) +#define luai_nummod(a,b)((a)-floor((a)/(b))*(b)) +#define luai_numpow(a,b)(pow(a,b)) +#define luai_numunm(a)(-(a)) +#define luai_numeq(a,b)((a)==(b)) +#define luai_numlt(a,b)((a)<(b)) +#define luai_numle(a,b)((a)<=(b)) +#define 
luai_numisnan(a)(!luai_numeq((a),(a))) +#define lua_number2int(i,d)((i)=(int)(d)) +#define lua_number2integer(i,d)((i)=(lua_Integer)(d)) +#define LUAI_THROW(L,c)longjmp((c)->b,1) +#define LUAI_TRY(L,c,a)if(setjmp((c)->b)==0){a} +#define lua_pclose(L,file)((void)((void)L,file),0) +#define lua_upvalueindex(i)((-10002)-(i)) +typedef struct lua_State lua_State; +typedef int(*lua_CFunction)(lua_State*L); +typedef const char*(*lua_Reader)(lua_State*L,void*ud,size_t*sz); +typedef void*(*lua_Alloc)(void*ud,void*ptr,size_t osize,size_t nsize); +typedef double lua_Number; +typedef ptrdiff_t lua_Integer; +static void lua_settop(lua_State*L,int idx); +static int lua_type(lua_State*L,int idx); +static const char* lua_tolstring(lua_State*L,int idx,size_t*len); +static size_t lua_objlen(lua_State*L,int idx); +static void lua_pushlstring(lua_State*L,const char*s,size_t l); +static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n); +static void lua_createtable(lua_State*L,int narr,int nrec); +static void lua_setfield(lua_State*L,int idx,const char*k); +#define lua_pop(L,n)lua_settop(L,-(n)-1) +#define lua_newtable(L)lua_createtable(L,0,0) +#define lua_pushcfunction(L,f)lua_pushcclosure(L,(f),0) +#define lua_strlen(L,i)lua_objlen(L,(i)) +#define lua_isfunction(L,n)(lua_type(L,(n))==6) +#define lua_istable(L,n)(lua_type(L,(n))==5) +#define lua_isnil(L,n)(lua_type(L,(n))==0) +#define lua_isboolean(L,n)(lua_type(L,(n))==1) +#define lua_isnone(L,n)(lua_type(L,(n))==(-1)) +#define lua_isnoneornil(L,n)(lua_type(L,(n))<=0) +#define lua_pushliteral(L,s)lua_pushlstring(L,""s,(sizeof(s)/sizeof(char))-1) +#define lua_setglobal(L,s)lua_setfield(L,(-10002),(s)) +#define lua_tostring(L,i)lua_tolstring(L,(i),NULL) +typedef struct lua_Debug lua_Debug; +typedef void(*lua_Hook)(lua_State*L,lua_Debug*ar); +struct lua_Debug{ +int event; +const char*name; +const char*namewhat; +const char*what; +const char*source; +int currentline; +int nups; +int linedefined; +int lastlinedefined; +char short_src[60]; +int i_ci; +}; +typedef unsigned int lu_int32; +typedef size_t lu_mem; +typedef ptrdiff_t l_mem; +typedef unsigned char lu_byte; +#define IntPoint(p)((unsigned int)(lu_mem)(p)) +typedef union{double u;void*s;long l;}L_Umaxalign; +typedef double l_uacNumber; +#define check_exp(c,e)(e) +#define UNUSED(x)((void)(x)) +#define cast(t,exp)((t)(exp)) +#define cast_byte(i)cast(lu_byte,(i)) +#define cast_num(i)cast(lua_Number,(i)) +#define cast_int(i)cast(int,(i)) +typedef lu_int32 Instruction; +#define condhardstacktests(x)((void)0) +typedef union GCObject GCObject; +typedef struct GCheader{ +GCObject*next;lu_byte tt;lu_byte marked; +}GCheader; +typedef union{ +GCObject*gc; +void*p; +lua_Number n; +int b; +}Value; +typedef struct lua_TValue{ +Value value;int tt; +}TValue; +#define ttisnil(o)(ttype(o)==0) +#define ttisnumber(o)(ttype(o)==3) +#define ttisstring(o)(ttype(o)==4) +#define ttistable(o)(ttype(o)==5) +#define ttisfunction(o)(ttype(o)==6) +#define ttisboolean(o)(ttype(o)==1) +#define ttisuserdata(o)(ttype(o)==7) +#define ttisthread(o)(ttype(o)==8) +#define ttislightuserdata(o)(ttype(o)==2) +#define ttype(o)((o)->tt) +#define gcvalue(o)check_exp(iscollectable(o),(o)->value.gc) +#define pvalue(o)check_exp(ttislightuserdata(o),(o)->value.p) +#define nvalue(o)check_exp(ttisnumber(o),(o)->value.n) +#define rawtsvalue(o)check_exp(ttisstring(o),&(o)->value.gc->ts) +#define tsvalue(o)(&rawtsvalue(o)->tsv) +#define rawuvalue(o)check_exp(ttisuserdata(o),&(o)->value.gc->u) +#define uvalue(o)(&rawuvalue(o)->uv) +#define 
clvalue(o)check_exp(ttisfunction(o),&(o)->value.gc->cl) +#define hvalue(o)check_exp(ttistable(o),&(o)->value.gc->h) +#define bvalue(o)check_exp(ttisboolean(o),(o)->value.b) +#define thvalue(o)check_exp(ttisthread(o),&(o)->value.gc->th) +#define l_isfalse(o)(ttisnil(o)||(ttisboolean(o)&&bvalue(o)==0)) +#define checkconsistency(obj) +#define checkliveness(g,obj) +#define setnilvalue(obj)((obj)->tt=0) +#define setnvalue(obj,x){TValue*i_o=(obj);i_o->value.n=(x);i_o->tt=3;} +#define setbvalue(obj,x){TValue*i_o=(obj);i_o->value.b=(x);i_o->tt=1;} +#define setsvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=4;checkliveness(G(L),i_o);} +#define setuvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=7;checkliveness(G(L),i_o);} +#define setthvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=8;checkliveness(G(L),i_o);} +#define setclvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=6;checkliveness(G(L),i_o);} +#define sethvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=5;checkliveness(G(L),i_o);} +#define setptvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=(8+1);checkliveness(G(L),i_o);} +#define setobj(L,obj1,obj2){const TValue*o2=(obj2);TValue*o1=(obj1);o1->value=o2->value;o1->tt=o2->tt;checkliveness(G(L),o1);} +#define setttype(obj,tt)(ttype(obj)=(tt)) +#define iscollectable(o)(ttype(o)>=4) +typedef TValue*StkId; +typedef union TString{ +L_Umaxalign dummy; +struct{ +GCObject*next;lu_byte tt;lu_byte marked; +lu_byte reserved; +unsigned int hash; +size_t len; +}tsv; +}TString; +#define getstr(ts)cast(const char*,(ts)+1) +#define svalue(o)getstr(rawtsvalue(o)) +typedef union Udata{ +L_Umaxalign dummy; +struct{ +GCObject*next;lu_byte tt;lu_byte marked; +struct Table*metatable; +struct Table*env; +size_t len; +}uv; +}Udata; +typedef struct Proto{ +GCObject*next;lu_byte tt;lu_byte marked; +TValue*k; +Instruction*code; +struct Proto**p; +int*lineinfo; +struct LocVar*locvars; +TString**upvalues; +TString*source; +int sizeupvalues; +int sizek; +int sizecode; +int sizelineinfo; +int sizep; +int sizelocvars; +int linedefined; +int lastlinedefined; +GCObject*gclist; +lu_byte nups; +lu_byte numparams; +lu_byte is_vararg; +lu_byte maxstacksize; +}Proto; +typedef struct LocVar{ +TString*varname; +int startpc; +int endpc; +}LocVar; +typedef struct UpVal{ +GCObject*next;lu_byte tt;lu_byte marked; +TValue*v; +union{ +TValue value; +struct{ +struct UpVal*prev; +struct UpVal*next; +}l; +}u; +}UpVal; +typedef struct CClosure{ +GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env; +lua_CFunction f; +TValue upvalue[1]; +}CClosure; +typedef struct LClosure{ +GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env; +struct Proto*p; +UpVal*upvals[1]; +}LClosure; +typedef union Closure{ +CClosure c; +LClosure l; +}Closure; +#define iscfunction(o)(ttype(o)==6&&clvalue(o)->c.isC) +typedef union TKey{ +struct{ +Value value;int tt; +struct Node*next; +}nk; +TValue tvk; +}TKey; +typedef struct Node{ +TValue i_val; +TKey i_key; +}Node; +typedef struct Table{ +GCObject*next;lu_byte tt;lu_byte marked; +lu_byte flags; +lu_byte lsizenode; +struct Table*metatable; +TValue*array; +Node*node; +Node*lastfree; +GCObject*gclist; +int sizearray; +}Table; +#define lmod(s,size)(check_exp((size&(size-1))==0,(cast(int,(s)&((size)-1))))) +#define twoto(x)((size_t)1<<(x)) +#define 
sizenode(t)(twoto((t)->lsizenode)) +static const TValue luaO_nilobject_; +#define ceillog2(x)(luaO_log2((x)-1)+1) +static int luaO_log2(unsigned int x); +#define gfasttm(g,et,e)((et)==NULL?NULL:((et)->flags&(1u<<(e)))?NULL:luaT_gettm(et,e,(g)->tmname[e])) +#define fasttm(l,et,e)gfasttm(G(l),et,e) +static const TValue*luaT_gettm(Table*events,TMS event,TString*ename); +#define luaM_reallocv(L,b,on,n,e)((cast(size_t,(n)+1)<=((size_t)(~(size_t)0)-2)/(e))?luaM_realloc_(L,(b),(on)*(e),(n)*(e)):luaM_toobig(L)) +#define luaM_freemem(L,b,s)luaM_realloc_(L,(b),(s),0) +#define luaM_free(L,b)luaM_realloc_(L,(b),sizeof(*(b)),0) +#define luaM_freearray(L,b,n,t)luaM_reallocv(L,(b),n,0,sizeof(t)) +#define luaM_malloc(L,t)luaM_realloc_(L,NULL,0,(t)) +#define luaM_new(L,t)cast(t*,luaM_malloc(L,sizeof(t))) +#define luaM_newvector(L,n,t)cast(t*,luaM_reallocv(L,NULL,0,n,sizeof(t))) +#define luaM_growvector(L,v,nelems,size,t,limit,e)if((nelems)+1>(size))((v)=cast(t*,luaM_growaux_(L,v,&(size),sizeof(t),limit,e))) +#define luaM_reallocvector(L,v,oldn,n,t)((v)=cast(t*,luaM_reallocv(L,v,oldn,n,sizeof(t)))) +static void*luaM_realloc_(lua_State*L,void*block,size_t oldsize, +size_t size); +static void*luaM_toobig(lua_State*L); +static void*luaM_growaux_(lua_State*L,void*block,int*size, +size_t size_elem,int limit, +const char*errormsg); +typedef struct Zio ZIO; +#define char2int(c)cast(int,cast(unsigned char,(c))) +#define zgetc(z)(((z)->n--)>0?char2int(*(z)->p++):luaZ_fill(z)) +typedef struct Mbuffer{ +char*buffer; +size_t n; +size_t buffsize; +}Mbuffer; +#define luaZ_initbuffer(L,buff)((buff)->buffer=NULL,(buff)->buffsize=0) +#define luaZ_buffer(buff)((buff)->buffer) +#define luaZ_sizebuffer(buff)((buff)->buffsize) +#define luaZ_bufflen(buff)((buff)->n) +#define luaZ_resetbuffer(buff)((buff)->n=0) +#define luaZ_resizebuffer(L,buff,size)(luaM_reallocvector(L,(buff)->buffer,(buff)->buffsize,size,char),(buff)->buffsize=size) +#define luaZ_freebuffer(L,buff)luaZ_resizebuffer(L,buff,0) +struct Zio{ +size_t n; +const char*p; +lua_Reader reader; +void*data; +lua_State*L; +}; +static int luaZ_fill(ZIO*z); +struct lua_longjmp; +#define gt(L)(&L->l_gt) +#define registry(L)(&G(L)->l_registry) +typedef struct stringtable{ +GCObject**hash; +lu_int32 nuse; +int size; +}stringtable; +typedef struct CallInfo{ +StkId base; +StkId func; +StkId top; +const Instruction*savedpc; +int nresults; +int tailcalls; +}CallInfo; +#define curr_func(L)(clvalue(L->ci->func)) +#define ci_func(ci)(clvalue((ci)->func)) +#define f_isLua(ci)(!ci_func(ci)->c.isC) +#define isLua(ci)(ttisfunction((ci)->func)&&f_isLua(ci)) +typedef struct global_State{ +stringtable strt; +lua_Alloc frealloc; +void*ud; +lu_byte currentwhite; +lu_byte gcstate; +int sweepstrgc; +GCObject*rootgc; +GCObject**sweepgc; +GCObject*gray; +GCObject*grayagain; +GCObject*weak; +GCObject*tmudata; +Mbuffer buff; +lu_mem GCthreshold; +lu_mem totalbytes; +lu_mem estimate; +lu_mem gcdept; +int gcpause; +int gcstepmul; +lua_CFunction panic; +TValue l_registry; +struct lua_State*mainthread; +UpVal uvhead; +struct Table*mt[(8+1)]; +TString*tmname[TM_N]; +}global_State; +struct lua_State{ +GCObject*next;lu_byte tt;lu_byte marked; +lu_byte status; +StkId top; +StkId base; +global_State*l_G; +CallInfo*ci; +const Instruction*savedpc; +StkId stack_last; +StkId stack; +CallInfo*end_ci; +CallInfo*base_ci; +int stacksize; +int size_ci; +unsigned short nCcalls; +unsigned short baseCcalls; +lu_byte hookmask; +lu_byte allowhook; +int basehookcount; +int hookcount; +lua_Hook hook; +TValue l_gt; +TValue 
env; +GCObject*openupval; +GCObject*gclist; +struct lua_longjmp*errorJmp; +ptrdiff_t errfunc; +}; +#define G(L)(L->l_G) +union GCObject{ +GCheader gch; +union TString ts; +union Udata u; +union Closure cl; +struct Table h; +struct Proto p; +struct UpVal uv; +struct lua_State th; +}; +#define rawgco2ts(o)check_exp((o)->gch.tt==4,&((o)->ts)) +#define gco2ts(o)(&rawgco2ts(o)->tsv) +#define rawgco2u(o)check_exp((o)->gch.tt==7,&((o)->u)) +#define gco2u(o)(&rawgco2u(o)->uv) +#define gco2cl(o)check_exp((o)->gch.tt==6,&((o)->cl)) +#define gco2h(o)check_exp((o)->gch.tt==5,&((o)->h)) +#define gco2p(o)check_exp((o)->gch.tt==(8+1),&((o)->p)) +#define gco2uv(o)check_exp((o)->gch.tt==(8+2),&((o)->uv)) +#define ngcotouv(o)check_exp((o)==NULL||(o)->gch.tt==(8+2),&((o)->uv)) +#define gco2th(o)check_exp((o)->gch.tt==8,&((o)->th)) +#define obj2gco(v)(cast(GCObject*,(v))) +static void luaE_freethread(lua_State*L,lua_State*L1); +#define pcRel(pc,p)(cast(int,(pc)-(p)->code)-1) +#define getline_(f,pc)(((f)->lineinfo)?(f)->lineinfo[pc]:0) +#define resethookcount(L)(L->hookcount=L->basehookcount) +static void luaG_typeerror(lua_State*L,const TValue*o, +const char*opname); +static void luaG_runerror(lua_State*L,const char*fmt,...); +#define luaD_checkstack(L,n)if((char*)L->stack_last-(char*)L->top<=(n)*(int)sizeof(TValue))luaD_growstack(L,n);else condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1)); +#define incr_top(L){luaD_checkstack(L,1);L->top++;} +#define savestack(L,p)((char*)(p)-(char*)L->stack) +#define restorestack(L,n)((TValue*)((char*)L->stack+(n))) +#define saveci(L,p)((char*)(p)-(char*)L->base_ci) +#define restoreci(L,n)((CallInfo*)((char*)L->base_ci+(n))) +typedef void(*Pfunc)(lua_State*L,void*ud); +static int luaD_poscall(lua_State*L,StkId firstResult); +static void luaD_reallocCI(lua_State*L,int newsize); +static void luaD_reallocstack(lua_State*L,int newsize); +static void luaD_growstack(lua_State*L,int n); +static void luaD_throw(lua_State*L,int errcode); +static void*luaM_growaux_(lua_State*L,void*block,int*size,size_t size_elems, +int limit,const char*errormsg){ +void*newblock; +int newsize; +if(*size>=limit/2){ +if(*size>=limit) +luaG_runerror(L,errormsg); +newsize=limit; +} +else{ +newsize=(*size)*2; +if(newsize<4) +newsize=4; +} +newblock=luaM_reallocv(L,block,*size,newsize,size_elems); +*size=newsize; +return newblock; +} +static void*luaM_toobig(lua_State*L){ +luaG_runerror(L,"memory allocation error: block too big"); +return NULL; +} +static void*luaM_realloc_(lua_State*L,void*block,size_t osize,size_t nsize){ +global_State*g=G(L); +block=(*g->frealloc)(g->ud,block,osize,nsize); +if(block==NULL&&nsize>0) +luaD_throw(L,4); +g->totalbytes=(g->totalbytes-osize)+nsize; +return block; +} +#define resetbits(x,m)((x)&=cast(lu_byte,~(m))) +#define setbits(x,m)((x)|=(m)) +#define testbits(x,m)((x)&(m)) +#define bitmask(b)(1<<(b)) +#define bit2mask(b1,b2)(bitmask(b1)|bitmask(b2)) +#define l_setbit(x,b)setbits(x,bitmask(b)) +#define resetbit(x,b)resetbits(x,bitmask(b)) +#define testbit(x,b)testbits(x,bitmask(b)) +#define set2bits(x,b1,b2)setbits(x,(bit2mask(b1,b2))) +#define reset2bits(x,b1,b2)resetbits(x,(bit2mask(b1,b2))) +#define test2bits(x,b1,b2)testbits(x,(bit2mask(b1,b2))) +#define iswhite(x)test2bits((x)->gch.marked,0,1) +#define isblack(x)testbit((x)->gch.marked,2) +#define isgray(x)(!isblack(x)&&!iswhite(x)) +#define otherwhite(g)(g->currentwhite^bit2mask(0,1)) +#define isdead(g,v)((v)->gch.marked&otherwhite(g)&bit2mask(0,1)) +#define changewhite(x)((x)->gch.marked^=bit2mask(0,1)) 
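The mark-bit macros here encode the tri-color state of the incremental collector in a single byte: bits 0 and 1 are the two alternating white colors, bit 2 is black, and an object with none of these bits set reads as gray; otherwhite() flips the two white bits against the collector's current white, so isdead() recognizes objects still carrying the white of the previous cycle. A small self-contained illustration of that encoding (standalone, not taken from the Lua sources):

#include <stdio.h>

#define WHITE0  (1 << 0)   /* white color of even GC cycles */
#define WHITE1  (1 << 1)   /* white color of odd GC cycles */
#define BLACK   (1 << 2)

static const char *color(unsigned char marked)
{
  if (marked & (WHITE0 | WHITE1)) return "white";
  if (marked & BLACK) return "black";
  return "gray";
}

int main(void)
{
  unsigned char currentwhite = WHITE0;               /* flips every cycle */
  unsigned char obj = WHITE1;                        /* marked with the other white */
  unsigned char otherwhite = currentwhite ^ (WHITE0 | WHITE1);
  int dead = (obj & otherwhite & (WHITE0 | WHITE1)) != 0;
  printf("%s, dead=%d\n", color(obj), dead);         /* prints: white, dead=1 */
  return 0;
}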
+#define gray2black(x)l_setbit((x)->gch.marked,2) +#define valiswhite(x)(iscollectable(x)&&iswhite(gcvalue(x))) +#define luaC_white(g)cast(lu_byte,(g)->currentwhite&bit2mask(0,1)) +#define luaC_checkGC(L){condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));if(G(L)->totalbytes>=G(L)->GCthreshold)luaC_step(L);} +#define luaC_barrier(L,p,v){if(valiswhite(v)&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),gcvalue(v));} +#define luaC_barriert(L,t,v){if(valiswhite(v)&&isblack(obj2gco(t)))luaC_barrierback(L,t);} +#define luaC_objbarrier(L,p,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),obj2gco(o));} +#define luaC_objbarriert(L,t,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(t)))luaC_barrierback(L,t);} +static void luaC_step(lua_State*L); +static void luaC_link(lua_State*L,GCObject*o,lu_byte tt); +static void luaC_linkupval(lua_State*L,UpVal*uv); +static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v); +static void luaC_barrierback(lua_State*L,Table*t); +#define sizestring(s)(sizeof(union TString)+((s)->len+1)*sizeof(char)) +#define sizeudata(u)(sizeof(union Udata)+(u)->len) +#define luaS_new(L,s)(luaS_newlstr(L,s,strlen(s))) +#define luaS_newliteral(L,s)(luaS_newlstr(L,""s,(sizeof(s)/sizeof(char))-1)) +#define luaS_fix(s)l_setbit((s)->tsv.marked,5) +static TString*luaS_newlstr(lua_State*L,const char*str,size_t l); +#define tostring(L,o)((ttype(o)==4)||(luaV_tostring(L,o))) +#define tonumber(o,n)(ttype(o)==3||(((o)=luaV_tonumber(o,n))!=NULL)) +#define equalobj(L,o1,o2)(ttype(o1)==ttype(o2)&&luaV_equalval(L,o1,o2)) +static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2); +static const TValue*luaV_tonumber(const TValue*obj,TValue*n); +static int luaV_tostring(lua_State*L,StkId obj); +static void luaV_execute(lua_State*L,int nexeccalls); +static void luaV_concat(lua_State*L,int total,int last); +static const TValue luaO_nilobject_={{NULL},0}; +static int luaO_int2fb(unsigned int x){ +int e=0; +while(x>=16){ +x=(x+1)>>1; +e++; +} +if(x<8)return x; +else return((e+1)<<3)|(cast_int(x)-8); +} +static int luaO_fb2int(int x){ +int e=(x>>3)&31; +if(e==0)return x; +else return((x&7)+8)<<(e-1); +} +static int luaO_log2(unsigned int x){ +static const lu_byte log_2[256]={ +0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, +6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, +7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, +7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, +8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 +}; +int l=-1; +while(x>=256){l+=8;x>>=8;} +return l+log_2[x]; +} +static int luaO_rawequalObj(const TValue*t1,const TValue*t2){ +if(ttype(t1)!=ttype(t2))return 0; +else switch(ttype(t1)){ +case 0: +return 1; +case 3: +return luai_numeq(nvalue(t1),nvalue(t2)); +case 1: +return bvalue(t1)==bvalue(t2); +case 2: +return pvalue(t1)==pvalue(t2); +default: +return gcvalue(t1)==gcvalue(t2); +} +} +static int luaO_str2d(const char*s,lua_Number*result){ +char*endptr; +*result=lua_str2number(s,&endptr); +if(endptr==s)return 0; +if(*endptr=='x'||*endptr=='X') +*result=cast_num(strtoul(s,&endptr,16)); +if(*endptr=='\0')return 1; +while(isspace(cast(unsigned char,*endptr)))endptr++; +if(*endptr!='\0')return 0; +return 1; +} +static void pushstr(lua_State*L,const char*str){ 
+setsvalue(L,L->top,luaS_new(L,str)); +incr_top(L); +} +static const char*luaO_pushvfstring(lua_State*L,const char*fmt,va_list argp){ +int n=1; +pushstr(L,""); +for(;;){ +const char*e=strchr(fmt,'%'); +if(e==NULL)break; +setsvalue(L,L->top,luaS_newlstr(L,fmt,e-fmt)); +incr_top(L); +switch(*(e+1)){ +case's':{ +const char*s=va_arg(argp,char*); +if(s==NULL)s="(null)"; +pushstr(L,s); +break; +} +case'c':{ +char buff[2]; +buff[0]=cast(char,va_arg(argp,int)); +buff[1]='\0'; +pushstr(L,buff); +break; +} +case'd':{ +setnvalue(L->top,cast_num(va_arg(argp,int))); +incr_top(L); +break; +} +case'f':{ +setnvalue(L->top,cast_num(va_arg(argp,l_uacNumber))); +incr_top(L); +break; +} +case'p':{ +char buff[4*sizeof(void*)+8]; +sprintf(buff,"%p",va_arg(argp,void*)); +pushstr(L,buff); +break; +} +case'%':{ +pushstr(L,"%"); +break; +} +default:{ +char buff[3]; +buff[0]='%'; +buff[1]=*(e+1); +buff[2]='\0'; +pushstr(L,buff); +break; +} +} +n+=2; +fmt=e+2; +} +pushstr(L,fmt); +luaV_concat(L,n+1,cast_int(L->top-L->base)-1); +L->top-=n; +return svalue(L->top-1); +} +static const char*luaO_pushfstring(lua_State*L,const char*fmt,...){ +const char*msg; +va_list argp; +va_start(argp,fmt); +msg=luaO_pushvfstring(L,fmt,argp); +va_end(argp); +return msg; +} +static void luaO_chunkid(char*out,const char*source,size_t bufflen){ +if(*source=='='){ +strncpy(out,source+1,bufflen); +out[bufflen-1]='\0'; +} +else{ +if(*source=='@'){ +size_t l; +source++; +bufflen-=sizeof(" '...' "); +l=strlen(source); +strcpy(out,""); +if(l>bufflen){ +source+=(l-bufflen); +strcat(out,"..."); +} +strcat(out,source); +} +else{ +size_t len=strcspn(source,"\n\r"); +bufflen-=sizeof(" [string \"...\"] "); +if(len>bufflen)len=bufflen; +strcpy(out,"[string \""); +if(source[len]!='\0'){ +strncat(out,source,len); +strcat(out,"..."); +} +else +strcat(out,source); +strcat(out,"\"]"); +} +} +} +#define gnode(t,i)(&(t)->node[i]) +#define gkey(n)(&(n)->i_key.nk) +#define gval(n)(&(n)->i_val) +#define gnext(n)((n)->i_key.nk.next) +#define key2tval(n)(&(n)->i_key.tvk) +static TValue*luaH_setnum(lua_State*L,Table*t,int key); +static const TValue*luaH_getstr(Table*t,TString*key); +static TValue*luaH_set(lua_State*L,Table*t,const TValue*key); +static const char*const luaT_typenames[]={ +"nil","boolean","userdata","number", +"string","table","function","userdata","thread", +"proto","upval" +}; +static void luaT_init(lua_State*L){ +static const char*const luaT_eventname[]={ +"__index","__newindex", +"__gc","__mode","__eq", +"__add","__sub","__mul","__div","__mod", +"__pow","__unm","__len","__lt","__le", +"__concat","__call" +}; +int i; +for(i=0;itmname[i]=luaS_new(L,luaT_eventname[i]); +luaS_fix(G(L)->tmname[i]); +} +} +static const TValue*luaT_gettm(Table*events,TMS event,TString*ename){ +const TValue*tm=luaH_getstr(events,ename); +if(ttisnil(tm)){ +events->flags|=cast_byte(1u<metatable; +break; +case 7: +mt=uvalue(o)->metatable; +break; +default: +mt=G(L)->mt[ttype(o)]; +} +return(mt?luaH_getstr(mt,G(L)->tmname[event]):(&luaO_nilobject_)); +} +#define sizeCclosure(n)(cast(int,sizeof(CClosure))+cast(int,sizeof(TValue)*((n)-1))) +#define sizeLclosure(n)(cast(int,sizeof(LClosure))+cast(int,sizeof(TValue*)*((n)-1))) +static Closure*luaF_newCclosure(lua_State*L,int nelems,Table*e){ +Closure*c=cast(Closure*,luaM_malloc(L,sizeCclosure(nelems))); +luaC_link(L,obj2gco(c),6); +c->c.isC=1; +c->c.env=e; +c->c.nupvalues=cast_byte(nelems); +return c; +} +static Closure*luaF_newLclosure(lua_State*L,int nelems,Table*e){ 
+Closure*c=cast(Closure*,luaM_malloc(L,sizeLclosure(nelems))); +luaC_link(L,obj2gco(c),6); +c->l.isC=0; +c->l.env=e; +c->l.nupvalues=cast_byte(nelems); +while(nelems--)c->l.upvals[nelems]=NULL; +return c; +} +static UpVal*luaF_newupval(lua_State*L){ +UpVal*uv=luaM_new(L,UpVal); +luaC_link(L,obj2gco(uv),(8+2)); +uv->v=&uv->u.value; +setnilvalue(uv->v); +return uv; +} +static UpVal*luaF_findupval(lua_State*L,StkId level){ +global_State*g=G(L); +GCObject**pp=&L->openupval; +UpVal*p; +UpVal*uv; +while(*pp!=NULL&&(p=ngcotouv(*pp))->v>=level){ +if(p->v==level){ +if(isdead(g,obj2gco(p))) +changewhite(obj2gco(p)); +return p; +} +pp=&p->next; +} +uv=luaM_new(L,UpVal); +uv->tt=(8+2); +uv->marked=luaC_white(g); +uv->v=level; +uv->next=*pp; +*pp=obj2gco(uv); +uv->u.l.prev=&g->uvhead; +uv->u.l.next=g->uvhead.u.l.next; +uv->u.l.next->u.l.prev=uv; +g->uvhead.u.l.next=uv; +return uv; +} +static void unlinkupval(UpVal*uv){ +uv->u.l.next->u.l.prev=uv->u.l.prev; +uv->u.l.prev->u.l.next=uv->u.l.next; +} +static void luaF_freeupval(lua_State*L,UpVal*uv){ +if(uv->v!=&uv->u.value) +unlinkupval(uv); +luaM_free(L,uv); +} +static void luaF_close(lua_State*L,StkId level){ +UpVal*uv; +global_State*g=G(L); +while(L->openupval!=NULL&&(uv=ngcotouv(L->openupval))->v>=level){ +GCObject*o=obj2gco(uv); +L->openupval=uv->next; +if(isdead(g,o)) +luaF_freeupval(L,uv); +else{ +unlinkupval(uv); +setobj(L,&uv->u.value,uv->v); +uv->v=&uv->u.value; +luaC_linkupval(L,uv); +} +} +} +static Proto*luaF_newproto(lua_State*L){ +Proto*f=luaM_new(L,Proto); +luaC_link(L,obj2gco(f),(8+1)); +f->k=NULL; +f->sizek=0; +f->p=NULL; +f->sizep=0; +f->code=NULL; +f->sizecode=0; +f->sizelineinfo=0; +f->sizeupvalues=0; +f->nups=0; +f->upvalues=NULL; +f->numparams=0; +f->is_vararg=0; +f->maxstacksize=0; +f->lineinfo=NULL; +f->sizelocvars=0; +f->locvars=NULL; +f->linedefined=0; +f->lastlinedefined=0; +f->source=NULL; +return f; +} +static void luaF_freeproto(lua_State*L,Proto*f){ +luaM_freearray(L,f->code,f->sizecode,Instruction); +luaM_freearray(L,f->p,f->sizep,Proto*); +luaM_freearray(L,f->k,f->sizek,TValue); +luaM_freearray(L,f->lineinfo,f->sizelineinfo,int); +luaM_freearray(L,f->locvars,f->sizelocvars,struct LocVar); +luaM_freearray(L,f->upvalues,f->sizeupvalues,TString*); +luaM_free(L,f); +} +static void luaF_freeclosure(lua_State*L,Closure*c){ +int size=(c->c.isC)?sizeCclosure(c->c.nupvalues): +sizeLclosure(c->l.nupvalues); +luaM_freemem(L,c,size); +} +#define MASK1(n,p)((~((~(Instruction)0)<>0)&MASK1(6,0))) +#define SET_OPCODE(i,o)((i)=(((i)&MASK0(6,0))|((cast(Instruction,o)<<0)&MASK1(6,0)))) +#define GETARG_A(i)(cast(int,((i)>>(0+6))&MASK1(8,0))) +#define SETARG_A(i,u)((i)=(((i)&MASK0(8,(0+6)))|((cast(Instruction,u)<<(0+6))&MASK1(8,(0+6))))) +#define GETARG_B(i)(cast(int,((i)>>(((0+6)+8)+9))&MASK1(9,0))) +#define SETARG_B(i,b)((i)=(((i)&MASK0(9,(((0+6)+8)+9)))|((cast(Instruction,b)<<(((0+6)+8)+9))&MASK1(9,(((0+6)+8)+9))))) +#define GETARG_C(i)(cast(int,((i)>>((0+6)+8))&MASK1(9,0))) +#define SETARG_C(i,b)((i)=(((i)&MASK0(9,((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1(9,((0+6)+8))))) +#define GETARG_Bx(i)(cast(int,((i)>>((0+6)+8))&MASK1((9+9),0))) +#define SETARG_Bx(i,b)((i)=(((i)&MASK0((9+9),((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1((9+9),((0+6)+8))))) +#define GETARG_sBx(i)(GETARG_Bx(i)-(((1<<(9+9))-1)>>1)) +#define SETARG_sBx(i,b)SETARG_Bx((i),cast(unsigned int,(b)+(((1<<(9+9))-1)>>1))) +#define 
CREATE_ABC(o,a,b,c)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,b)<<(((0+6)+8)+9))|(cast(Instruction,c)<<((0+6)+8))) +#define CREATE_ABx(o,a,bc)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,bc)<<((0+6)+8))) +#define ISK(x)((x)&(1<<(9-1))) +#define INDEXK(r)((int)(r)&~(1<<(9-1))) +#define RKASK(x)((x)|(1<<(9-1))) +static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]; +#define getBMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>4)&3)) +#define getCMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>2)&3)) +#define testTMode(m)(luaP_opmodes[m]&(1<<7)) +typedef struct expdesc{ +expkind k; +union{ +struct{int info,aux;}s; +lua_Number nval; +}u; +int t; +int f; +}expdesc; +typedef struct upvaldesc{ +lu_byte k; +lu_byte info; +}upvaldesc; +struct BlockCnt; +typedef struct FuncState{ +Proto*f; +Table*h; +struct FuncState*prev; +struct LexState*ls; +struct lua_State*L; +struct BlockCnt*bl; +int pc; +int lasttarget; +int jpc; +int freereg; +int nk; +int np; +short nlocvars; +lu_byte nactvar; +upvaldesc upvalues[60]; +unsigned short actvar[200]; +}FuncState; +static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff, +const char*name); +struct lua_longjmp{ +struct lua_longjmp*previous; +jmp_buf b; +volatile int status; +}; +static void luaD_seterrorobj(lua_State*L,int errcode,StkId oldtop){ +switch(errcode){ +case 4:{ +setsvalue(L,oldtop,luaS_newliteral(L,"not enough memory")); +break; +} +case 5:{ +setsvalue(L,oldtop,luaS_newliteral(L,"error in error handling")); +break; +} +case 3: +case 2:{ +setobj(L,oldtop,L->top-1); +break; +} +} +L->top=oldtop+1; +} +static void restore_stack_limit(lua_State*L){ +if(L->size_ci>20000){ +int inuse=cast_int(L->ci-L->base_ci); +if(inuse+1<20000) +luaD_reallocCI(L,20000); +} +} +static void resetstack(lua_State*L,int status){ +L->ci=L->base_ci; +L->base=L->ci->base; +luaF_close(L,L->base); +luaD_seterrorobj(L,status,L->base); +L->nCcalls=L->baseCcalls; +L->allowhook=1; +restore_stack_limit(L); +L->errfunc=0; +L->errorJmp=NULL; +} +static void luaD_throw(lua_State*L,int errcode){ +if(L->errorJmp){ +L->errorJmp->status=errcode; +LUAI_THROW(L,L->errorJmp); +} +else{ +L->status=cast_byte(errcode); +if(G(L)->panic){ +resetstack(L,errcode); +G(L)->panic(L); +} +exit(EXIT_FAILURE); +} +} +static int luaD_rawrunprotected(lua_State*L,Pfunc f,void*ud){ +struct lua_longjmp lj; +lj.status=0; +lj.previous=L->errorJmp; +L->errorJmp=&lj; +LUAI_TRY(L,&lj, +(*f)(L,ud); +); +L->errorJmp=lj.previous; +return lj.status; +} +static void correctstack(lua_State*L,TValue*oldstack){ +CallInfo*ci; +GCObject*up; +L->top=(L->top-oldstack)+L->stack; +for(up=L->openupval;up!=NULL;up=up->gch.next) +gco2uv(up)->v=(gco2uv(up)->v-oldstack)+L->stack; +for(ci=L->base_ci;ci<=L->ci;ci++){ +ci->top=(ci->top-oldstack)+L->stack; +ci->base=(ci->base-oldstack)+L->stack; +ci->func=(ci->func-oldstack)+L->stack; +} +L->base=(L->base-oldstack)+L->stack; +} +static void luaD_reallocstack(lua_State*L,int newsize){ +TValue*oldstack=L->stack; +int realsize=newsize+1+5; +luaM_reallocvector(L,L->stack,L->stacksize,realsize,TValue); +L->stacksize=realsize; +L->stack_last=L->stack+newsize; +correctstack(L,oldstack); +} +static void luaD_reallocCI(lua_State*L,int newsize){ +CallInfo*oldci=L->base_ci; +luaM_reallocvector(L,L->base_ci,L->size_ci,newsize,CallInfo); +L->size_ci=newsize; +L->ci=(L->ci-oldci)+L->base_ci; +L->end_ci=L->base_ci+L->size_ci-1; +} +static void luaD_growstack(lua_State*L,int n){ +if(n<=L->stacksize) +luaD_reallocstack(L,2*L->stacksize); +else 
+luaD_reallocstack(L,L->stacksize+n);
+}
+static CallInfo*growCI(lua_State*L){
+if(L->size_ci>20000)
+luaD_throw(L,5);
+else{
+luaD_reallocCI(L,2*L->size_ci);
+if(L->size_ci>20000)
+luaG_runerror(L,"stack overflow");
+}
+return++L->ci;
+}
+static StkId adjust_varargs(lua_State*L,Proto*p,int actual){
+int i;
+int nfixargs=p->numparams;
+Table*htab=NULL;
+StkId base,fixed;
+for(;actual<nfixargs;++actual)
+setnilvalue(L->top++);
+fixed=L->top-actual;
+base=L->top;
+for(i=0;i<nfixargs;i++){
+setobj(L,L->top++,fixed+i);
+setnilvalue(fixed+i);
+}
+if(htab){
+sethvalue(L,L->top++,htab);
+}
+return base;
+}
+static StkId tryfuncTM(lua_State*L,StkId func){
+const TValue*tm=luaT_gettmbyobj(L,func,TM_CALL);
+StkId p;
+ptrdiff_t funcr=savestack(L,func);
+if(!ttisfunction(tm))
+luaG_typeerror(L,func,"call");
+for(p=L->top;p>func;p--)setobj(L,p,p-1);
+incr_top(L);
+func=restorestack(L,funcr);
+setobj(L,func,tm);
+return func;
+}
+#define inc_ci(L)((L->ci==L->end_ci)?growCI(L):(condhardstacktests(luaD_reallocCI(L,L->size_ci)),++L->ci))
+static int luaD_precall(lua_State*L,StkId func,int nresults){
+LClosure*cl;
+ptrdiff_t funcr;
+if(!ttisfunction(func))
+func=tryfuncTM(L,func);
+funcr=savestack(L,func);
+cl=&clvalue(func)->l;
+L->ci->savedpc=L->savedpc;
+if(!cl->isC){
+CallInfo*ci;
+StkId st,base;
+Proto*p=cl->p;
+luaD_checkstack(L,p->maxstacksize);
+func=restorestack(L,funcr);
+if(!p->is_vararg){
+base=func+1;
+if(L->top>base+p->numparams)
+L->top=base+p->numparams;
+}
+else{
+int nargs=cast_int(L->top-func)-1;
+base=adjust_varargs(L,p,nargs);
+func=restorestack(L,funcr);
+}
+ci=inc_ci(L);
+ci->func=func;
+L->base=ci->base=base;
+ci->top=L->base+p->maxstacksize;
+L->savedpc=p->code;
+ci->tailcalls=0;
+ci->nresults=nresults;
+for(st=L->top;st<ci->top;st++)
+setnilvalue(st);
+L->top=ci->top;
+return 0;
+}
+else{
+CallInfo*ci;
+int n;
+luaD_checkstack(L,20);
+ci=inc_ci(L);
+ci->func=restorestack(L,funcr);
+L->base=ci->base=ci->func+1;
+ci->top=L->top+20;
+ci->nresults=nresults;
+n=(*curr_func(L)->c.f)(L);
+if(n<0)
+return 2;
+else{
+luaD_poscall(L,L->top-n);
+return 1;
+}
+}
+}
+static int luaD_poscall(lua_State*L,StkId firstResult){
+StkId res;
+int wanted,i;
+CallInfo*ci;
+ci=L->ci--;
+res=ci->func;
+wanted=ci->nresults;
+L->base=(ci-1)->base;
+L->savedpc=(ci-1)->savedpc;
+for(i=wanted;i!=0&&firstResult<L->top;i--)
+setobj(L,res++,firstResult++);
+while(i-->0)
+setnilvalue(res++);
+L->top=res;
+return(wanted-(-1));
+}
+static void luaD_call(lua_State*L,StkId func,int nResults){
+if(++L->nCcalls>=200){
+if(L->nCcalls==200)
+luaG_runerror(L,"C stack overflow");
+else if(L->nCcalls>=(200+(200>>3)))
+luaD_throw(L,5);
+}
+if(luaD_precall(L,func,nResults)==0)
+luaV_execute(L,1);
+L->nCcalls--;
+luaC_checkGC(L);
+}
+static int luaD_pcall(lua_State*L,Pfunc func,void*u,
+ptrdiff_t old_top,ptrdiff_t ef){
+int status;
+unsigned short oldnCcalls=L->nCcalls;
+ptrdiff_t old_ci=saveci(L,L->ci);
+lu_byte old_allowhooks=L->allowhook;
+ptrdiff_t old_errfunc=L->errfunc;
+L->errfunc=ef;
+status=luaD_rawrunprotected(L,func,u);
+if(status!=0){
+StkId oldtop=restorestack(L,old_top);
+luaF_close(L,oldtop);
+luaD_seterrorobj(L,status,oldtop);
+L->nCcalls=oldnCcalls;
+L->ci=restoreci(L,old_ci);
+L->base=L->ci->base;
+L->savedpc=L->ci->savedpc;
+L->allowhook=old_allowhooks;
+restore_stack_limit(L);
+}
+L->errfunc=old_errfunc;
+return status;
+}
+struct SParser{
+ZIO*z;
+Mbuffer buff;
+const char*name;
+};
+static void f_parser(lua_State*L,void*ud){
+int i;
+Proto*tf;
+Closure*cl;
+struct SParser*p=cast(struct SParser*,ud);
+luaC_checkGC(L);
+tf=luaY_parser(L,p->z,
+&p->buff,p->name);
+cl=luaF_newLclosure(L,tf->nups,hvalue(gt(L))); +cl->l.p=tf; +for(i=0;inups;i++) +cl->l.upvals[i]=luaF_newupval(L); +setclvalue(L,L->top,cl); +incr_top(L); +} +static int luaD_protectedparser(lua_State*L,ZIO*z,const char*name){ +struct SParser p; +int status; +p.z=z;p.name=name; +luaZ_initbuffer(L,&p.buff); +status=luaD_pcall(L,f_parser,&p,savestack(L,L->top),L->errfunc); +luaZ_freebuffer(L,&p.buff); +return status; +} +static void luaS_resize(lua_State*L,int newsize){ +GCObject**newhash; +stringtable*tb; +int i; +if(G(L)->gcstate==2) +return; +newhash=luaM_newvector(L,newsize,GCObject*); +tb=&G(L)->strt; +for(i=0;isize;i++){ +GCObject*p=tb->hash[i]; +while(p){ +GCObject*next=p->gch.next; +unsigned int h=gco2ts(p)->hash; +int h1=lmod(h,newsize); +p->gch.next=newhash[h1]; +newhash[h1]=p; +p=next; +} +} +luaM_freearray(L,tb->hash,tb->size,TString*); +tb->size=newsize; +tb->hash=newhash; +} +static TString*newlstr(lua_State*L,const char*str,size_t l, +unsigned int h){ +TString*ts; +stringtable*tb; +if(l+1>(((size_t)(~(size_t)0)-2)-sizeof(TString))/sizeof(char)) +luaM_toobig(L); +ts=cast(TString*,luaM_malloc(L,(l+1)*sizeof(char)+sizeof(TString))); +ts->tsv.len=l; +ts->tsv.hash=h; +ts->tsv.marked=luaC_white(G(L)); +ts->tsv.tt=4; +ts->tsv.reserved=0; +memcpy(ts+1,str,l*sizeof(char)); +((char*)(ts+1))[l]='\0'; +tb=&G(L)->strt; +h=lmod(h,tb->size); +ts->tsv.next=tb->hash[h]; +tb->hash[h]=obj2gco(ts); +tb->nuse++; +if(tb->nuse>cast(lu_int32,tb->size)&&tb->size<=(INT_MAX-2)/2) +luaS_resize(L,tb->size*2); +return ts; +} +static TString*luaS_newlstr(lua_State*L,const char*str,size_t l){ +GCObject*o; +unsigned int h=cast(unsigned int,l); +size_t step=(l>>5)+1; +size_t l1; +for(l1=l;l1>=step;l1-=step) +h=h^((h<<5)+(h>>2)+cast(unsigned char,str[l1-1])); +for(o=G(L)->strt.hash[lmod(h,G(L)->strt.size)]; +o!=NULL; +o=o->gch.next){ +TString*ts=rawgco2ts(o); +if(ts->tsv.len==l&&(memcmp(str,getstr(ts),l)==0)){ +if(isdead(G(L),o))changewhite(o); +return ts; +} +} +return newlstr(L,str,l,h); +} +static Udata*luaS_newudata(lua_State*L,size_t s,Table*e){ +Udata*u; +if(s>((size_t)(~(size_t)0)-2)-sizeof(Udata)) +luaM_toobig(L); +u=cast(Udata*,luaM_malloc(L,s+sizeof(Udata))); +u->uv.marked=luaC_white(G(L)); +u->uv.tt=7; +u->uv.len=s; +u->uv.metatable=NULL; +u->uv.env=e; +u->uv.next=G(L)->mainthread->next; +G(L)->mainthread->next=obj2gco(u); +return u; +} +#define hashpow2(t,n)(gnode(t,lmod((n),sizenode(t)))) +#define hashstr(t,str)hashpow2(t,(str)->tsv.hash) +#define hashboolean(t,p)hashpow2(t,p) +#define hashmod(t,n)(gnode(t,((n)%((sizenode(t)-1)|1)))) +#define hashpointer(t,p)hashmod(t,IntPoint(p)) +static const Node dummynode_={ +{{NULL},0}, +{{{NULL},0,NULL}} +}; +static Node*hashnum(const Table*t,lua_Number n){ +unsigned int a[cast_int(sizeof(lua_Number)/sizeof(int))]; +int i; +if(luai_numeq(n,0)) +return gnode(t,0); +memcpy(a,&n,sizeof(a)); +for(i=1;isizearray) +return i-1; +else{ +Node*n=mainposition(t,key); +do{ +if(luaO_rawequalObj(key2tval(n),key)|| +(ttype(gkey(n))==(8+3)&&iscollectable(key)&& +gcvalue(gkey(n))==gcvalue(key))){ +i=cast_int(n-gnode(t,0)); +return i+t->sizearray; +} +else n=gnext(n); +}while(n); +luaG_runerror(L,"invalid key to "LUA_QL("next")); +return 0; +} +} +static int luaH_next(lua_State*L,Table*t,StkId key){ +int i=findindex(L,t,key); +for(i++;isizearray;i++){ +if(!ttisnil(&t->array[i])){ +setnvalue(key,cast_num(i+1)); +setobj(L,key+1,&t->array[i]); +return 1; +} +} +for(i-=t->sizearray;i<(int)sizenode(t);i++){ +if(!ttisnil(gval(gnode(t,i)))){ +setobj(L,key,key2tval(gnode(t,i))); 
+setobj(L,key+1,gval(gnode(t,i))); +return 1; +} +} +return 0; +} +static int computesizes(int nums[],int*narray){ +int i; +int twotoi; +int a=0; +int na=0; +int n=0; +for(i=0,twotoi=1;twotoi/2<*narray;i++,twotoi*=2){ +if(nums[i]>0){ +a+=nums[i]; +if(a>twotoi/2){ +n=twotoi; +na=a; +} +} +if(a==*narray)break; +} +*narray=n; +return na; +} +static int countint(const TValue*key,int*nums){ +int k=arrayindex(key); +if(0t->sizearray){ +lim=t->sizearray; +if(i>lim) +break; +} +for(;i<=lim;i++){ +if(!ttisnil(&t->array[i-1])) +lc++; +} +nums[lg]+=lc; +ause+=lc; +} +return ause; +} +static int numusehash(const Table*t,int*nums,int*pnasize){ +int totaluse=0; +int ause=0; +int i=sizenode(t); +while(i--){ +Node*n=&t->node[i]; +if(!ttisnil(gval(n))){ +ause+=countint(key2tval(n),nums); +totaluse++; +} +} +*pnasize+=ause; +return totaluse; +} +static void setarrayvector(lua_State*L,Table*t,int size){ +int i; +luaM_reallocvector(L,t->array,t->sizearray,size,TValue); +for(i=t->sizearray;iarray[i]); +t->sizearray=size; +} +static void setnodevector(lua_State*L,Table*t,int size){ +int lsize; +if(size==0){ +t->node=cast(Node*,(&dummynode_)); +lsize=0; +} +else{ +int i; +lsize=ceillog2(size); +if(lsize>(32-2)) +luaG_runerror(L,"table overflow"); +size=twoto(lsize); +t->node=luaM_newvector(L,size,Node); +for(i=0;ilsizenode=cast_byte(lsize); +t->lastfree=gnode(t,size); +} +static void resize(lua_State*L,Table*t,int nasize,int nhsize){ +int i; +int oldasize=t->sizearray; +int oldhsize=t->lsizenode; +Node*nold=t->node; +if(nasize>oldasize) +setarrayvector(L,t,nasize); +setnodevector(L,t,nhsize); +if(nasizesizearray=nasize; +for(i=nasize;iarray[i])) +setobj(L,luaH_setnum(L,t,i+1),&t->array[i]); +} +luaM_reallocvector(L,t->array,oldasize,nasize,TValue); +} +for(i=twoto(oldhsize)-1;i>=0;i--){ +Node*old=nold+i; +if(!ttisnil(gval(old))) +setobj(L,luaH_set(L,t,key2tval(old)),gval(old)); +} +if(nold!=(&dummynode_)) +luaM_freearray(L,nold,twoto(oldhsize),Node); +} +static void luaH_resizearray(lua_State*L,Table*t,int nasize){ +int nsize=(t->node==(&dummynode_))?0:sizenode(t); +resize(L,t,nasize,nsize); +} +static void rehash(lua_State*L,Table*t,const TValue*ek){ +int nasize,na; +int nums[(32-2)+1]; +int i; +int totaluse; +for(i=0;i<=(32-2);i++)nums[i]=0; +nasize=numusearray(t,nums); +totaluse=nasize; +totaluse+=numusehash(t,nums,&nasize); +nasize+=countint(ek,nums); +totaluse++; +na=computesizes(nums,&nasize); +resize(L,t,nasize,totaluse-na); +} +static Table*luaH_new(lua_State*L,int narray,int nhash){ +Table*t=luaM_new(L,Table); +luaC_link(L,obj2gco(t),5); +t->metatable=NULL; +t->flags=cast_byte(~0); +t->array=NULL; +t->sizearray=0; +t->lsizenode=0; +t->node=cast(Node*,(&dummynode_)); +setarrayvector(L,t,narray); +setnodevector(L,t,nhash); +return t; +} +static void luaH_free(lua_State*L,Table*t){ +if(t->node!=(&dummynode_)) +luaM_freearray(L,t->node,sizenode(t),Node); +luaM_freearray(L,t->array,t->sizearray,TValue); +luaM_free(L,t); +} +static Node*getfreepos(Table*t){ +while(t->lastfree-->t->node){ +if(ttisnil(gkey(t->lastfree))) +return t->lastfree; +} +return NULL; +} +static TValue*newkey(lua_State*L,Table*t,const TValue*key){ +Node*mp=mainposition(t,key); +if(!ttisnil(gval(mp))||mp==(&dummynode_)){ +Node*othern; +Node*n=getfreepos(t); +if(n==NULL){ +rehash(L,t,key); +return luaH_set(L,t,key); +} +othern=mainposition(t,key2tval(mp)); +if(othern!=mp){ +while(gnext(othern)!=mp)othern=gnext(othern); +gnext(othern)=n; +*n=*mp; +gnext(mp)=NULL; +setnilvalue(gval(mp)); +} +else{ +gnext(n)=gnext(mp); +gnext(mp)=n; +mp=n; +} 
+} +gkey(mp)->value=key->value;gkey(mp)->tt=key->tt; +luaC_barriert(L,t,key); +return gval(mp); +} +static const TValue*luaH_getnum(Table*t,int key){ +if(cast(unsigned int,key-1)sizearray)) +return&t->array[key-1]; +else{ +lua_Number nk=cast_num(key); +Node*n=hashnum(t,nk); +do{ +if(ttisnumber(gkey(n))&&luai_numeq(nvalue(gkey(n)),nk)) +return gval(n); +else n=gnext(n); +}while(n); +return(&luaO_nilobject_); +} +} +static const TValue*luaH_getstr(Table*t,TString*key){ +Node*n=hashstr(t,key); +do{ +if(ttisstring(gkey(n))&&rawtsvalue(gkey(n))==key) +return gval(n); +else n=gnext(n); +}while(n); +return(&luaO_nilobject_); +} +static const TValue*luaH_get(Table*t,const TValue*key){ +switch(ttype(key)){ +case 0:return(&luaO_nilobject_); +case 4:return luaH_getstr(t,rawtsvalue(key)); +case 3:{ +int k; +lua_Number n=nvalue(key); +lua_number2int(k,n); +if(luai_numeq(cast_num(k),nvalue(key))) +return luaH_getnum(t,k); +} +default:{ +Node*n=mainposition(t,key); +do{ +if(luaO_rawequalObj(key2tval(n),key)) +return gval(n); +else n=gnext(n); +}while(n); +return(&luaO_nilobject_); +} +} +} +static TValue*luaH_set(lua_State*L,Table*t,const TValue*key){ +const TValue*p=luaH_get(t,key); +t->flags=0; +if(p!=(&luaO_nilobject_)) +return cast(TValue*,p); +else{ +if(ttisnil(key))luaG_runerror(L,"table index is nil"); +else if(ttisnumber(key)&&luai_numisnan(nvalue(key))) +luaG_runerror(L,"table index is NaN"); +return newkey(L,t,key); +} +} +static TValue*luaH_setnum(lua_State*L,Table*t,int key){ +const TValue*p=luaH_getnum(t,key); +if(p!=(&luaO_nilobject_)) +return cast(TValue*,p); +else{ +TValue k; +setnvalue(&k,cast_num(key)); +return newkey(L,t,&k); +} +} +static TValue*luaH_setstr(lua_State*L,Table*t,TString*key){ +const TValue*p=luaH_getstr(t,key); +if(p!=(&luaO_nilobject_)) +return cast(TValue*,p); +else{ +TValue k; +setsvalue(L,&k,key); +return newkey(L,t,&k); +} +} +static int unbound_search(Table*t,unsigned int j){ +unsigned int i=j; +j++; +while(!ttisnil(luaH_getnum(t,j))){ +i=j; +j*=2; +if(j>cast(unsigned int,(INT_MAX-2))){ +i=1; +while(!ttisnil(luaH_getnum(t,i)))i++; +return i-1; +} +} +while(j-i>1){ +unsigned int m=(i+j)/2; +if(ttisnil(luaH_getnum(t,m)))j=m; +else i=m; +} +return i; +} +static int luaH_getn(Table*t){ +unsigned int j=t->sizearray; +if(j>0&&ttisnil(&t->array[j-1])){ +unsigned int i=0; +while(j-i>1){ +unsigned int m=(i+j)/2; +if(ttisnil(&t->array[m-1]))j=m; +else i=m; +} +return i; +} +else if(t->node==(&dummynode_)) +return j; +else return unbound_search(t,j); +} +#define makewhite(g,x)((x)->gch.marked=cast_byte(((x)->gch.marked&cast_byte(~(bitmask(2)|bit2mask(0,1))))|luaC_white(g))) +#define white2gray(x)reset2bits((x)->gch.marked,0,1) +#define black2gray(x)resetbit((x)->gch.marked,2) +#define stringmark(s)reset2bits((s)->tsv.marked,0,1) +#define isfinalized(u)testbit((u)->marked,3) +#define markfinalized(u)l_setbit((u)->marked,3) +#define markvalue(g,o){checkconsistency(o);if(iscollectable(o)&&iswhite(gcvalue(o)))reallymarkobject(g,gcvalue(o));} +#define markobject(g,t){if(iswhite(obj2gco(t)))reallymarkobject(g,obj2gco(t));} +#define setthreshold(g)(g->GCthreshold=(g->estimate/100)*g->gcpause) +static void removeentry(Node*n){ +if(iscollectable(gkey(n))) +setttype(gkey(n),(8+3)); +} +static void reallymarkobject(global_State*g,GCObject*o){ +white2gray(o); +switch(o->gch.tt){ +case 4:{ +return; +} +case 7:{ +Table*mt=gco2u(o)->metatable; +gray2black(o); +if(mt)markobject(g,mt); +markobject(g,gco2u(o)->env); +return; +} +case(8+2):{ +UpVal*uv=gco2uv(o); +markvalue(g,uv->v); 
+if(uv->v==&uv->u.value)
+gray2black(o);
+return;
+}
+case 6:{
+gco2cl(o)->c.gclist=g->gray;
+g->gray=o;
+break;
+}
+case 5:{
+gco2h(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+case 8:{
+gco2th(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+case(8+1):{
+gco2p(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+default:;
+}
+}
+static void marktmu(global_State*g){
+GCObject*u=g->tmudata;
+if(u){
+do{
+u=u->gch.next;
+makewhite(g,u);
+reallymarkobject(g,u);
+}while(u!=g->tmudata);
+}
+}
+static size_t luaC_separateudata(lua_State*L,int all){
+global_State*g=G(L);
+size_t deadmem=0;
+GCObject**p=&g->mainthread->next;
+GCObject*curr;
+while((curr=*p)!=NULL){
+if(!(iswhite(curr)||all)||isfinalized(gco2u(curr)))
+p=&curr->gch.next;
+else if(fasttm(L,gco2u(curr)->metatable,TM_GC)==NULL){
+markfinalized(gco2u(curr));
+p=&curr->gch.next;
+}
+else{
+deadmem+=sizeudata(gco2u(curr));
+markfinalized(gco2u(curr));
+*p=curr->gch.next;
+if(g->tmudata==NULL)
+g->tmudata=curr->gch.next=curr;
+else{
+curr->gch.next=g->tmudata->gch.next;
+g->tmudata->gch.next=curr;
+g->tmudata=curr;
+}
+}
+}
+return deadmem;
+}
+static int traversetable(global_State*g,Table*h){
+int i;
+int weakkey=0;
+int weakvalue=0;
+const TValue*mode;
+if(h->metatable)
+markobject(g,h->metatable);
+mode=gfasttm(g,h->metatable,TM_MODE);
+if(mode&&ttisstring(mode)){
+weakkey=(strchr(svalue(mode),'k')!=NULL);
+weakvalue=(strchr(svalue(mode),'v')!=NULL);
+if(weakkey||weakvalue){
+h->marked&=~(bitmask(3)|bitmask(4));
+h->marked|=cast_byte((weakkey<<3)|
+(weakvalue<<4));
+h->gclist=g->weak;
+g->weak=obj2gco(h);
+}
+}
+if(weakkey&&weakvalue)return 1;
+if(!weakvalue){
+i=h->sizearray;
+while(i--)
+markvalue(g,&h->array[i]);
+}
+i=sizenode(h);
+while(i--){
+Node*n=gnode(h,i);
+if(ttisnil(gval(n)))
+removeentry(n);
+else{
+if(!weakkey)markvalue(g,gkey(n));
+if(!weakvalue)markvalue(g,gval(n));
+}
+}
+return weakkey||weakvalue;
+}
+static void traverseproto(global_State*g,Proto*f){
+int i;
+if(f->source)stringmark(f->source);
+for(i=0;i<f->sizek;i++)
+markvalue(g,&f->k[i]);
+for(i=0;i<f->sizeupvalues;i++){
+if(f->upvalues[i])
+stringmark(f->upvalues[i]);
+}
+for(i=0;i<f->sizep;i++){
+if(f->p[i])
+markobject(g,f->p[i]);
+}
+for(i=0;i<f->sizelocvars;i++){
+if(f->locvars[i].varname)
+stringmark(f->locvars[i].varname);
+}
+}
+static void traverseclosure(global_State*g,Closure*cl){
+markobject(g,cl->c.env);
+if(cl->c.isC){
+int i;
+for(i=0;i<cl->c.nupvalues;i++)
+markvalue(g,&cl->c.upvalue[i]);
+}
+else{
+int i;
+markobject(g,cl->l.p);
+for(i=0;i<cl->l.nupvalues;i++)
+markobject(g,cl->l.upvals[i]);
+}
+}
+static void checkstacksizes(lua_State*L,StkId max){
+int ci_used=cast_int(L->ci-L->base_ci);
+int s_used=cast_int(max-L->stack);
+if(L->size_ci>20000)
+return;
+if(4*ci_used<L->size_ci&&2*8<L->size_ci)
+luaD_reallocCI(L,L->size_ci/2);
+condhardstacktests(luaD_reallocCI(L,ci_used+1));
+if(4*s_used<L->stacksize&&
+2*((2*20)+5)<L->stacksize)
+luaD_reallocstack(L,L->stacksize/2);
+condhardstacktests(luaD_reallocstack(L,s_used));
+}
+static void traversestack(global_State*g,lua_State*l){
+StkId o,lim;
+CallInfo*ci;
+markvalue(g,gt(l));
+lim=l->top;
+for(ci=l->base_ci;ci<=l->ci;ci++){
+if(lim<ci->top)lim=ci->top;
+}
+for(o=l->stack;o<l->top;o++)
+markvalue(g,o);
+for(;o<=lim;o++)
+setnilvalue(o);
+checkstacksizes(l,lim);
+}
+static l_mem propagatemark(global_State*g){
+GCObject*o=g->gray;
+gray2black(o);
+switch(o->gch.tt){
+case 5:{
+Table*h=gco2h(o);
+g->gray=h->gclist;
+if(traversetable(g,h))
+black2gray(o);
+return sizeof(Table)+sizeof(TValue)*h->sizearray+
+sizeof(Node)*sizenode(h);
+}
+case 6:{
+Closure*cl=gco2cl(o);
+g->gray=cl->c.gclist;
+traverseclosure(g,cl);
+return(cl->c.isC)?sizeCclosure(cl->c.nupvalues):
+sizeLclosure(cl->l.nupvalues);
+}
+case 8:{
+lua_State*th=gco2th(o);
+g->gray=th->gclist;
+th->gclist=g->grayagain;
+g->grayagain=o;
+black2gray(o);
+traversestack(g,th);
+return sizeof(lua_State)+sizeof(TValue)*th->stacksize+
+sizeof(CallInfo)*th->size_ci;
+}
+case(8+1):{
+Proto*p=gco2p(o);
+g->gray=p->gclist;
+traverseproto(g,p);
+return sizeof(Proto)+sizeof(Instruction)*p->sizecode+
+sizeof(Proto*)*p->sizep+
+sizeof(TValue)*p->sizek+
+sizeof(int)*p->sizelineinfo+
+sizeof(LocVar)*p->sizelocvars+
+sizeof(TString*)*p->sizeupvalues;
+}
+default:return 0;
+}
+}
+static size_t propagateall(global_State*g){
+size_t m=0;
+while(g->gray)m+=propagatemark(g);
+return m;
+}
+static int iscleared(const TValue*o,int iskey){
+if(!iscollectable(o))return 0;
+if(ttisstring(o)){
+stringmark(rawtsvalue(o));
+return 0;
+}
+return iswhite(gcvalue(o))||
+(ttisuserdata(o)&&(!iskey&&isfinalized(uvalue(o))));
+}
+static void cleartable(GCObject*l){
+while(l){
+Table*h=gco2h(l);
+int i=h->sizearray;
+if(testbit(h->marked,4)){
+while(i--){
+TValue*o=&h->array[i];
+if(iscleared(o,0))
+setnilvalue(o);
+}
+}
+i=sizenode(h);
+while(i--){
+Node*n=gnode(h,i);
+if(!ttisnil(gval(n))&&
+(iscleared(key2tval(n),1)||iscleared(gval(n),0))){
+setnilvalue(gval(n));
+removeentry(n);
+}
+}
+l=h->gclist;
+}
+}
+static void freeobj(lua_State*L,GCObject*o){
+switch(o->gch.tt){
+case(8+1):luaF_freeproto(L,gco2p(o));break;
+case 6:luaF_freeclosure(L,gco2cl(o));break;
+case(8+2):luaF_freeupval(L,gco2uv(o));break;
+case 5:luaH_free(L,gco2h(o));break;
+case 8:{
+luaE_freethread(L,gco2th(o));
+break;
+}
+case 4:{
+G(L)->strt.nuse--;
+luaM_freemem(L,o,sizestring(gco2ts(o)));
+break;
+}
+case 7:{
+luaM_freemem(L,o,sizeudata(gco2u(o)));
+break;
+}
+default:;
+}
+}
+#define sweepwholelist(L,p)sweeplist(L,p,((lu_mem)(~(lu_mem)0)-2))
+static GCObject**sweeplist(lua_State*L,GCObject**p,lu_mem count){
+GCObject*curr;
+global_State*g=G(L);
+int deadmask=otherwhite(g);
+while((curr=*p)!=NULL&&count-->0){
+if(curr->gch.tt==8)
+sweepwholelist(L,&gco2th(curr)->openupval);
+if((curr->gch.marked^bit2mask(0,1))&deadmask){
+makewhite(g,curr);
+p=&curr->gch.next;
+}
+else{
+*p=curr->gch.next;
+if(curr==g->rootgc)
+g->rootgc=curr->gch.next;
+freeobj(L,curr);
+}
+}
+return p;
+}
+static void checkSizes(lua_State*L){
+global_State*g=G(L);
+if(g->strt.nuse<cast(lu_int32,g->strt.size/4)&&
+g->strt.size>32*2)
+luaS_resize(L,g->strt.size/2);
+if(luaZ_sizebuffer(&g->buff)>32*2){
+size_t newsize=luaZ_sizebuffer(&g->buff)/2;
+luaZ_resizebuffer(L,&g->buff,newsize);
+}
+}
+static void GCTM(lua_State*L){
+global_State*g=G(L);
+GCObject*o=g->tmudata->gch.next;
+Udata*udata=rawgco2u(o);
+const TValue*tm;
+if(o==g->tmudata)
+g->tmudata=NULL;
+else
+g->tmudata->gch.next=udata->uv.next;
+udata->uv.next=g->mainthread->next;
+g->mainthread->next=o;
+makewhite(g,o);
+tm=fasttm(L,udata->uv.metatable,TM_GC);
+if(tm!=NULL){
+lu_byte oldah=L->allowhook;
+lu_mem oldt=g->GCthreshold;
+L->allowhook=0;
+g->GCthreshold=2*g->totalbytes;
+setobj(L,L->top,tm);
+setuvalue(L,L->top+1,udata);
+L->top+=2;
+luaD_call(L,L->top-2,0);
+L->allowhook=oldah;
+g->GCthreshold=oldt;
+}
+}
+static void luaC_callGCTM(lua_State*L){
+while(G(L)->tmudata)
+GCTM(L);
+}
+static void luaC_freeall(lua_State*L){
+global_State*g=G(L);
+int i;
+g->currentwhite=bit2mask(0,1)|bitmask(6);
+sweepwholelist(L,&g->rootgc);
+for(i=0;i<g->strt.size;i++)
+sweepwholelist(L,&g->strt.hash[i]);
+}
+static void 
markmt(global_State*g){ +int i; +for(i=0;i<(8+1);i++) +if(g->mt[i])markobject(g,g->mt[i]); +} +static void markroot(lua_State*L){ +global_State*g=G(L); +g->gray=NULL; +g->grayagain=NULL; +g->weak=NULL; +markobject(g,g->mainthread); +markvalue(g,gt(g->mainthread)); +markvalue(g,registry(L)); +markmt(g); +g->gcstate=1; +} +static void remarkupvals(global_State*g){ +UpVal*uv; +for(uv=g->uvhead.u.l.next;uv!=&g->uvhead;uv=uv->u.l.next){ +if(isgray(obj2gco(uv))) +markvalue(g,uv->v); +} +} +static void atomic(lua_State*L){ +global_State*g=G(L); +size_t udsize; +remarkupvals(g); +propagateall(g); +g->gray=g->weak; +g->weak=NULL; +markobject(g,L); +markmt(g); +propagateall(g); +g->gray=g->grayagain; +g->grayagain=NULL; +propagateall(g); +udsize=luaC_separateudata(L,0); +marktmu(g); +udsize+=propagateall(g); +cleartable(g->weak); +g->currentwhite=cast_byte(otherwhite(g)); +g->sweepstrgc=0; +g->sweepgc=&g->rootgc; +g->gcstate=2; +g->estimate=g->totalbytes-udsize; +} +static l_mem singlestep(lua_State*L){ +global_State*g=G(L); +switch(g->gcstate){ +case 0:{ +markroot(L); +return 0; +} +case 1:{ +if(g->gray) +return propagatemark(g); +else{ +atomic(L); +return 0; +} +} +case 2:{ +lu_mem old=g->totalbytes; +sweepwholelist(L,&g->strt.hash[g->sweepstrgc++]); +if(g->sweepstrgc>=g->strt.size) +g->gcstate=3; +g->estimate-=old-g->totalbytes; +return 10; +} +case 3:{ +lu_mem old=g->totalbytes; +g->sweepgc=sweeplist(L,g->sweepgc,40); +if(*g->sweepgc==NULL){ +checkSizes(L); +g->gcstate=4; +} +g->estimate-=old-g->totalbytes; +return 40*10; +} +case 4:{ +if(g->tmudata){ +GCTM(L); +if(g->estimate>100) +g->estimate-=100; +return 100; +} +else{ +g->gcstate=0; +g->gcdept=0; +return 0; +} +} +default:return 0; +} +} +static void luaC_step(lua_State*L){ +global_State*g=G(L); +l_mem lim=(1024u/100)*g->gcstepmul; +if(lim==0) +lim=(((lu_mem)(~(lu_mem)0)-2)-1)/2; +g->gcdept+=g->totalbytes-g->GCthreshold; +do{ +lim-=singlestep(L); +if(g->gcstate==0) +break; +}while(lim>0); +if(g->gcstate!=0){ +if(g->gcdept<1024u) +g->GCthreshold=g->totalbytes+1024u; +else{ +g->gcdept-=1024u; +g->GCthreshold=g->totalbytes; +} +} +else{ +setthreshold(g); +} +} +static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v){ +global_State*g=G(L); +if(g->gcstate==1) +reallymarkobject(g,v); +else +makewhite(g,o); +} +static void luaC_barrierback(lua_State*L,Table*t){ +global_State*g=G(L); +GCObject*o=obj2gco(t); +black2gray(o); +t->gclist=g->grayagain; +g->grayagain=o; +} +static void luaC_link(lua_State*L,GCObject*o,lu_byte tt){ +global_State*g=G(L); +o->gch.next=g->rootgc; +g->rootgc=o; +o->gch.marked=luaC_white(g); +o->gch.tt=tt; +} +static void luaC_linkupval(lua_State*L,UpVal*uv){ +global_State*g=G(L); +GCObject*o=obj2gco(uv); +o->gch.next=g->rootgc; +g->rootgc=o; +if(isgray(o)){ +if(g->gcstate==1){ +gray2black(o); +luaC_barrier(L,uv,uv->v); +} +else{ +makewhite(g,o); +} +} +} +typedef union{ +lua_Number r; +TString*ts; +}SemInfo; +typedef struct Token{ +int token; +SemInfo seminfo; +}Token; +typedef struct LexState{ +int current; +int linenumber; +int lastline; +Token t; +Token lookahead; +struct FuncState*fs; +struct lua_State*L; +ZIO*z; +Mbuffer*buff; +TString*source; +char decpoint; +}LexState; +static void luaX_init(lua_State*L); +static void luaX_lexerror(LexState*ls,const char*msg,int token); +#define state_size(x)(sizeof(x)+0) +#define fromstate(l)(cast(lu_byte*,(l))-0) +#define tostate(l)(cast(lua_State*,cast(lu_byte*,l)+0)) +typedef struct LG{ +lua_State l; +global_State g; +}LG; +static void stack_init(lua_State*L1,lua_State*L){ 
+L1->base_ci=luaM_newvector(L,8,CallInfo); +L1->ci=L1->base_ci; +L1->size_ci=8; +L1->end_ci=L1->base_ci+L1->size_ci-1; +L1->stack=luaM_newvector(L,(2*20)+5,TValue); +L1->stacksize=(2*20)+5; +L1->top=L1->stack; +L1->stack_last=L1->stack+(L1->stacksize-5)-1; +L1->ci->func=L1->top; +setnilvalue(L1->top++); +L1->base=L1->ci->base=L1->top; +L1->ci->top=L1->top+20; +} +static void freestack(lua_State*L,lua_State*L1){ +luaM_freearray(L,L1->base_ci,L1->size_ci,CallInfo); +luaM_freearray(L,L1->stack,L1->stacksize,TValue); +} +static void f_luaopen(lua_State*L,void*ud){ +global_State*g=G(L); +UNUSED(ud); +stack_init(L,L); +sethvalue(L,gt(L),luaH_new(L,0,2)); +sethvalue(L,registry(L),luaH_new(L,0,2)); +luaS_resize(L,32); +luaT_init(L); +luaX_init(L); +luaS_fix(luaS_newliteral(L,"not enough memory")); +g->GCthreshold=4*g->totalbytes; +} +static void preinit_state(lua_State*L,global_State*g){ +G(L)=g; +L->stack=NULL; +L->stacksize=0; +L->errorJmp=NULL; +L->hook=NULL; +L->hookmask=0; +L->basehookcount=0; +L->allowhook=1; +resethookcount(L); +L->openupval=NULL; +L->size_ci=0; +L->nCcalls=L->baseCcalls=0; +L->status=0; +L->base_ci=L->ci=NULL; +L->savedpc=NULL; +L->errfunc=0; +setnilvalue(gt(L)); +} +static void close_state(lua_State*L){ +global_State*g=G(L); +luaF_close(L,L->stack); +luaC_freeall(L); +luaM_freearray(L,G(L)->strt.hash,G(L)->strt.size,TString*); +luaZ_freebuffer(L,&g->buff); +freestack(L,L); +(*g->frealloc)(g->ud,fromstate(L),state_size(LG),0); +} +static void luaE_freethread(lua_State*L,lua_State*L1){ +luaF_close(L1,L1->stack); +freestack(L,L1); +luaM_freemem(L,fromstate(L1),state_size(lua_State)); +} +static lua_State*lua_newstate(lua_Alloc f,void*ud){ +int i; +lua_State*L; +global_State*g; +void*l=(*f)(ud,NULL,0,state_size(LG)); +if(l==NULL)return NULL; +L=tostate(l); +g=&((LG*)L)->g; +L->next=NULL; +L->tt=8; +g->currentwhite=bit2mask(0,5); +L->marked=luaC_white(g); +set2bits(L->marked,5,6); +preinit_state(L,g); +g->frealloc=f; +g->ud=ud; +g->mainthread=L; +g->uvhead.u.l.prev=&g->uvhead; +g->uvhead.u.l.next=&g->uvhead; +g->GCthreshold=0; +g->strt.size=0; +g->strt.nuse=0; +g->strt.hash=NULL; +setnilvalue(registry(L)); +luaZ_initbuffer(L,&g->buff); +g->panic=NULL; +g->gcstate=0; +g->rootgc=obj2gco(L); +g->sweepstrgc=0; +g->sweepgc=&g->rootgc; +g->gray=NULL; +g->grayagain=NULL; +g->weak=NULL; +g->tmudata=NULL; +g->totalbytes=sizeof(LG); +g->gcpause=200; +g->gcstepmul=200; +g->gcdept=0; +for(i=0;i<(8+1);i++)g->mt[i]=NULL; +if(luaD_rawrunprotected(L,f_luaopen,NULL)!=0){ +close_state(L); +L=NULL; +} +else +{} +return L; +} +static void callallgcTM(lua_State*L,void*ud){ +UNUSED(ud); +luaC_callGCTM(L); +} +static void lua_close(lua_State*L){ +L=G(L)->mainthread; +luaF_close(L,L->stack); +luaC_separateudata(L,1); +L->errfunc=0; +do{ +L->ci=L->base_ci; +L->base=L->top=L->ci->base; +L->nCcalls=L->baseCcalls=0; +}while(luaD_rawrunprotected(L,callallgcTM,NULL)!=0); +close_state(L); +} +#define getcode(fs,e)((fs)->f->code[(e)->u.s.info]) +#define luaK_codeAsBx(fs,o,A,sBx)luaK_codeABx(fs,o,A,(sBx)+(((1<<(9+9))-1)>>1)) +#define luaK_setmultret(fs,e)luaK_setreturns(fs,e,(-1)) +static int luaK_codeABx(FuncState*fs,OpCode o,int A,unsigned int Bx); +static int luaK_codeABC(FuncState*fs,OpCode o,int A,int B,int C); +static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults); +static void luaK_patchtohere(FuncState*fs,int list); +static void luaK_concat(FuncState*fs,int*l1,int l2); +static int currentpc(lua_State*L,CallInfo*ci){ +if(!isLua(ci))return-1; +if(ci==L->ci) +ci->savedpc=L->savedpc; +return 
pcRel(ci->savedpc,ci_func(ci)->l.p); +} +static int currentline(lua_State*L,CallInfo*ci){ +int pc=currentpc(L,ci); +if(pc<0) +return-1; +else +return getline_(ci_func(ci)->l.p,pc); +} +static int lua_getstack(lua_State*L,int level,lua_Debug*ar){ +int status; +CallInfo*ci; +for(ci=L->ci;level>0&&ci>L->base_ci;ci--){ +level--; +if(f_isLua(ci)) +level-=ci->tailcalls; +} +if(level==0&&ci>L->base_ci){ +status=1; +ar->i_ci=cast_int(ci-L->base_ci); +} +else if(level<0){ +status=1; +ar->i_ci=0; +} +else status=0; +return status; +} +static Proto*getluaproto(CallInfo*ci){ +return(isLua(ci)?ci_func(ci)->l.p:NULL); +} +static void funcinfo(lua_Debug*ar,Closure*cl){ +if(cl->c.isC){ +ar->source="=[C]"; +ar->linedefined=-1; +ar->lastlinedefined=-1; +ar->what="C"; +} +else{ +ar->source=getstr(cl->l.p->source); +ar->linedefined=cl->l.p->linedefined; +ar->lastlinedefined=cl->l.p->lastlinedefined; +ar->what=(ar->linedefined==0)?"main":"Lua"; +} +luaO_chunkid(ar->short_src,ar->source,60); +} +static void info_tailcall(lua_Debug*ar){ +ar->name=ar->namewhat=""; +ar->what="tail"; +ar->lastlinedefined=ar->linedefined=ar->currentline=-1; +ar->source="=(tail call)"; +luaO_chunkid(ar->short_src,ar->source,60); +ar->nups=0; +} +static void collectvalidlines(lua_State*L,Closure*f){ +if(f==NULL||f->c.isC){ +setnilvalue(L->top); +} +else{ +Table*t=luaH_new(L,0,0); +int*lineinfo=f->l.p->lineinfo; +int i; +for(i=0;il.p->sizelineinfo;i++) +setbvalue(luaH_setnum(L,t,lineinfo[i]),1); +sethvalue(L,L->top,t); +} +incr_top(L); +} +static int auxgetinfo(lua_State*L,const char*what,lua_Debug*ar, +Closure*f,CallInfo*ci){ +int status=1; +if(f==NULL){ +info_tailcall(ar); +return status; +} +for(;*what;what++){ +switch(*what){ +case'S':{ +funcinfo(ar,f); +break; +} +case'l':{ +ar->currentline=(ci)?currentline(L,ci):-1; +break; +} +case'u':{ +ar->nups=f->c.nupvalues; +break; +} +case'n':{ +ar->namewhat=(ci)?NULL:NULL; +if(ar->namewhat==NULL){ +ar->namewhat=""; +ar->name=NULL; +} +break; +} +case'L': +case'f': +break; +default:status=0; +} +} +return status; +} +static int lua_getinfo(lua_State*L,const char*what,lua_Debug*ar){ +int status; +Closure*f=NULL; +CallInfo*ci=NULL; +if(*what=='>'){ +StkId func=L->top-1; +luai_apicheck(L,ttisfunction(func)); +what++; +f=clvalue(func); +L->top--; +} +else if(ar->i_ci!=0){ +ci=L->base_ci+ar->i_ci; +f=clvalue(ci->func); +} +status=auxgetinfo(L,what,ar,f,ci); +if(strchr(what,'f')){ +if(f==NULL)setnilvalue(L->top); +else setclvalue(L,L->top,f); +incr_top(L); +} +if(strchr(what,'L')) +collectvalidlines(L,f); +return status; +} +static int isinstack(CallInfo*ci,const TValue*o){ +StkId p; +for(p=ci->base;ptop;p++) +if(o==p)return 1; +return 0; +} +static void luaG_typeerror(lua_State*L,const TValue*o,const char*op){ +const char*name=NULL; +const char*t=luaT_typenames[ttype(o)]; +const char*kind=(isinstack(L->ci,o))? 
+NULL: +NULL; +if(kind) +luaG_runerror(L,"attempt to %s %s "LUA_QL("%s")" (a %s value)", +op,kind,name,t); +else +luaG_runerror(L,"attempt to %s a %s value",op,t); +} +static void luaG_concaterror(lua_State*L,StkId p1,StkId p2){ +if(ttisstring(p1)||ttisnumber(p1))p1=p2; +luaG_typeerror(L,p1,"concatenate"); +} +static void luaG_aritherror(lua_State*L,const TValue*p1,const TValue*p2){ +TValue temp; +if(luaV_tonumber(p1,&temp)==NULL) +p2=p1; +luaG_typeerror(L,p2,"perform arithmetic on"); +} +static int luaG_ordererror(lua_State*L,const TValue*p1,const TValue*p2){ +const char*t1=luaT_typenames[ttype(p1)]; +const char*t2=luaT_typenames[ttype(p2)]; +if(t1[2]==t2[2]) +luaG_runerror(L,"attempt to compare two %s values",t1); +else +luaG_runerror(L,"attempt to compare %s with %s",t1,t2); +return 0; +} +static void addinfo(lua_State*L,const char*msg){ +CallInfo*ci=L->ci; +if(isLua(ci)){ +char buff[60]; +int line=currentline(L,ci); +luaO_chunkid(buff,getstr(getluaproto(ci)->source),60); +luaO_pushfstring(L,"%s:%d: %s",buff,line,msg); +} +} +static void luaG_errormsg(lua_State*L){ +if(L->errfunc!=0){ +StkId errfunc=restorestack(L,L->errfunc); +if(!ttisfunction(errfunc))luaD_throw(L,5); +setobj(L,L->top,L->top-1); +setobj(L,L->top-1,errfunc); +incr_top(L); +luaD_call(L,L->top-2,1); +} +luaD_throw(L,2); +} +static void luaG_runerror(lua_State*L,const char*fmt,...){ +va_list argp; +va_start(argp,fmt); +addinfo(L,luaO_pushvfstring(L,fmt,argp)); +va_end(argp); +luaG_errormsg(L); +} +static int luaZ_fill(ZIO*z){ +size_t size; +lua_State*L=z->L; +const char*buff; +buff=z->reader(L,z->data,&size); +if(buff==NULL||size==0)return(-1); +z->n=size-1; +z->p=buff; +return char2int(*(z->p++)); +} +static void luaZ_init(lua_State*L,ZIO*z,lua_Reader reader,void*data){ +z->L=L; +z->reader=reader; +z->data=data; +z->n=0; +z->p=NULL; +} +static char*luaZ_openspace(lua_State*L,Mbuffer*buff,size_t n){ +if(n>buff->buffsize){ +if(n<32)n=32; +luaZ_resizebuffer(L,buff,n); +} +return buff->buffer; +} +#define opmode(t,a,b,c,m)(((t)<<7)|((a)<<6)|((b)<<4)|((c)<<2)|(m)) +static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]={ +opmode(0,1,OpArgR,OpArgN,iABC) +,opmode(0,1,OpArgK,OpArgN,iABx) +,opmode(0,1,OpArgU,OpArgU,iABC) +,opmode(0,1,OpArgR,OpArgN,iABC) +,opmode(0,1,OpArgU,OpArgN,iABC) +,opmode(0,1,OpArgK,OpArgN,iABx) +,opmode(0,1,OpArgR,OpArgK,iABC) +,opmode(0,0,OpArgK,OpArgN,iABx) +,opmode(0,0,OpArgU,OpArgN,iABC) +,opmode(0,0,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgU,OpArgU,iABC) +,opmode(0,1,OpArgR,OpArgK,iABC) +,opmode(0,1,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgK,OpArgK,iABC) +,opmode(0,1,OpArgR,OpArgN,iABC) +,opmode(0,1,OpArgR,OpArgN,iABC) +,opmode(0,1,OpArgR,OpArgN,iABC) +,opmode(0,1,OpArgR,OpArgR,iABC) +,opmode(0,0,OpArgR,OpArgN,iAsBx) +,opmode(1,0,OpArgK,OpArgK,iABC) +,opmode(1,0,OpArgK,OpArgK,iABC) +,opmode(1,0,OpArgK,OpArgK,iABC) +,opmode(1,1,OpArgR,OpArgU,iABC) +,opmode(1,1,OpArgR,OpArgU,iABC) +,opmode(0,1,OpArgU,OpArgU,iABC) +,opmode(0,1,OpArgU,OpArgU,iABC) +,opmode(0,0,OpArgU,OpArgN,iABC) +,opmode(0,1,OpArgR,OpArgN,iAsBx) +,opmode(0,1,OpArgR,OpArgN,iAsBx) +,opmode(1,0,OpArgN,OpArgU,iABC) +,opmode(0,0,OpArgU,OpArgU,iABC) +,opmode(0,0,OpArgN,OpArgN,iABC) +,opmode(0,1,OpArgU,OpArgN,iABx) +,opmode(0,1,OpArgU,OpArgN,iABC) +}; +#define next(ls)(ls->current=zgetc(ls->z)) +#define currIsNewline(ls)(ls->current=='\n'||ls->current=='\r') +static const char*const luaX_tokens[]={ 
+"and","break","do","else","elseif", +"end","false","for","function","if", +"in","local","nil","not","or","repeat", +"return","then","true","until","while", +"..","...","==",">=","<=","~=", +"","","","", +NULL +}; +#define save_and_next(ls)(save(ls,ls->current),next(ls)) +static void save(LexState*ls,int c){ +Mbuffer*b=ls->buff; +if(b->n+1>b->buffsize){ +size_t newsize; +if(b->buffsize>=((size_t)(~(size_t)0)-2)/2) +luaX_lexerror(ls,"lexical element too long",0); +newsize=b->buffsize*2; +luaZ_resizebuffer(ls->L,b,newsize); +} +b->buffer[b->n++]=cast(char,c); +} +static void luaX_init(lua_State*L){ +int i; +for(i=0;i<(cast(int,TK_WHILE-257+1));i++){ +TString*ts=luaS_new(L,luaX_tokens[i]); +luaS_fix(ts); +ts->tsv.reserved=cast_byte(i+1); +} +} +static const char*luaX_token2str(LexState*ls,int token){ +if(token<257){ +return(iscntrl(token))?luaO_pushfstring(ls->L,"char(%d)",token): +luaO_pushfstring(ls->L,"%c",token); +} +else +return luaX_tokens[token-257]; +} +static const char*txtToken(LexState*ls,int token){ +switch(token){ +case TK_NAME: +case TK_STRING: +case TK_NUMBER: +save(ls,'\0'); +return luaZ_buffer(ls->buff); +default: +return luaX_token2str(ls,token); +} +} +static void luaX_lexerror(LexState*ls,const char*msg,int token){ +char buff[80]; +luaO_chunkid(buff,getstr(ls->source),80); +msg=luaO_pushfstring(ls->L,"%s:%d: %s",buff,ls->linenumber,msg); +if(token) +luaO_pushfstring(ls->L,"%s near "LUA_QL("%s"),msg,txtToken(ls,token)); +luaD_throw(ls->L,3); +} +static void luaX_syntaxerror(LexState*ls,const char*msg){ +luaX_lexerror(ls,msg,ls->t.token); +} +static TString*luaX_newstring(LexState*ls,const char*str,size_t l){ +lua_State*L=ls->L; +TString*ts=luaS_newlstr(L,str,l); +TValue*o=luaH_setstr(L,ls->fs->h,ts); +if(ttisnil(o)){ +setbvalue(o,1); +luaC_checkGC(L); +} +return ts; +} +static void inclinenumber(LexState*ls){ +int old=ls->current; +next(ls); +if(currIsNewline(ls)&&ls->current!=old) +next(ls); +if(++ls->linenumber>=(INT_MAX-2)) +luaX_syntaxerror(ls,"chunk has too many lines"); +} +static void luaX_setinput(lua_State*L,LexState*ls,ZIO*z,TString*source){ +ls->decpoint='.'; +ls->L=L; +ls->lookahead.token=TK_EOS; +ls->z=z; +ls->fs=NULL; +ls->linenumber=1; +ls->lastline=1; +ls->source=source; +luaZ_resizebuffer(ls->L,ls->buff,32); +next(ls); +} +static int check_next(LexState*ls,const char*set){ +if(!strchr(set,ls->current)) +return 0; +save_and_next(ls); +return 1; +} +static void buffreplace(LexState*ls,char from,char to){ +size_t n=luaZ_bufflen(ls->buff); +char*p=luaZ_buffer(ls->buff); +while(n--) +if(p[n]==from)p[n]=to; +} +static void read_numeral(LexState*ls,SemInfo*seminfo){ +do{ +save_and_next(ls); +}while(isdigit(ls->current)||ls->current=='.'); +if(check_next(ls,"Ee")) +check_next(ls,"+-"); +while(isalnum(ls->current)||ls->current=='_') +save_and_next(ls); +save(ls,'\0'); +buffreplace(ls,'.',ls->decpoint); +if(!luaO_str2d(luaZ_buffer(ls->buff),&seminfo->r)) +luaX_lexerror(ls,"malformed number",TK_NUMBER); +} +static int skip_sep(LexState*ls){ +int count=0; +int s=ls->current; +save_and_next(ls); +while(ls->current=='='){ +save_and_next(ls); +count++; +} +return(ls->current==s)?count:(-count)-1; +} +static void read_long_string(LexState*ls,SemInfo*seminfo,int sep){ +int cont=0; +(void)(cont); +save_and_next(ls); +if(currIsNewline(ls)) +inclinenumber(ls); +for(;;){ +switch(ls->current){ +case(-1): +luaX_lexerror(ls,(seminfo)?"unfinished long string": +"unfinished long comment",TK_EOS); +break; +case']':{ +if(skip_sep(ls)==sep){ +save_and_next(ls); +goto endloop; +} 
+break; +} +case'\n': +case'\r':{ +save(ls,'\n'); +inclinenumber(ls); +if(!seminfo)luaZ_resetbuffer(ls->buff); +break; +} +default:{ +if(seminfo)save_and_next(ls); +else next(ls); +} +} +}endloop: +if(seminfo) +seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+(2+sep), +luaZ_bufflen(ls->buff)-2*(2+sep)); +} +static void read_string(LexState*ls,int del,SemInfo*seminfo){ +save_and_next(ls); +while(ls->current!=del){ +switch(ls->current){ +case(-1): +luaX_lexerror(ls,"unfinished string",TK_EOS); +continue; +case'\n': +case'\r': +luaX_lexerror(ls,"unfinished string",TK_STRING); +continue; +case'\\':{ +int c; +next(ls); +switch(ls->current){ +case'a':c='\a';break; +case'b':c='\b';break; +case'f':c='\f';break; +case'n':c='\n';break; +case'r':c='\r';break; +case't':c='\t';break; +case'v':c='\v';break; +case'\n': +case'\r':save(ls,'\n');inclinenumber(ls);continue; +case(-1):continue; +default:{ +if(!isdigit(ls->current)) +save_and_next(ls); +else{ +int i=0; +c=0; +do{ +c=10*c+(ls->current-'0'); +next(ls); +}while(++i<3&&isdigit(ls->current)); +if(c>UCHAR_MAX) +luaX_lexerror(ls,"escape sequence too large",TK_STRING); +save(ls,c); +} +continue; +} +} +save(ls,c); +next(ls); +continue; +} +default: +save_and_next(ls); +} +} +save_and_next(ls); +seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+1, +luaZ_bufflen(ls->buff)-2); +} +static int llex(LexState*ls,SemInfo*seminfo){ +luaZ_resetbuffer(ls->buff); +for(;;){ +switch(ls->current){ +case'\n': +case'\r':{ +inclinenumber(ls); +continue; +} +case'-':{ +next(ls); +if(ls->current!='-')return'-'; +next(ls); +if(ls->current=='['){ +int sep=skip_sep(ls); +luaZ_resetbuffer(ls->buff); +if(sep>=0){ +read_long_string(ls,NULL,sep); +luaZ_resetbuffer(ls->buff); +continue; +} +} +while(!currIsNewline(ls)&&ls->current!=(-1)) +next(ls); +continue; +} +case'[':{ +int sep=skip_sep(ls); +if(sep>=0){ +read_long_string(ls,seminfo,sep); +return TK_STRING; +} +else if(sep==-1)return'['; +else luaX_lexerror(ls,"invalid long string delimiter",TK_STRING); +} +case'=':{ +next(ls); +if(ls->current!='=')return'='; +else{next(ls);return TK_EQ;} +} +case'<':{ +next(ls); +if(ls->current!='=')return'<'; +else{next(ls);return TK_LE;} +} +case'>':{ +next(ls); +if(ls->current!='=')return'>'; +else{next(ls);return TK_GE;} +} +case'~':{ +next(ls); +if(ls->current!='=')return'~'; +else{next(ls);return TK_NE;} +} +case'"': +case'\'':{ +read_string(ls,ls->current,seminfo); +return TK_STRING; +} +case'.':{ +save_and_next(ls); +if(check_next(ls,".")){ +if(check_next(ls,".")) +return TK_DOTS; +else return TK_CONCAT; +} +else if(!isdigit(ls->current))return'.'; +else{ +read_numeral(ls,seminfo); +return TK_NUMBER; +} +} +case(-1):{ +return TK_EOS; +} +default:{ +if(isspace(ls->current)){ +next(ls); +continue; +} +else if(isdigit(ls->current)){ +read_numeral(ls,seminfo); +return TK_NUMBER; +} +else if(isalpha(ls->current)||ls->current=='_'){ +TString*ts; +do{ +save_and_next(ls); +}while(isalnum(ls->current)||ls->current=='_'); +ts=luaX_newstring(ls,luaZ_buffer(ls->buff), +luaZ_bufflen(ls->buff)); +if(ts->tsv.reserved>0) +return ts->tsv.reserved-1+257; +else{ +seminfo->ts=ts; +return TK_NAME; +} +} +else{ +int c=ls->current; +next(ls); +return c; +} +} +} +} +} +static void luaX_next(LexState*ls){ +ls->lastline=ls->linenumber; +if(ls->lookahead.token!=TK_EOS){ +ls->t=ls->lookahead; +ls->lookahead.token=TK_EOS; +} +else +ls->t.token=llex(ls,&ls->t.seminfo); +} +static void luaX_lookahead(LexState*ls){ +ls->lookahead.token=llex(ls,&ls->lookahead.seminfo); +} +#define hasjumps(e)((e)->t!=(e)->f) 
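Note on the input interface used by the lexer above: the ZIO layer (luaZ_init/luaZ_fill, a few hunks earlier) pulls source text through a user-supplied callback via z->reader(L,z->data,&size) and treats a NULL result or a zero size as end of input. The sketch below is illustrative only and not part of this patch; it shows a minimal string-backed reader in that style, assuming the standard Lua 5.1 reader convention. The StringSource type and the string_reader name are invented for the example.

/* Illustrative sketch only -- not part of this patch.
 * A minimal string-backed reader matching the z->reader(L,z->data,&size)
 * call in luaZ_fill above; StringSource and string_reader are made-up names. */
#include <stddef.h>
typedef struct lua_State lua_State;   /* opaque, as in lua.h */
typedef struct StringSource {
  const char *chunk;                  /* the whole source text */
  size_t len;                         /* bytes not yet handed out */
} StringSource;
static const char *string_reader(lua_State *L, void *data, size_t *size) {
  StringSource *s = (StringSource *)data;
  (void)L;                            /* the state is not needed here */
  if (s->len == 0) {                  /* exhausted: luaZ_fill sees NULL/0 */
    *size = 0;                        /* and reports end of stream (-1)   */
    return NULL;
  }
  *size = s->len;                     /* hand the whole chunk out at once */
  s->len = 0;
  return s->chunk;
}

Returning the whole chunk in a single call keeps the sketch short; a reader may just as well return the input in smaller pieces, since luaZ_fill is called again whenever the lexer's buffer runs dry.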
+static int isnumeral(expdesc*e){
+return(e->k==VKNUM&&e->t==(-1)&&e->f==(-1));
+}
+static void luaK_nil(FuncState*fs,int from,int n){
+Instruction*previous;
+if(fs->pc>fs->lasttarget){
+if(fs->pc==0){
+if(from>=fs->nactvar)
+return;
+}
+else{
+previous=&fs->f->code[fs->pc-1];
+if(GET_OPCODE(*previous)==OP_LOADNIL){
+int pfrom=GETARG_A(*previous);
+int pto=GETARG_B(*previous);
+if(pfrom<=from&&from<=pto+1){
+if(from+n-1>pto)
+SETARG_B(*previous,from+n-1);
+return;
+}
+}
+}
+}
+luaK_codeABC(fs,OP_LOADNIL,from,from+n-1,0);
+}
+static int luaK_jump(FuncState*fs){
+int jpc=fs->jpc;
+int j;
+fs->jpc=(-1);
+j=luaK_codeAsBx(fs,OP_JMP,0,(-1));
+luaK_concat(fs,&j,jpc);
+return j;
+}
+static void luaK_ret(FuncState*fs,int first,int nret){
+luaK_codeABC(fs,OP_RETURN,first,nret+1,0);
+}
+static int condjump(FuncState*fs,OpCode op,int A,int B,int C){
+luaK_codeABC(fs,op,A,B,C);
+return luaK_jump(fs);
+}
+static void fixjump(FuncState*fs,int pc,int dest){
+Instruction*jmp=&fs->f->code[pc];
+int offset=dest-(pc+1);
+if(abs(offset)>(((1<<(9+9))-1)>>1))
+luaX_syntaxerror(fs->ls,"control structure too long");
+SETARG_sBx(*jmp,offset);
+}
+static int luaK_getlabel(FuncState*fs){
+fs->lasttarget=fs->pc;
+return fs->pc;
+}
+static int getjump(FuncState*fs,int pc){
+int offset=GETARG_sBx(fs->f->code[pc]);
+if(offset==(-1))
+return(-1);
+else
+return(pc+1)+offset;
+}
+static Instruction*getjumpcontrol(FuncState*fs,int pc){
+Instruction*pi=&fs->f->code[pc];
+if(pc>=1&&testTMode(GET_OPCODE(*(pi-1))))
+return pi-1;
+else
+return pi;
+}
+static int need_value(FuncState*fs,int list){
+for(;list!=(-1);list=getjump(fs,list)){
+Instruction i=*getjumpcontrol(fs,list);
+if(GET_OPCODE(i)!=OP_TESTSET)return 1;
+}
+return 0;
+}
+static int patchtestreg(FuncState*fs,int node,int reg){
+Instruction*i=getjumpcontrol(fs,node);
+if(GET_OPCODE(*i)!=OP_TESTSET)
+return 0;
+if(reg!=((1<<8)-1)&&reg!=GETARG_B(*i))
+SETARG_A(*i,reg);
+else
+*i=CREATE_ABC(OP_TEST,GETARG_B(*i),0,GETARG_C(*i));
+return 1;
+}
+static void removevalues(FuncState*fs,int list){
+for(;list!=(-1);list=getjump(fs,list))
+patchtestreg(fs,list,((1<<8)-1));
+}
+static void patchlistaux(FuncState*fs,int list,int vtarget,int reg,
+int dtarget){
+while(list!=(-1)){
+int next=getjump(fs,list);
+if(patchtestreg(fs,list,reg))
+fixjump(fs,list,vtarget);
+else
+fixjump(fs,list,dtarget);
+list=next;
+}
+}
+static void dischargejpc(FuncState*fs){
+patchlistaux(fs,fs->jpc,fs->pc,((1<<8)-1),fs->pc);
+fs->jpc=(-1);
+}
+static void luaK_patchlist(FuncState*fs,int list,int target){
+if(target==fs->pc)
+luaK_patchtohere(fs,list);
+else{
+patchlistaux(fs,list,target,((1<<8)-1),target);
+}
+}
+static void luaK_patchtohere(FuncState*fs,int list){
+luaK_getlabel(fs);
+luaK_concat(fs,&fs->jpc,list);
+}
+static void luaK_concat(FuncState*fs,int*l1,int l2){
+if(l2==(-1))return;
+else if(*l1==(-1))
+*l1=l2;
+else{
+int list=*l1;
+int next;
+while((next=getjump(fs,list))!=(-1))
+list=next;
+fixjump(fs,list,l2);
+}
+}
+static void luaK_checkstack(FuncState*fs,int n){
+int newstack=fs->freereg+n;
+if(newstack>fs->f->maxstacksize){
+if(newstack>=250)
+luaX_syntaxerror(fs->ls,"function or expression too complex");
+fs->f->maxstacksize=cast_byte(newstack);
+}
+}
+static void luaK_reserveregs(FuncState*fs,int n){
+luaK_checkstack(fs,n);
+fs->freereg+=n;
+}
+static void freereg(FuncState*fs,int reg){
+if(!ISK(reg)&&reg>=fs->nactvar){
+fs->freereg--;
+}
+}
+static void freeexp(FuncState*fs,expdesc*e){
+if(e->k==VNONRELOC)
+freereg(fs,e->u.s.info);
+}
+static int addk(FuncState*fs,TValue*k,TValue*v){
+lua_State*L=fs->L; +TValue*idx=luaH_set(L,fs->h,k); +Proto*f=fs->f; +int oldsize=f->sizek; +if(ttisnumber(idx)){ +return cast_int(nvalue(idx)); +} +else{ +setnvalue(idx,cast_num(fs->nk)); +luaM_growvector(L,f->k,fs->nk,f->sizek,TValue, +((1<<(9+9))-1),"constant table overflow"); +while(oldsizesizek)setnilvalue(&f->k[oldsize++]); +setobj(L,&f->k[fs->nk],v); +luaC_barrier(L,f,v); +return fs->nk++; +} +} +static int luaK_stringK(FuncState*fs,TString*s){ +TValue o; +setsvalue(fs->L,&o,s); +return addk(fs,&o,&o); +} +static int luaK_numberK(FuncState*fs,lua_Number r){ +TValue o; +setnvalue(&o,r); +return addk(fs,&o,&o); +} +static int boolK(FuncState*fs,int b){ +TValue o; +setbvalue(&o,b); +return addk(fs,&o,&o); +} +static int nilK(FuncState*fs){ +TValue k,v; +setnilvalue(&v); +sethvalue(fs->L,&k,fs->h); +return addk(fs,&k,&v); +} +static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults){ +if(e->k==VCALL){ +SETARG_C(getcode(fs,e),nresults+1); +} +else if(e->k==VVARARG){ +SETARG_B(getcode(fs,e),nresults+1); +SETARG_A(getcode(fs,e),fs->freereg); +luaK_reserveregs(fs,1); +} +} +static void luaK_setoneret(FuncState*fs,expdesc*e){ +if(e->k==VCALL){ +e->k=VNONRELOC; +e->u.s.info=GETARG_A(getcode(fs,e)); +} +else if(e->k==VVARARG){ +SETARG_B(getcode(fs,e),2); +e->k=VRELOCABLE; +} +} +static void luaK_dischargevars(FuncState*fs,expdesc*e){ +switch(e->k){ +case VLOCAL:{ +e->k=VNONRELOC; +break; +} +case VUPVAL:{ +e->u.s.info=luaK_codeABC(fs,OP_GETUPVAL,0,e->u.s.info,0); +e->k=VRELOCABLE; +break; +} +case VGLOBAL:{ +e->u.s.info=luaK_codeABx(fs,OP_GETGLOBAL,0,e->u.s.info); +e->k=VRELOCABLE; +break; +} +case VINDEXED:{ +freereg(fs,e->u.s.aux); +freereg(fs,e->u.s.info); +e->u.s.info=luaK_codeABC(fs,OP_GETTABLE,0,e->u.s.info,e->u.s.aux); +e->k=VRELOCABLE; +break; +} +case VVARARG: +case VCALL:{ +luaK_setoneret(fs,e); +break; +} +default:break; +} +} +static int code_label(FuncState*fs,int A,int b,int jump){ +luaK_getlabel(fs); +return luaK_codeABC(fs,OP_LOADBOOL,A,b,jump); +} +static void discharge2reg(FuncState*fs,expdesc*e,int reg){ +luaK_dischargevars(fs,e); +switch(e->k){ +case VNIL:{ +luaK_nil(fs,reg,1); +break; +} +case VFALSE:case VTRUE:{ +luaK_codeABC(fs,OP_LOADBOOL,reg,e->k==VTRUE,0); +break; +} +case VK:{ +luaK_codeABx(fs,OP_LOADK,reg,e->u.s.info); +break; +} +case VKNUM:{ +luaK_codeABx(fs,OP_LOADK,reg,luaK_numberK(fs,e->u.nval)); +break; +} +case VRELOCABLE:{ +Instruction*pc=&getcode(fs,e); +SETARG_A(*pc,reg); +break; +} +case VNONRELOC:{ +if(reg!=e->u.s.info) +luaK_codeABC(fs,OP_MOVE,reg,e->u.s.info,0); +break; +} +default:{ +return; +} +} +e->u.s.info=reg; +e->k=VNONRELOC; +} +static void discharge2anyreg(FuncState*fs,expdesc*e){ +if(e->k!=VNONRELOC){ +luaK_reserveregs(fs,1); +discharge2reg(fs,e,fs->freereg-1); +} +} +static void exp2reg(FuncState*fs,expdesc*e,int reg){ +discharge2reg(fs,e,reg); +if(e->k==VJMP) +luaK_concat(fs,&e->t,e->u.s.info); +if(hasjumps(e)){ +int final; +int p_f=(-1); +int p_t=(-1); +if(need_value(fs,e->t)||need_value(fs,e->f)){ +int fj=(e->k==VJMP)?(-1):luaK_jump(fs); +p_f=code_label(fs,reg,0,1); +p_t=code_label(fs,reg,1,0); +luaK_patchtohere(fs,fj); +} +final=luaK_getlabel(fs); +patchlistaux(fs,e->f,final,reg,p_f); +patchlistaux(fs,e->t,final,reg,p_t); +} +e->f=e->t=(-1); +e->u.s.info=reg; +e->k=VNONRELOC; +} +static void luaK_exp2nextreg(FuncState*fs,expdesc*e){ +luaK_dischargevars(fs,e); +freeexp(fs,e); +luaK_reserveregs(fs,1); +exp2reg(fs,e,fs->freereg-1); +} +static int luaK_exp2anyreg(FuncState*fs,expdesc*e){ +luaK_dischargevars(fs,e); 
+if(e->k==VNONRELOC){ +if(!hasjumps(e))return e->u.s.info; +if(e->u.s.info>=fs->nactvar){ +exp2reg(fs,e,e->u.s.info); +return e->u.s.info; +} +} +luaK_exp2nextreg(fs,e); +return e->u.s.info; +} +static void luaK_exp2val(FuncState*fs,expdesc*e){ +if(hasjumps(e)) +luaK_exp2anyreg(fs,e); +else +luaK_dischargevars(fs,e); +} +static int luaK_exp2RK(FuncState*fs,expdesc*e){ +luaK_exp2val(fs,e); +switch(e->k){ +case VKNUM: +case VTRUE: +case VFALSE: +case VNIL:{ +if(fs->nk<=((1<<(9-1))-1)){ +e->u.s.info=(e->k==VNIL)?nilK(fs): +(e->k==VKNUM)?luaK_numberK(fs,e->u.nval): +boolK(fs,(e->k==VTRUE)); +e->k=VK; +return RKASK(e->u.s.info); +} +else break; +} +case VK:{ +if(e->u.s.info<=((1<<(9-1))-1)) +return RKASK(e->u.s.info); +else break; +} +default:break; +} +return luaK_exp2anyreg(fs,e); +} +static void luaK_storevar(FuncState*fs,expdesc*var,expdesc*ex){ +switch(var->k){ +case VLOCAL:{ +freeexp(fs,ex); +exp2reg(fs,ex,var->u.s.info); +return; +} +case VUPVAL:{ +int e=luaK_exp2anyreg(fs,ex); +luaK_codeABC(fs,OP_SETUPVAL,e,var->u.s.info,0); +break; +} +case VGLOBAL:{ +int e=luaK_exp2anyreg(fs,ex); +luaK_codeABx(fs,OP_SETGLOBAL,e,var->u.s.info); +break; +} +case VINDEXED:{ +int e=luaK_exp2RK(fs,ex); +luaK_codeABC(fs,OP_SETTABLE,var->u.s.info,var->u.s.aux,e); +break; +} +default:{ +break; +} +} +freeexp(fs,ex); +} +static void luaK_self(FuncState*fs,expdesc*e,expdesc*key){ +int func; +luaK_exp2anyreg(fs,e); +freeexp(fs,e); +func=fs->freereg; +luaK_reserveregs(fs,2); +luaK_codeABC(fs,OP_SELF,func,e->u.s.info,luaK_exp2RK(fs,key)); +freeexp(fs,key); +e->u.s.info=func; +e->k=VNONRELOC; +} +static void invertjump(FuncState*fs,expdesc*e){ +Instruction*pc=getjumpcontrol(fs,e->u.s.info); +SETARG_A(*pc,!(GETARG_A(*pc))); +} +static int jumponcond(FuncState*fs,expdesc*e,int cond){ +if(e->k==VRELOCABLE){ +Instruction ie=getcode(fs,e); +if(GET_OPCODE(ie)==OP_NOT){ +fs->pc--; +return condjump(fs,OP_TEST,GETARG_B(ie),0,!cond); +} +} +discharge2anyreg(fs,e); +freeexp(fs,e); +return condjump(fs,OP_TESTSET,((1<<8)-1),e->u.s.info,cond); +} +static void luaK_goiftrue(FuncState*fs,expdesc*e){ +int pc; +luaK_dischargevars(fs,e); +switch(e->k){ +case VK:case VKNUM:case VTRUE:{ +pc=(-1); +break; +} +case VJMP:{ +invertjump(fs,e); +pc=e->u.s.info; +break; +} +default:{ +pc=jumponcond(fs,e,0); +break; +} +} +luaK_concat(fs,&e->f,pc); +luaK_patchtohere(fs,e->t); +e->t=(-1); +} +static void luaK_goiffalse(FuncState*fs,expdesc*e){ +int pc; +luaK_dischargevars(fs,e); +switch(e->k){ +case VNIL:case VFALSE:{ +pc=(-1); +break; +} +case VJMP:{ +pc=e->u.s.info; +break; +} +default:{ +pc=jumponcond(fs,e,1); +break; +} +} +luaK_concat(fs,&e->t,pc); +luaK_patchtohere(fs,e->f); +e->f=(-1); +} +static void codenot(FuncState*fs,expdesc*e){ +luaK_dischargevars(fs,e); +switch(e->k){ +case VNIL:case VFALSE:{ +e->k=VTRUE; +break; +} +case VK:case VKNUM:case VTRUE:{ +e->k=VFALSE; +break; +} +case VJMP:{ +invertjump(fs,e); +break; +} +case VRELOCABLE: +case VNONRELOC:{ +discharge2anyreg(fs,e); +freeexp(fs,e); +e->u.s.info=luaK_codeABC(fs,OP_NOT,0,e->u.s.info,0); +e->k=VRELOCABLE; +break; +} +default:{ +break; +} +} +{int temp=e->f;e->f=e->t;e->t=temp;} +removevalues(fs,e->f); +removevalues(fs,e->t); +} +static void luaK_indexed(FuncState*fs,expdesc*t,expdesc*k){ +t->u.s.aux=luaK_exp2RK(fs,k); +t->k=VINDEXED; +} +static int constfolding(OpCode op,expdesc*e1,expdesc*e2){ +lua_Number v1,v2,r; +if(!isnumeral(e1)||!isnumeral(e2))return 0; +v1=e1->u.nval; +v2=e2->u.nval; +switch(op){ +case OP_ADD:r=luai_numadd(v1,v2);break; +case 
OP_SUB:r=luai_numsub(v1,v2);break; +case OP_MUL:r=luai_nummul(v1,v2);break; +case OP_DIV: +if(v2==0)return 0; +r=luai_numdiv(v1,v2);break; +case OP_MOD: +if(v2==0)return 0; +r=luai_nummod(v1,v2);break; +case OP_POW:r=luai_numpow(v1,v2);break; +case OP_UNM:r=luai_numunm(v1);break; +case OP_LEN:return 0; +default:r=0;break; +} +if(luai_numisnan(r))return 0; +e1->u.nval=r; +return 1; +} +static void codearith(FuncState*fs,OpCode op,expdesc*e1,expdesc*e2){ +if(constfolding(op,e1,e2)) +return; +else{ +int o2=(op!=OP_UNM&&op!=OP_LEN)?luaK_exp2RK(fs,e2):0; +int o1=luaK_exp2RK(fs,e1); +if(o1>o2){ +freeexp(fs,e1); +freeexp(fs,e2); +} +else{ +freeexp(fs,e2); +freeexp(fs,e1); +} +e1->u.s.info=luaK_codeABC(fs,op,0,o1,o2); +e1->k=VRELOCABLE; +} +} +static void codecomp(FuncState*fs,OpCode op,int cond,expdesc*e1, +expdesc*e2){ +int o1=luaK_exp2RK(fs,e1); +int o2=luaK_exp2RK(fs,e2); +freeexp(fs,e2); +freeexp(fs,e1); +if(cond==0&&op!=OP_EQ){ +int temp; +temp=o1;o1=o2;o2=temp; +cond=1; +} +e1->u.s.info=condjump(fs,op,cond,o1,o2); +e1->k=VJMP; +} +static void luaK_prefix(FuncState*fs,UnOpr op,expdesc*e){ +expdesc e2; +e2.t=e2.f=(-1);e2.k=VKNUM;e2.u.nval=0; +switch(op){ +case OPR_MINUS:{ +if(!isnumeral(e)) +luaK_exp2anyreg(fs,e); +codearith(fs,OP_UNM,e,&e2); +break; +} +case OPR_NOT:codenot(fs,e);break; +case OPR_LEN:{ +luaK_exp2anyreg(fs,e); +codearith(fs,OP_LEN,e,&e2); +break; +} +default:; +} +} +static void luaK_infix(FuncState*fs,BinOpr op,expdesc*v){ +switch(op){ +case OPR_AND:{ +luaK_goiftrue(fs,v); +break; +} +case OPR_OR:{ +luaK_goiffalse(fs,v); +break; +} +case OPR_CONCAT:{ +luaK_exp2nextreg(fs,v); +break; +} +case OPR_ADD:case OPR_SUB:case OPR_MUL:case OPR_DIV: +case OPR_MOD:case OPR_POW:{ +if(!isnumeral(v))luaK_exp2RK(fs,v); +break; +} +default:{ +luaK_exp2RK(fs,v); +break; +} +} +} +static void luaK_posfix(FuncState*fs,BinOpr op,expdesc*e1,expdesc*e2){ +switch(op){ +case OPR_AND:{ +luaK_dischargevars(fs,e2); +luaK_concat(fs,&e2->f,e1->f); +*e1=*e2; +break; +} +case OPR_OR:{ +luaK_dischargevars(fs,e2); +luaK_concat(fs,&e2->t,e1->t); +*e1=*e2; +break; +} +case OPR_CONCAT:{ +luaK_exp2val(fs,e2); +if(e2->k==VRELOCABLE&&GET_OPCODE(getcode(fs,e2))==OP_CONCAT){ +freeexp(fs,e1); +SETARG_B(getcode(fs,e2),e1->u.s.info); +e1->k=VRELOCABLE;e1->u.s.info=e2->u.s.info; +} +else{ +luaK_exp2nextreg(fs,e2); +codearith(fs,OP_CONCAT,e1,e2); +} +break; +} +case OPR_ADD:codearith(fs,OP_ADD,e1,e2);break; +case OPR_SUB:codearith(fs,OP_SUB,e1,e2);break; +case OPR_MUL:codearith(fs,OP_MUL,e1,e2);break; +case OPR_DIV:codearith(fs,OP_DIV,e1,e2);break; +case OPR_MOD:codearith(fs,OP_MOD,e1,e2);break; +case OPR_POW:codearith(fs,OP_POW,e1,e2);break; +case OPR_EQ:codecomp(fs,OP_EQ,1,e1,e2);break; +case OPR_NE:codecomp(fs,OP_EQ,0,e1,e2);break; +case OPR_LT:codecomp(fs,OP_LT,1,e1,e2);break; +case OPR_LE:codecomp(fs,OP_LE,1,e1,e2);break; +case OPR_GT:codecomp(fs,OP_LT,0,e1,e2);break; +case OPR_GE:codecomp(fs,OP_LE,0,e1,e2);break; +default:; +} +} +static void luaK_fixline(FuncState*fs,int line){ +fs->f->lineinfo[fs->pc-1]=line; +} +static int luaK_code(FuncState*fs,Instruction i,int line){ +Proto*f=fs->f; +dischargejpc(fs); +luaM_growvector(fs->L,f->code,fs->pc,f->sizecode,Instruction, +(INT_MAX-2),"code size overflow"); +f->code[fs->pc]=i; +luaM_growvector(fs->L,f->lineinfo,fs->pc,f->sizelineinfo,int, +(INT_MAX-2),"code size overflow"); +f->lineinfo[fs->pc]=line; +return fs->pc++; +} +static int luaK_codeABC(FuncState*fs,OpCode o,int a,int b,int c){ +return luaK_code(fs,CREATE_ABC(o,a,b,c),fs->ls->lastline); +} +static int 
luaK_codeABx(FuncState*fs,OpCode o,int a,unsigned int bc){ +return luaK_code(fs,CREATE_ABx(o,a,bc),fs->ls->lastline); +} +static void luaK_setlist(FuncState*fs,int base,int nelems,int tostore){ +int c=(nelems-1)/50+1; +int b=(tostore==(-1))?0:tostore; +if(c<=((1<<9)-1)) +luaK_codeABC(fs,OP_SETLIST,base,b,c); +else{ +luaK_codeABC(fs,OP_SETLIST,base,b,0); +luaK_code(fs,cast(Instruction,c),fs->ls->lastline); +} +fs->freereg=base+1; +} +#define hasmultret(k)((k)==VCALL||(k)==VVARARG) +#define getlocvar(fs,i)((fs)->f->locvars[(fs)->actvar[i]]) +#define luaY_checklimit(fs,v,l,m)if((v)>(l))errorlimit(fs,l,m) +typedef struct BlockCnt{ +struct BlockCnt*previous; +int breaklist; +lu_byte nactvar; +lu_byte upval; +lu_byte isbreakable; +}BlockCnt; +static void chunk(LexState*ls); +static void expr(LexState*ls,expdesc*v); +static void anchor_token(LexState*ls){ +if(ls->t.token==TK_NAME||ls->t.token==TK_STRING){ +TString*ts=ls->t.seminfo.ts; +luaX_newstring(ls,getstr(ts),ts->tsv.len); +} +} +static void error_expected(LexState*ls,int token){ +luaX_syntaxerror(ls, +luaO_pushfstring(ls->L,LUA_QL("%s")" expected",luaX_token2str(ls,token))); +} +static void errorlimit(FuncState*fs,int limit,const char*what){ +const char*msg=(fs->f->linedefined==0)? +luaO_pushfstring(fs->L,"main function has more than %d %s",limit,what): +luaO_pushfstring(fs->L,"function at line %d has more than %d %s", +fs->f->linedefined,limit,what); +luaX_lexerror(fs->ls,msg,0); +} +static int testnext(LexState*ls,int c){ +if(ls->t.token==c){ +luaX_next(ls); +return 1; +} +else return 0; +} +static void check(LexState*ls,int c){ +if(ls->t.token!=c) +error_expected(ls,c); +} +static void checknext(LexState*ls,int c){ +check(ls,c); +luaX_next(ls); +} +#define check_condition(ls,c,msg){if(!(c))luaX_syntaxerror(ls,msg);} +static void check_match(LexState*ls,int what,int who,int where){ +if(!testnext(ls,what)){ +if(where==ls->linenumber) +error_expected(ls,what); +else{ +luaX_syntaxerror(ls,luaO_pushfstring(ls->L, +LUA_QL("%s")" expected (to close "LUA_QL("%s")" at line %d)", +luaX_token2str(ls,what),luaX_token2str(ls,who),where)); +} +} +} +static TString*str_checkname(LexState*ls){ +TString*ts; +check(ls,TK_NAME); +ts=ls->t.seminfo.ts; +luaX_next(ls); +return ts; +} +static void init_exp(expdesc*e,expkind k,int i){ +e->f=e->t=(-1); +e->k=k; +e->u.s.info=i; +} +static void codestring(LexState*ls,expdesc*e,TString*s){ +init_exp(e,VK,luaK_stringK(ls->fs,s)); +} +static void checkname(LexState*ls,expdesc*e){ +codestring(ls,e,str_checkname(ls)); +} +static int registerlocalvar(LexState*ls,TString*varname){ +FuncState*fs=ls->fs; +Proto*f=fs->f; +int oldsize=f->sizelocvars; +luaM_growvector(ls->L,f->locvars,fs->nlocvars,f->sizelocvars, +LocVar,SHRT_MAX,"too many local variables"); +while(oldsizesizelocvars)f->locvars[oldsize++].varname=NULL; +f->locvars[fs->nlocvars].varname=varname; +luaC_objbarrier(ls->L,f,varname); +return fs->nlocvars++; +} +#define new_localvarliteral(ls,v,n)new_localvar(ls,luaX_newstring(ls,""v,(sizeof(v)/sizeof(char))-1),n) +static void new_localvar(LexState*ls,TString*name,int n){ +FuncState*fs=ls->fs; +luaY_checklimit(fs,fs->nactvar+n+1,200,"local variables"); +fs->actvar[fs->nactvar+n]=cast(unsigned short,registerlocalvar(ls,name)); +} +static void adjustlocalvars(LexState*ls,int nvars){ +FuncState*fs=ls->fs; +fs->nactvar=cast_byte(fs->nactvar+nvars); +for(;nvars;nvars--){ +getlocvar(fs,fs->nactvar-nvars).startpc=fs->pc; +} +} +static void removevars(LexState*ls,int tolevel){ +FuncState*fs=ls->fs; 
+while(fs->nactvar>tolevel) +getlocvar(fs,--fs->nactvar).endpc=fs->pc; +} +static int indexupvalue(FuncState*fs,TString*name,expdesc*v){ +int i; +Proto*f=fs->f; +int oldsize=f->sizeupvalues; +for(i=0;inups;i++){ +if(fs->upvalues[i].k==v->k&&fs->upvalues[i].info==v->u.s.info){ +return i; +} +} +luaY_checklimit(fs,f->nups+1,60,"upvalues"); +luaM_growvector(fs->L,f->upvalues,f->nups,f->sizeupvalues, +TString*,(INT_MAX-2),""); +while(oldsizesizeupvalues)f->upvalues[oldsize++]=NULL; +f->upvalues[f->nups]=name; +luaC_objbarrier(fs->L,f,name); +fs->upvalues[f->nups].k=cast_byte(v->k); +fs->upvalues[f->nups].info=cast_byte(v->u.s.info); +return f->nups++; +} +static int searchvar(FuncState*fs,TString*n){ +int i; +for(i=fs->nactvar-1;i>=0;i--){ +if(n==getlocvar(fs,i).varname) +return i; +} +return-1; +} +static void markupval(FuncState*fs,int level){ +BlockCnt*bl=fs->bl; +while(bl&&bl->nactvar>level)bl=bl->previous; +if(bl)bl->upval=1; +} +static int singlevaraux(FuncState*fs,TString*n,expdesc*var,int base){ +if(fs==NULL){ +init_exp(var,VGLOBAL,((1<<8)-1)); +return VGLOBAL; +} +else{ +int v=searchvar(fs,n); +if(v>=0){ +init_exp(var,VLOCAL,v); +if(!base) +markupval(fs,v); +return VLOCAL; +} +else{ +if(singlevaraux(fs->prev,n,var,0)==VGLOBAL) +return VGLOBAL; +var->u.s.info=indexupvalue(fs,n,var); +var->k=VUPVAL; +return VUPVAL; +} +} +} +static void singlevar(LexState*ls,expdesc*var){ +TString*varname=str_checkname(ls); +FuncState*fs=ls->fs; +if(singlevaraux(fs,varname,var,1)==VGLOBAL) +var->u.s.info=luaK_stringK(fs,varname); +} +static void adjust_assign(LexState*ls,int nvars,int nexps,expdesc*e){ +FuncState*fs=ls->fs; +int extra=nvars-nexps; +if(hasmultret(e->k)){ +extra++; +if(extra<0)extra=0; +luaK_setreturns(fs,e,extra); +if(extra>1)luaK_reserveregs(fs,extra-1); +} +else{ +if(e->k!=VVOID)luaK_exp2nextreg(fs,e); +if(extra>0){ +int reg=fs->freereg; +luaK_reserveregs(fs,extra); +luaK_nil(fs,reg,extra); +} +} +} +static void enterlevel(LexState*ls){ +if(++ls->L->nCcalls>200) +luaX_lexerror(ls,"chunk has too many syntax levels",0); +} +#define leavelevel(ls)((ls)->L->nCcalls--) +static void enterblock(FuncState*fs,BlockCnt*bl,lu_byte isbreakable){ +bl->breaklist=(-1); +bl->isbreakable=isbreakable; +bl->nactvar=fs->nactvar; +bl->upval=0; +bl->previous=fs->bl; +fs->bl=bl; +} +static void leaveblock(FuncState*fs){ +BlockCnt*bl=fs->bl; +fs->bl=bl->previous; +removevars(fs->ls,bl->nactvar); +if(bl->upval) +luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0); +fs->freereg=fs->nactvar; +luaK_patchtohere(fs,bl->breaklist); +} +static void pushclosure(LexState*ls,FuncState*func,expdesc*v){ +FuncState*fs=ls->fs; +Proto*f=fs->f; +int oldsize=f->sizep; +int i; +luaM_growvector(ls->L,f->p,fs->np,f->sizep,Proto*, +((1<<(9+9))-1),"constant table overflow"); +while(oldsizesizep)f->p[oldsize++]=NULL; +f->p[fs->np++]=func->f; +luaC_objbarrier(ls->L,f,func->f); +init_exp(v,VRELOCABLE,luaK_codeABx(fs,OP_CLOSURE,0,fs->np-1)); +for(i=0;if->nups;i++){ +OpCode o=(func->upvalues[i].k==VLOCAL)?OP_MOVE:OP_GETUPVAL; +luaK_codeABC(fs,o,0,func->upvalues[i].info,0); +} +} +static void open_func(LexState*ls,FuncState*fs){ +lua_State*L=ls->L; +Proto*f=luaF_newproto(L); +fs->f=f; +fs->prev=ls->fs; +fs->ls=ls; +fs->L=L; +ls->fs=fs; +fs->pc=0; +fs->lasttarget=-1; +fs->jpc=(-1); +fs->freereg=0; +fs->nk=0; +fs->np=0; +fs->nlocvars=0; +fs->nactvar=0; +fs->bl=NULL; +f->source=ls->source; +f->maxstacksize=2; +fs->h=luaH_new(L,0,0); +sethvalue(L,L->top,fs->h); +incr_top(L); +setptvalue(L,L->top,f); +incr_top(L); +} +static void 
close_func(LexState*ls){ +lua_State*L=ls->L; +FuncState*fs=ls->fs; +Proto*f=fs->f; +removevars(ls,0); +luaK_ret(fs,0,0); +luaM_reallocvector(L,f->code,f->sizecode,fs->pc,Instruction); +f->sizecode=fs->pc; +luaM_reallocvector(L,f->lineinfo,f->sizelineinfo,fs->pc,int); +f->sizelineinfo=fs->pc; +luaM_reallocvector(L,f->k,f->sizek,fs->nk,TValue); +f->sizek=fs->nk; +luaM_reallocvector(L,f->p,f->sizep,fs->np,Proto*); +f->sizep=fs->np; +luaM_reallocvector(L,f->locvars,f->sizelocvars,fs->nlocvars,LocVar); +f->sizelocvars=fs->nlocvars; +luaM_reallocvector(L,f->upvalues,f->sizeupvalues,f->nups,TString*); +f->sizeupvalues=f->nups; +ls->fs=fs->prev; +if(fs)anchor_token(ls); +L->top-=2; +} +static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,const char*name){ +struct LexState lexstate; +struct FuncState funcstate; +lexstate.buff=buff; +luaX_setinput(L,&lexstate,z,luaS_new(L,name)); +open_func(&lexstate,&funcstate); +funcstate.f->is_vararg=2; +luaX_next(&lexstate); +chunk(&lexstate); +check(&lexstate,TK_EOS); +close_func(&lexstate); +return funcstate.f; +} +static void field(LexState*ls,expdesc*v){ +FuncState*fs=ls->fs; +expdesc key; +luaK_exp2anyreg(fs,v); +luaX_next(ls); +checkname(ls,&key); +luaK_indexed(fs,v,&key); +} +static void yindex(LexState*ls,expdesc*v){ +luaX_next(ls); +expr(ls,v); +luaK_exp2val(ls->fs,v); +checknext(ls,']'); +} +struct ConsControl{ +expdesc v; +expdesc*t; +int nh; +int na; +int tostore; +}; +static void recfield(LexState*ls,struct ConsControl*cc){ +FuncState*fs=ls->fs; +int reg=ls->fs->freereg; +expdesc key,val; +int rkkey; +if(ls->t.token==TK_NAME){ +luaY_checklimit(fs,cc->nh,(INT_MAX-2),"items in a constructor"); +checkname(ls,&key); +} +else +yindex(ls,&key); +cc->nh++; +checknext(ls,'='); +rkkey=luaK_exp2RK(fs,&key); +expr(ls,&val); +luaK_codeABC(fs,OP_SETTABLE,cc->t->u.s.info,rkkey,luaK_exp2RK(fs,&val)); +fs->freereg=reg; +} +static void closelistfield(FuncState*fs,struct ConsControl*cc){ +if(cc->v.k==VVOID)return; +luaK_exp2nextreg(fs,&cc->v); +cc->v.k=VVOID; +if(cc->tostore==50){ +luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore); +cc->tostore=0; +} +} +static void lastlistfield(FuncState*fs,struct ConsControl*cc){ +if(cc->tostore==0)return; +if(hasmultret(cc->v.k)){ +luaK_setmultret(fs,&cc->v); +luaK_setlist(fs,cc->t->u.s.info,cc->na,(-1)); +cc->na--; +} +else{ +if(cc->v.k!=VVOID) +luaK_exp2nextreg(fs,&cc->v); +luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore); +} +} +static void listfield(LexState*ls,struct ConsControl*cc){ +expr(ls,&cc->v); +luaY_checklimit(ls->fs,cc->na,(INT_MAX-2),"items in a constructor"); +cc->na++; +cc->tostore++; +} +static void constructor(LexState*ls,expdesc*t){ +FuncState*fs=ls->fs; +int line=ls->linenumber; +int pc=luaK_codeABC(fs,OP_NEWTABLE,0,0,0); +struct ConsControl cc; +cc.na=cc.nh=cc.tostore=0; +cc.t=t; +init_exp(t,VRELOCABLE,pc); +init_exp(&cc.v,VVOID,0); +luaK_exp2nextreg(ls->fs,t); +checknext(ls,'{'); +do{ +if(ls->t.token=='}')break; +closelistfield(fs,&cc); +switch(ls->t.token){ +case TK_NAME:{ +luaX_lookahead(ls); +if(ls->lookahead.token!='=') +listfield(ls,&cc); +else +recfield(ls,&cc); +break; +} +case'[':{ +recfield(ls,&cc); +break; +} +default:{ +listfield(ls,&cc); +break; +} +} +}while(testnext(ls,',')||testnext(ls,';')); +check_match(ls,'}','{',line); +lastlistfield(fs,&cc); +SETARG_B(fs->f->code[pc],luaO_int2fb(cc.na)); +SETARG_C(fs->f->code[pc],luaO_int2fb(cc.nh)); +} +static void parlist(LexState*ls){ +FuncState*fs=ls->fs; +Proto*f=fs->f; +int nparams=0; +f->is_vararg=0; +if(ls->t.token!=')'){ +do{ 
+switch(ls->t.token){ +case TK_NAME:{ +new_localvar(ls,str_checkname(ls),nparams++); +break; +} +case TK_DOTS:{ +luaX_next(ls); +f->is_vararg|=2; +break; +} +default:luaX_syntaxerror(ls," or "LUA_QL("...")" expected"); +} +}while(!f->is_vararg&&testnext(ls,',')); +} +adjustlocalvars(ls,nparams); +f->numparams=cast_byte(fs->nactvar-(f->is_vararg&1)); +luaK_reserveregs(fs,fs->nactvar); +} +static void body(LexState*ls,expdesc*e,int needself,int line){ +FuncState new_fs; +open_func(ls,&new_fs); +new_fs.f->linedefined=line; +checknext(ls,'('); +if(needself){ +new_localvarliteral(ls,"self",0); +adjustlocalvars(ls,1); +} +parlist(ls); +checknext(ls,')'); +chunk(ls); +new_fs.f->lastlinedefined=ls->linenumber; +check_match(ls,TK_END,TK_FUNCTION,line); +close_func(ls); +pushclosure(ls,&new_fs,e); +} +static int explist1(LexState*ls,expdesc*v){ +int n=1; +expr(ls,v); +while(testnext(ls,',')){ +luaK_exp2nextreg(ls->fs,v); +expr(ls,v); +n++; +} +return n; +} +static void funcargs(LexState*ls,expdesc*f){ +FuncState*fs=ls->fs; +expdesc args; +int base,nparams; +int line=ls->linenumber; +switch(ls->t.token){ +case'(':{ +if(line!=ls->lastline) +luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)"); +luaX_next(ls); +if(ls->t.token==')') +args.k=VVOID; +else{ +explist1(ls,&args); +luaK_setmultret(fs,&args); +} +check_match(ls,')','(',line); +break; +} +case'{':{ +constructor(ls,&args); +break; +} +case TK_STRING:{ +codestring(ls,&args,ls->t.seminfo.ts); +luaX_next(ls); +break; +} +default:{ +luaX_syntaxerror(ls,"function arguments expected"); +return; +} +} +base=f->u.s.info; +if(hasmultret(args.k)) +nparams=(-1); +else{ +if(args.k!=VVOID) +luaK_exp2nextreg(fs,&args); +nparams=fs->freereg-(base+1); +} +init_exp(f,VCALL,luaK_codeABC(fs,OP_CALL,base,nparams+1,2)); +luaK_fixline(fs,line); +fs->freereg=base+1; +} +static void prefixexp(LexState*ls,expdesc*v){ +switch(ls->t.token){ +case'(':{ +int line=ls->linenumber; +luaX_next(ls); +expr(ls,v); +check_match(ls,')','(',line); +luaK_dischargevars(ls->fs,v); +return; +} +case TK_NAME:{ +singlevar(ls,v); +return; +} +default:{ +luaX_syntaxerror(ls,"unexpected symbol"); +return; +} +} +} +static void primaryexp(LexState*ls,expdesc*v){ +FuncState*fs=ls->fs; +prefixexp(ls,v); +for(;;){ +switch(ls->t.token){ +case'.':{ +field(ls,v); +break; +} +case'[':{ +expdesc key; +luaK_exp2anyreg(fs,v); +yindex(ls,&key); +luaK_indexed(fs,v,&key); +break; +} +case':':{ +expdesc key; +luaX_next(ls); +checkname(ls,&key); +luaK_self(fs,v,&key); +funcargs(ls,v); +break; +} +case'(':case TK_STRING:case'{':{ +luaK_exp2nextreg(fs,v); +funcargs(ls,v); +break; +} +default:return; +} +} +} +static void simpleexp(LexState*ls,expdesc*v){ +switch(ls->t.token){ +case TK_NUMBER:{ +init_exp(v,VKNUM,0); +v->u.nval=ls->t.seminfo.r; +break; +} +case TK_STRING:{ +codestring(ls,v,ls->t.seminfo.ts); +break; +} +case TK_NIL:{ +init_exp(v,VNIL,0); +break; +} +case TK_TRUE:{ +init_exp(v,VTRUE,0); +break; +} +case TK_FALSE:{ +init_exp(v,VFALSE,0); +break; +} +case TK_DOTS:{ +FuncState*fs=ls->fs; +check_condition(ls,fs->f->is_vararg, +"cannot use "LUA_QL("...")" outside a vararg function"); +fs->f->is_vararg&=~4; +init_exp(v,VVARARG,luaK_codeABC(fs,OP_VARARG,0,1,0)); +break; +} +case'{':{ +constructor(ls,v); +return; +} +case TK_FUNCTION:{ +luaX_next(ls); +body(ls,v,0,ls->linenumber); +return; +} +default:{ +primaryexp(ls,v); +return; +} +} +luaX_next(ls); +} +static UnOpr getunopr(int op){ +switch(op){ +case TK_NOT:return OPR_NOT; +case'-':return OPR_MINUS; +case'#':return OPR_LEN; 
+default:return OPR_NOUNOPR; +} +} +static BinOpr getbinopr(int op){ +switch(op){ +case'+':return OPR_ADD; +case'-':return OPR_SUB; +case'*':return OPR_MUL; +case'/':return OPR_DIV; +case'%':return OPR_MOD; +case'^':return OPR_POW; +case TK_CONCAT:return OPR_CONCAT; +case TK_NE:return OPR_NE; +case TK_EQ:return OPR_EQ; +case'<':return OPR_LT; +case TK_LE:return OPR_LE; +case'>':return OPR_GT; +case TK_GE:return OPR_GE; +case TK_AND:return OPR_AND; +case TK_OR:return OPR_OR; +default:return OPR_NOBINOPR; +} +} +static const struct{ +lu_byte left; +lu_byte right; +}priority[]={ +{6,6},{6,6},{7,7},{7,7},{7,7}, +{10,9},{5,4}, +{3,3},{3,3}, +{3,3},{3,3},{3,3},{3,3}, +{2,2},{1,1} +}; +static BinOpr subexpr(LexState*ls,expdesc*v,unsigned int limit){ +BinOpr op; +UnOpr uop; +enterlevel(ls); +uop=getunopr(ls->t.token); +if(uop!=OPR_NOUNOPR){ +luaX_next(ls); +subexpr(ls,v,8); +luaK_prefix(ls->fs,uop,v); +} +else simpleexp(ls,v); +op=getbinopr(ls->t.token); +while(op!=OPR_NOBINOPR&&priority[op].left>limit){ +expdesc v2; +BinOpr nextop; +luaX_next(ls); +luaK_infix(ls->fs,op,v); +nextop=subexpr(ls,&v2,priority[op].right); +luaK_posfix(ls->fs,op,v,&v2); +op=nextop; +} +leavelevel(ls); +return op; +} +static void expr(LexState*ls,expdesc*v){ +subexpr(ls,v,0); +} +static int block_follow(int token){ +switch(token){ +case TK_ELSE:case TK_ELSEIF:case TK_END: +case TK_UNTIL:case TK_EOS: +return 1; +default:return 0; +} +} +static void block(LexState*ls){ +FuncState*fs=ls->fs; +BlockCnt bl; +enterblock(fs,&bl,0); +chunk(ls); +leaveblock(fs); +} +struct LHS_assign{ +struct LHS_assign*prev; +expdesc v; +}; +static void check_conflict(LexState*ls,struct LHS_assign*lh,expdesc*v){ +FuncState*fs=ls->fs; +int extra=fs->freereg; +int conflict=0; +for(;lh;lh=lh->prev){ +if(lh->v.k==VINDEXED){ +if(lh->v.u.s.info==v->u.s.info){ +conflict=1; +lh->v.u.s.info=extra; +} +if(lh->v.u.s.aux==v->u.s.info){ +conflict=1; +lh->v.u.s.aux=extra; +} +} +} +if(conflict){ +luaK_codeABC(fs,OP_MOVE,fs->freereg,v->u.s.info,0); +luaK_reserveregs(fs,1); +} +} +static void assignment(LexState*ls,struct LHS_assign*lh,int nvars){ +expdesc e; +check_condition(ls,VLOCAL<=lh->v.k&&lh->v.k<=VINDEXED, +"syntax error"); +if(testnext(ls,',')){ +struct LHS_assign nv; +nv.prev=lh; +primaryexp(ls,&nv.v); +if(nv.v.k==VLOCAL) +check_conflict(ls,lh,&nv.v); +luaY_checklimit(ls->fs,nvars,200-ls->L->nCcalls, +"variables in assignment"); +assignment(ls,&nv,nvars+1); +} +else{ +int nexps; +checknext(ls,'='); +nexps=explist1(ls,&e); +if(nexps!=nvars){ +adjust_assign(ls,nvars,nexps,&e); +if(nexps>nvars) +ls->fs->freereg-=nexps-nvars; +} +else{ +luaK_setoneret(ls->fs,&e); +luaK_storevar(ls->fs,&lh->v,&e); +return; +} +} +init_exp(&e,VNONRELOC,ls->fs->freereg-1); +luaK_storevar(ls->fs,&lh->v,&e); +} +static int cond(LexState*ls){ +expdesc v; +expr(ls,&v); +if(v.k==VNIL)v.k=VFALSE; +luaK_goiftrue(ls->fs,&v); +return v.f; +} +static void breakstat(LexState*ls){ +FuncState*fs=ls->fs; +BlockCnt*bl=fs->bl; +int upval=0; +while(bl&&!bl->isbreakable){ +upval|=bl->upval; +bl=bl->previous; +} +if(!bl) +luaX_syntaxerror(ls,"no loop to break"); +if(upval) +luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0); +luaK_concat(fs,&bl->breaklist,luaK_jump(fs)); +} +static void whilestat(LexState*ls,int line){ +FuncState*fs=ls->fs; +int whileinit; +int condexit; +BlockCnt bl; +luaX_next(ls); +whileinit=luaK_getlabel(fs); +condexit=cond(ls); +enterblock(fs,&bl,1); +checknext(ls,TK_DO); +block(ls); +luaK_patchlist(fs,luaK_jump(fs),whileinit); +check_match(ls,TK_END,TK_WHILE,line); 
+leaveblock(fs); +luaK_patchtohere(fs,condexit); +} +static void repeatstat(LexState*ls,int line){ +int condexit; +FuncState*fs=ls->fs; +int repeat_init=luaK_getlabel(fs); +BlockCnt bl1,bl2; +enterblock(fs,&bl1,1); +enterblock(fs,&bl2,0); +luaX_next(ls); +chunk(ls); +check_match(ls,TK_UNTIL,TK_REPEAT,line); +condexit=cond(ls); +if(!bl2.upval){ +leaveblock(fs); +luaK_patchlist(ls->fs,condexit,repeat_init); +} +else{ +breakstat(ls); +luaK_patchtohere(ls->fs,condexit); +leaveblock(fs); +luaK_patchlist(ls->fs,luaK_jump(fs),repeat_init); +} +leaveblock(fs); +} +static int exp1(LexState*ls){ +expdesc e; +int k; +expr(ls,&e); +k=e.k; +luaK_exp2nextreg(ls->fs,&e); +return k; +} +static void forbody(LexState*ls,int base,int line,int nvars,int isnum){ +BlockCnt bl; +FuncState*fs=ls->fs; +int prep,endfor; +adjustlocalvars(ls,3); +checknext(ls,TK_DO); +prep=isnum?luaK_codeAsBx(fs,OP_FORPREP,base,(-1)):luaK_jump(fs); +enterblock(fs,&bl,0); +adjustlocalvars(ls,nvars); +luaK_reserveregs(fs,nvars); +block(ls); +leaveblock(fs); +luaK_patchtohere(fs,prep); +endfor=(isnum)?luaK_codeAsBx(fs,OP_FORLOOP,base,(-1)): +luaK_codeABC(fs,OP_TFORLOOP,base,0,nvars); +luaK_fixline(fs,line); +luaK_patchlist(fs,(isnum?endfor:luaK_jump(fs)),prep+1); +} +static void fornum(LexState*ls,TString*varname,int line){ +FuncState*fs=ls->fs; +int base=fs->freereg; +new_localvarliteral(ls,"(for index)",0); +new_localvarliteral(ls,"(for limit)",1); +new_localvarliteral(ls,"(for step)",2); +new_localvar(ls,varname,3); +checknext(ls,'='); +exp1(ls); +checknext(ls,','); +exp1(ls); +if(testnext(ls,',')) +exp1(ls); +else{ +luaK_codeABx(fs,OP_LOADK,fs->freereg,luaK_numberK(fs,1)); +luaK_reserveregs(fs,1); +} +forbody(ls,base,line,1,1); +} +static void forlist(LexState*ls,TString*indexname){ +FuncState*fs=ls->fs; +expdesc e; +int nvars=0; +int line; +int base=fs->freereg; +new_localvarliteral(ls,"(for generator)",nvars++); +new_localvarliteral(ls,"(for state)",nvars++); +new_localvarliteral(ls,"(for control)",nvars++); +new_localvar(ls,indexname,nvars++); +while(testnext(ls,',')) +new_localvar(ls,str_checkname(ls),nvars++); +checknext(ls,TK_IN); +line=ls->linenumber; +adjust_assign(ls,3,explist1(ls,&e),&e); +luaK_checkstack(fs,3); +forbody(ls,base,line,nvars-3,0); +} +static void forstat(LexState*ls,int line){ +FuncState*fs=ls->fs; +TString*varname; +BlockCnt bl; +enterblock(fs,&bl,1); +luaX_next(ls); +varname=str_checkname(ls); +switch(ls->t.token){ +case'=':fornum(ls,varname,line);break; +case',':case TK_IN:forlist(ls,varname);break; +default:luaX_syntaxerror(ls,LUA_QL("=")" or "LUA_QL("in")" expected"); +} +check_match(ls,TK_END,TK_FOR,line); +leaveblock(fs); +} +static int test_then_block(LexState*ls){ +int condexit; +luaX_next(ls); +condexit=cond(ls); +checknext(ls,TK_THEN); +block(ls); +return condexit; +} +static void ifstat(LexState*ls,int line){ +FuncState*fs=ls->fs; +int flist; +int escapelist=(-1); +flist=test_then_block(ls); +while(ls->t.token==TK_ELSEIF){ +luaK_concat(fs,&escapelist,luaK_jump(fs)); +luaK_patchtohere(fs,flist); +flist=test_then_block(ls); +} +if(ls->t.token==TK_ELSE){ +luaK_concat(fs,&escapelist,luaK_jump(fs)); +luaK_patchtohere(fs,flist); +luaX_next(ls); +block(ls); +} +else +luaK_concat(fs,&escapelist,flist); +luaK_patchtohere(fs,escapelist); +check_match(ls,TK_END,TK_IF,line); +} +static void localfunc(LexState*ls){ +expdesc v,b; +FuncState*fs=ls->fs; +new_localvar(ls,str_checkname(ls),0); +init_exp(&v,VLOCAL,fs->freereg); +luaK_reserveregs(fs,1); +adjustlocalvars(ls,1); +body(ls,&b,0,ls->linenumber); 
+luaK_storevar(fs,&v,&b); +getlocvar(fs,fs->nactvar-1).startpc=fs->pc; +} +static void localstat(LexState*ls){ +int nvars=0; +int nexps; +expdesc e; +do{ +new_localvar(ls,str_checkname(ls),nvars++); +}while(testnext(ls,',')); +if(testnext(ls,'=')) +nexps=explist1(ls,&e); +else{ +e.k=VVOID; +nexps=0; +} +adjust_assign(ls,nvars,nexps,&e); +adjustlocalvars(ls,nvars); +} +static int funcname(LexState*ls,expdesc*v){ +int needself=0; +singlevar(ls,v); +while(ls->t.token=='.') +field(ls,v); +if(ls->t.token==':'){ +needself=1; +field(ls,v); +} +return needself; +} +static void funcstat(LexState*ls,int line){ +int needself; +expdesc v,b; +luaX_next(ls); +needself=funcname(ls,&v); +body(ls,&b,needself,line); +luaK_storevar(ls->fs,&v,&b); +luaK_fixline(ls->fs,line); +} +static void exprstat(LexState*ls){ +FuncState*fs=ls->fs; +struct LHS_assign v; +primaryexp(ls,&v.v); +if(v.v.k==VCALL) +SETARG_C(getcode(fs,&v.v),1); +else{ +v.prev=NULL; +assignment(ls,&v,1); +} +} +static void retstat(LexState*ls){ +FuncState*fs=ls->fs; +expdesc e; +int first,nret; +luaX_next(ls); +if(block_follow(ls->t.token)||ls->t.token==';') +first=nret=0; +else{ +nret=explist1(ls,&e); +if(hasmultret(e.k)){ +luaK_setmultret(fs,&e); +if(e.k==VCALL&&nret==1){ +SET_OPCODE(getcode(fs,&e),OP_TAILCALL); +} +first=fs->nactvar; +nret=(-1); +} +else{ +if(nret==1) +first=luaK_exp2anyreg(fs,&e); +else{ +luaK_exp2nextreg(fs,&e); +first=fs->nactvar; +} +} +} +luaK_ret(fs,first,nret); +} +static int statement(LexState*ls){ +int line=ls->linenumber; +switch(ls->t.token){ +case TK_IF:{ +ifstat(ls,line); +return 0; +} +case TK_WHILE:{ +whilestat(ls,line); +return 0; +} +case TK_DO:{ +luaX_next(ls); +block(ls); +check_match(ls,TK_END,TK_DO,line); +return 0; +} +case TK_FOR:{ +forstat(ls,line); +return 0; +} +case TK_REPEAT:{ +repeatstat(ls,line); +return 0; +} +case TK_FUNCTION:{ +funcstat(ls,line); +return 0; +} +case TK_LOCAL:{ +luaX_next(ls); +if(testnext(ls,TK_FUNCTION)) +localfunc(ls); +else +localstat(ls); +return 0; +} +case TK_RETURN:{ +retstat(ls); +return 1; +} +case TK_BREAK:{ +luaX_next(ls); +breakstat(ls); +return 1; +} +default:{ +exprstat(ls); +return 0; +} +} +} +static void chunk(LexState*ls){ +int islast=0; +enterlevel(ls); +while(!islast&&!block_follow(ls->t.token)){ +islast=statement(ls); +testnext(ls,';'); +ls->fs->freereg=ls->fs->nactvar; +} +leavelevel(ls); +} +static const TValue*luaV_tonumber(const TValue*obj,TValue*n){ +lua_Number num; +if(ttisnumber(obj))return obj; +if(ttisstring(obj)&&luaO_str2d(svalue(obj),&num)){ +setnvalue(n,num); +return n; +} +else +return NULL; +} +static int luaV_tostring(lua_State*L,StkId obj){ +if(!ttisnumber(obj)) +return 0; +else{ +char s[32]; +lua_Number n=nvalue(obj); +lua_number2str(s,n); +setsvalue(L,obj,luaS_new(L,s)); +return 1; +} +} +static void callTMres(lua_State*L,StkId res,const TValue*f, +const TValue*p1,const TValue*p2){ +ptrdiff_t result=savestack(L,res); +setobj(L,L->top,f); +setobj(L,L->top+1,p1); +setobj(L,L->top+2,p2); +luaD_checkstack(L,3); +L->top+=3; +luaD_call(L,L->top-3,1); +res=restorestack(L,result); +L->top--; +setobj(L,res,L->top); +} +static void callTM(lua_State*L,const TValue*f,const TValue*p1, +const TValue*p2,const TValue*p3){ +setobj(L,L->top,f); +setobj(L,L->top+1,p1); +setobj(L,L->top+2,p2); +setobj(L,L->top+3,p3); +luaD_checkstack(L,4); +L->top+=4; +luaD_call(L,L->top-4,0); +} +static void luaV_gettable(lua_State*L,const TValue*t,TValue*key,StkId val){ +int loop; +for(loop=0;loop<100;loop++){ +const TValue*tm; +if(ttistable(t)){ +Table*h=hvalue(t); 
+const TValue*res=luaH_get(h,key); +if(!ttisnil(res)|| +(tm=fasttm(L,h->metatable,TM_INDEX))==NULL){ +setobj(L,val,res); +return; +} +} +else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_INDEX))) +luaG_typeerror(L,t,"index"); +if(ttisfunction(tm)){ +callTMres(L,val,tm,t,key); +return; +} +t=tm; +} +luaG_runerror(L,"loop in gettable"); +} +static void luaV_settable(lua_State*L,const TValue*t,TValue*key,StkId val){ +int loop; +TValue temp; +for(loop=0;loop<100;loop++){ +const TValue*tm; +if(ttistable(t)){ +Table*h=hvalue(t); +TValue*oldval=luaH_set(L,h,key); +if(!ttisnil(oldval)|| +(tm=fasttm(L,h->metatable,TM_NEWINDEX))==NULL){ +setobj(L,oldval,val); +h->flags=0; +luaC_barriert(L,h,val); +return; +} +} +else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_NEWINDEX))) +luaG_typeerror(L,t,"index"); +if(ttisfunction(tm)){ +callTM(L,tm,t,key,val); +return; +} +setobj(L,&temp,tm); +t=&temp; +} +luaG_runerror(L,"loop in settable"); +} +static int call_binTM(lua_State*L,const TValue*p1,const TValue*p2, +StkId res,TMS event){ +const TValue*tm=luaT_gettmbyobj(L,p1,event); +if(ttisnil(tm)) +tm=luaT_gettmbyobj(L,p2,event); +if(ttisnil(tm))return 0; +callTMres(L,res,tm,p1,p2); +return 1; +} +static const TValue*get_compTM(lua_State*L,Table*mt1,Table*mt2, +TMS event){ +const TValue*tm1=fasttm(L,mt1,event); +const TValue*tm2; +if(tm1==NULL)return NULL; +if(mt1==mt2)return tm1; +tm2=fasttm(L,mt2,event); +if(tm2==NULL)return NULL; +if(luaO_rawequalObj(tm1,tm2)) +return tm1; +return NULL; +} +static int call_orderTM(lua_State*L,const TValue*p1,const TValue*p2, +TMS event){ +const TValue*tm1=luaT_gettmbyobj(L,p1,event); +const TValue*tm2; +if(ttisnil(tm1))return-1; +tm2=luaT_gettmbyobj(L,p2,event); +if(!luaO_rawequalObj(tm1,tm2)) +return-1; +callTMres(L,L->top,tm1,p1,p2); +return!l_isfalse(L->top); +} +static int l_strcmp(const TString*ls,const TString*rs){ +const char*l=getstr(ls); +size_t ll=ls->tsv.len; +const char*r=getstr(rs); +size_t lr=rs->tsv.len; +for(;;){ +int temp=strcoll(l,r); +if(temp!=0)return temp; +else{ +size_t len=strlen(l); +if(len==lr) +return(len==ll)?0:1; +else if(len==ll) +return-1; +len++; +l+=len;ll-=len;r+=len;lr-=len; +} +} +} +static int luaV_lessthan(lua_State*L,const TValue*l,const TValue*r){ +int res; +if(ttype(l)!=ttype(r)) +return luaG_ordererror(L,l,r); +else if(ttisnumber(l)) +return luai_numlt(nvalue(l),nvalue(r)); +else if(ttisstring(l)) +return l_strcmp(rawtsvalue(l),rawtsvalue(r))<0; +else if((res=call_orderTM(L,l,r,TM_LT))!=-1) +return res; +return luaG_ordererror(L,l,r); +} +static int lessequal(lua_State*L,const TValue*l,const TValue*r){ +int res; +if(ttype(l)!=ttype(r)) +return luaG_ordererror(L,l,r); +else if(ttisnumber(l)) +return luai_numle(nvalue(l),nvalue(r)); +else if(ttisstring(l)) +return l_strcmp(rawtsvalue(l),rawtsvalue(r))<=0; +else if((res=call_orderTM(L,l,r,TM_LE))!=-1) +return res; +else if((res=call_orderTM(L,r,l,TM_LT))!=-1) +return!res; +return luaG_ordererror(L,l,r); +} +static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2){ +const TValue*tm; +switch(ttype(t1)){ +case 0:return 1; +case 3:return luai_numeq(nvalue(t1),nvalue(t2)); +case 1:return bvalue(t1)==bvalue(t2); +case 2:return pvalue(t1)==pvalue(t2); +case 7:{ +if(uvalue(t1)==uvalue(t2))return 1; +tm=get_compTM(L,uvalue(t1)->metatable,uvalue(t2)->metatable, +TM_EQ); +break; +} +case 5:{ +if(hvalue(t1)==hvalue(t2))return 1; +tm=get_compTM(L,hvalue(t1)->metatable,hvalue(t2)->metatable,TM_EQ); +break; +} +default:return gcvalue(t1)==gcvalue(t2); +} +if(tm==NULL)return 0; 
+callTMres(L,L->top,tm,t1,t2); +return!l_isfalse(L->top); +} +static void luaV_concat(lua_State*L,int total,int last){ +do{ +StkId top=L->base+last+1; +int n=2; +if(!(ttisstring(top-2)||ttisnumber(top-2))||!tostring(L,top-1)){ +if(!call_binTM(L,top-2,top-1,top-2,TM_CONCAT)) +luaG_concaterror(L,top-2,top-1); +}else if(tsvalue(top-1)->len==0) +(void)tostring(L,top-2); +else{ +size_t tl=tsvalue(top-1)->len; +char*buffer; +int i; +for(n=1;nlen; +if(l>=((size_t)(~(size_t)0)-2)-tl)luaG_runerror(L,"string length overflow"); +tl+=l; +} +buffer=luaZ_openspace(L,&G(L)->buff,tl); +tl=0; +for(i=n;i>0;i--){ +size_t l=tsvalue(top-i)->len; +memcpy(buffer+tl,svalue(top-i),l); +tl+=l; +} +setsvalue(L,top-n,luaS_newlstr(L,buffer,tl)); +} +total-=n-1; +last-=n-1; +}while(total>1); +} +static void Arith(lua_State*L,StkId ra,const TValue*rb, +const TValue*rc,TMS op){ +TValue tempb,tempc; +const TValue*b,*c; +if((b=luaV_tonumber(rb,&tempb))!=NULL&& +(c=luaV_tonumber(rc,&tempc))!=NULL){ +lua_Number nb=nvalue(b),nc=nvalue(c); +switch(op){ +case TM_ADD:setnvalue(ra,luai_numadd(nb,nc));break; +case TM_SUB:setnvalue(ra,luai_numsub(nb,nc));break; +case TM_MUL:setnvalue(ra,luai_nummul(nb,nc));break; +case TM_DIV:setnvalue(ra,luai_numdiv(nb,nc));break; +case TM_MOD:setnvalue(ra,luai_nummod(nb,nc));break; +case TM_POW:setnvalue(ra,luai_numpow(nb,nc));break; +case TM_UNM:setnvalue(ra,luai_numunm(nb));break; +default:break; +} +} +else if(!call_binTM(L,rb,rc,ra,op)) +luaG_aritherror(L,rb,rc); +} +#define runtime_check(L,c){if(!(c))break;} +#define RA(i)(base+GETARG_A(i)) +#define RB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgR,base+GETARG_B(i)) +#define RKB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_B(i))?k+INDEXK(GETARG_B(i)):base+GETARG_B(i)) +#define RKC(i)check_exp(getCMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_C(i))?k+INDEXK(GETARG_C(i)):base+GETARG_C(i)) +#define KBx(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,k+GETARG_Bx(i)) +#define dojump(L,pc,i){(pc)+=(i);} +#define Protect(x){L->savedpc=pc;{x;};base=L->base;} +#define arith_op(op,tm){TValue*rb=RKB(i);TValue*rc=RKC(i);if(ttisnumber(rb)&&ttisnumber(rc)){lua_Number nb=nvalue(rb),nc=nvalue(rc);setnvalue(ra,op(nb,nc));}else Protect(Arith(L,ra,rb,rc,tm));} +static void luaV_execute(lua_State*L,int nexeccalls){ +LClosure*cl; +StkId base; +TValue*k; +const Instruction*pc; +reentry: +pc=L->savedpc; +cl=&clvalue(L->ci->func)->l; +base=L->base; +k=cl->p->k; +for(;;){ +const Instruction i=*pc++; +StkId ra; +ra=RA(i); +switch(GET_OPCODE(i)){ +case OP_MOVE:{ +setobj(L,ra,RB(i)); +continue; +} +case OP_LOADK:{ +setobj(L,ra,KBx(i)); +continue; +} +case OP_LOADBOOL:{ +setbvalue(ra,GETARG_B(i)); +if(GETARG_C(i))pc++; +continue; +} +case OP_LOADNIL:{ +TValue*rb=RB(i); +do{ +setnilvalue(rb--); +}while(rb>=ra); +continue; +} +case OP_GETUPVAL:{ +int b=GETARG_B(i); +setobj(L,ra,cl->upvals[b]->v); +continue; +} +case OP_GETGLOBAL:{ +TValue g; +TValue*rb=KBx(i); +sethvalue(L,&g,cl->env); +Protect(luaV_gettable(L,&g,rb,ra)); +continue; +} +case OP_GETTABLE:{ +Protect(luaV_gettable(L,RB(i),RKC(i),ra)); +continue; +} +case OP_SETGLOBAL:{ +TValue g; +sethvalue(L,&g,cl->env); +Protect(luaV_settable(L,&g,KBx(i),ra)); +continue; +} +case OP_SETUPVAL:{ +UpVal*uv=cl->upvals[GETARG_B(i)]; +setobj(L,uv->v,ra); +luaC_barrier(L,uv,ra); +continue; +} +case OP_SETTABLE:{ +Protect(luaV_settable(L,ra,RKB(i),RKC(i))); +continue; +} +case OP_NEWTABLE:{ +int b=GETARG_B(i); +int c=GETARG_C(i); +sethvalue(L,ra,luaH_new(L,luaO_fb2int(b),luaO_fb2int(c))); +Protect(luaC_checkGC(L)); +continue; +} +case 
OP_SELF:{ +StkId rb=RB(i); +setobj(L,ra+1,rb); +Protect(luaV_gettable(L,rb,RKC(i),ra)); +continue; +} +case OP_ADD:{ +arith_op(luai_numadd,TM_ADD); +continue; +} +case OP_SUB:{ +arith_op(luai_numsub,TM_SUB); +continue; +} +case OP_MUL:{ +arith_op(luai_nummul,TM_MUL); +continue; +} +case OP_DIV:{ +arith_op(luai_numdiv,TM_DIV); +continue; +} +case OP_MOD:{ +arith_op(luai_nummod,TM_MOD); +continue; +} +case OP_POW:{ +arith_op(luai_numpow,TM_POW); +continue; +} +case OP_UNM:{ +TValue*rb=RB(i); +if(ttisnumber(rb)){ +lua_Number nb=nvalue(rb); +setnvalue(ra,luai_numunm(nb)); +} +else{ +Protect(Arith(L,ra,rb,rb,TM_UNM)); +} +continue; +} +case OP_NOT:{ +int res=l_isfalse(RB(i)); +setbvalue(ra,res); +continue; +} +case OP_LEN:{ +const TValue*rb=RB(i); +switch(ttype(rb)){ +case 5:{ +setnvalue(ra,cast_num(luaH_getn(hvalue(rb)))); +break; +} +case 4:{ +setnvalue(ra,cast_num(tsvalue(rb)->len)); +break; +} +default:{ +Protect( +if(!call_binTM(L,rb,(&luaO_nilobject_),ra,TM_LEN)) +luaG_typeerror(L,rb,"get length of"); +) +} +} +continue; +} +case OP_CONCAT:{ +int b=GETARG_B(i); +int c=GETARG_C(i); +Protect(luaV_concat(L,c-b+1,c);luaC_checkGC(L)); +setobj(L,RA(i),base+b); +continue; +} +case OP_JMP:{ +dojump(L,pc,GETARG_sBx(i)); +continue; +} +case OP_EQ:{ +TValue*rb=RKB(i); +TValue*rc=RKC(i); +Protect( +if(equalobj(L,rb,rc)==GETARG_A(i)) +dojump(L,pc,GETARG_sBx(*pc)); +) +pc++; +continue; +} +case OP_LT:{ +Protect( +if(luaV_lessthan(L,RKB(i),RKC(i))==GETARG_A(i)) +dojump(L,pc,GETARG_sBx(*pc)); +) +pc++; +continue; +} +case OP_LE:{ +Protect( +if(lessequal(L,RKB(i),RKC(i))==GETARG_A(i)) +dojump(L,pc,GETARG_sBx(*pc)); +) +pc++; +continue; +} +case OP_TEST:{ +if(l_isfalse(ra)!=GETARG_C(i)) +dojump(L,pc,GETARG_sBx(*pc)); +pc++; +continue; +} +case OP_TESTSET:{ +TValue*rb=RB(i); +if(l_isfalse(rb)!=GETARG_C(i)){ +setobj(L,ra,rb); +dojump(L,pc,GETARG_sBx(*pc)); +} +pc++; +continue; +} +case OP_CALL:{ +int b=GETARG_B(i); +int nresults=GETARG_C(i)-1; +if(b!=0)L->top=ra+b; +L->savedpc=pc; +switch(luaD_precall(L,ra,nresults)){ +case 0:{ +nexeccalls++; +goto reentry; +} +case 1:{ +if(nresults>=0)L->top=L->ci->top; +base=L->base; +continue; +} +default:{ +return; +} +} +} +case OP_TAILCALL:{ +int b=GETARG_B(i); +if(b!=0)L->top=ra+b; +L->savedpc=pc; +switch(luaD_precall(L,ra,(-1))){ +case 0:{ +CallInfo*ci=L->ci-1; +int aux; +StkId func=ci->func; +StkId pfunc=(ci+1)->func; +if(L->openupval)luaF_close(L,ci->base); +L->base=ci->base=ci->func+((ci+1)->base-pfunc); +for(aux=0;pfunc+auxtop;aux++) +setobj(L,func+aux,pfunc+aux); +ci->top=L->top=func+aux; +ci->savedpc=L->savedpc; +ci->tailcalls++; +L->ci--; +goto reentry; +} +case 1:{ +base=L->base; +continue; +} +default:{ +return; +} +} +} +case OP_RETURN:{ +int b=GETARG_B(i); +if(b!=0)L->top=ra+b-1; +if(L->openupval)luaF_close(L,base); +L->savedpc=pc; +b=luaD_poscall(L,ra); +if(--nexeccalls==0) +return; +else{ +if(b)L->top=L->ci->top; +goto reentry; +} +} +case OP_FORLOOP:{ +lua_Number step=nvalue(ra+2); +lua_Number idx=luai_numadd(nvalue(ra),step); +lua_Number limit=nvalue(ra+1); +if(luai_numlt(0,step)?luai_numle(idx,limit) +:luai_numle(limit,idx)){ +dojump(L,pc,GETARG_sBx(i)); +setnvalue(ra,idx); +setnvalue(ra+3,idx); +} +continue; +} +case OP_FORPREP:{ +const TValue*init=ra; +const TValue*plimit=ra+1; +const TValue*pstep=ra+2; +L->savedpc=pc; +if(!tonumber(init,ra)) +luaG_runerror(L,LUA_QL("for")" initial value must be a number"); +else if(!tonumber(plimit,ra+1)) +luaG_runerror(L,LUA_QL("for")" limit must be a number"); +else if(!tonumber(pstep,ra+2)) 
+luaG_runerror(L,LUA_QL("for")" step must be a number"); +setnvalue(ra,luai_numsub(nvalue(ra),nvalue(pstep))); +dojump(L,pc,GETARG_sBx(i)); +continue; +} +case OP_TFORLOOP:{ +StkId cb=ra+3; +setobj(L,cb+2,ra+2); +setobj(L,cb+1,ra+1); +setobj(L,cb,ra); +L->top=cb+3; +Protect(luaD_call(L,cb,GETARG_C(i))); +L->top=L->ci->top; +cb=RA(i)+3; +if(!ttisnil(cb)){ +setobj(L,cb-1,cb); +dojump(L,pc,GETARG_sBx(*pc)); +} +pc++; +continue; +} +case OP_SETLIST:{ +int n=GETARG_B(i); +int c=GETARG_C(i); +int last; +Table*h; +if(n==0){ +n=cast_int(L->top-ra)-1; +L->top=L->ci->top; +} +if(c==0)c=cast_int(*pc++); +runtime_check(L,ttistable(ra)); +h=hvalue(ra); +last=((c-1)*50)+n; +if(last>h->sizearray) +luaH_resizearray(L,h,last); +for(;n>0;n--){ +TValue*val=ra+n; +setobj(L,luaH_setnum(L,h,last--),val); +luaC_barriert(L,h,val); +} +continue; +} +case OP_CLOSE:{ +luaF_close(L,ra); +continue; +} +case OP_CLOSURE:{ +Proto*p; +Closure*ncl; +int nup,j; +p=cl->p->p[GETARG_Bx(i)]; +nup=p->nups; +ncl=luaF_newLclosure(L,nup,cl->env); +ncl->l.p=p; +for(j=0;jl.upvals[j]=cl->upvals[GETARG_B(*pc)]; +else{ +ncl->l.upvals[j]=luaF_findupval(L,base+GETARG_B(*pc)); +} +} +setclvalue(L,ra,ncl); +Protect(luaC_checkGC(L)); +continue; +} +case OP_VARARG:{ +int b=GETARG_B(i)-1; +int j; +CallInfo*ci=L->ci; +int n=cast_int(ci->base-ci->func)-cl->p->numparams-1; +if(b==(-1)){ +Protect(luaD_checkstack(L,n)); +ra=RA(i); +b=n; +L->top=ra+n; +} +for(j=0;jbase-n+j); +} +else{ +setnilvalue(ra+j); +} +} +continue; +} +} +} +} +#define api_checknelems(L,n)luai_apicheck(L,(n)<=(L->top-L->base)) +#define api_checkvalidindex(L,i)luai_apicheck(L,(i)!=(&luaO_nilobject_)) +#define api_incr_top(L){luai_apicheck(L,L->topci->top);L->top++;} +static TValue*index2adr(lua_State*L,int idx){ +if(idx>0){ +TValue*o=L->base+(idx-1); +luai_apicheck(L,idx<=L->ci->top-L->base); +if(o>=L->top)return cast(TValue*,(&luaO_nilobject_)); +else return o; +} +else if(idx>(-10000)){ +luai_apicheck(L,idx!=0&&-idx<=L->top-L->base); +return L->top+idx; +} +else switch(idx){ +case(-10000):return registry(L); +case(-10001):{ +Closure*func=curr_func(L); +sethvalue(L,&L->env,func->c.env); +return&L->env; +} +case(-10002):return gt(L); +default:{ +Closure*func=curr_func(L); +idx=(-10002)-idx; +return(idx<=func->c.nupvalues) +?&func->c.upvalue[idx-1] +:cast(TValue*,(&luaO_nilobject_)); +} +} +} +static Table*getcurrenv(lua_State*L){ +if(L->ci==L->base_ci) +return hvalue(gt(L)); +else{ +Closure*func=curr_func(L); +return func->c.env; +} +} +static int lua_checkstack(lua_State*L,int size){ +int res=1; +if(size>8000||(L->top-L->base+size)>8000) +res=0; +else if(size>0){ +luaD_checkstack(L,size); +if(L->ci->toptop+size) +L->ci->top=L->top+size; +} +return res; +} +static lua_CFunction lua_atpanic(lua_State*L,lua_CFunction panicf){ +lua_CFunction old; +old=G(L)->panic; +G(L)->panic=panicf; +return old; +} +static int lua_gettop(lua_State*L){ +return cast_int(L->top-L->base); +} +static void lua_settop(lua_State*L,int idx){ +if(idx>=0){ +luai_apicheck(L,idx<=L->stack_last-L->base); +while(L->topbase+idx) +setnilvalue(L->top++); +L->top=L->base+idx; +} +else{ +luai_apicheck(L,-(idx+1)<=(L->top-L->base)); +L->top+=idx+1; +} +} +static void lua_remove(lua_State*L,int idx){ +StkId p; +p=index2adr(L,idx); +api_checkvalidindex(L,p); +while(++ptop)setobj(L,p-1,p); +L->top--; +} +static void lua_insert(lua_State*L,int idx){ +StkId p; +StkId q; +p=index2adr(L,idx); +api_checkvalidindex(L,p); +for(q=L->top;q>p;q--)setobj(L,q,q-1); +setobj(L,p,L->top); +} +static void lua_replace(lua_State*L,int 
idx){ +StkId o; +if(idx==(-10001)&&L->ci==L->base_ci) +luaG_runerror(L,"no calling environment"); +api_checknelems(L,1); +o=index2adr(L,idx); +api_checkvalidindex(L,o); +if(idx==(-10001)){ +Closure*func=curr_func(L); +luai_apicheck(L,ttistable(L->top-1)); +func->c.env=hvalue(L->top-1); +luaC_barrier(L,func,L->top-1); +} +else{ +setobj(L,o,L->top-1); +if(idx<(-10002)) +luaC_barrier(L,curr_func(L),L->top-1); +} +L->top--; +} +static void lua_pushvalue(lua_State*L,int idx){ +setobj(L,L->top,index2adr(L,idx)); +api_incr_top(L); +} +static int lua_type(lua_State*L,int idx){ +StkId o=index2adr(L,idx); +return(o==(&luaO_nilobject_))?(-1):ttype(o); +} +static const char*lua_typename(lua_State*L,int t){ +UNUSED(L); +return(t==(-1))?"no value":luaT_typenames[t]; +} +static int lua_iscfunction(lua_State*L,int idx){ +StkId o=index2adr(L,idx); +return iscfunction(o); +} +static int lua_isnumber(lua_State*L,int idx){ +TValue n; +const TValue*o=index2adr(L,idx); +return tonumber(o,&n); +} +static int lua_isstring(lua_State*L,int idx){ +int t=lua_type(L,idx); +return(t==4||t==3); +} +static int lua_rawequal(lua_State*L,int index1,int index2){ +StkId o1=index2adr(L,index1); +StkId o2=index2adr(L,index2); +return(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0 +:luaO_rawequalObj(o1,o2); +} +static int lua_lessthan(lua_State*L,int index1,int index2){ +StkId o1,o2; +int i; +o1=index2adr(L,index1); +o2=index2adr(L,index2); +i=(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0 +:luaV_lessthan(L,o1,o2); +return i; +} +static lua_Number lua_tonumber(lua_State*L,int idx){ +TValue n; +const TValue*o=index2adr(L,idx); +if(tonumber(o,&n)) +return nvalue(o); +else +return 0; +} +static lua_Integer lua_tointeger(lua_State*L,int idx){ +TValue n; +const TValue*o=index2adr(L,idx); +if(tonumber(o,&n)){ +lua_Integer res; +lua_Number num=nvalue(o); +lua_number2integer(res,num); +return res; +} +else +return 0; +} +static int lua_toboolean(lua_State*L,int idx){ +const TValue*o=index2adr(L,idx); +return!l_isfalse(o); +} +static const char*lua_tolstring(lua_State*L,int idx,size_t*len){ +StkId o=index2adr(L,idx); +if(!ttisstring(o)){ +if(!luaV_tostring(L,o)){ +if(len!=NULL)*len=0; +return NULL; +} +luaC_checkGC(L); +o=index2adr(L,idx); +} +if(len!=NULL)*len=tsvalue(o)->len; +return svalue(o); +} +static size_t lua_objlen(lua_State*L,int idx){ +StkId o=index2adr(L,idx); +switch(ttype(o)){ +case 4:return tsvalue(o)->len; +case 7:return uvalue(o)->len; +case 5:return luaH_getn(hvalue(o)); +case 3:{ +size_t l; +l=(luaV_tostring(L,o)?tsvalue(o)->len:0); +return l; +} +default:return 0; +} +} +static lua_CFunction lua_tocfunction(lua_State*L,int idx){ +StkId o=index2adr(L,idx); +return(!iscfunction(o))?NULL:clvalue(o)->c.f; +} +static void*lua_touserdata(lua_State*L,int idx){ +StkId o=index2adr(L,idx); +switch(ttype(o)){ +case 7:return(rawuvalue(o)+1); +case 2:return pvalue(o); +default:return NULL; +} +} +static void lua_pushnil(lua_State*L){ +setnilvalue(L->top); +api_incr_top(L); +} +static void lua_pushnumber(lua_State*L,lua_Number n){ +setnvalue(L->top,n); +api_incr_top(L); +} +static void lua_pushinteger(lua_State*L,lua_Integer n){ +setnvalue(L->top,cast_num(n)); +api_incr_top(L); +} +static void lua_pushlstring(lua_State*L,const char*s,size_t len){ +luaC_checkGC(L); +setsvalue(L,L->top,luaS_newlstr(L,s,len)); +api_incr_top(L); +} +static void lua_pushstring(lua_State*L,const char*s){ +if(s==NULL) +lua_pushnil(L); +else +lua_pushlstring(L,s,strlen(s)); +} +static const char*lua_pushvfstring(lua_State*L,const char*fmt, +va_list 
argp){ +const char*ret; +luaC_checkGC(L); +ret=luaO_pushvfstring(L,fmt,argp); +return ret; +} +static const char*lua_pushfstring(lua_State*L,const char*fmt,...){ +const char*ret; +va_list argp; +luaC_checkGC(L); +va_start(argp,fmt); +ret=luaO_pushvfstring(L,fmt,argp); +va_end(argp); +return ret; +} +static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n){ +Closure*cl; +luaC_checkGC(L); +api_checknelems(L,n); +cl=luaF_newCclosure(L,n,getcurrenv(L)); +cl->c.f=fn; +L->top-=n; +while(n--) +setobj(L,&cl->c.upvalue[n],L->top+n); +setclvalue(L,L->top,cl); +api_incr_top(L); +} +static void lua_pushboolean(lua_State*L,int b){ +setbvalue(L->top,(b!=0)); +api_incr_top(L); +} +static int lua_pushthread(lua_State*L){ +setthvalue(L,L->top,L); +api_incr_top(L); +return(G(L)->mainthread==L); +} +static void lua_gettable(lua_State*L,int idx){ +StkId t; +t=index2adr(L,idx); +api_checkvalidindex(L,t); +luaV_gettable(L,t,L->top-1,L->top-1); +} +static void lua_getfield(lua_State*L,int idx,const char*k){ +StkId t; +TValue key; +t=index2adr(L,idx); +api_checkvalidindex(L,t); +setsvalue(L,&key,luaS_new(L,k)); +luaV_gettable(L,t,&key,L->top); +api_incr_top(L); +} +static void lua_rawget(lua_State*L,int idx){ +StkId t; +t=index2adr(L,idx); +luai_apicheck(L,ttistable(t)); +setobj(L,L->top-1,luaH_get(hvalue(t),L->top-1)); +} +static void lua_rawgeti(lua_State*L,int idx,int n){ +StkId o; +o=index2adr(L,idx); +luai_apicheck(L,ttistable(o)); +setobj(L,L->top,luaH_getnum(hvalue(o),n)); +api_incr_top(L); +} +static void lua_createtable(lua_State*L,int narray,int nrec){ +luaC_checkGC(L); +sethvalue(L,L->top,luaH_new(L,narray,nrec)); +api_incr_top(L); +} +static int lua_getmetatable(lua_State*L,int objindex){ +const TValue*obj; +Table*mt=NULL; +int res; +obj=index2adr(L,objindex); +switch(ttype(obj)){ +case 5: +mt=hvalue(obj)->metatable; +break; +case 7: +mt=uvalue(obj)->metatable; +break; +default: +mt=G(L)->mt[ttype(obj)]; +break; +} +if(mt==NULL) +res=0; +else{ +sethvalue(L,L->top,mt); +api_incr_top(L); +res=1; +} +return res; +} +static void lua_getfenv(lua_State*L,int idx){ +StkId o; +o=index2adr(L,idx); +api_checkvalidindex(L,o); +switch(ttype(o)){ +case 6: +sethvalue(L,L->top,clvalue(o)->c.env); +break; +case 7: +sethvalue(L,L->top,uvalue(o)->env); +break; +case 8: +setobj(L,L->top,gt(thvalue(o))); +break; +default: +setnilvalue(L->top); +break; +} +api_incr_top(L); +} +static void lua_settable(lua_State*L,int idx){ +StkId t; +api_checknelems(L,2); +t=index2adr(L,idx); +api_checkvalidindex(L,t); +luaV_settable(L,t,L->top-2,L->top-1); +L->top-=2; +} +static void lua_setfield(lua_State*L,int idx,const char*k){ +StkId t; +TValue key; +api_checknelems(L,1); +t=index2adr(L,idx); +api_checkvalidindex(L,t); +setsvalue(L,&key,luaS_new(L,k)); +luaV_settable(L,t,&key,L->top-1); +L->top--; +} +static void lua_rawset(lua_State*L,int idx){ +StkId t; +api_checknelems(L,2); +t=index2adr(L,idx); +luai_apicheck(L,ttistable(t)); +setobj(L,luaH_set(L,hvalue(t),L->top-2),L->top-1); +luaC_barriert(L,hvalue(t),L->top-1); +L->top-=2; +} +static void lua_rawseti(lua_State*L,int idx,int n){ +StkId o; +api_checknelems(L,1); +o=index2adr(L,idx); +luai_apicheck(L,ttistable(o)); +setobj(L,luaH_setnum(L,hvalue(o),n),L->top-1); +luaC_barriert(L,hvalue(o),L->top-1); +L->top--; +} +static int lua_setmetatable(lua_State*L,int objindex){ +TValue*obj; +Table*mt; +api_checknelems(L,1); +obj=index2adr(L,objindex); +api_checkvalidindex(L,obj); +if(ttisnil(L->top-1)) +mt=NULL; +else{ +luai_apicheck(L,ttistable(L->top-1)); +mt=hvalue(L->top-1); +} 
+switch(ttype(obj)){ +case 5:{ +hvalue(obj)->metatable=mt; +if(mt) +luaC_objbarriert(L,hvalue(obj),mt); +break; +} +case 7:{ +uvalue(obj)->metatable=mt; +if(mt) +luaC_objbarrier(L,rawuvalue(obj),mt); +break; +} +default:{ +G(L)->mt[ttype(obj)]=mt; +break; +} +} +L->top--; +return 1; +} +static int lua_setfenv(lua_State*L,int idx){ +StkId o; +int res=1; +api_checknelems(L,1); +o=index2adr(L,idx); +api_checkvalidindex(L,o); +luai_apicheck(L,ttistable(L->top-1)); +switch(ttype(o)){ +case 6: +clvalue(o)->c.env=hvalue(L->top-1); +break; +case 7: +uvalue(o)->env=hvalue(L->top-1); +break; +case 8: +sethvalue(L,gt(thvalue(o)),hvalue(L->top-1)); +break; +default: +res=0; +break; +} +if(res)luaC_objbarrier(L,gcvalue(o),hvalue(L->top-1)); +L->top--; +return res; +} +#define adjustresults(L,nres){if(nres==(-1)&&L->top>=L->ci->top)L->ci->top=L->top;} +#define checkresults(L,na,nr)luai_apicheck(L,(nr)==(-1)||(L->ci->top-L->top>=(nr)-(na))) +static void lua_call(lua_State*L,int nargs,int nresults){ +StkId func; +api_checknelems(L,nargs+1); +checkresults(L,nargs,nresults); +func=L->top-(nargs+1); +luaD_call(L,func,nresults); +adjustresults(L,nresults); +} +struct CallS{ +StkId func; +int nresults; +}; +static void f_call(lua_State*L,void*ud){ +struct CallS*c=cast(struct CallS*,ud); +luaD_call(L,c->func,c->nresults); +} +static int lua_pcall(lua_State*L,int nargs,int nresults,int errfunc){ +struct CallS c; +int status; +ptrdiff_t func; +api_checknelems(L,nargs+1); +checkresults(L,nargs,nresults); +if(errfunc==0) +func=0; +else{ +StkId o=index2adr(L,errfunc); +api_checkvalidindex(L,o); +func=savestack(L,o); +} +c.func=L->top-(nargs+1); +c.nresults=nresults; +status=luaD_pcall(L,f_call,&c,savestack(L,c.func),func); +adjustresults(L,nresults); +return status; +} +static int lua_load(lua_State*L,lua_Reader reader,void*data, +const char*chunkname){ +ZIO z; +int status; +if(!chunkname)chunkname="?"; +luaZ_init(L,&z,reader,data); +status=luaD_protectedparser(L,&z,chunkname); +return status; +} +static int lua_error(lua_State*L){ +api_checknelems(L,1); +luaG_errormsg(L); +return 0; +} +static int lua_next(lua_State*L,int idx){ +StkId t; +int more; +t=index2adr(L,idx); +luai_apicheck(L,ttistable(t)); +more=luaH_next(L,hvalue(t),L->top-1); +if(more){ +api_incr_top(L); +} +else +L->top-=1; +return more; +} +static void lua_concat(lua_State*L,int n){ +api_checknelems(L,n); +if(n>=2){ +luaC_checkGC(L); +luaV_concat(L,n,cast_int(L->top-L->base)-1); +L->top-=(n-1); +} +else if(n==0){ +setsvalue(L,L->top,luaS_newlstr(L,"",0)); +api_incr_top(L); +} +} +static void*lua_newuserdata(lua_State*L,size_t size){ +Udata*u; +luaC_checkGC(L); +u=luaS_newudata(L,size,getcurrenv(L)); +setuvalue(L,L->top,u); +api_incr_top(L); +return u+1; +} +#define luaL_getn(L,i)((int)lua_objlen(L,i)) +#define luaL_setn(L,i,j)((void)0) +typedef struct luaL_Reg{ +const char*name; +lua_CFunction func; +}luaL_Reg; +static void luaI_openlib(lua_State*L,const char*libname, +const luaL_Reg*l,int nup); +static int luaL_argerror(lua_State*L,int numarg,const char*extramsg); +static const char* luaL_checklstring(lua_State*L,int numArg, +size_t*l); +static const char* luaL_optlstring(lua_State*L,int numArg, +const char*def,size_t*l); +static lua_Integer luaL_checkinteger(lua_State*L,int numArg); +static lua_Integer luaL_optinteger(lua_State*L,int nArg, +lua_Integer def); +static int luaL_error(lua_State*L,const char*fmt,...); +static const char* luaL_findtable(lua_State*L,int idx, +const char*fname,int szhint); +#define 
luaL_argcheck(L,cond,numarg,extramsg)((void)((cond)||luaL_argerror(L,(numarg),(extramsg)))) +#define luaL_checkstring(L,n)(luaL_checklstring(L,(n),NULL)) +#define luaL_optstring(L,n,d)(luaL_optlstring(L,(n),(d),NULL)) +#define luaL_checkint(L,n)((int)luaL_checkinteger(L,(n))) +#define luaL_optint(L,n,d)((int)luaL_optinteger(L,(n),(d))) +#define luaL_typename(L,i)lua_typename(L,lua_type(L,(i))) +#define luaL_getmetatable(L,n)(lua_getfield(L,(-10000),(n))) +#define luaL_opt(L,f,n,d)(lua_isnoneornil(L,(n))?(d):f(L,(n))) +typedef struct luaL_Buffer{ +char*p; +int lvl; +lua_State*L; +char buffer[BUFSIZ]; +}luaL_Buffer; +#define luaL_addchar(B,c)((void)((B)->p<((B)->buffer+BUFSIZ)||luaL_prepbuffer(B)),(*(B)->p++=(char)(c))) +#define luaL_addsize(B,n)((B)->p+=(n)) +static char* luaL_prepbuffer(luaL_Buffer*B); +static int luaL_argerror(lua_State*L,int narg,const char*extramsg){ +lua_Debug ar; +if(!lua_getstack(L,0,&ar)) +return luaL_error(L,"bad argument #%d (%s)",narg,extramsg); +lua_getinfo(L,"n",&ar); +if(strcmp(ar.namewhat,"method")==0){ +narg--; +if(narg==0) +return luaL_error(L,"calling "LUA_QL("%s")" on bad self (%s)", +ar.name,extramsg); +} +if(ar.name==NULL) +ar.name="?"; +return luaL_error(L,"bad argument #%d to "LUA_QL("%s")" (%s)", +narg,ar.name,extramsg); +} +static int luaL_typerror(lua_State*L,int narg,const char*tname){ +const char*msg=lua_pushfstring(L,"%s expected, got %s", +tname,luaL_typename(L,narg)); +return luaL_argerror(L,narg,msg); +} +static void tag_error(lua_State*L,int narg,int tag){ +luaL_typerror(L,narg,lua_typename(L,tag)); +} +static void luaL_where(lua_State*L,int level){ +lua_Debug ar; +if(lua_getstack(L,level,&ar)){ +lua_getinfo(L,"Sl",&ar); +if(ar.currentline>0){ +lua_pushfstring(L,"%s:%d: ",ar.short_src,ar.currentline); +return; +} +} +lua_pushliteral(L,""); +} +static int luaL_error(lua_State*L,const char*fmt,...){ +va_list argp; +va_start(argp,fmt); +luaL_where(L,1); +lua_pushvfstring(L,fmt,argp); +va_end(argp); +lua_concat(L,2); +return lua_error(L); +} +static int luaL_newmetatable(lua_State*L,const char*tname){ +lua_getfield(L,(-10000),tname); +if(!lua_isnil(L,-1)) +return 0; +lua_pop(L,1); +lua_newtable(L); +lua_pushvalue(L,-1); +lua_setfield(L,(-10000),tname); +return 1; +} +static void*luaL_checkudata(lua_State*L,int ud,const char*tname){ +void*p=lua_touserdata(L,ud); +if(p!=NULL){ +if(lua_getmetatable(L,ud)){ +lua_getfield(L,(-10000),tname); +if(lua_rawequal(L,-1,-2)){ +lua_pop(L,2); +return p; +} +} +} +luaL_typerror(L,ud,tname); +return NULL; +} +static void luaL_checkstack(lua_State*L,int space,const char*mes){ +if(!lua_checkstack(L,space)) +luaL_error(L,"stack overflow (%s)",mes); +} +static void luaL_checktype(lua_State*L,int narg,int t){ +if(lua_type(L,narg)!=t) +tag_error(L,narg,t); +} +static void luaL_checkany(lua_State*L,int narg){ +if(lua_type(L,narg)==(-1)) +luaL_argerror(L,narg,"value expected"); +} +static const char*luaL_checklstring(lua_State*L,int narg,size_t*len){ +const char*s=lua_tolstring(L,narg,len); +if(!s)tag_error(L,narg,4); +return s; +} +static const char*luaL_optlstring(lua_State*L,int narg, +const char*def,size_t*len){ +if(lua_isnoneornil(L,narg)){ +if(len) +*len=(def?strlen(def):0); +return def; +} +else return luaL_checklstring(L,narg,len); +} +static lua_Number luaL_checknumber(lua_State*L,int narg){ +lua_Number d=lua_tonumber(L,narg); +if(d==0&&!lua_isnumber(L,narg)) +tag_error(L,narg,3); +return d; +} +static lua_Integer luaL_checkinteger(lua_State*L,int narg){ +lua_Integer d=lua_tointeger(L,narg); 
+if(d==0&&!lua_isnumber(L,narg)) +tag_error(L,narg,3); +return d; +} +static lua_Integer luaL_optinteger(lua_State*L,int narg, +lua_Integer def){ +return luaL_opt(L,luaL_checkinteger,narg,def); +} +static int luaL_getmetafield(lua_State*L,int obj,const char*event){ +if(!lua_getmetatable(L,obj)) +return 0; +lua_pushstring(L,event); +lua_rawget(L,-2); +if(lua_isnil(L,-1)){ +lua_pop(L,2); +return 0; +} +else{ +lua_remove(L,-2); +return 1; +} +} +static void luaL_register(lua_State*L,const char*libname, +const luaL_Reg*l){ +luaI_openlib(L,libname,l,0); +} +static int libsize(const luaL_Reg*l){ +int size=0; +for(;l->name;l++)size++; +return size; +} +static void luaI_openlib(lua_State*L,const char*libname, +const luaL_Reg*l,int nup){ +if(libname){ +int size=libsize(l); +luaL_findtable(L,(-10000),"_LOADED",1); +lua_getfield(L,-1,libname); +if(!lua_istable(L,-1)){ +lua_pop(L,1); +if(luaL_findtable(L,(-10002),libname,size)!=NULL) +luaL_error(L,"name conflict for module "LUA_QL("%s"),libname); +lua_pushvalue(L,-1); +lua_setfield(L,-3,libname); +} +lua_remove(L,-2); +lua_insert(L,-(nup+1)); +} +for(;l->name;l++){ +int i; +for(i=0;ifunc,nup); +lua_setfield(L,-(nup+2),l->name); +} +lua_pop(L,nup); +} +static const char*luaL_findtable(lua_State*L,int idx, +const char*fname,int szhint){ +const char*e; +lua_pushvalue(L,idx); +do{ +e=strchr(fname,'.'); +if(e==NULL)e=fname+strlen(fname); +lua_pushlstring(L,fname,e-fname); +lua_rawget(L,-2); +if(lua_isnil(L,-1)){ +lua_pop(L,1); +lua_createtable(L,0,(*e=='.'?1:szhint)); +lua_pushlstring(L,fname,e-fname); +lua_pushvalue(L,-2); +lua_settable(L,-4); +} +else if(!lua_istable(L,-1)){ +lua_pop(L,2); +return fname; +} +lua_remove(L,-2); +fname=e+1; +}while(*e=='.'); +return NULL; +} +#define bufflen(B)((B)->p-(B)->buffer) +#define bufffree(B)((size_t)(BUFSIZ-bufflen(B))) +static int emptybuffer(luaL_Buffer*B){ +size_t l=bufflen(B); +if(l==0)return 0; +else{ +lua_pushlstring(B->L,B->buffer,l); +B->p=B->buffer; +B->lvl++; +return 1; +} +} +static void adjuststack(luaL_Buffer*B){ +if(B->lvl>1){ +lua_State*L=B->L; +int toget=1; +size_t toplen=lua_strlen(L,-1); +do{ +size_t l=lua_strlen(L,-(toget+1)); +if(B->lvl-toget+1>=(20/2)||toplen>l){ +toplen+=l; +toget++; +} +else break; +}while(togetlvl); +lua_concat(L,toget); +B->lvl=B->lvl-toget+1; +} +} +static char*luaL_prepbuffer(luaL_Buffer*B){ +if(emptybuffer(B)) +adjuststack(B); +return B->buffer; +} +static void luaL_addlstring(luaL_Buffer*B,const char*s,size_t l){ +while(l--) +luaL_addchar(B,*s++); +} +static void luaL_pushresult(luaL_Buffer*B){ +emptybuffer(B); +lua_concat(B->L,B->lvl); +B->lvl=1; +} +static void luaL_addvalue(luaL_Buffer*B){ +lua_State*L=B->L; +size_t vl; +const char*s=lua_tolstring(L,-1,&vl); +if(vl<=bufffree(B)){ +memcpy(B->p,s,vl); +B->p+=vl; +lua_pop(L,1); +} +else{ +if(emptybuffer(B)) +lua_insert(L,-2); +B->lvl++; +adjuststack(B); +} +} +static void luaL_buffinit(lua_State*L,luaL_Buffer*B){ +B->L=L; +B->p=B->buffer; +B->lvl=0; +} +typedef struct LoadF{ +int extraline; +FILE*f; +char buff[BUFSIZ]; +}LoadF; +static const char*getF(lua_State*L,void*ud,size_t*size){ +LoadF*lf=(LoadF*)ud; +(void)L; +if(lf->extraline){ +lf->extraline=0; +*size=1; +return"\n"; +} +if(feof(lf->f))return NULL; +*size=fread(lf->buff,1,sizeof(lf->buff),lf->f); +return(*size>0)?lf->buff:NULL; +} +static int errfile(lua_State*L,const char*what,int fnameindex){ +const char*serr=strerror(errno); +const char*filename=lua_tostring(L,fnameindex)+1; +lua_pushfstring(L,"cannot %s %s: %s",what,filename,serr); 
+lua_remove(L,fnameindex); +return(5+1); +} +static int luaL_loadfile(lua_State*L,const char*filename){ +LoadF lf; +int status,readstatus; +int c; +int fnameindex=lua_gettop(L)+1; +lf.extraline=0; +if(filename==NULL){ +lua_pushliteral(L,"=stdin"); +lf.f=stdin; +} +else{ +lua_pushfstring(L,"@%s",filename); +lf.f=fopen(filename,"r"); +if(lf.f==NULL)return errfile(L,"open",fnameindex); +} +c=getc(lf.f); +if(c=='#'){ +lf.extraline=1; +while((c=getc(lf.f))!=EOF&&c!='\n'); +if(c=='\n')c=getc(lf.f); +} +if(c=="\033Lua"[0]&&filename){ +lf.f=freopen(filename,"rb",lf.f); +if(lf.f==NULL)return errfile(L,"reopen",fnameindex); +while((c=getc(lf.f))!=EOF&&c!="\033Lua"[0]); +lf.extraline=0; +} +ungetc(c,lf.f); +status=lua_load(L,getF,&lf,lua_tostring(L,-1)); +readstatus=ferror(lf.f); +if(filename)fclose(lf.f); +if(readstatus){ +lua_settop(L,fnameindex); +return errfile(L,"read",fnameindex); +} +lua_remove(L,fnameindex); +return status; +} +typedef struct LoadS{ +const char*s; +size_t size; +}LoadS; +static const char*getS(lua_State*L,void*ud,size_t*size){ +LoadS*ls=(LoadS*)ud; +(void)L; +if(ls->size==0)return NULL; +*size=ls->size; +ls->size=0; +return ls->s; +} +static int luaL_loadbuffer(lua_State*L,const char*buff,size_t size, +const char*name){ +LoadS ls; +ls.s=buff; +ls.size=size; +return lua_load(L,getS,&ls,name); +} +static void*l_alloc(void*ud,void*ptr,size_t osize,size_t nsize){ +(void)ud; +(void)osize; +if(nsize==0){ +free(ptr); +return NULL; +} +else +return realloc(ptr,nsize); +} +static int panic(lua_State*L){ +(void)L; +fprintf(stderr,"PANIC: unprotected error in call to Lua API (%s)\n", +lua_tostring(L,-1)); +return 0; +} +static lua_State*luaL_newstate(void){ +lua_State*L=lua_newstate(l_alloc,NULL); +if(L)lua_atpanic(L,&panic); +return L; +} +static int luaB_tonumber(lua_State*L){ +int base=luaL_optint(L,2,10); +if(base==10){ +luaL_checkany(L,1); +if(lua_isnumber(L,1)){ +lua_pushnumber(L,lua_tonumber(L,1)); +return 1; +} +} +else{ +const char*s1=luaL_checkstring(L,1); +char*s2; +unsigned long n; +luaL_argcheck(L,2<=base&&base<=36,2,"base out of range"); +n=strtoul(s1,&s2,base); +if(s1!=s2){ +while(isspace((unsigned char)(*s2)))s2++; +if(*s2=='\0'){ +lua_pushnumber(L,(lua_Number)n); +return 1; +} +} +} +lua_pushnil(L); +return 1; +} +static int luaB_error(lua_State*L){ +int level=luaL_optint(L,2,1); +lua_settop(L,1); +if(lua_isstring(L,1)&&level>0){ +luaL_where(L,level); +lua_pushvalue(L,1); +lua_concat(L,2); +} +return lua_error(L); +} +static int luaB_setmetatable(lua_State*L){ +int t=lua_type(L,2); +luaL_checktype(L,1,5); +luaL_argcheck(L,t==0||t==5,2, +"nil or table expected"); +if(luaL_getmetafield(L,1,"__metatable")) +luaL_error(L,"cannot change a protected metatable"); +lua_settop(L,2); +lua_setmetatable(L,1); +return 1; +} +static void getfunc(lua_State*L,int opt){ +if(lua_isfunction(L,1))lua_pushvalue(L,1); +else{ +lua_Debug ar; +int level=opt?luaL_optint(L,1,1):luaL_checkint(L,1); +luaL_argcheck(L,level>=0,1,"level must be non-negative"); +if(lua_getstack(L,level,&ar)==0) +luaL_argerror(L,1,"invalid level"); +lua_getinfo(L,"f",&ar); +if(lua_isnil(L,-1)) +luaL_error(L,"no function environment for tail call at level %d", +level); +} +} +static int luaB_setfenv(lua_State*L){ +luaL_checktype(L,2,5); +getfunc(L,0); +lua_pushvalue(L,2); +if(lua_isnumber(L,1)&&lua_tonumber(L,1)==0){ +lua_pushthread(L); +lua_insert(L,-2); +lua_setfenv(L,-2); +return 0; +} +else if(lua_iscfunction(L,-2)||lua_setfenv(L,-2)==0) +luaL_error(L, +LUA_QL("setfenv")" cannot change environment of given object"); 
+return 1; +} +static int luaB_rawget(lua_State*L){ +luaL_checktype(L,1,5); +luaL_checkany(L,2); +lua_settop(L,2); +lua_rawget(L,1); +return 1; +} +static int luaB_type(lua_State*L){ +luaL_checkany(L,1); +lua_pushstring(L,luaL_typename(L,1)); +return 1; +} +static int luaB_next(lua_State*L){ +luaL_checktype(L,1,5); +lua_settop(L,2); +if(lua_next(L,1)) +return 2; +else{ +lua_pushnil(L); +return 1; +} +} +static int luaB_pairs(lua_State*L){ +luaL_checktype(L,1,5); +lua_pushvalue(L,lua_upvalueindex(1)); +lua_pushvalue(L,1); +lua_pushnil(L); +return 3; +} +static int ipairsaux(lua_State*L){ +int i=luaL_checkint(L,2); +luaL_checktype(L,1,5); +i++; +lua_pushinteger(L,i); +lua_rawgeti(L,1,i); +return(lua_isnil(L,-1))?0:2; +} +static int luaB_ipairs(lua_State*L){ +luaL_checktype(L,1,5); +lua_pushvalue(L,lua_upvalueindex(1)); +lua_pushvalue(L,1); +lua_pushinteger(L,0); +return 3; +} +static int load_aux(lua_State*L,int status){ +if(status==0) +return 1; +else{ +lua_pushnil(L); +lua_insert(L,-2); +return 2; +} +} +static int luaB_loadstring(lua_State*L){ +size_t l; +const char*s=luaL_checklstring(L,1,&l); +const char*chunkname=luaL_optstring(L,2,s); +return load_aux(L,luaL_loadbuffer(L,s,l,chunkname)); +} +static int luaB_loadfile(lua_State*L){ +const char*fname=luaL_optstring(L,1,NULL); +return load_aux(L,luaL_loadfile(L,fname)); +} +static int luaB_assert(lua_State*L){ +luaL_checkany(L,1); +if(!lua_toboolean(L,1)) +return luaL_error(L,"%s",luaL_optstring(L,2,"assertion failed!")); +return lua_gettop(L); +} +static int luaB_unpack(lua_State*L){ +int i,e,n; +luaL_checktype(L,1,5); +i=luaL_optint(L,2,1); +e=luaL_opt(L,luaL_checkint,3,luaL_getn(L,1)); +if(i>e)return 0; +n=e-i+1; +if(n<=0||!lua_checkstack(L,n)) +return luaL_error(L,"too many results to unpack"); +lua_rawgeti(L,1,i); +while(i++e)e=pos; +for(i=e;i>pos;i--){ +lua_rawgeti(L,1,i-1); +lua_rawseti(L,1,i); +} +break; +} +default:{ +return luaL_error(L,"wrong number of arguments to "LUA_QL("insert")); +} +} +luaL_setn(L,1,e); +lua_rawseti(L,1,pos); +return 0; +} +static int tremove(lua_State*L){ +int e=aux_getn(L,1); +int pos=luaL_optint(L,2,e); +if(!(1<=pos&&pos<=e)) +return 0; +luaL_setn(L,1,e-1); +lua_rawgeti(L,1,pos); +for(;posu)luaL_error(L,"invalid order function for sorting"); +lua_pop(L,1); +} +while(lua_rawgeti(L,1,--j),sort_comp(L,-3,-1)){ +if(j0); +} +l=strlen(p); +if(l==0||p[l-1]!='\n') +luaL_addsize(&b,l); +else{ +luaL_addsize(&b,l-1); +luaL_pushresult(&b); +return 1; +} +} +} +static int read_chars(lua_State*L,FILE*f,size_t n){ +size_t rlen; +size_t nr; +luaL_Buffer b; +luaL_buffinit(L,&b); +rlen=BUFSIZ; +do{ +char*p=luaL_prepbuffer(&b); +if(rlen>n)rlen=n; +nr=fread(p,sizeof(char),rlen,f); +luaL_addsize(&b,nr); +n-=nr; +}while(n>0&&nr==rlen); +luaL_pushresult(&b); +return(n==0||lua_objlen(L,-1)>0); +} +static int g_read(lua_State*L,FILE*f,int first){ +int nargs=lua_gettop(L)-1; +int success; +int n; +clearerr(f); +if(nargs==0){ +success=read_line(L,f); +n=first+1; +} +else{ +luaL_checkstack(L,nargs+20,"too many arguments"); +success=1; +for(n=first;nargs--&&success;n++){ +if(lua_type(L,n)==3){ +size_t l=(size_t)lua_tointeger(L,n); +success=(l==0)?test_eof(L,f):read_chars(L,f,l); +} +else{ +const char*p=lua_tostring(L,n); +luaL_argcheck(L,p&&p[0]=='*',n,"invalid option"); +switch(p[1]){ +case'n': +success=read_number(L,f); +break; +case'l': +success=read_line(L,f); +break; +case'a': +read_chars(L,f,~((size_t)0)); +success=1; +break; +default: +return luaL_argerror(L,n,"invalid format"); +} +} +} +} +if(ferror(f)) +return 
pushresult(L,0,NULL); +if(!success){ +lua_pop(L,1); +lua_pushnil(L); +} +return n-first; +} +static int io_read(lua_State*L){ +return g_read(L,getiofile(L,1),1); +} +static int f_read(lua_State*L){ +return g_read(L,tofile(L),2); +} +static int io_readline(lua_State*L){ +FILE*f=*(FILE**)lua_touserdata(L,lua_upvalueindex(1)); +int sucess; +if(f==NULL) +luaL_error(L,"file is already closed"); +sucess=read_line(L,f); +if(ferror(f)) +return luaL_error(L,"%s",strerror(errno)); +if(sucess)return 1; +else{ +if(lua_toboolean(L,lua_upvalueindex(2))){ +lua_settop(L,0); +lua_pushvalue(L,lua_upvalueindex(1)); +aux_close(L); +} +return 0; +} +} +static int g_write(lua_State*L,FILE*f,int arg){ +int nargs=lua_gettop(L)-1; +int status=1; +for(;nargs--;arg++){ +if(lua_type(L,arg)==3){ +status=status&& +fprintf(f,"%.14g",lua_tonumber(L,arg))>0; +} +else{ +size_t l; +const char*s=luaL_checklstring(L,arg,&l); +status=status&&(fwrite(s,sizeof(char),l,f)==l); +} +} +return pushresult(L,status,NULL); +} +static int io_write(lua_State*L){ +return g_write(L,getiofile(L,2),1); +} +static int f_write(lua_State*L){ +return g_write(L,tofile(L),2); +} +static int io_flush(lua_State*L){ +return pushresult(L,fflush(getiofile(L,2))==0,NULL); +} +static int f_flush(lua_State*L){ +return pushresult(L,fflush(tofile(L))==0,NULL); +} +static const luaL_Reg iolib[]={ +{"close",io_close}, +{"flush",io_flush}, +{"input",io_input}, +{"lines",io_lines}, +{"open",io_open}, +{"output",io_output}, +{"read",io_read}, +{"type",io_type}, +{"write",io_write}, +{NULL,NULL} +}; +static const luaL_Reg flib[]={ +{"close",io_close}, +{"flush",f_flush}, +{"lines",f_lines}, +{"read",f_read}, +{"write",f_write}, +{"__gc",io_gc}, +{NULL,NULL} +}; +static void createmeta(lua_State*L){ +luaL_newmetatable(L,"FILE*"); +lua_pushvalue(L,-1); +lua_setfield(L,-2,"__index"); +luaL_register(L,NULL,flib); +} +static void createstdfile(lua_State*L,FILE*f,int k,const char*fname){ +*newfile(L)=f; +if(k>0){ +lua_pushvalue(L,-1); +lua_rawseti(L,(-10001),k); +} +lua_pushvalue(L,-2); +lua_setfenv(L,-2); +lua_setfield(L,-3,fname); +} +static void newfenv(lua_State*L,lua_CFunction cls){ +lua_createtable(L,0,1); +lua_pushcfunction(L,cls); +lua_setfield(L,-2,"__close"); +} +static int luaopen_io(lua_State*L){ +createmeta(L); +newfenv(L,io_fclose); +lua_replace(L,(-10001)); +luaL_register(L,"io",iolib); +newfenv(L,io_noclose); +createstdfile(L,stdin,1,"stdin"); +createstdfile(L,stdout,2,"stdout"); +createstdfile(L,stderr,0,"stderr"); +lua_pop(L,1); +lua_getfield(L,-1,"popen"); +newfenv(L,io_pclose); +lua_setfenv(L,-2); +lua_pop(L,1); +return 1; +} +static int os_pushresult(lua_State*L,int i,const char*filename){ +int en=errno; +if(i){ +lua_pushboolean(L,1); +return 1; +} +else{ +lua_pushnil(L); +lua_pushfstring(L,"%s: %s",filename,strerror(en)); +lua_pushinteger(L,en); +return 3; +} +} +static int os_remove(lua_State*L){ +const char*filename=luaL_checkstring(L,1); +return os_pushresult(L,remove(filename)==0,filename); +} +static int os_exit(lua_State*L){ +exit(luaL_optint(L,1,EXIT_SUCCESS)); +} +static const luaL_Reg syslib[]={ +{"exit",os_exit}, +{"remove",os_remove}, +{NULL,NULL} +}; +static int luaopen_os(lua_State*L){ +luaL_register(L,"os",syslib); +return 1; +} +#define uchar(c)((unsigned char)(c)) +static ptrdiff_t posrelat(ptrdiff_t pos,size_t len){ +if(pos<0)pos+=(ptrdiff_t)len+1; +return(pos>=0)?pos:0; +} +static int str_sub(lua_State*L){ +size_t l; +const char*s=luaL_checklstring(L,1,&l); +ptrdiff_t start=posrelat(luaL_checkinteger(L,2),l); +ptrdiff_t 
end=posrelat(luaL_optinteger(L,3,-1),l); +if(start<1)start=1; +if(end>(ptrdiff_t)l)end=(ptrdiff_t)l; +if(start<=end) +lua_pushlstring(L,s+start-1,end-start+1); +else lua_pushliteral(L,""); +return 1; +} +static int str_lower(lua_State*L){ +size_t l; +size_t i; +luaL_Buffer b; +const char*s=luaL_checklstring(L,1,&l); +luaL_buffinit(L,&b); +for(i=0;i0) +luaL_addlstring(&b,s,l); +luaL_pushresult(&b); +return 1; +} +static int str_byte(lua_State*L){ +size_t l; +const char*s=luaL_checklstring(L,1,&l); +ptrdiff_t posi=posrelat(luaL_optinteger(L,2,1),l); +ptrdiff_t pose=posrelat(luaL_optinteger(L,3,posi),l); +int n,i; +if(posi<=0)posi=1; +if((size_t)pose>l)pose=l; +if(posi>pose)return 0; +n=(int)(pose-posi+1); +if(posi+n<=pose) +luaL_error(L,"string slice too long"); +luaL_checkstack(L,n,"string slice too long"); +for(i=0;i=ms->level||ms->capture[l].len==(-1)) +return luaL_error(ms->L,"invalid capture index"); +return l; +} +static int capture_to_close(MatchState*ms){ +int level=ms->level; +for(level--;level>=0;level--) +if(ms->capture[level].len==(-1))return level; +return luaL_error(ms->L,"invalid pattern capture"); +} +static const char*classend(MatchState*ms,const char*p){ +switch(*p++){ +case'%':{ +if(*p=='\0') +luaL_error(ms->L,"malformed pattern (ends with "LUA_QL("%%")")"); +return p+1; +} +case'[':{ +if(*p=='^')p++; +do{ +if(*p=='\0') +luaL_error(ms->L,"malformed pattern (missing "LUA_QL("]")")"); +if(*(p++)=='%'&&*p!='\0') +p++; +}while(*p!=']'); +return p+1; +} +default:{ +return p; +} +} +} +static int match_class(int c,int cl){ +int res; +switch(tolower(cl)){ +case'a':res=isalpha(c);break; +case'c':res=iscntrl(c);break; +case'd':res=isdigit(c);break; +case'l':res=islower(c);break; +case'p':res=ispunct(c);break; +case's':res=isspace(c);break; +case'u':res=isupper(c);break; +case'w':res=isalnum(c);break; +case'x':res=isxdigit(c);break; +case'z':res=(c==0);break; +default:return(cl==c); +} +return(islower(cl)?res:!res); +} +static int matchbracketclass(int c,const char*p,const char*ec){ +int sig=1; +if(*(p+1)=='^'){ +sig=0; +p++; +} +while(++pL,"unbalanced pattern"); +if(*s!=*p)return NULL; +else{ +int b=*p; +int e=*(p+1); +int cont=1; +while(++ssrc_end){ +if(*s==e){ +if(--cont==0)return s+1; +} +else if(*s==b)cont++; +} +} +return NULL; +} +static const char*max_expand(MatchState*ms,const char*s, +const char*p,const char*ep){ +ptrdiff_t i=0; +while((s+i)src_end&&singlematch(uchar(*(s+i)),p,ep)) +i++; +while(i>=0){ +const char*res=match(ms,(s+i),ep+1); +if(res)return res; +i--; +} +return NULL; +} +static const char*min_expand(MatchState*ms,const char*s, +const char*p,const char*ep){ +for(;;){ +const char*res=match(ms,s,ep+1); +if(res!=NULL) +return res; +else if(ssrc_end&&singlematch(uchar(*s),p,ep)) +s++; +else return NULL; +} +} +static const char*start_capture(MatchState*ms,const char*s, +const char*p,int what){ +const char*res; +int level=ms->level; +if(level>=32)luaL_error(ms->L,"too many captures"); +ms->capture[level].init=s; +ms->capture[level].len=what; +ms->level=level+1; +if((res=match(ms,s,p))==NULL) +ms->level--; +return res; +} +static const char*end_capture(MatchState*ms,const char*s, +const char*p){ +int l=capture_to_close(ms); +const char*res; +ms->capture[l].len=s-ms->capture[l].init; +if((res=match(ms,s,p))==NULL) +ms->capture[l].len=(-1); +return res; +} +static const char*match_capture(MatchState*ms,const char*s,int l){ +size_t len; +l=check_capture(ms,l); +len=ms->capture[l].len; +if((size_t)(ms->src_end-s)>=len&& +memcmp(ms->capture[l].init,s,len)==0) +return 
s+len; +else return NULL; +} +static const char*match(MatchState*ms,const char*s,const char*p){ +init: +switch(*p){ +case'(':{ +if(*(p+1)==')') +return start_capture(ms,s,p+2,(-2)); +else +return start_capture(ms,s,p+1,(-1)); +} +case')':{ +return end_capture(ms,s,p+1); +} +case'%':{ +switch(*(p+1)){ +case'b':{ +s=matchbalance(ms,s,p+2); +if(s==NULL)return NULL; +p+=4;goto init; +} +case'f':{ +const char*ep;char previous; +p+=2; +if(*p!='[') +luaL_error(ms->L,"missing "LUA_QL("[")" after " +LUA_QL("%%f")" in pattern"); +ep=classend(ms,p); +previous=(s==ms->src_init)?'\0':*(s-1); +if(matchbracketclass(uchar(previous),p,ep-1)|| +!matchbracketclass(uchar(*s),p,ep-1))return NULL; +p=ep;goto init; +} +default:{ +if(isdigit(uchar(*(p+1)))){ +s=match_capture(ms,s,uchar(*(p+1))); +if(s==NULL)return NULL; +p+=2;goto init; +} +goto dflt; +} +} +} +case'\0':{ +return s; +} +case'$':{ +if(*(p+1)=='\0') +return(s==ms->src_end)?s:NULL; +else goto dflt; +} +default:dflt:{ +const char*ep=classend(ms,p); +int m=ssrc_end&&singlematch(uchar(*s),p,ep); +switch(*ep){ +case'?':{ +const char*res; +if(m&&((res=match(ms,s+1,ep+1))!=NULL)) +return res; +p=ep+1;goto init; +} +case'*':{ +return max_expand(ms,s,p,ep); +} +case'+':{ +return(m?max_expand(ms,s+1,p,ep):NULL); +} +case'-':{ +return min_expand(ms,s,p,ep); +} +default:{ +if(!m)return NULL; +s++;p=ep;goto init; +} +} +} +} +} +static const char*lmemfind(const char*s1,size_t l1, +const char*s2,size_t l2){ +if(l2==0)return s1; +else if(l2>l1)return NULL; +else{ +const char*init; +l2--; +l1=l1-l2; +while(l1>0&&(init=(const char*)memchr(s1,*s2,l1))!=NULL){ +init++; +if(memcmp(init,s2+1,l2)==0) +return init-1; +else{ +l1-=init-s1; +s1=init; +} +} +return NULL; +} +} +static void push_onecapture(MatchState*ms,int i,const char*s, +const char*e){ +if(i>=ms->level){ +if(i==0) +lua_pushlstring(ms->L,s,e-s); +else +luaL_error(ms->L,"invalid capture index"); +} +else{ +ptrdiff_t l=ms->capture[i].len; +if(l==(-1))luaL_error(ms->L,"unfinished capture"); +if(l==(-2)) +lua_pushinteger(ms->L,ms->capture[i].init-ms->src_init+1); +else +lua_pushlstring(ms->L,ms->capture[i].init,l); +} +} +static int push_captures(MatchState*ms,const char*s,const char*e){ +int i; +int nlevels=(ms->level==0&&s)?1:ms->level; +luaL_checkstack(ms->L,nlevels,"too many captures"); +for(i=0;il1)init=(ptrdiff_t)l1; +if(find&&(lua_toboolean(L,4)|| +strpbrk(p,"^$*+?.([%-")==NULL)){ +const char*s2=lmemfind(s+init,l1-init,p,l2); +if(s2){ +lua_pushinteger(L,s2-s+1); +lua_pushinteger(L,s2-s+l2); +return 2; +} +} +else{ +MatchState ms; +int anchor=(*p=='^')?(p++,1):0; +const char*s1=s+init; +ms.L=L; +ms.src_init=s; +ms.src_end=s+l1; +do{ +const char*res; +ms.level=0; +if((res=match(&ms,s1,p))!=NULL){ +if(find){ +lua_pushinteger(L,s1-s+1); +lua_pushinteger(L,res-s); +return push_captures(&ms,NULL,0)+2; +} +else +return push_captures(&ms,s1,res); +} +}while(s1++L,3,&l); +for(i=0;iL; +switch(lua_type(L,3)){ +case 3: +case 4:{ +add_s(ms,b,s,e); +return; +} +case 6:{ +int n; +lua_pushvalue(L,3); +n=push_captures(ms,s,e); +lua_call(L,n,1); +break; +} +case 5:{ +push_onecapture(ms,0,s,e); +lua_gettable(L,3); +break; +} +} +if(!lua_toboolean(L,-1)){ +lua_pop(L,1); +lua_pushlstring(L,s,e-s); +} +else if(!lua_isstring(L,-1)) +luaL_error(L,"invalid replacement value (a %s)",luaL_typename(L,-1)); +luaL_addvalue(b); +} +static int str_gsub(lua_State*L){ +size_t srcl; +const char*src=luaL_checklstring(L,1,&srcl); +const char*p=luaL_checkstring(L,2); +int tr=lua_type(L,3); +int max_s=luaL_optint(L,4,srcl+1); +int 
anchor=(*p=='^')?(p++,1):0; +int n=0; +MatchState ms; +luaL_Buffer b; +luaL_argcheck(L,tr==3||tr==4|| +tr==6||tr==5,3, +"string/function/table expected"); +luaL_buffinit(L,&b); +ms.L=L; +ms.src_init=src; +ms.src_end=src+srcl; +while(nsrc) +src=e; +else if(src=sizeof("-+ #0")) +luaL_error(L,"invalid format (repeated flags)"); +if(isdigit(uchar(*p)))p++; +if(isdigit(uchar(*p)))p++; +if(*p=='.'){ +p++; +if(isdigit(uchar(*p)))p++; +if(isdigit(uchar(*p)))p++; +} +if(isdigit(uchar(*p))) +luaL_error(L,"invalid format (width or precision too long)"); +*(form++)='%'; +strncpy(form,strfrmt,p-strfrmt+1); +form+=p-strfrmt+1; +*form='\0'; +return p; +} +static void addintlen(char*form){ +size_t l=strlen(form); +char spec=form[l-1]; +strcpy(form+l-1,"l"); +form[l+sizeof("l")-2]=spec; +form[l+sizeof("l")-1]='\0'; +} +static int str_format(lua_State*L){ +int top=lua_gettop(L); +int arg=1; +size_t sfl; +const char*strfrmt=luaL_checklstring(L,arg,&sfl); +const char*strfrmt_end=strfrmt+sfl; +luaL_Buffer b; +luaL_buffinit(L,&b); +while(strfrmttop) +luaL_argerror(L,arg,"no value"); +strfrmt=scanformat(L,strfrmt,form); +switch(*strfrmt++){ +case'c':{ +sprintf(buff,form,(int)luaL_checknumber(L,arg)); +break; +} +case'd':case'i':{ +addintlen(form); +sprintf(buff,form,(long)luaL_checknumber(L,arg)); +break; +} +case'o':case'u':case'x':case'X':{ +addintlen(form); +sprintf(buff,form,(unsigned long)luaL_checknumber(L,arg)); +break; +} +case'e':case'E':case'f': +case'g':case'G':{ +sprintf(buff,form,(double)luaL_checknumber(L,arg)); +break; +} +case'q':{ +addquoted(L,&b,arg); +continue; +} +case's':{ +size_t l; +const char*s=luaL_checklstring(L,arg,&l); +if(!strchr(form,'.')&&l>=100){ +lua_pushvalue(L,arg); +luaL_addvalue(&b); +continue; +} +else{ +sprintf(buff,form,s); +break; +} +} +default:{ +return luaL_error(L,"invalid option "LUA_QL("%%%c")" to " +LUA_QL("format"),*(strfrmt-1)); +} +} +luaL_addlstring(&b,buff,strlen(buff)); +} +} +luaL_pushresult(&b); +return 1; +} +static const luaL_Reg strlib[]={ +{"byte",str_byte}, +{"char",str_char}, +{"find",str_find}, +{"format",str_format}, +{"gmatch",gmatch}, +{"gsub",str_gsub}, +{"lower",str_lower}, +{"match",str_match}, +{"rep",str_rep}, +{"sub",str_sub}, +{"upper",str_upper}, +{NULL,NULL} +}; +static void createmetatable(lua_State*L){ +lua_createtable(L,0,1); +lua_pushliteral(L,""); +lua_pushvalue(L,-2); +lua_setmetatable(L,-2); +lua_pop(L,1); +lua_pushvalue(L,-2); +lua_setfield(L,-2,"__index"); +lua_pop(L,1); +} +static int luaopen_string(lua_State*L){ +luaL_register(L,"string",strlib); +createmetatable(L); +return 1; +} +static const luaL_Reg lualibs[]={ +{"",luaopen_base}, +{"table",luaopen_table}, +{"io",luaopen_io}, +{"os",luaopen_os}, +{"string",luaopen_string}, +{NULL,NULL} +}; +static void luaL_openlibs(lua_State*L){ +const luaL_Reg*lib=lualibs; +for(;lib->func;lib++){ +lua_pushcfunction(L,lib->func); +lua_pushstring(L,lib->name); +lua_call(L,1,0); +} +} +typedef unsigned int UB; +static UB barg(lua_State*L,int idx){ +union{lua_Number n;U64 b;}bn; +bn.n=lua_tonumber(L,idx)+6755399441055744.0; +if(bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number"); +return(UB)bn.b; +} +#define BRET(b)lua_pushnumber(L,(lua_Number)(int)(b));return 1; +static int tobit(lua_State*L){ +BRET(barg(L,1))} +static int bnot(lua_State*L){ +BRET(~barg(L,1))} +static int band(lua_State*L){ +int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)} +static int bor(lua_State*L){ +int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)} +static int 
bxor(lua_State*L){ +int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)} +static int lshift(lua_State*L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)} +static int rshift(lua_State*L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)} +static int arshift(lua_State*L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)} +static int rol(lua_State*L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))} +static int ror(lua_State*L){ +UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))} +static int bswap(lua_State*L){ +UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)} +static int tohex(lua_State*L){ +UB b=barg(L,1); +int n=lua_isnone(L,2)?8:(int)barg(L,2); +const char*hexdigits="0123456789abcdef"; +char buf[8]; +int i; +if(n<0){n=-n;hexdigits="0123456789ABCDEF";} +if(n>8)n=8; +for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;} +lua_pushlstring(L,buf,(size_t)n); +return 1; +} +static const struct luaL_Reg bitlib[]={ +{"tobit",tobit}, +{"bnot",bnot}, +{"band",band}, +{"bor",bor}, +{"bxor",bxor}, +{"lshift",lshift}, +{"rshift",rshift}, +{"arshift",arshift}, +{"rol",rol}, +{"ror",ror}, +{"bswap",bswap}, +{"tohex",tohex}, +{NULL,NULL} +}; +int main(int argc,char**argv){ +lua_State*L=luaL_newstate(); +int i; +luaL_openlibs(L); +luaL_register(L,"bit",bitlib); +if(argc<2)return sizeof(void*); +lua_createtable(L,0,1); +lua_pushstring(L,argv[1]); +lua_rawseti(L,-2,0); +lua_setglobal(L,"arg"); +if(luaL_loadfile(L,argv[1])) +goto err; +for(i=2;i -- BYTECODE -- [...] +-- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello" +-- +-- local out = { +-- -- Do something with each line: +-- write = function(t, ...) io.write(...) end, +-- close = function(t) end, +-- flush = function(t) end, +-- } +-- bc.dump(foo, out) +-- +------------------------------------------------------------------------------ + +-- Cache some library functions and objects. +local jit = require("jit") +assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") +local jutil = require("jit.util") +local vmdef = require("jit.vmdef") +local bit = require("bit") +local sub, gsub, format = string.sub, string.gsub, string.format +local byte, band, shr = string.byte, bit.band, bit.rshift +local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck +local funcuvname = jutil.funcuvname +local bcnames = vmdef.bcnames +local stdout, stderr = io.stdout, io.stderr + +------------------------------------------------------------------------------ + +local function ctlsub(c) + if c == "\n" then return "\\n" + elseif c == "\r" then return "\\r" + elseif c == "\t" then return "\\t" + else return format("\\%03d", byte(c)) + end +end + +-- Return one bytecode line.
+local function bcline(func, pc, prefix) + local ins, m = funcbc(func, pc) + if not ins then return end + local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128) + local a = band(shr(ins, 8), 0xff) + local oidx = 6*band(ins, 0xff) + local op = sub(bcnames, oidx+1, oidx+6) + local s = format("%04d %s %-6s %3s ", + pc, prefix or " ", op, ma == 0 and "" or a) + local d = shr(ins, 16) + if mc == 13*128 then -- BCMjump + return format("%s=> %04d\n", s, pc+d-0x7fff) + end + if mb ~= 0 then + d = band(d, 0xff) + elseif mc == 0 then + return s.."\n" + end + local kc + if mc == 10*128 then -- BCMstr + kc = funck(func, -d-1) + kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub)) + elseif mc == 9*128 then -- BCMnum + kc = funck(func, d) + if op == "TSETM " then kc = kc - 2^52 end + elseif mc == 12*128 then -- BCMfunc + local fi = funcinfo(funck(func, -d-1)) + if fi.ffid then + kc = vmdef.ffnames[fi.ffid] + else + kc = fi.loc + end + elseif mc == 5*128 then -- BCMuv + kc = funcuvname(func, d) + end + if ma == 5 then -- BCMuv + local ka = funcuvname(func, a) + if kc then kc = ka.." ; "..kc else kc = ka end + end + if mb ~= 0 then + local b = shr(ins, 24) + if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end + return format("%s%3d %3d\n", s, b, d) + end + if kc then return format("%s%3d ; %s\n", s, d, kc) end + if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits + return format("%s%3d\n", s, d) +end + +-- Collect branch targets of a function. +local function bctargets(func) + local target = {} + for pc=1,1000000000 do + local ins, m = funcbc(func, pc) + if not ins then break end + if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end + end + return target +end + +-- Dump bytecode instructions of a function. +local function bcdump(func, out, all) + if not out then out = stdout end + local fi = funcinfo(func) + if all and fi.children then + for n=-1,-1000000000,-1 do + local k = funck(func, n) + if not k then break end + if type(k) == "proto" then bcdump(k, out, true) end + end + end + out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined)) + local target = bctargets(func) + for pc=1,1000000000 do + local s = bcline(func, pc, target[pc] and "=>") + if not s then break end + out:write(s) + end + out:write("\n") + out:flush() +end + +------------------------------------------------------------------------------ + +-- Active flag and output file handle. +local active, out + +-- List handler. +local function h_list(func) + return bcdump(func, out) +end + +-- Detach list handler. +local function bclistoff() + if active then + active = false + jit.attach(h_list) + if out and out ~= stdout and out ~= stderr then out:close() end + out = nil + end +end + +-- Open the output file and attach list handler. +local function bcliston(outfile) + if active then bclistoff() end + if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stderr + end + jit.attach(h_list, "bc") + active = true +end + +-- Public module functions. +return { + line = bcline, + dump = bcdump, + targets = bctargets, + on = bcliston, + off = bclistoff, + start = bcliston -- For -j command line option. 
+} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/bcsave.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/bcsave.lua new file mode 100644 index 00000000000..1fe9d389697 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/bcsave.lua @@ -0,0 +1,661 @@ +---------------------------------------------------------------------------- +-- LuaJIT module to save/list bytecode. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module saves or lists the bytecode for an input file. +-- It's run by the -b command line option. +-- +------------------------------------------------------------------------------ + +local jit = require("jit") +assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") +local bit = require("bit") + +-- Symbol name prefix for LuaJIT bytecode. +local LJBC_PREFIX = "luaJIT_BC_" + +------------------------------------------------------------------------------ + +local function usage() + io.stderr:write[[ +Save LuaJIT bytecode: luajit -b[options] input output + -l Only list bytecode. + -s Strip debug info (default). + -g Keep debug info. + -n name Set module name (default: auto-detect from input name). + -t type Set output file type (default: auto-detect from output name). + -a arch Override architecture for object files (default: native). + -o os Override OS for object files (default: native). + -e chunk Use chunk string as input. + -- Stop handling options. + - Use stdin as input and/or stdout as output. + +File types: c h obj o raw (default) +]] + os.exit(1) +end + +local function check(ok, ...) + if ok then return ok, ... end + io.stderr:write("luajit: ", ...) 
+ io.stderr:write("\n") + os.exit(1) +end + +local function readfile(input) + if type(input) == "function" then return input end + if input == "-" then input = nil end + return check(loadfile(input)) +end + +local function savefile(name, mode) + if name == "-" then return io.stdout end + return check(io.open(name, mode)) +end + +------------------------------------------------------------------------------ + +local map_type = { + raw = "raw", c = "c", h = "h", o = "obj", obj = "obj", +} + +local map_arch = { + x86 = true, x64 = true, arm = true, arm64 = true, ppc = true, + mips = true, mipsel = true, +} + +local map_os = { + linux = true, windows = true, osx = true, freebsd = true, netbsd = true, + openbsd = true, dragonfly = true, solaris = true, +} + +local function checkarg(str, map, err) + str = string.lower(str) + local s = check(map[str], "unknown ", err) + return s == true and str or s +end + +local function detecttype(str) + local ext = string.match(string.lower(str), "%.(%a+)$") + return map_type[ext] or "raw" +end + +local function checkmodname(str) + check(string.match(str, "^[%w_.%-]+$"), "bad module name") + return string.gsub(str, "[%.%-]", "_") +end + +local function detectmodname(str) + if type(str) == "string" then + local tail = string.match(str, "[^/\\]+$") + if tail then str = tail end + local head = string.match(str, "^(.*)%.[^.]*$") + if head then str = head end + str = string.match(str, "^[%w_.%-]+") + else + str = nil + end + check(str, "cannot derive module name, use -n name") + return string.gsub(str, "[%.%-]", "_") +end + +------------------------------------------------------------------------------ + +local function bcsave_tail(fp, output, s) + local ok, err = fp:write(s) + if ok and output ~= "-" then ok, err = fp:close() end + check(ok, "cannot write ", output, ": ", err) +end + +local function bcsave_raw(output, s) + local fp = savefile(output, "wb") + bcsave_tail(fp, output, s) +end + +local function bcsave_c(ctx, output, s) + local fp = savefile(output, "w") + if ctx.type == "c" then + fp:write(string.format([[ +#ifdef _cplusplus +extern "C" +#endif +#ifdef _WIN32 +__declspec(dllexport) +#endif +const char %s%s[] = { +]], LJBC_PREFIX, ctx.modname)) + else + fp:write(string.format([[ +#define %s%s_SIZE %d +static const char %s%s[] = { +]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname)) + end + local t, n, m = {}, 0, 0 + for i=1,#s do + local b = tostring(string.byte(s, i)) + m = m + #b + 1 + if m > 78 then + fp:write(table.concat(t, ",", 1, n), ",\n") + n, m = 0, #b + 1 + end + n = n + 1 + t[n] = b + end + bcsave_tail(fp, output, table.concat(t, ",", 1, n).."\n};\n") +end + +local function bcsave_elfobj(ctx, output, s, ffi) + ffi.cdef[[ +typedef struct { + uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7]; + uint16_t type, machine; + uint32_t version; + uint32_t entry, phofs, shofs; + uint32_t flags; + uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx; +} ELF32header; +typedef struct { + uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7]; + uint16_t type, machine; + uint32_t version; + uint64_t entry, phofs, shofs; + uint32_t flags; + uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx; +} ELF64header; +typedef struct { + uint32_t name, type, flags, addr, ofs, size, link, info, align, entsize; +} ELF32sectheader; +typedef struct { + uint32_t name, type; + uint64_t flags, addr, ofs, size; + uint32_t link, info; + uint64_t align, entsize; +} ELF64sectheader; +typedef struct { + 
uint32_t name, value, size; + uint8_t info, other; + uint16_t sectidx; +} ELF32symbol; +typedef struct { + uint32_t name; + uint8_t info, other; + uint16_t sectidx; + uint64_t value, size; +} ELF64symbol; +typedef struct { + ELF32header hdr; + ELF32sectheader sect[6]; + ELF32symbol sym[2]; + uint8_t space[4096]; +} ELF32obj; +typedef struct { + ELF64header hdr; + ELF64sectheader sect[6]; + ELF64symbol sym[2]; + uint8_t space[4096]; +} ELF64obj; +]] + local symname = LJBC_PREFIX..ctx.modname + local is64, isbe = false, false + if ctx.arch == "x64" then + is64 = true + elseif ctx.arch == "ppc" or ctx.arch == "mips" then + isbe = true + end + + -- Handle different host/target endianess. + local function f32(x) return x end + local f16, fofs = f32, f32 + if ffi.abi("be") ~= isbe then + f32 = bit.bswap + function f16(x) return bit.rshift(bit.bswap(x), 16) end + if is64 then + local two32 = ffi.cast("int64_t", 2^32) + function fofs(x) return bit.bswap(x)*two32 end + else + fofs = f32 + end + end + + -- Create ELF object and fill in header. + local o = ffi.new(is64 and "ELF64obj" or "ELF32obj") + local hdr = o.hdr + if ctx.os == "bsd" or ctx.os == "other" then -- Determine native hdr.eosabi. + local bf = assert(io.open("/bin/ls", "rb")) + local bs = bf:read(9) + bf:close() + ffi.copy(o, bs, 9) + check(hdr.emagic[0] == 127, "no support for writing native object files") + else + hdr.emagic = "\127ELF" + hdr.eosabi = ({ freebsd=9, netbsd=2, openbsd=12, solaris=6 })[ctx.os] or 0 + end + hdr.eclass = is64 and 2 or 1 + hdr.eendian = isbe and 2 or 1 + hdr.eversion = 1 + hdr.type = f16(1) + hdr.machine = f16(({ x86=3, x64=62, arm=40, arm64=183, ppc=20, mips=8, mipsel=8 })[ctx.arch]) + if ctx.arch == "mips" or ctx.arch == "mipsel" then + hdr.flags = 0x50001006 + end + hdr.version = f32(1) + hdr.shofs = fofs(ffi.offsetof(o, "sect")) + hdr.ehsize = f16(ffi.sizeof(hdr)) + hdr.shentsize = f16(ffi.sizeof(o.sect[0])) + hdr.shnum = f16(6) + hdr.shstridx = f16(2) + + -- Fill in sections and symbols. + local sofs, ofs = ffi.offsetof(o, "space"), 1 + for i,name in ipairs{ + ".symtab", ".shstrtab", ".strtab", ".rodata", ".note.GNU-stack", + } do + local sect = o.sect[i] + sect.align = fofs(1) + sect.name = f32(ofs) + ffi.copy(o.space+ofs, name) + ofs = ofs + #name+1 + end + o.sect[1].type = f32(2) -- .symtab + o.sect[1].link = f32(3) + o.sect[1].info = f32(1) + o.sect[1].align = fofs(8) + o.sect[1].ofs = fofs(ffi.offsetof(o, "sym")) + o.sect[1].entsize = fofs(ffi.sizeof(o.sym[0])) + o.sect[1].size = fofs(ffi.sizeof(o.sym)) + o.sym[1].name = f32(1) + o.sym[1].sectidx = f16(4) + o.sym[1].size = fofs(#s) + o.sym[1].info = 17 + o.sect[2].type = f32(3) -- .shstrtab + o.sect[2].ofs = fofs(sofs) + o.sect[2].size = fofs(ofs) + o.sect[3].type = f32(3) -- .strtab + o.sect[3].ofs = fofs(sofs + ofs) + o.sect[3].size = fofs(#symname+1) + ffi.copy(o.space+ofs+1, symname) + ofs = ofs + #symname + 2 + o.sect[4].type = f32(1) -- .rodata + o.sect[4].flags = fofs(2) + o.sect[4].ofs = fofs(sofs + ofs) + o.sect[4].size = fofs(#s) + o.sect[5].type = f32(1) -- .note.GNU-stack + o.sect[5].ofs = fofs(sofs + ofs + #s) + + -- Write ELF object file. 
+ local fp = savefile(output, "wb") + fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs)) + bcsave_tail(fp, output, s) +end + +local function bcsave_peobj(ctx, output, s, ffi) + ffi.cdef[[ +typedef struct { + uint16_t arch, nsects; + uint32_t time, symtabofs, nsyms; + uint16_t opthdrsz, flags; +} PEheader; +typedef struct { + char name[8]; + uint32_t vsize, vaddr, size, ofs, relocofs, lineofs; + uint16_t nreloc, nline; + uint32_t flags; +} PEsection; +typedef struct __attribute((packed)) { + union { + char name[8]; + uint32_t nameref[2]; + }; + uint32_t value; + int16_t sect; + uint16_t type; + uint8_t scl, naux; +} PEsym; +typedef struct __attribute((packed)) { + uint32_t size; + uint16_t nreloc, nline; + uint32_t cksum; + uint16_t assoc; + uint8_t comdatsel, unused[3]; +} PEsymaux; +typedef struct { + PEheader hdr; + PEsection sect[2]; + // Must be an even number of symbol structs. + PEsym sym0; + PEsymaux sym0aux; + PEsym sym1; + PEsymaux sym1aux; + PEsym sym2; + PEsym sym3; + uint32_t strtabsize; + uint8_t space[4096]; +} PEobj; +]] + local symname = LJBC_PREFIX..ctx.modname + local is64 = false + if ctx.arch == "x86" then + symname = "_"..symname + elseif ctx.arch == "x64" then + is64 = true + end + local symexport = " /EXPORT:"..symname..",DATA " + + -- The file format is always little-endian. Swap if the host is big-endian. + local function f32(x) return x end + local f16 = f32 + if ffi.abi("be") then + f32 = bit.bswap + function f16(x) return bit.rshift(bit.bswap(x), 16) end + end + + -- Create PE object and fill in header. + local o = ffi.new("PEobj") + local hdr = o.hdr + hdr.arch = f16(({ x86=0x14c, x64=0x8664, arm=0x1c0, ppc=0x1f2, mips=0x366, mipsel=0x366 })[ctx.arch]) + hdr.nsects = f16(2) + hdr.symtabofs = f32(ffi.offsetof(o, "sym0")) + hdr.nsyms = f32(6) + + -- Fill in sections and symbols. + o.sect[0].name = ".drectve" + o.sect[0].size = f32(#symexport) + o.sect[0].flags = f32(0x00100a00) + o.sym0.sect = f16(1) + o.sym0.scl = 3 + o.sym0.name = ".drectve" + o.sym0.naux = 1 + o.sym0aux.size = f32(#symexport) + o.sect[1].name = ".rdata" + o.sect[1].size = f32(#s) + o.sect[1].flags = f32(0x40300040) + o.sym1.sect = f16(2) + o.sym1.scl = 3 + o.sym1.name = ".rdata" + o.sym1.naux = 1 + o.sym1aux.size = f32(#s) + o.sym2.sect = f16(2) + o.sym2.scl = 2 + o.sym2.nameref[1] = f32(4) + o.sym3.sect = f16(-1) + o.sym3.scl = 2 + o.sym3.value = f32(1) + o.sym3.name = "@feat.00" -- Mark as SafeSEH compliant. + ffi.copy(o.space, symname) + local ofs = #symname + 1 + o.strtabsize = f32(ofs + 4) + o.sect[0].ofs = f32(ffi.offsetof(o, "space") + ofs) + ffi.copy(o.space + ofs, symexport) + ofs = ofs + #symexport + o.sect[1].ofs = f32(ffi.offsetof(o, "space") + ofs) + + -- Write PE object file. 
+ local fp = savefile(output, "wb") + fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs)) + bcsave_tail(fp, output, s) +end + +local function bcsave_machobj(ctx, output, s, ffi) + ffi.cdef[[ +typedef struct +{ + uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags; +} mach_header; +typedef struct +{ + mach_header; uint32_t reserved; +} mach_header_64; +typedef struct { + uint32_t cmd, cmdsize; + char segname[16]; + uint32_t vmaddr, vmsize, fileoff, filesize; + uint32_t maxprot, initprot, nsects, flags; +} mach_segment_command; +typedef struct { + uint32_t cmd, cmdsize; + char segname[16]; + uint64_t vmaddr, vmsize, fileoff, filesize; + uint32_t maxprot, initprot, nsects, flags; +} mach_segment_command_64; +typedef struct { + char sectname[16], segname[16]; + uint32_t addr, size; + uint32_t offset, align, reloff, nreloc, flags; + uint32_t reserved1, reserved2; +} mach_section; +typedef struct { + char sectname[16], segname[16]; + uint64_t addr, size; + uint32_t offset, align, reloff, nreloc, flags; + uint32_t reserved1, reserved2, reserved3; +} mach_section_64; +typedef struct { + uint32_t cmd, cmdsize, symoff, nsyms, stroff, strsize; +} mach_symtab_command; +typedef struct { + int32_t strx; + uint8_t type, sect; + int16_t desc; + uint32_t value; +} mach_nlist; +typedef struct { + uint32_t strx; + uint8_t type, sect; + uint16_t desc; + uint64_t value; +} mach_nlist_64; +typedef struct +{ + uint32_t magic, nfat_arch; +} mach_fat_header; +typedef struct +{ + uint32_t cputype, cpusubtype, offset, size, align; +} mach_fat_arch; +typedef struct { + struct { + mach_header hdr; + mach_segment_command seg; + mach_section sec; + mach_symtab_command sym; + } arch[1]; + mach_nlist sym_entry; + uint8_t space[4096]; +} mach_obj; +typedef struct { + struct { + mach_header_64 hdr; + mach_segment_command_64 seg; + mach_section_64 sec; + mach_symtab_command sym; + } arch[1]; + mach_nlist_64 sym_entry; + uint8_t space[4096]; +} mach_obj_64; +typedef struct { + mach_fat_header fat; + mach_fat_arch fat_arch[2]; + struct { + mach_header hdr; + mach_segment_command seg; + mach_section sec; + mach_symtab_command sym; + } arch[2]; + mach_nlist sym_entry; + uint8_t space[4096]; +} mach_fat_obj; +]] + local symname = '_'..LJBC_PREFIX..ctx.modname + local isfat, is64, align, mobj = false, false, 4, "mach_obj" + if ctx.arch == "x64" then + is64, align, mobj = true, 8, "mach_obj_64" + elseif ctx.arch == "arm" then + isfat, mobj = true, "mach_fat_obj" + elseif ctx.arch == "arm64" then + is64, align, isfat, mobj = true, 8, true, "mach_fat_obj" + else + check(ctx.arch == "x86", "unsupported architecture for OSX") + end + local function aligned(v, a) return bit.band(v+a-1, -a) end + local be32 = bit.bswap -- Mach-O FAT is BE, supported archs are LE. + + -- Create Mach-O object and fill in header. + local o = ffi.new(mobj) + local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align) + local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12}, arm64={0x01000007,0x0100000c} })[ctx.arch] + local cpusubtype = ({ x86={3}, x64={3}, arm={3,9}, arm64={3,0} })[ctx.arch] + if isfat then + o.fat.magic = be32(0xcafebabe) + o.fat.nfat_arch = be32(#cpusubtype) + end + + -- Fill in sections and symbols. + for i=0,#cpusubtype-1 do + local ofs = 0 + if isfat then + local a = o.fat_arch[i] + a.cputype = be32(cputype[i+1]) + a.cpusubtype = be32(cpusubtype[i+1]) + -- Subsequent slices overlap each other to share data. 
+ ofs = ffi.offsetof(o, "arch") + i*ffi.sizeof(o.arch[0]) + a.offset = be32(ofs) + a.size = be32(mach_size-ofs+#s) + end + local a = o.arch[i] + a.hdr.magic = is64 and 0xfeedfacf or 0xfeedface + a.hdr.cputype = cputype[i+1] + a.hdr.cpusubtype = cpusubtype[i+1] + a.hdr.filetype = 1 + a.hdr.ncmds = 2 + a.hdr.sizeofcmds = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)+ffi.sizeof(a.sym) + a.seg.cmd = is64 and 0x19 or 0x1 + a.seg.cmdsize = ffi.sizeof(a.seg)+ffi.sizeof(a.sec) + a.seg.vmsize = #s + a.seg.fileoff = mach_size-ofs + a.seg.filesize = #s + a.seg.maxprot = 1 + a.seg.initprot = 1 + a.seg.nsects = 1 + ffi.copy(a.sec.sectname, "__data") + ffi.copy(a.sec.segname, "__DATA") + a.sec.size = #s + a.sec.offset = mach_size-ofs + a.sym.cmd = 2 + a.sym.cmdsize = ffi.sizeof(a.sym) + a.sym.symoff = ffi.offsetof(o, "sym_entry")-ofs + a.sym.nsyms = 1 + a.sym.stroff = ffi.offsetof(o, "sym_entry")+ffi.sizeof(o.sym_entry)-ofs + a.sym.strsize = aligned(#symname+2, align) + end + o.sym_entry.type = 0xf + o.sym_entry.sect = 1 + o.sym_entry.strx = 1 + ffi.copy(o.space+1, symname) + + -- Write Macho-O object file. + local fp = savefile(output, "wb") + fp:write(ffi.string(o, mach_size)) + bcsave_tail(fp, output, s) +end + +local function bcsave_obj(ctx, output, s) + local ok, ffi = pcall(require, "ffi") + check(ok, "FFI library required to write this file type") + if ctx.os == "windows" then + return bcsave_peobj(ctx, output, s, ffi) + elseif ctx.os == "osx" then + return bcsave_machobj(ctx, output, s, ffi) + else + return bcsave_elfobj(ctx, output, s, ffi) + end +end + +------------------------------------------------------------------------------ + +local function bclist(input, output) + local f = readfile(input) + require("jit.bc").dump(f, savefile(output, "w"), true) +end + +local function bcsave(ctx, input, output) + local f = readfile(input) + local s = string.dump(f, ctx.strip) + local t = ctx.type + if not t then + t = detecttype(output) + ctx.type = t + end + if t == "raw" then + bcsave_raw(output, s) + else + if not ctx.modname then ctx.modname = detectmodname(input) end + if t == "obj" then + bcsave_obj(ctx, output, s) + else + bcsave_c(ctx, output, s) + end + end +end + +local function docmd(...) 
+ local arg = {...} + local n = 1 + local list = false + local ctx = { + strip = true, arch = jit.arch, os = string.lower(jit.os), + type = false, modname = false, + } + while n <= #arg do + local a = arg[n] + if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then + table.remove(arg, n) + if a == "--" then break end + for m=2,#a do + local opt = string.sub(a, m, m) + if opt == "l" then + list = true + elseif opt == "s" then + ctx.strip = true + elseif opt == "g" then + ctx.strip = false + else + if arg[n] == nil or m ~= #a then usage() end + if opt == "e" then + if n ~= 1 then usage() end + arg[1] = check(loadstring(arg[1])) + elseif opt == "n" then + ctx.modname = checkmodname(table.remove(arg, n)) + elseif opt == "t" then + ctx.type = checkarg(table.remove(arg, n), map_type, "file type") + elseif opt == "a" then + ctx.arch = checkarg(table.remove(arg, n), map_arch, "architecture") + elseif opt == "o" then + ctx.os = checkarg(table.remove(arg, n), map_os, "OS name") + else + usage() + end + end + end + else + n = n + 1 + end + end + if list then + if #arg == 0 or #arg > 2 then usage() end + bclist(arg[1], arg[2] or "-") + else + if #arg ~= 2 then usage() end + bcsave(ctx, arg[1], arg[2]) + end +end + +------------------------------------------------------------------------------ + +-- Public module functions. +return { + start = docmd -- Process -b command line option. +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_arm.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_arm.lua new file mode 100644 index 00000000000..dfcbeeecf14 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_arm.lua @@ -0,0 +1,689 @@ +---------------------------------------------------------------------------- +-- LuaJIT ARM disassembler module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This is a helper module used by the LuaJIT machine code dumper module. +-- +-- It disassembles most user-mode ARMv7 instructions +-- NYI: Advanced SIMD and VFP instructions. 
+------------------------------------------------------------------------------ + +local type = type +local sub, byte, format = string.sub, string.byte, string.format +local match, gmatch, gsub = string.match, string.gmatch, string.gsub +local concat = table.concat +local bit = require("bit") +local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex +local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift + +------------------------------------------------------------------------------ +-- Opcode maps +------------------------------------------------------------------------------ + +local map_loadc = { + shift = 8, mask = 15, + [10] = { + shift = 20, mask = 1, + [0] = { + shift = 23, mask = 3, + [0] = "vmovFmDN", "vstmFNdr", + _ = { + shift = 21, mask = 1, + [0] = "vstrFdl", + { shift = 16, mask = 15, [13] = "vpushFdr", _ = "vstmdbFNdr", } + }, + }, + { + shift = 23, mask = 3, + [0] = "vmovFDNm", + { shift = 16, mask = 15, [13] = "vpopFdr", _ = "vldmFNdr", }, + _ = { + shift = 21, mask = 1, + [0] = "vldrFdl", "vldmdbFNdr", + }, + }, + }, + [11] = { + shift = 20, mask = 1, + [0] = { + shift = 23, mask = 3, + [0] = "vmovGmDN", "vstmGNdr", + _ = { + shift = 21, mask = 1, + [0] = "vstrGdl", + { shift = 16, mask = 15, [13] = "vpushGdr", _ = "vstmdbGNdr", } + }, + }, + { + shift = 23, mask = 3, + [0] = "vmovGDNm", + { shift = 16, mask = 15, [13] = "vpopGdr", _ = "vldmGNdr", }, + _ = { + shift = 21, mask = 1, + [0] = "vldrGdl", "vldmdbGNdr", + }, + }, + }, + _ = { + shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc. + }, +} + +local map_vfps = { + shift = 6, mask = 0x2c001, + [0] = "vmlaF.dnm", "vmlsF.dnm", + [0x04000] = "vnmlsF.dnm", [0x04001] = "vnmlaF.dnm", + [0x08000] = "vmulF.dnm", [0x08001] = "vnmulF.dnm", + [0x0c000] = "vaddF.dnm", [0x0c001] = "vsubF.dnm", + [0x20000] = "vdivF.dnm", + [0x24000] = "vfnmsF.dnm", [0x24001] = "vfnmaF.dnm", + [0x28000] = "vfmaF.dnm", [0x28001] = "vfmsF.dnm", + [0x2c000] = "vmovF.dY", + [0x2c001] = { + shift = 7, mask = 0x1e01, + [0] = "vmovF.dm", "vabsF.dm", + [0x0200] = "vnegF.dm", [0x0201] = "vsqrtF.dm", + [0x0800] = "vcmpF.dm", [0x0801] = "vcmpeF.dm", + [0x0a00] = "vcmpzF.d", [0x0a01] = "vcmpzeF.d", + [0x0e01] = "vcvtG.dF.m", + [0x1000] = "vcvt.f32.u32Fdm", [0x1001] = "vcvt.f32.s32Fdm", + [0x1800] = "vcvtr.u32F.dm", [0x1801] = "vcvt.u32F.dm", + [0x1a00] = "vcvtr.s32F.dm", [0x1a01] = "vcvt.s32F.dm", + }, +} + +local map_vfpd = { + shift = 6, mask = 0x2c001, + [0] = "vmlaG.dnm", "vmlsG.dnm", + [0x04000] = "vnmlsG.dnm", [0x04001] = "vnmlaG.dnm", + [0x08000] = "vmulG.dnm", [0x08001] = "vnmulG.dnm", + [0x0c000] = "vaddG.dnm", [0x0c001] = "vsubG.dnm", + [0x20000] = "vdivG.dnm", + [0x24000] = "vfnmsG.dnm", [0x24001] = "vfnmaG.dnm", + [0x28000] = "vfmaG.dnm", [0x28001] = "vfmsG.dnm", + [0x2c000] = "vmovG.dY", + [0x2c001] = { + shift = 7, mask = 0x1e01, + [0] = "vmovG.dm", "vabsG.dm", + [0x0200] = "vnegG.dm", [0x0201] = "vsqrtG.dm", + [0x0800] = "vcmpG.dm", [0x0801] = "vcmpeG.dm", + [0x0a00] = "vcmpzG.d", [0x0a01] = "vcmpzeG.d", + [0x0e01] = "vcvtF.dG.m", + [0x1000] = "vcvt.f64.u32GdFm", [0x1001] = "vcvt.f64.s32GdFm", + [0x1800] = "vcvtr.u32FdG.m", [0x1801] = "vcvt.u32FdG.m", + [0x1a00] = "vcvtr.s32FdG.m", [0x1a01] = "vcvt.s32FdG.m", + }, +} + +local map_datac = { + shift = 24, mask = 1, + [0] = { + shift = 4, mask = 1, + [0] = { + shift = 8, mask = 15, + [10] = map_vfps, + [11] = map_vfpd, + -- NYI cdp, mcr, mrc. 
+ }, + { + shift = 8, mask = 15, + [10] = { + shift = 20, mask = 15, + [0] = "vmovFnD", "vmovFDn", + [14] = "vmsrD", + [15] = { shift = 12, mask = 15, [15] = "vmrs", _ = "vmrsD", }, + }, + }, + }, + "svcT", +} + +local map_loadcu = { + shift = 0, mask = 0, -- NYI unconditional CP load/store. +} + +local map_datacu = { + shift = 0, mask = 0, -- NYI unconditional CP data. +} + +local map_simddata = { + shift = 0, mask = 0, -- NYI SIMD data. +} + +local map_simdload = { + shift = 0, mask = 0, -- NYI SIMD load/store, preload. +} + +local map_preload = { + shift = 0, mask = 0, -- NYI preload. +} + +local map_media = { + shift = 20, mask = 31, + [0] = false, + { --01 + shift = 5, mask = 7, + [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM", + "sadd8DNM", false, false, "ssub8DNM", + }, + { --02 + shift = 5, mask = 7, + [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM", + "qadd8DNM", false, false, "qsub8DNM", + }, + { --03 + shift = 5, mask = 7, + [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM", + "shadd8DNM", false, false, "shsub8DNM", + }, + false, + { --05 + shift = 5, mask = 7, + [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM", + "uadd8DNM", false, false, "usub8DNM", + }, + { --06 + shift = 5, mask = 7, + [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM", + "uqadd8DNM", false, false, "uqsub8DNM", + }, + { --07 + shift = 5, mask = 7, + [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM", + "uhadd8DNM", false, false, "uhsub8DNM", + }, + { --08 + shift = 5, mask = 7, + [0] = "pkhbtDNMU", false, "pkhtbDNMU", + { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", }, + "pkhbtDNMU", "selDNM", "pkhtbDNMU", + }, + false, + { --0a + shift = 5, mask = 7, + [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu", + { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", }, + "ssatDxMu", false, "ssatDxMu", + }, + { --0b + shift = 5, mask = 7, + [0] = "ssatDxMu", "revDM", "ssatDxMu", + { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", }, + "ssatDxMu", "rev16DM", "ssatDxMu", + }, + { --0c + shift = 5, mask = 7, + [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", }, + }, + false, + { --0e + shift = 5, mask = 7, + [0] = "usatDwMu", "usat16DwM", "usatDwMu", + { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", }, + "usatDwMu", false, "usatDwMu", + }, + { --0f + shift = 5, mask = 7, + [0] = "usatDwMu", "rbitDM", "usatDwMu", + { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", }, + "usatDwMu", "revshDM", "usatDwMu", + }, + { --10 + shift = 12, mask = 15, + [15] = { + shift = 5, mask = 7, + "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS", + }, + _ = { + shift = 5, mask = 7, + [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD", + }, + }, + false, false, false, + { --14 + shift = 5, mask = 7, + [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS", + }, + { --15 + shift = 5, mask = 7, + [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", }, + { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", }, + false, false, false, false, + "smmlsNMSD", "smmlsrNMSD", + }, + false, false, + { --18 + shift = 5, mask = 7, + [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", }, + }, + false, + { --1a + shift = 5, mask = 3, [2] = "sbfxDMvw", + }, + { --1b + shift = 5, mask = 3, [2] = "sbfxDMvw", + }, + { --1c + shift = 5, mask = 3, + [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", }, + }, + { --1d + shift = 5, mask = 3, + [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = 
"bfiDMvX", }, + }, + { --1e + shift = 5, mask = 3, [2] = "ubfxDMvw", + }, + { --1f + shift = 5, mask = 3, [2] = "ubfxDMvw", + }, +} + +local map_load = { + shift = 21, mask = 9, + { + shift = 20, mask = 5, + [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL", + }, + _ = { + shift = 20, mask = 5, + [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL", + } +} + +local map_load1 = { + shift = 4, mask = 1, + [0] = map_load, map_media, +} + +local map_loadm = { + shift = 20, mask = 1, + [0] = { + shift = 23, mask = 3, + [0] = "stmdaNR", "stmNR", + { shift = 16, mask = 63, [45] = "pushR", _ = "stmdbNR", }, "stmibNR", + }, + { + shift = 23, mask = 3, + [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", }, + "ldmdbNR", "ldmibNR", + }, +} + +local map_data = { + shift = 21, mask = 15, + [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs", + "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs", + "tstNP", "teqNP", "cmpNP", "cmnNP", + "orrDNPs", "movDPs", "bicDNPs", "mvnDPs", +} + +local map_mul = { + shift = 21, mask = 7, + [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS", + "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs", +} + +local map_sync = { + shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd. + [0] = "swpDMN", false, false, false, + "swpbDMN", false, false, false, + "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN", + "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN", +} + +local map_mulh = { + shift = 21, mask = 3, + [0] = { shift = 5, mask = 3, + [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", }, + { shift = 5, mask = 3, + [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", }, + { shift = 5, mask = 3, + [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", }, + { shift = 5, mask = 3, + [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", }, +} + +local map_misc = { + shift = 4, mask = 7, + -- NYI: decode PSR bits of msr. + [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", }, + { shift = 21, mask = 3, "bxM", false, "clzDM", }, + { shift = 21, mask = 3, "bxjM", }, + { shift = 21, mask = 3, "blxM", }, + false, + { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", }, + false, + { shift = 21, mask = 3, "bkptK", }, +} + +local map_datar = { + shift = 4, mask = 9, + [9] = { + shift = 5, mask = 3, + [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, }, + { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", }, + { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", }, + { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", }, + }, + _ = { + shift = 20, mask = 25, + [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, }, + _ = { + shift = 0, mask = 0xffffffff, + [bor(0xe1a00000)] = "nop", + _ = map_data, + } + }, +} + +local map_datai = { + shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12. + [16] = "movwDW", [20] = "movtDW", + [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", }, + [22] = "msrNW", + _ = map_data, +} + +local map_branch = { + shift = 24, mask = 1, + [0] = "bB", "blB" +} + +local map_condins = { + [0] = map_datar, map_datai, map_load, map_load1, + map_loadm, map_branch, map_loadc, map_datac +} + +-- NYI: setend. 
+local map_uncondins = { + [0] = false, map_simddata, map_simdload, map_preload, + false, "blxB", map_loadcu, map_datacu, +} + +------------------------------------------------------------------------------ + +local map_gpr = { + [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc", +} + +local map_cond = { + [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "al", +} + +local map_shift = { [0] = "lsl", "lsr", "asr", "ror", } + +------------------------------------------------------------------------------ + +-- Output a nicely formatted line with an opcode and operands. +local function putop(ctx, text, operands) + local pos = ctx.pos + local extra = "" + if ctx.rel then + local sym = ctx.symtab[ctx.rel] + if sym then + extra = "\t->"..sym + elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then + extra = "\t; 0x"..tohex(ctx.rel) + end + end + if ctx.hexdump > 0 then + ctx.out(format("%08x %s %-5s %s%s\n", + ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) + else + ctx.out(format("%08x %-5s %s%s\n", + ctx.addr+pos, text, concat(operands, ", "), extra)) + end + ctx.pos = pos + 4 +end + +-- Fallback for unknown opcodes. +local function unknown(ctx) + return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) +end + +-- Format operand 2 of load/store opcodes. +local function fmtload(ctx, op, pos) + local base = map_gpr[band(rshift(op, 16), 15)] + local x, ofs + local ext = (band(op, 0x04000000) == 0) + if not ext and band(op, 0x02000000) == 0 then + ofs = band(op, 4095) + if band(op, 0x00800000) == 0 then ofs = -ofs end + if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end + ofs = "#"..ofs + elseif ext and band(op, 0x00400000) ~= 0 then + ofs = band(op, 15) + band(rshift(op, 4), 0xf0) + if band(op, 0x00800000) == 0 then ofs = -ofs end + if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end + ofs = "#"..ofs + else + ofs = map_gpr[band(op, 15)] + if ext or band(op, 0xfe0) == 0 then + elseif band(op, 0xfe0) == 0x60 then + ofs = format("%s, rrx", ofs) + else + local sh = band(rshift(op, 7), 31) + if sh == 0 then sh = 32 end + ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh) + end + if band(op, 0x00800000) == 0 then ofs = "-"..ofs end + end + if ofs == "#0" then + x = format("[%s]", base) + elseif band(op, 0x01000000) == 0 then + x = format("[%s], %s", base, ofs) + else + x = format("[%s, %s]", base, ofs) + end + if band(op, 0x01200000) == 0x01200000 then x = x.."!" end + return x +end + +-- Format operand 2 of vector load/store opcodes. +local function fmtvload(ctx, op, pos) + local base = map_gpr[band(rshift(op, 16), 15)] + local ofs = band(op, 255)*4 + if band(op, 0x00800000) == 0 then ofs = -ofs end + if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end + if ofs == 0 then + return format("[%s]", base) + else + return format("[%s, #%d]", base, ofs) + end +end + +local function fmtvr(op, vr, sh0, sh1) + if vr == "s" then + return format("s%d", 2*band(rshift(op, sh0), 15)+band(rshift(op, sh1), 1)) + else + return format("d%d", band(rshift(op, sh0), 15)+band(rshift(op, sh1-4), 16)) + end +end + +-- Disassemble a single instruction. 
+local function disass_ins(ctx) + local pos = ctx.pos + local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) + local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0) + local operands = {} + local suffix = "" + local last, name, pat + local vr + ctx.op = op + ctx.rel = nil + + local cond = rshift(op, 28) + local opat + if cond == 15 then + opat = map_uncondins[band(rshift(op, 25), 7)] + else + if cond ~= 14 then suffix = map_cond[cond] end + opat = map_condins[band(rshift(op, 25), 7)] + end + while type(opat) ~= "string" do + if not opat then return unknown(ctx) end + opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._ + end + name, pat = match(opat, "^([a-z0-9]*)(.*)") + if sub(pat, 1, 1) == "." then + local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)") + suffix = suffix..s2 + pat = p2 + end + + for p in gmatch(pat, ".") do + local x = nil + if p == "D" then + x = map_gpr[band(rshift(op, 12), 15)] + elseif p == "N" then + x = map_gpr[band(rshift(op, 16), 15)] + elseif p == "S" then + x = map_gpr[band(rshift(op, 8), 15)] + elseif p == "M" then + x = map_gpr[band(op, 15)] + elseif p == "d" then + x = fmtvr(op, vr, 12, 22) + elseif p == "n" then + x = fmtvr(op, vr, 16, 7) + elseif p == "m" then + x = fmtvr(op, vr, 0, 5) + elseif p == "P" then + if band(op, 0x02000000) ~= 0 then + x = ror(band(op, 255), 2*band(rshift(op, 8), 15)) + else + x = map_gpr[band(op, 15)] + if band(op, 0xff0) ~= 0 then + operands[#operands+1] = x + local s = map_shift[band(rshift(op, 5), 3)] + local r = nil + if band(op, 0xf90) == 0 then + if s == "ror" then s = "rrx" else r = "#32" end + elseif band(op, 0x10) == 0 then + r = "#"..band(rshift(op, 7), 31) + else + r = map_gpr[band(rshift(op, 8), 15)] + end + if name == "mov" then name = s; x = r + elseif r then x = format("%s %s", s, r) + else x = s end + end + end + elseif p == "L" then + x = fmtload(ctx, op, pos) + elseif p == "l" then + x = fmtvload(ctx, op, pos) + elseif p == "B" then + local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6) + if cond == 15 then addr = addr + band(rshift(op, 23), 2) end + ctx.rel = addr + x = "0x"..tohex(addr) + elseif p == "F" then + vr = "s" + elseif p == "G" then + vr = "d" + elseif p == "." then + suffix = suffix..(vr == "s" and ".f32" or ".f64") + elseif p == "R" then + if band(op, 0x00200000) ~= 0 and #operands == 1 then + operands[1] = operands[1].."!" + end + local t = {} + for i=0,15 do + if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end + end + x = "{"..concat(t, ", ").."}" + elseif p == "r" then + if band(op, 0x00200000) ~= 0 and #operands == 2 then + operands[1] = operands[1].."!" 
+ end + local s = tonumber(sub(last, 2)) + local n = band(op, 255) + if vr == "d" then n = rshift(n, 1) end + operands[#operands] = format("{%s-%s%d}", last, vr, s+n-1) + elseif p == "W" then + x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000) + elseif p == "T" then + x = "#0x"..tohex(band(op, 0x00ffffff), 6) + elseif p == "U" then + x = band(rshift(op, 7), 31) + if x == 0 then x = nil end + elseif p == "u" then + x = band(rshift(op, 7), 31) + if band(op, 0x40) == 0 then + if x == 0 then x = nil else x = "lsl #"..x end + else + if x == 0 then x = "asr #32" else x = "asr #"..x end + end + elseif p == "v" then + x = band(rshift(op, 7), 31) + elseif p == "w" then + x = band(rshift(op, 16), 31) + elseif p == "x" then + x = band(rshift(op, 16), 31) + 1 + elseif p == "X" then + x = band(rshift(op, 16), 31) - last + 1 + elseif p == "Y" then + x = band(rshift(op, 12), 0xf0) + band(op, 0x0f) + elseif p == "K" then + x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4) + elseif p == "s" then + if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end + else + assert(false) + end + if x then + last = x + if type(x) == "number" then x = "#"..x end + operands[#operands+1] = x + end + end + + return putop(ctx, name..suffix, operands) +end + +------------------------------------------------------------------------------ + +-- Disassemble a block of code. +local function disass_block(ctx, ofs, len) + if not ofs then ofs = 0 end + local stop = len and ofs+len or #ctx.code + ctx.pos = ofs + ctx.rel = nil + while ctx.pos < stop do disass_ins(ctx) end +end + +-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). +local function create(code, addr, out) + local ctx = {} + ctx.code = code + ctx.addr = addr or 0 + ctx.out = out or io.write + ctx.symtab = {} + ctx.disass = disass_block + ctx.hexdump = 8 + return ctx +end + +-- Simple API: disassemble code (a string) at address and output via out. +local function disass(code, addr, out) + create(code, addr, out):disass() +end + +-- Return register name for RID. +local function regname(r) + if r < 16 then return map_gpr[r] end + return "d"..(r-16) +end + +-- Public module functions. +return { + create = create, + disass = disass, + regname = regname +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mips.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mips.lua new file mode 100644 index 00000000000..9466f45c2c2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mips.lua @@ -0,0 +1,428 @@ +---------------------------------------------------------------------------- +-- LuaJIT MIPS disassembler module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This is a helper module used by the LuaJIT machine code dumper module. +-- +-- It disassembles all standard MIPS32R1/R2 instructions. 
+-- Default mode is big-endian, but see: dis_mipsel.lua +------------------------------------------------------------------------------ + +local type = type +local sub, byte, format = string.sub, string.byte, string.format +local match, gmatch, gsub = string.match, string.gmatch, string.gsub +local concat = table.concat +local bit = require("bit") +local band, bor, tohex = bit.band, bit.bor, bit.tohex +local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift + +------------------------------------------------------------------------------ +-- Primary and extended opcode maps +------------------------------------------------------------------------------ + +local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", } +local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", } +local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", } + +local map_special = { + shift = 0, mask = 63, + [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" }, + map_movci, map_srl, "sraDTA", + "sllvDTS", false, map_srlv, "sravDTS", + "jrS", "jalrD1S", "movzDST", "movnDST", + "syscallY", "breakY", false, "sync", + "mfhiD", "mthiS", "mfloD", "mtloS", + false, false, false, false, + "multST", "multuST", "divST", "divuST", + false, false, false, false, + "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T", + "andDST", "orDST", "xorDST", "nor|notDST0", + false, false, "sltDST", "sltuDST", + false, false, false, false, + "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ", + "teqSTZ", false, "tneSTZ", +} + +local map_special2 = { + shift = 0, mask = 63, + [0] = "maddST", "madduST", "mulDST", false, + "msubST", "msubuST", + [32] = "clzDS", [33] = "cloDS", + [63] = "sdbbpY", +} + +local map_bshfl = { + shift = 6, mask = 31, + [2] = "wsbhDT", + [16] = "sebDT", + [24] = "sehDT", +} + +local map_special3 = { + shift = 0, mask = 63, + [0] = "extTSAK", [4] = "insTSAL", + [32] = map_bshfl, + [59] = "rdhwrTD", +} + +local map_regimm = { + shift = 16, mask = 31, + [0] = "bltzSB", "bgezSB", "bltzlSB", "bgezlSB", + false, false, false, false, + "tgeiSI", "tgeiuSI", "tltiSI", "tltiuSI", + "teqiSI", false, "tneiSI", false, + "bltzalSB", "bgezalSB", "bltzallSB", "bgezallSB", + false, false, false, false, + false, false, false, false, + false, false, false, "synciSO", +} + +local map_cop0 = { + shift = 25, mask = 1, + [0] = { + shift = 21, mask = 15, + [0] = "mfc0TDW", [4] = "mtc0TDW", + [10] = "rdpgprDT", + [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", }, + [14] = "wrpgprDT", + }, { + shift = 0, mask = 63, + [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp", + [24] = "eret", [31] = "deret", + [32] = "wait", + }, +} + +local map_cop1s = { + shift = 0, mask = 63, + [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH", + "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG", + "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG", + "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG", + false, + { shift = 16, mask = 1, [0] = "movf.sFGC", "movt.sFGC" }, + "movz.sFGT", "movn.sFGT", + false, "recip.sFG", "rsqrt.sFG", false, + false, false, false, false, + false, false, false, false, + false, "cvt.d.sFG", false, false, + "cvt.w.sFG", "cvt.l.sFG", "cvt.ps.sFGH", false, + false, false, false, false, + false, false, false, false, + "c.f.sVGH", "c.un.sVGH", "c.eq.sVGH", "c.ueq.sVGH", + "c.olt.sVGH", "c.ult.sVGH", "c.ole.sVGH", "c.ule.sVGH", + "c.sf.sVGH", "c.ngle.sVGH", "c.seq.sVGH", "c.ngl.sVGH", + "c.lt.sVGH", "c.nge.sVGH", "c.le.sVGH", "c.ngt.sVGH", +} + +local map_cop1d = { + shift = 0, 
mask = 63, + [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH", + "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG", + "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG", + "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG", + false, + { shift = 16, mask = 1, [0] = "movf.dFGC", "movt.dFGC" }, + "movz.dFGT", "movn.dFGT", + false, "recip.dFG", "rsqrt.dFG", false, + false, false, false, false, + false, false, false, false, + "cvt.s.dFG", false, false, false, + "cvt.w.dFG", "cvt.l.dFG", false, false, + false, false, false, false, + false, false, false, false, + "c.f.dVGH", "c.un.dVGH", "c.eq.dVGH", "c.ueq.dVGH", + "c.olt.dVGH", "c.ult.dVGH", "c.ole.dVGH", "c.ule.dVGH", + "c.df.dVGH", "c.ngle.dVGH", "c.deq.dVGH", "c.ngl.dVGH", + "c.lt.dVGH", "c.nge.dVGH", "c.le.dVGH", "c.ngt.dVGH", +} + +local map_cop1ps = { + shift = 0, mask = 63, + [0] = "add.psFGH", "sub.psFGH", "mul.psFGH", false, + false, "abs.psFG", "mov.psFG", "neg.psFG", + false, false, false, false, + false, false, false, false, + false, + { shift = 16, mask = 1, [0] = "movf.psFGC", "movt.psFGC" }, + "movz.psFGT", "movn.psFGT", + false, false, false, false, + false, false, false, false, + false, false, false, false, + "cvt.s.puFG", false, false, false, + false, false, false, false, + "cvt.s.plFG", false, false, false, + "pll.psFGH", "plu.psFGH", "pul.psFGH", "puu.psFGH", + "c.f.psVGH", "c.un.psVGH", "c.eq.psVGH", "c.ueq.psVGH", + "c.olt.psVGH", "c.ult.psVGH", "c.ole.psVGH", "c.ule.psVGH", + "c.psf.psVGH", "c.ngle.psVGH", "c.pseq.psVGH", "c.ngl.psVGH", + "c.lt.psVGH", "c.nge.psVGH", "c.le.psVGH", "c.ngt.psVGH", +} + +local map_cop1w = { + shift = 0, mask = 63, + [32] = "cvt.s.wFG", [33] = "cvt.d.wFG", +} + +local map_cop1l = { + shift = 0, mask = 63, + [32] = "cvt.s.lFG", [33] = "cvt.d.lFG", +} + +local map_cop1bc = { + shift = 16, mask = 3, + [0] = "bc1fCB", "bc1tCB", "bc1flCB", "bc1tlCB", +} + +local map_cop1 = { + shift = 21, mask = 31, + [0] = "mfc1TG", false, "cfc1TG", "mfhc1TG", + "mtc1TG", false, "ctc1TG", "mthc1TG", + map_cop1bc, false, false, false, + false, false, false, false, + map_cop1s, map_cop1d, false, false, + map_cop1w, map_cop1l, map_cop1ps, +} + +local map_cop1x = { + shift = 0, mask = 63, + [0] = "lwxc1FSX", "ldxc1FSX", false, false, + false, "luxc1FSX", false, false, + "swxc1FSX", "sdxc1FSX", false, false, + false, "suxc1FSX", false, "prefxMSX", + false, false, false, false, + false, false, false, false, + false, false, false, false, + false, false, "alnv.psFGHS", false, + "madd.sFRGH", "madd.dFRGH", false, false, + false, false, "madd.psFRGH", false, + "msub.sFRGH", "msub.dFRGH", false, false, + false, false, "msub.psFRGH", false, + "nmadd.sFRGH", "nmadd.dFRGH", false, false, + false, false, "nmadd.psFRGH", false, + "nmsub.sFRGH", "nmsub.dFRGH", false, false, + false, false, "nmsub.psFRGH", false, +} + +local map_pri = { + [0] = map_special, map_regimm, "jJ", "jalJ", + "beq|beqz|bST00B", "bne|bnezST0B", "blezSB", "bgtzSB", + "addiTSI", "addiu|liTS0I", "sltiTSI", "sltiuTSI", + "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU", + map_cop0, map_cop1, false, map_cop1x, + "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB", + false, false, false, false, + map_special2, false, false, map_special3, + "lbTSO", "lhTSO", "lwlTSO", "lwTSO", + "lbuTSO", "lhuTSO", "lwrTSO", false, + "sbTSO", "shTSO", "swlTSO", "swTSO", + false, false, "swrTSO", "cacheNSO", + "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO", + false, "ldc1HSO", "ldc2TSO", false, + "scTSO", "swc1HSO", "swc2TSO", false, + false, "sdc1HSO", "sdc2TSO", false, +} 
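+
+-- Reader's note (editorial illustration, not upstream LuaJIT text): each map
+-- entry above packs an opcode name, an optional "|alternate" name and a string
+-- of operand-format characters that disass_ins() below decodes one by one.
+-- For example "addiu|liTS0I" splits into name "addiu", altname "li" and the
+-- pattern "TS0I": T and S pick the rt/rs registers, "0" drops a trailing
+-- r0/zero operand and switches to the alternate name, and I appends the
+-- signed 16-bit immediate. So the word 0x24020001 (addiu r2, r0, 1) should
+-- come out as "li r2, 1". A minimal usage sketch, assuming the module is
+-- reachable on package.path as "jit.dis_mips":
+--
+--   local dis = require("jit.dis_mips")
+--   dis.disass(string.char(0x24, 0x02, 0x00, 0x01), 0)  -- -> li r2, 1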
+ +------------------------------------------------------------------------------ + +local map_gpr = { + [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "sp", "r30", "ra", +} + +------------------------------------------------------------------------------ + +-- Output a nicely formatted line with an opcode and operands. +local function putop(ctx, text, operands) + local pos = ctx.pos + local extra = "" + if ctx.rel then + local sym = ctx.symtab[ctx.rel] + if sym then extra = "\t->"..sym end + end + if ctx.hexdump > 0 then + ctx.out(format("%08x %s %-7s %s%s\n", + ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) + else + ctx.out(format("%08x %-7s %s%s\n", + ctx.addr+pos, text, concat(operands, ", "), extra)) + end + ctx.pos = pos + 4 +end + +-- Fallback for unknown opcodes. +local function unknown(ctx) + return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) +end + +local function get_be(ctx) + local pos = ctx.pos + local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) + return bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3) +end + +local function get_le(ctx) + local pos = ctx.pos + local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) + return bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0) +end + +-- Disassemble a single instruction. +local function disass_ins(ctx) + local op = ctx:get() + local operands = {} + local last = nil + ctx.op = op + ctx.rel = nil + + local opat = map_pri[rshift(op, 26)] + while type(opat) ~= "string" do + if not opat then return unknown(ctx) end + opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._ + end + local name, pat = match(opat, "^([a-z0-9_.]*)(.*)") + local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)") + if altname then pat = pat2 end + + for p in gmatch(pat, ".") do + local x = nil + if p == "S" then + x = map_gpr[band(rshift(op, 21), 31)] + elseif p == "T" then + x = map_gpr[band(rshift(op, 16), 31)] + elseif p == "D" then + x = map_gpr[band(rshift(op, 11), 31)] + elseif p == "F" then + x = "f"..band(rshift(op, 6), 31) + elseif p == "G" then + x = "f"..band(rshift(op, 11), 31) + elseif p == "H" then + x = "f"..band(rshift(op, 16), 31) + elseif p == "R" then + x = "f"..band(rshift(op, 21), 31) + elseif p == "A" then + x = band(rshift(op, 6), 31) + elseif p == "M" then + x = band(rshift(op, 11), 31) + elseif p == "N" then + x = band(rshift(op, 16), 31) + elseif p == "C" then + x = band(rshift(op, 18), 7) + if x == 0 then x = nil end + elseif p == "K" then + x = band(rshift(op, 11), 31) + 1 + elseif p == "L" then + x = band(rshift(op, 11), 31) - last + 1 + elseif p == "I" then + x = arshift(lshift(op, 16), 16) + elseif p == "U" then + x = band(op, 0xffff) + elseif p == "O" then + local disp = arshift(lshift(op, 16), 16) + operands[#operands] = format("%d(%s)", disp, last) + elseif p == "X" then + local index = map_gpr[band(rshift(op, 16), 31)] + operands[#operands] = format("%s(%s)", index, last) + elseif p == "B" then + x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 16)*4 + 4 + ctx.rel = x + x = "0x"..tohex(x) + elseif p == "J" then + x = band(ctx.addr + ctx.pos, 0xf0000000) + band(op, 0x03ffffff)*4 + ctx.rel = x + x = "0x"..tohex(x) + elseif p == "V" then + x = band(rshift(op, 8), 7) + if x == 0 then x = nil end + elseif p == "W" then + x = band(op, 7) + if x == 0 then x = nil end + elseif p == "Y" then + x = band(rshift(op, 6), 0x000fffff) + if x == 0 
then x = nil end + elseif p == "Z" then + x = band(rshift(op, 6), 1023) + if x == 0 then x = nil end + elseif p == "0" then + if last == "r0" or last == 0 then + local n = #operands + operands[n] = nil + last = operands[n-1] + if altname then + local a1, a2 = match(altname, "([^|]*)|(.*)") + if a1 then name, altname = a1, a2 + else name = altname end + end + end + elseif p == "1" then + if last == "ra" then + operands[#operands] = nil + end + else + assert(false) + end + if x then operands[#operands+1] = x; last = x end + end + + return putop(ctx, name, operands) +end + +------------------------------------------------------------------------------ + +-- Disassemble a block of code. +local function disass_block(ctx, ofs, len) + if not ofs then ofs = 0 end + local stop = len and ofs+len or #ctx.code + stop = stop - stop % 4 + ctx.pos = ofs - ofs % 4 + ctx.rel = nil + while ctx.pos < stop do disass_ins(ctx) end +end + +-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). +local function create(code, addr, out) + local ctx = {} + ctx.code = code + ctx.addr = addr or 0 + ctx.out = out or io.write + ctx.symtab = {} + ctx.disass = disass_block + ctx.hexdump = 8 + ctx.get = get_be + return ctx +end + +local function create_el(code, addr, out) + local ctx = create(code, addr, out) + ctx.get = get_le + return ctx +end + +-- Simple API: disassemble code (a string) at address and output via out. +local function disass(code, addr, out) + create(code, addr, out):disass() +end + +local function disass_el(code, addr, out) + create_el(code, addr, out):disass() +end + +-- Return register name for RID. +local function regname(r) + if r < 32 then return map_gpr[r] end + return "f"..(r-32) +end + +-- Public module functions. +return { + create = create, + create_el = create_el, + disass = disass, + disass_el = disass_el, + regname = regname +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mipsel.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mipsel.lua new file mode 100644 index 00000000000..f06ffe855e3 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_mipsel.lua @@ -0,0 +1,17 @@ +---------------------------------------------------------------------------- +-- LuaJIT MIPSEL disassembler wrapper module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This module just exports the little-endian functions from the +-- MIPS disassembler module. All the interesting stuff is there. +------------------------------------------------------------------------------ + +local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips") +return { + create = dis_mips.create_el, + disass = dis_mips.disass_el, + regname = dis_mips.regname +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_ppc.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_ppc.lua new file mode 100644 index 00000000000..e077d7acdd9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_ppc.lua @@ -0,0 +1,591 @@ +---------------------------------------------------------------------------- +-- LuaJIT PPC disassembler module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT/X license. 
See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This is a helper module used by the LuaJIT machine code dumper module. +-- +-- It disassembles all common, non-privileged 32/64 bit PowerPC instructions +-- plus the e500 SPE instructions and some Cell/Xenon extensions. +-- +-- NYI: VMX, VMX128 +------------------------------------------------------------------------------ + +local type = type +local sub, byte, format = string.sub, string.byte, string.format +local match, gmatch, gsub = string.match, string.gmatch, string.gsub +local concat = table.concat +local bit = require("bit") +local band, bor, tohex = bit.band, bit.bor, bit.tohex +local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift + +------------------------------------------------------------------------------ +-- Primary and extended opcode maps +------------------------------------------------------------------------------ + +local map_crops = { + shift = 1, mask = 1023, + [0] = "mcrfXX", + [33] = "crnor|crnotCCC=", [129] = "crandcCCC", + [193] = "crxor|crclrCCC%", [225] = "crnandCCC", + [257] = "crandCCC", [289] = "creqv|crsetCCC%", + [417] = "crorcCCC", [449] = "cror|crmoveCCC=", + [16] = "b_lrKB", [528] = "b_ctrKB", + [150] = "isync", +} + +local map_rlwinm = setmetatable({ + shift = 0, mask = -1, +}, +{ __index = function(t, x) + local rot = band(rshift(x, 11), 31) + local mb = band(rshift(x, 6), 31) + local me = band(rshift(x, 1), 31) + if mb == 0 and me == 31-rot then + return "slwiRR~A." + elseif me == 31 and mb == 32-rot then + return "srwiRR~-A." + else + return "rlwinmRR~AAA." + end + end +}) + +local map_rld = { + shift = 2, mask = 7, + [0] = "rldiclRR~HM.", "rldicrRR~HM.", "rldicRR~HM.", "rldimiRR~HM.", + { + shift = 1, mask = 1, + [0] = "rldclRR~RM.", "rldcrRR~RM.", + }, +} + +local map_ext = setmetatable({ + shift = 1, mask = 1023, + + [0] = "cmp_YLRR", [32] = "cmpl_YLRR", + [4] = "twARR", [68] = "tdARR", + + [8] = "subfcRRR.", [40] = "subfRRR.", + [104] = "negRR.", [136] = "subfeRRR.", + [200] = "subfzeRR.", [232] = "subfmeRR.", + [520] = "subfcoRRR.", [552] = "subfoRRR.", + [616] = "negoRR.", [648] = "subfeoRRR.", + [712] = "subfzeoRR.", [744] = "subfmeoRR.", + + [9] = "mulhduRRR.", [73] = "mulhdRRR.", [233] = "mulldRRR.", + [457] = "divduRRR.", [489] = "divdRRR.", + [745] = "mulldoRRR.", + [969] = "divduoRRR.", [1001] = "divdoRRR.", + + [10] = "addcRRR.", [138] = "addeRRR.", + [202] = "addzeRR.", [234] = "addmeRR.", [266] = "addRRR.", + [522] = "addcoRRR.", [650] = "addeoRRR.", + [714] = "addzeoRR.", [746] = "addmeoRR.", [778] = "addoRRR.", + + [11] = "mulhwuRRR.", [75] = "mulhwRRR.", [235] = "mullwRRR.", + [459] = "divwuRRR.", [491] = "divwRRR.", + [747] = "mullwoRRR.", + [971] = "divwouRRR.", [1003] = "divwoRRR.", + + [15] = "iselltRRR", [47] = "iselgtRRR", [79] = "iseleqRRR", + + [144] = { shift = 20, mask = 1, [0] = "mtcrfRZ~", "mtocrfRZ~", }, + [19] = { shift = 20, mask = 1, [0] = "mfcrR", "mfocrfRZ", }, + [371] = { shift = 11, mask = 1023, [392] = "mftbR", [424] = "mftbuR", }, + [339] = { + shift = 11, mask = 1023, + [32] = "mferR", [256] = "mflrR", [288] = "mfctrR", [16] = "mfspefscrR", + }, + [467] = { + shift = 11, mask = 1023, + [32] = "mtxerR", [256] = "mtlrR", [288] = "mtctrR", [16] = "mtspefscrR", + }, + + [20] = "lwarxRR0R", [84] = "ldarxRR0R", + + [21] = "ldxRR0R", [53] = "lduxRRR", + [149] = "stdxRR0R", [181] = "stduxRRR", + [341] = "lwaxRR0R", [373] = "lwauxRRR", + + [23] = "lwzxRR0R", [55] = "lwzuxRRR", + [87] = 
"lbzxRR0R", [119] = "lbzuxRRR", + [151] = "stwxRR0R", [183] = "stwuxRRR", + [215] = "stbxRR0R", [247] = "stbuxRRR", + [279] = "lhzxRR0R", [311] = "lhzuxRRR", + [343] = "lhaxRR0R", [375] = "lhauxRRR", + [407] = "sthxRR0R", [439] = "sthuxRRR", + + [54] = "dcbst-R0R", [86] = "dcbf-R0R", + [150] = "stwcxRR0R.", [214] = "stdcxRR0R.", + [246] = "dcbtst-R0R", [278] = "dcbt-R0R", + [310] = "eciwxRR0R", [438] = "ecowxRR0R", + [470] = "dcbi-RR", + + [598] = { + shift = 21, mask = 3, + [0] = "sync", "lwsync", "ptesync", + }, + [758] = "dcba-RR", + [854] = "eieio", [982] = "icbi-R0R", [1014] = "dcbz-R0R", + + [26] = "cntlzwRR~", [58] = "cntlzdRR~", + [122] = "popcntbRR~", + [154] = "prtywRR~", [186] = "prtydRR~", + + [28] = "andRR~R.", [60] = "andcRR~R.", [124] = "nor|notRR~R=.", + [284] = "eqvRR~R.", [316] = "xorRR~R.", + [412] = "orcRR~R.", [444] = "or|mrRR~R=.", [476] = "nandRR~R.", + [508] = "cmpbRR~R", + + [512] = "mcrxrX", + + [532] = "ldbrxRR0R", [660] = "stdbrxRR0R", + + [533] = "lswxRR0R", [597] = "lswiRR0A", + [661] = "stswxRR0R", [725] = "stswiRR0A", + + [534] = "lwbrxRR0R", [662] = "stwbrxRR0R", + [790] = "lhbrxRR0R", [918] = "sthbrxRR0R", + + [535] = "lfsxFR0R", [567] = "lfsuxFRR", + [599] = "lfdxFR0R", [631] = "lfduxFRR", + [663] = "stfsxFR0R", [695] = "stfsuxFRR", + [727] = "stfdxFR0R", [759] = "stfduxFR0R", + [855] = "lfiwaxFR0R", + [983] = "stfiwxFR0R", + + [24] = "slwRR~R.", + + [27] = "sldRR~R.", [536] = "srwRR~R.", + [792] = "srawRR~R.", [824] = "srawiRR~A.", + + [794] = "sradRR~R.", [826] = "sradiRR~H.", [827] = "sradiRR~H.", + [922] = "extshRR~.", [954] = "extsbRR~.", [986] = "extswRR~.", + + [539] = "srdRR~R.", +}, +{ __index = function(t, x) + if band(x, 31) == 15 then return "iselRRRC" end + end +}) + +local map_ld = { + shift = 0, mask = 3, + [0] = "ldRRE", "lduRRE", "lwaRRE", +} + +local map_std = { + shift = 0, mask = 3, + [0] = "stdRRE", "stduRRE", +} + +local map_fps = { + shift = 5, mask = 1, + { + shift = 1, mask = 15, + [0] = false, false, "fdivsFFF.", false, + "fsubsFFF.", "faddsFFF.", "fsqrtsF-F.", false, + "fresF-F.", "fmulsFF-F.", "frsqrtesF-F.", false, + "fmsubsFFFF~.", "fmaddsFFFF~.", "fnmsubsFFFF~.", "fnmaddsFFFF~.", + } +} + +local map_fpd = { + shift = 5, mask = 1, + [0] = { + shift = 1, mask = 1023, + [0] = "fcmpuXFF", [32] = "fcmpoXFF", [64] = "mcrfsXX", + [38] = "mtfsb1A.", [70] = "mtfsb0A.", [134] = "mtfsfiA>>-A>", + [8] = "fcpsgnFFF.", [40] = "fnegF-F.", [72] = "fmrF-F.", + [136] = "fnabsF-F.", [264] = "fabsF-F.", + [12] = "frspF-F.", + [14] = "fctiwF-F.", [15] = "fctiwzF-F.", + [583] = "mffsF.", [711] = "mtfsfZF.", + [392] = "frinF-F.", [424] = "frizF-F.", + [456] = "fripF-F.", [488] = "frimF-F.", + [814] = "fctidF-F.", [815] = "fctidzF-F.", [846] = "fcfidF-F.", + }, + { + shift = 1, mask = 15, + [0] = false, false, "fdivFFF.", false, + "fsubFFF.", "faddFFF.", "fsqrtF-F.", "fselFFFF~.", + "freF-F.", "fmulFF-F.", "frsqrteF-F.", false, + "fmsubFFFF~.", "fmaddFFFF~.", "fnmsubFFFF~.", "fnmaddFFFF~.", + } +} + +local map_spe = { + shift = 0, mask = 2047, + + [512] = "evaddwRRR", [514] = "evaddiwRAR~", + [516] = "evsubwRRR~", [518] = "evsubiwRAR~", + [520] = "evabsRR", [521] = "evnegRR", + [522] = "evextsbRR", [523] = "evextshRR", [524] = "evrndwRR", + [525] = "evcntlzwRR", [526] = "evcntlswRR", + + [527] = "brincRRR", + + [529] = "evandRRR", [530] = "evandcRRR", [534] = "evxorRRR", + [535] = "evor|evmrRRR=", [536] = "evnor|evnotRRR=", + [537] = "eveqvRRR", [539] = "evorcRRR", [542] = "evnandRRR", + + [544] = "evsrwuRRR", [545] = "evsrwsRRR", + [546] = 
"evsrwiuRRA", [547] = "evsrwisRRA", + [548] = "evslwRRR", [550] = "evslwiRRA", + [552] = "evrlwRRR", [553] = "evsplatiRS", + [554] = "evrlwiRRA", [555] = "evsplatfiRS", + [556] = "evmergehiRRR", [557] = "evmergeloRRR", + [558] = "evmergehiloRRR", [559] = "evmergelohiRRR", + + [560] = "evcmpgtuYRR", [561] = "evcmpgtsYRR", + [562] = "evcmpltuYRR", [563] = "evcmpltsYRR", + [564] = "evcmpeqYRR", + + [632] = "evselRRR", [633] = "evselRRRW", + [634] = "evselRRRW", [635] = "evselRRRW", + [636] = "evselRRRW", [637] = "evselRRRW", + [638] = "evselRRRW", [639] = "evselRRRW", + + [640] = "evfsaddRRR", [641] = "evfssubRRR", + [644] = "evfsabsRR", [645] = "evfsnabsRR", [646] = "evfsnegRR", + [648] = "evfsmulRRR", [649] = "evfsdivRRR", + [652] = "evfscmpgtYRR", [653] = "evfscmpltYRR", [654] = "evfscmpeqYRR", + [656] = "evfscfuiR-R", [657] = "evfscfsiR-R", + [658] = "evfscfufR-R", [659] = "evfscfsfR-R", + [660] = "evfsctuiR-R", [661] = "evfsctsiR-R", + [662] = "evfsctufR-R", [663] = "evfsctsfR-R", + [664] = "evfsctuizR-R", [666] = "evfsctsizR-R", + [668] = "evfststgtYRR", [669] = "evfststltYRR", [670] = "evfststeqYRR", + + [704] = "efsaddRRR", [705] = "efssubRRR", + [708] = "efsabsRR", [709] = "efsnabsRR", [710] = "efsnegRR", + [712] = "efsmulRRR", [713] = "efsdivRRR", + [716] = "efscmpgtYRR", [717] = "efscmpltYRR", [718] = "efscmpeqYRR", + [719] = "efscfdR-R", + [720] = "efscfuiR-R", [721] = "efscfsiR-R", + [722] = "efscfufR-R", [723] = "efscfsfR-R", + [724] = "efsctuiR-R", [725] = "efsctsiR-R", + [726] = "efsctufR-R", [727] = "efsctsfR-R", + [728] = "efsctuizR-R", [730] = "efsctsizR-R", + [732] = "efststgtYRR", [733] = "efststltYRR", [734] = "efststeqYRR", + + [736] = "efdaddRRR", [737] = "efdsubRRR", + [738] = "efdcfuidR-R", [739] = "efdcfsidR-R", + [740] = "efdabsRR", [741] = "efdnabsRR", [742] = "efdnegRR", + [744] = "efdmulRRR", [745] = "efddivRRR", + [746] = "efdctuidzR-R", [747] = "efdctsidzR-R", + [748] = "efdcmpgtYRR", [749] = "efdcmpltYRR", [750] = "efdcmpeqYRR", + [751] = "efdcfsR-R", + [752] = "efdcfuiR-R", [753] = "efdcfsiR-R", + [754] = "efdcfufR-R", [755] = "efdcfsfR-R", + [756] = "efdctuiR-R", [757] = "efdctsiR-R", + [758] = "efdctufR-R", [759] = "efdctsfR-R", + [760] = "efdctuizR-R", [762] = "efdctsizR-R", + [764] = "efdtstgtYRR", [765] = "efdtstltYRR", [766] = "efdtsteqYRR", + + [768] = "evlddxRR0R", [769] = "evlddRR8", + [770] = "evldwxRR0R", [771] = "evldwRR8", + [772] = "evldhxRR0R", [773] = "evldhRR8", + [776] = "evlhhesplatxRR0R", [777] = "evlhhesplatRR2", + [780] = "evlhhousplatxRR0R", [781] = "evlhhousplatRR2", + [782] = "evlhhossplatxRR0R", [783] = "evlhhossplatRR2", + [784] = "evlwhexRR0R", [785] = "evlwheRR4", + [788] = "evlwhouxRR0R", [789] = "evlwhouRR4", + [790] = "evlwhosxRR0R", [791] = "evlwhosRR4", + [792] = "evlwwsplatxRR0R", [793] = "evlwwsplatRR4", + [796] = "evlwhsplatxRR0R", [797] = "evlwhsplatRR4", + + [800] = "evstddxRR0R", [801] = "evstddRR8", + [802] = "evstdwxRR0R", [803] = "evstdwRR8", + [804] = "evstdhxRR0R", [805] = "evstdhRR8", + [816] = "evstwhexRR0R", [817] = "evstwheRR4", + [820] = "evstwhoxRR0R", [821] = "evstwhoRR4", + [824] = "evstwwexRR0R", [825] = "evstwweRR4", + [828] = "evstwwoxRR0R", [829] = "evstwwoRR4", + + [1027] = "evmhessfRRR", [1031] = "evmhossfRRR", [1032] = "evmheumiRRR", + [1033] = "evmhesmiRRR", [1035] = "evmhesmfRRR", [1036] = "evmhoumiRRR", + [1037] = "evmhosmiRRR", [1039] = "evmhosmfRRR", [1059] = "evmhessfaRRR", + [1063] = "evmhossfaRRR", [1064] = "evmheumiaRRR", [1065] = "evmhesmiaRRR", + [1067] = "evmhesmfaRRR", [1068] = 
"evmhoumiaRRR", [1069] = "evmhosmiaRRR", + [1071] = "evmhosmfaRRR", [1095] = "evmwhssfRRR", [1096] = "evmwlumiRRR", + [1100] = "evmwhumiRRR", [1101] = "evmwhsmiRRR", [1103] = "evmwhsmfRRR", + [1107] = "evmwssfRRR", [1112] = "evmwumiRRR", [1113] = "evmwsmiRRR", + [1115] = "evmwsmfRRR", [1127] = "evmwhssfaRRR", [1128] = "evmwlumiaRRR", + [1132] = "evmwhumiaRRR", [1133] = "evmwhsmiaRRR", [1135] = "evmwhsmfaRRR", + [1139] = "evmwssfaRRR", [1144] = "evmwumiaRRR", [1145] = "evmwsmiaRRR", + [1147] = "evmwsmfaRRR", + + [1216] = "evaddusiaawRR", [1217] = "evaddssiaawRR", + [1218] = "evsubfusiaawRR", [1219] = "evsubfssiaawRR", + [1220] = "evmraRR", + [1222] = "evdivwsRRR", [1223] = "evdivwuRRR", + [1224] = "evaddumiaawRR", [1225] = "evaddsmiaawRR", + [1226] = "evsubfumiaawRR", [1227] = "evsubfsmiaawRR", + + [1280] = "evmheusiaawRRR", [1281] = "evmhessiaawRRR", + [1283] = "evmhessfaawRRR", [1284] = "evmhousiaawRRR", + [1285] = "evmhossiaawRRR", [1287] = "evmhossfaawRRR", + [1288] = "evmheumiaawRRR", [1289] = "evmhesmiaawRRR", + [1291] = "evmhesmfaawRRR", [1292] = "evmhoumiaawRRR", + [1293] = "evmhosmiaawRRR", [1295] = "evmhosmfaawRRR", + [1320] = "evmhegumiaaRRR", [1321] = "evmhegsmiaaRRR", + [1323] = "evmhegsmfaaRRR", [1324] = "evmhogumiaaRRR", + [1325] = "evmhogsmiaaRRR", [1327] = "evmhogsmfaaRRR", + [1344] = "evmwlusiaawRRR", [1345] = "evmwlssiaawRRR", + [1352] = "evmwlumiaawRRR", [1353] = "evmwlsmiaawRRR", + [1363] = "evmwssfaaRRR", [1368] = "evmwumiaaRRR", + [1369] = "evmwsmiaaRRR", [1371] = "evmwsmfaaRRR", + [1408] = "evmheusianwRRR", [1409] = "evmhessianwRRR", + [1411] = "evmhessfanwRRR", [1412] = "evmhousianwRRR", + [1413] = "evmhossianwRRR", [1415] = "evmhossfanwRRR", + [1416] = "evmheumianwRRR", [1417] = "evmhesmianwRRR", + [1419] = "evmhesmfanwRRR", [1420] = "evmhoumianwRRR", + [1421] = "evmhosmianwRRR", [1423] = "evmhosmfanwRRR", + [1448] = "evmhegumianRRR", [1449] = "evmhegsmianRRR", + [1451] = "evmhegsmfanRRR", [1452] = "evmhogumianRRR", + [1453] = "evmhogsmianRRR", [1455] = "evmhogsmfanRRR", + [1472] = "evmwlusianwRRR", [1473] = "evmwlssianwRRR", + [1480] = "evmwlumianwRRR", [1481] = "evmwlsmianwRRR", + [1491] = "evmwssfanRRR", [1496] = "evmwumianRRR", + [1497] = "evmwsmianRRR", [1499] = "evmwsmfanRRR", +} + +local map_pri = { + [0] = false, false, "tdiARI", "twiARI", + map_spe, false, false, "mulliRRI", + "subficRRI", false, "cmpl_iYLRU", "cmp_iYLRI", + "addicRRI", "addic.RRI", "addi|liRR0I", "addis|lisRR0I", + "b_KBJ", "sc", "bKJ", map_crops, + "rlwimiRR~AAA.", map_rlwinm, false, "rlwnmRR~RAA.", + "oriNRR~U", "orisRR~U", "xoriRR~U", "xorisRR~U", + "andi.RR~U", "andis.RR~U", map_rld, map_ext, + "lwzRRD", "lwzuRRD", "lbzRRD", "lbzuRRD", + "stwRRD", "stwuRRD", "stbRRD", "stbuRRD", + "lhzRRD", "lhzuRRD", "lhaRRD", "lhauRRD", + "sthRRD", "sthuRRD", "lmwRRD", "stmwRRD", + "lfsFRD", "lfsuFRD", "lfdFRD", "lfduFRD", + "stfsFRD", "stfsuFRD", "stfdFRD", "stfduFRD", + false, false, map_ld, map_fps, + false, false, map_std, map_fpd, +} + +------------------------------------------------------------------------------ + +local map_gpr = { + [0] = "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", +} + +local map_cond = { [0] = "lt", "gt", "eq", "so", "ge", "le", "ne", "ns", } + +-- Format a condition bit. 
+local function condfmt(cond) + if cond <= 3 then + return map_cond[band(cond, 3)] + else + return format("4*cr%d+%s", rshift(cond, 2), map_cond[band(cond, 3)]) + end +end + +------------------------------------------------------------------------------ + +-- Output a nicely formatted line with an opcode and operands. +local function putop(ctx, text, operands) + local pos = ctx.pos + local extra = "" + if ctx.rel then + local sym = ctx.symtab[ctx.rel] + if sym then extra = "\t->"..sym end + end + if ctx.hexdump > 0 then + ctx.out(format("%08x %s %-7s %s%s\n", + ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) + else + ctx.out(format("%08x %-7s %s%s\n", + ctx.addr+pos, text, concat(operands, ", "), extra)) + end + ctx.pos = pos + 4 +end + +-- Fallback for unknown opcodes. +local function unknown(ctx) + return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) +end + +-- Disassemble a single instruction. +local function disass_ins(ctx) + local pos = ctx.pos + local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) + local op = bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3) + local operands = {} + local last = nil + local rs = 21 + ctx.op = op + ctx.rel = nil + + local opat = map_pri[rshift(b0, 2)] + while type(opat) ~= "string" do + if not opat then return unknown(ctx) end + opat = opat[band(rshift(op, opat.shift), opat.mask)] + end + local name, pat = match(opat, "^([a-z0-9_.]*)(.*)") + local altname, pat2 = match(pat, "|([a-z0-9_.]*)(.*)") + if altname then pat = pat2 end + + for p in gmatch(pat, ".") do + local x = nil + if p == "R" then + x = map_gpr[band(rshift(op, rs), 31)] + rs = rs - 5 + elseif p == "F" then + x = "f"..band(rshift(op, rs), 31) + rs = rs - 5 + elseif p == "A" then + x = band(rshift(op, rs), 31) + rs = rs - 5 + elseif p == "S" then + x = arshift(lshift(op, 27-rs), 27) + rs = rs - 5 + elseif p == "I" then + x = arshift(lshift(op, 16), 16) + elseif p == "U" then + x = band(op, 0xffff) + elseif p == "D" or p == "E" then + local disp = arshift(lshift(op, 16), 16) + if p == "E" then disp = band(disp, -4) end + if last == "r0" then last = "0" end + operands[#operands] = format("%d(%s)", disp, last) + elseif p >= "2" and p <= "8" then + local disp = band(rshift(op, rs), 31) * p + if last == "r0" then last = "0" end + operands[#operands] = format("%d(%s)", disp, last) + elseif p == "H" then + x = band(rshift(op, rs), 31) + lshift(band(op, 2), 4) + rs = rs - 5 + elseif p == "M" then + x = band(rshift(op, rs), 31) + band(op, 0x20) + elseif p == "C" then + x = condfmt(band(rshift(op, rs), 31)) + rs = rs - 5 + elseif p == "B" then + local bo = rshift(op, 21) + local cond = band(rshift(op, 16), 31) + local cn = "" + rs = rs - 10 + if band(bo, 4) == 0 then + cn = band(bo, 2) == 0 and "dnz" or "dz" + if band(bo, 0x10) == 0 then + cn = cn..(band(bo, 8) == 0 and "f" or "t") + end + if band(bo, 0x10) == 0 then x = condfmt(cond) end + name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+") + elseif band(bo, 0x10) == 0 then + cn = map_cond[band(cond, 3) + (band(bo, 8) == 0 and 4 or 0)] + if cond > 3 then x = "cr"..rshift(cond, 2) end + name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+") + end + name = gsub(name, "_", cn) + elseif p == "J" then + x = arshift(lshift(op, 27-rs), 29-rs)*4 + if band(op, 2) == 0 then x = ctx.addr + pos + x end + ctx.rel = x + x = "0x"..tohex(x) + elseif p == "K" then + if band(op, 1) ~= 0 then name = name.."l" end + if band(op, 2) ~= 0 then name = name.."a" end + elseif p == "X" or p == "Y" then + x = 
band(rshift(op, rs+2), 7) + if x == 0 and p == "Y" then x = nil else x = "cr"..x end + rs = rs - 5 + elseif p == "W" then + x = "cr"..band(op, 7) + elseif p == "Z" then + x = band(rshift(op, rs-4), 255) + rs = rs - 10 + elseif p == ">" then + operands[#operands] = rshift(operands[#operands], 1) + elseif p == "0" then + if last == "r0" then + operands[#operands] = nil + if altname then name = altname end + end + elseif p == "L" then + name = gsub(name, "_", band(op, 0x00200000) ~= 0 and "d" or "w") + elseif p == "." then + if band(op, 1) == 1 then name = name.."." end + elseif p == "N" then + if op == 0x60000000 then name = "nop"; break end + elseif p == "~" then + local n = #operands + operands[n-1], operands[n] = operands[n], operands[n-1] + elseif p == "=" then + local n = #operands + if last == operands[n-1] then + operands[n] = nil + name = altname + end + elseif p == "%" then + local n = #operands + if last == operands[n-1] and last == operands[n-2] then + operands[n] = nil + operands[n-1] = nil + name = altname + end + elseif p == "-" then + rs = rs - 5 + else + assert(false) + end + if x then operands[#operands+1] = x; last = x end + end + + return putop(ctx, name, operands) +end + +------------------------------------------------------------------------------ + +-- Disassemble a block of code. +local function disass_block(ctx, ofs, len) + if not ofs then ofs = 0 end + local stop = len and ofs+len or #ctx.code + stop = stop - stop % 4 + ctx.pos = ofs - ofs % 4 + ctx.rel = nil + while ctx.pos < stop do disass_ins(ctx) end +end + +-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). +local function create(code, addr, out) + local ctx = {} + ctx.code = code + ctx.addr = addr or 0 + ctx.out = out or io.write + ctx.symtab = {} + ctx.disass = disass_block + ctx.hexdump = 8 + return ctx +end + +-- Simple API: disassemble code (a string) at address and output via out. +local function disass(code, addr, out) + create(code, addr, out):disass() +end + +-- Return register name for RID. +local function regname(r) + if r < 32 then return map_gpr[r] end + return "f"..(r-32) +end + +-- Public module functions. +return { + create = create, + disass = disass, + regname = regname +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x64.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x64.lua new file mode 100644 index 00000000000..15d55243a85 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x64.lua @@ -0,0 +1,17 @@ +---------------------------------------------------------------------------- +-- LuaJIT x64 disassembler wrapper module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This module just exports the 64 bit functions from the combined +-- x86/x64 disassembler module. All the interesting stuff is there. 
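+--
+-- A minimal usage sketch (editorial addition, not upstream text; the require
+-- path is an assumption and depends on how the jit.* modules are installed):
+--
+--   local dis = require("jit.dis_x64")
+--   dis.disass(string.char(0x90), 0)  -- the single byte 0x90 should print as nop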
+------------------------------------------------------------------------------ + +local dis_x86 = require((string.match(..., ".*%.") or "").."dis_x86") +return { + create = dis_x86.create64, + disass = dis_x86.disass64, + regname = dis_x86.regname64 +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x86.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x86.lua new file mode 100644 index 00000000000..6bc38066fe3 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dis_x86.lua @@ -0,0 +1,838 @@ +---------------------------------------------------------------------------- +-- LuaJIT x86/x64 disassembler module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This is a helper module used by the LuaJIT machine code dumper module. +-- +-- Sending small code snippets to an external disassembler and mixing the +-- output with our own stuff was too fragile. So I had to bite the bullet +-- and write yet another x86 disassembler. Oh well ... +-- +-- The output format is very similar to what ndisasm generates. But it has +-- been developed independently by looking at the opcode tables from the +-- Intel and AMD manuals. The supported instruction set is quite extensive +-- and reflects what a current generation Intel or AMD CPU implements in +-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3, +-- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM) +-- instructions. +-- +-- Notes: +-- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported. +-- * No attempt at optimization has been made -- it's fast enough for my needs. +-- * The public API may change when more architectures are added. +------------------------------------------------------------------------------ + +local type = type +local sub, byte, format = string.sub, string.byte, string.format +local match, gmatch, gsub = string.match, string.gmatch, string.gsub +local lower, rep = string.lower, string.rep +local bit = require("bit") +local tohex = bit.tohex + +-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on. 
+local map_opc1_32 = { +--0x +[0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es", +"orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*", +--1x +"adcBmr","adcVmr","adcBrm","adcVrm","adcBai","adcVai","push ss","pop ss", +"sbbBmr","sbbVmr","sbbBrm","sbbVrm","sbbBai","sbbVai","push ds","pop ds", +--2x +"andBmr","andVmr","andBrm","andVrm","andBai","andVai","es:seg","daa", +"subBmr","subVmr","subBrm","subVrm","subBai","subVai","cs:seg","das", +--3x +"xorBmr","xorVmr","xorBrm","xorVrm","xorBai","xorVai","ss:seg","aaa", +"cmpBmr","cmpVmr","cmpBrm","cmpVrm","cmpBai","cmpVai","ds:seg","aas", +--4x +"incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR", +"decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR", +--5x +"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR", +"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR", +--6x +"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr", +"fs:seg","gs:seg","o16:","a16", +"pushUi","imulVrmi","pushBs","imulVrms", +"insb","insVS","outsb","outsVS", +--7x +"joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj", +"jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj", +--8x +"arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms", +"testBmr","testVmr","xchgBrm","xchgVrm", +"movBmr","movVmr","movBrm","movVrm", +"movVmg","leaVrm","movWgm","popUm", +--9x +"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR", +"xchgVaR","xchgVaR","xchgVaR","xchgVaR", +"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait", +"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf", +--Ax +"movBao","movVao","movBoa","movVoa", +"movsb","movsVS","cmpsb","cmpsVS", +"testBai","testVai","stosb","stosVS", +"lodsb","lodsVS","scasb","scasVS", +--Bx +"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi", +"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI", +--Cx +"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi", +"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS", +--Dx +"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb", +"fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7", +--Ex +"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj", +"inBau","inVau","outBua","outVua", +"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda", +--Fx +"lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm", +"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm", +} +assert(#map_opc1_32 == 255) + +-- Map for 1st opcode byte in 64 bit mode (overrides only). +local map_opc1_64 = setmetatable({ + [0x06]=false, [0x07]=false, [0x0e]=false, + [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false, + [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false, + [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:", + [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb", + [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb", + [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb", + [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb", + [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false, + [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false, +}, { __index = map_opc1_32 }) + +-- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you. 
+-- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2 +local map_opc2 = { +--0x +[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret", +"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu", +--1x +"movupsXrm|movssXrm|movupdXrm|movsdXrm", +"movupsXmr|movssXmr|movupdXmr|movsdXmr", +"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm", +"movlpsXmr||movlpdXmr", +"unpcklpsXrm||unpcklpdXrm", +"unpckhpsXrm||unpckhpdXrm", +"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm", +"movhpsXmr||movhpdXmr", +"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm", +"hintnopVm","hintnopVm","hintnopVm","hintnopVm", +--2x +"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil, +"movapsXrm||movapdXrm", +"movapsXmr||movapdXmr", +"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt", +"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr", +"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm", +"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm", +"ucomissXrm||ucomisdXrm", +"comissXrm||comisdXrm", +--3x +"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec", +"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil, +--4x +"cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm", +"cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm", +"cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm", +"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm", +--5x +"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm", +"rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm", +"andpsXrm||andpdXrm","andnpsXrm||andnpdXrm", +"orpsXrm||orpdXrm","xorpsXrm||xorpdXrm", +"addpsXrm|addssXrm|addpdXrm|addsdXrm","mulpsXrm|mulssXrm|mulpdXrm|mulsdXrm", +"cvtps2pdXrm|cvtss2sdXrm|cvtpd2psXrm|cvtsd2ssXrm", +"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm", +"subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm", +"divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm", +--6x +"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm", +"pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm", +"punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm", +"||punpcklqdqXrm","||punpckhqdqXrm", +"movPrVSm","movqMrm|movdquXrm|movdqaXrm", +--7x +"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu", +"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu", +"pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|", +"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$", +nil,nil, +"||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm", +"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr", +--8x +"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj", +"jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj", +--9x +"setoBm","setnoBm","setbBm","setnbBm","setzBm","setnzBm","setbeBm","setaBm", +"setsBm","setnsBm","setpeBm","setpoBm","setlBm","setgeBm","setleBm","setgBm", +--Ax +"push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil, +"push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm", +--Bx +"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr", +"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt", +"|popcntVrm","ud2Dp","bt!Vmu","btcVmr", +"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt", +--Cx +"xaddBmr","xaddVmr", +"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|", +"pinsrwPrWmu","pextrwDrPmu", +"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp", +"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR", +--Dx +"||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm", +"paddqPrm","pmullwPrm", 
+"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm", +"psubusbPrm","psubuswPrm","pminubPrm","pandPrm", +"paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm", +--Ex +"pavgbPrm","psrawPrm","psradPrm","pavgwPrm", +"pmulhuwPrm","pmulhwPrm", +"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr", +"psubsbPrm","psubswPrm","pminswPrm","porPrm", +"paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm", +--Fx +"|||lddquXrm","psllwPrm","pslldPrm","psllqPrm", +"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$", +"psubbPrm","psubwPrm","psubdPrm","psubqPrm", +"paddbPrm","paddwPrm","padddPrm","ud", +} +assert(map_opc2[255] == "ud") + +-- Map for three-byte opcodes. Can't wait for their next invention. +local map_opc3 = { +["38"] = { -- [66] 0f 38 xx +--0x +[0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm", +"pmaddubswPrm","phsubwPrm","phsubdPrm","phsubswPrm", +"psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm", +nil,nil,nil,nil, +--1x +"||pblendvbXrma",nil,nil,nil, +"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm", +nil,nil,nil,nil, +"pabsbPrm","pabswPrm","pabsdPrm",nil, +--2x +"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm", +"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil, +"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm", +nil,nil,nil,nil, +--3x +"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm", +"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm", +"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm", +"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm", +--4x +"||pmulddXrm","||phminposuwXrm", +--Fx +[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt", +}, + +["3a"] = { -- [66] 0f 3a xx +--0x +[0x00]=nil,nil,nil,nil,nil,nil,nil,nil, +"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu", +"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu", +--1x +nil,nil,nil,nil, +"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru", +nil,nil,nil,nil,nil,nil,nil,nil, +--2x +"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil, +--4x +[0x40] = "||dppsXrmu", +[0x41] = "||dppdXrmu", +[0x42] = "||mpsadbwXrmu", +--6x +[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu", +[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu", +}, +} + +-- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands). +local map_opcvm = { +[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff", +[0xc8]="monitor",[0xc9]="mwait", +[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave", +[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga", +[0xf8]="swapgs",[0xf9]="rdtscp", +} + +-- Map for FP opcodes. And you thought stack machines are simple? +local map_opcfp = { +-- D8-DF 00-BF: opcodes with a memory operand. +-- D8 +[0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm", +"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm", +-- DA +"fiaddDm","fimulDm","ficomDm","ficompDm", +"fisubDm","fisubrDm","fidivDm","fidivrDm", +-- DB +"fildDm","fisttpDm","fistDm","fistpDm",nil,"fld twordFmp",nil,"fstp twordFmp", +-- DC +"faddGm","fmulGm","fcomGm","fcompGm","fsubGm","fsubrGm","fdivGm","fdivrGm", +-- DD +"fldGm","fisttpQm","fstGm","fstpGm","frstorDmp",nil,"fnsaveDmp","fnstswWm", +-- DE +"fiaddWm","fimulWm","ficomWm","ficompWm", +"fisubWm","fisubrWm","fidivWm","fidivrWm", +-- DF +"fildWm","fisttpWm","fistWm","fistpWm", +"fbld twordFmp","fildQm","fbstp twordFmp","fistpQm", +-- xx C0-FF: opcodes with a pseudo-register operand. 
+-- D8 +"faddFf","fmulFf","fcomFf","fcompFf","fsubFf","fsubrFf","fdivFf","fdivrFf", +-- D9 +"fldFf","fxchFf",{"fnop"},nil, +{"fchs","fabs",nil,nil,"ftst","fxam"}, +{"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz"}, +{"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp"}, +{"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"}, +-- DA +"fcmovbFf","fcmoveFf","fcmovbeFf","fcmovuFf",nil,{nil,"fucompp"},nil,nil, +-- DB +"fcmovnbFf","fcmovneFf","fcmovnbeFf","fcmovnuFf", +{nil,nil,"fnclex","fninit"},"fucomiFf","fcomiFf",nil, +-- DC +"fadd toFf","fmul toFf",nil,nil, +"fsub toFf","fsubr toFf","fdivr toFf","fdiv toFf", +-- DD +"ffreeFf",nil,"fstFf","fstpFf","fucomFf","fucompFf",nil,nil, +-- DE +"faddpFf","fmulpFf",nil,{nil,"fcompp"}, +"fsubrpFf","fsubpFf","fdivrpFf","fdivpFf", +-- DF +nil,nil,nil,nil,{"fnstsw ax"},"fucomipFf","fcomipFf",nil, +} +assert(map_opcfp[126] == "fcomipFf") + +-- Map for opcode groups. The subkey is sp from the ModRM byte. +local map_opcgroup = { + arith = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" }, + shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" }, + testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" }, + testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" }, + incb = { "inc", "dec" }, + incd = { "inc", "dec", "callUmp", "$call farDmp", + "jmpUmp", "$jmp farDmp", "pushUm" }, + sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" }, + sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt", + "smsw", nil, "lmsw", "vm*$invlpg" }, + bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" }, + cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil, + nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" }, + pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" }, + pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" }, + pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" }, + pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" }, + fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr", + nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" }, + prefetch = { "prefetch", "prefetchw" }, + prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" }, +} + +------------------------------------------------------------------------------ + +-- Maps for register names. +local map_regs = { + B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", + "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, + B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", + "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, + W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", + "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" }, + D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", + "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" }, + Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" }, + M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", + "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext! + X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" }, +} +local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" } + +-- Maps for size names. 
+local map_sz2n = { + B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16, +} +local map_sz2prefix = { + B = "byte", W = "word", D = "dword", + Q = "qword", + M = "qword", X = "xword", + F = "dword", G = "qword", -- No need for sizes/register names for these two. +} + +------------------------------------------------------------------------------ + +-- Output a nicely formatted line with an opcode and operands. +local function putop(ctx, text, operands) + local code, pos, hex = ctx.code, ctx.pos, "" + local hmax = ctx.hexdump + if hmax > 0 then + for i=ctx.start,pos-1 do + hex = hex..format("%02X", byte(code, i, i)) + end + if #hex > hmax then hex = sub(hex, 1, hmax)..". " + else hex = hex..rep(" ", hmax-#hex+2) end + end + if operands then text = text.." "..operands end + if ctx.o16 then text = "o16 "..text; ctx.o16 = false end + if ctx.a32 then text = "a32 "..text; ctx.a32 = false end + if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end + if ctx.rex then + local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "").. + (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "") + if t ~= "" then text = "rex."..t.." "..text end + ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false + ctx.rex = false + end + if ctx.seg then + local text2, n = gsub(text, "%[", "["..ctx.seg..":") + if n == 0 then text = ctx.seg.." "..text else text = text2 end + ctx.seg = false + end + if ctx.lock then text = "lock "..text; ctx.lock = false end + local imm = ctx.imm + if imm then + local sym = ctx.symtab[imm] + if sym then text = text.."\t->"..sym end + end + ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text)) + ctx.mrm = false + ctx.start = pos + ctx.imm = nil +end + +-- Clear all prefix flags. +local function clearprefixes(ctx) + ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false + ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false + ctx.rex = false; ctx.a32 = false +end + +-- Fallback for incomplete opcodes at the end. +local function incomplete(ctx) + ctx.pos = ctx.stop+1 + clearprefixes(ctx) + return putop(ctx, "(incomplete)") +end + +-- Fallback for unknown opcodes. +local function unknown(ctx) + clearprefixes(ctx) + return putop(ctx, "(unknown)") +end + +-- Return an immediate of the specified size. +local function getimm(ctx, pos, n) + if pos+n-1 > ctx.stop then return incomplete(ctx) end + local code = ctx.code + if n == 1 then + local b1 = byte(code, pos, pos) + return b1 + elseif n == 2 then + local b1, b2 = byte(code, pos, pos+1) + return b1+b2*256 + else + local b1, b2, b3, b4 = byte(code, pos, pos+3) + local imm = b1+b2*256+b3*65536+b4*16777216 + ctx.imm = imm + return imm + end +end + +-- Process pattern string and generate the operands. 
+local function putpat(ctx, name, pat) + local operands, regs, sz, mode, sp, rm, sc, rx, sdisp + local code, pos, stop = ctx.code, ctx.pos, ctx.stop + + -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz + for p in gmatch(pat, ".") do + local x = nil + if p == "V" or p == "U" then + if ctx.rexw then sz = "Q"; ctx.rexw = false + elseif ctx.o16 then sz = "W"; ctx.o16 = false + elseif p == "U" and ctx.x64 then sz = "Q" + else sz = "D" end + regs = map_regs[sz] + elseif p == "T" then + if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end + regs = map_regs[sz] + elseif p == "B" then + sz = "B" + regs = ctx.rex and map_regs.B64 or map_regs.B + elseif match(p, "[WDQMXFG]") then + sz = p + regs = map_regs[sz] + elseif p == "P" then + sz = ctx.o16 and "X" or "M"; ctx.o16 = false + regs = map_regs[sz] + elseif p == "S" then + name = name..lower(sz) + elseif p == "s" then + local imm = getimm(ctx, pos, 1); if not imm then return end + x = imm <= 127 and format("+0x%02x", imm) + or format("-0x%02x", 256-imm) + pos = pos+1 + elseif p == "u" then + local imm = getimm(ctx, pos, 1); if not imm then return end + x = format("0x%02x", imm) + pos = pos+1 + elseif p == "w" then + local imm = getimm(ctx, pos, 2); if not imm then return end + x = format("0x%x", imm) + pos = pos+2 + elseif p == "o" then -- [offset] + if ctx.x64 then + local imm1 = getimm(ctx, pos, 4); if not imm1 then return end + local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end + x = format("[0x%08x%08x]", imm2, imm1) + pos = pos+8 + else + local imm = getimm(ctx, pos, 4); if not imm then return end + x = format("[0x%08x]", imm) + pos = pos+4 + end + elseif p == "i" or p == "I" then + local n = map_sz2n[sz] + if n == 8 and ctx.x64 and p == "I" then + local imm1 = getimm(ctx, pos, 4); if not imm1 then return end + local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end + x = format("0x%08x%08x", imm2, imm1) + else + if n == 8 then n = 4 end + local imm = getimm(ctx, pos, n); if not imm then return end + if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then + imm = (0xffffffff+1)-imm + x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm) + else + x = format(imm > 65535 and "0x%08x" or "0x%x", imm) + end + end + pos = pos+n + elseif p == "j" then + local n = map_sz2n[sz] + if n == 8 then n = 4 end + local imm = getimm(ctx, pos, n); if not imm then return end + if sz == "B" and imm > 127 then imm = imm-256 + elseif imm > 2147483647 then imm = imm-4294967296 end + pos = pos+n + imm = imm + pos + ctx.addr + if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end + ctx.imm = imm + if sz == "W" then + x = format("word 0x%04x", imm%65536) + elseif ctx.x64 then + local lo = imm % 0x1000000 + x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo) + else + x = "0x"..tohex(imm) + end + elseif p == "R" then + local r = byte(code, pos-1, pos-1)%8 + if ctx.rexb then r = r + 8; ctx.rexb = false end + x = regs[r+1] + elseif p == "a" then x = regs[1] + elseif p == "c" then x = "cl" + elseif p == "d" then x = "dx" + elseif p == "1" then x = "1" + else + if not mode then + mode = ctx.mrm + if not mode then + if pos > stop then return incomplete(ctx) end + mode = byte(code, pos, pos) + pos = pos+1 + end + rm = mode%8; mode = (mode-rm)/8 + sp = mode%8; mode = (mode-sp)/8 + sdisp = "" + if mode < 3 then + if rm == 4 then + if pos > stop then return incomplete(ctx) end + sc = byte(code, pos, pos) + pos = pos+1 + rm = sc%8; sc = (sc-rm)/8 + rx = sc%8; sc = (sc-rx)/8 + if ctx.rexx then rx = rx + 8; ctx.rexx = false end + if rx == 4 then 
rx = nil end + end + if mode > 0 or rm == 5 then + local dsz = mode + if dsz ~= 1 then dsz = 4 end + local disp = getimm(ctx, pos, dsz); if not disp then return end + if mode == 0 then rm = nil end + if rm or rx or (not sc and ctx.x64 and not ctx.a32) then + if dsz == 1 and disp > 127 then + sdisp = format("-0x%x", 256-disp) + elseif disp >= 0 and disp <= 0x7fffffff then + sdisp = format("+0x%x", disp) + else + sdisp = format("-0x%x", (0xffffffff+1)-disp) + end + else + sdisp = format(ctx.x64 and not ctx.a32 and + not (disp >= 0 and disp <= 0x7fffffff) + and "0xffffffff%08x" or "0x%08x", disp) + end + pos = pos+dsz + end + end + if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end + if ctx.rexr then sp = sp + 8; ctx.rexr = false end + end + if p == "m" then + if mode == 3 then x = regs[rm+1] + else + local aregs = ctx.a32 and map_regs.D or ctx.aregs + local srm, srx = "", "" + if rm then srm = aregs[rm+1] + elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end + ctx.a32 = false + if rx then + if rm then srm = srm.."+" end + srx = aregs[rx+1] + if sc > 0 then srx = srx.."*"..(2^sc) end + end + x = format("[%s%s%s]", srm, srx, sdisp) + end + if mode < 3 and + (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck. + x = map_sz2prefix[sz].." "..x + end + elseif p == "r" then x = regs[sp+1] + elseif p == "g" then x = map_segregs[sp+1] + elseif p == "p" then -- Suppress prefix. + elseif p == "f" then x = "st"..rm + elseif p == "x" then + if sp == 0 and ctx.lock and not ctx.x64 then + x = "CR8"; ctx.lock = false + else + x = "CR"..sp + end + elseif p == "y" then x = "DR"..sp + elseif p == "z" then x = "TR"..sp + elseif p == "t" then + else + error("bad pattern `"..pat.."'") + end + end + if x then operands = operands and operands..", "..x or x end + end + ctx.pos = pos + return putop(ctx, name, operands) +end + +-- Forward declaration. +local map_act + +-- Fetch and cache MRM byte. +local function getmrm(ctx) + local mrm = ctx.mrm + if not mrm then + local pos = ctx.pos + if pos > ctx.stop then return nil end + mrm = byte(ctx.code, pos, pos) + ctx.pos = pos+1 + ctx.mrm = mrm + end + return mrm +end + +-- Dispatch to handler depending on pattern. +local function dispatch(ctx, opat, patgrp) + if not opat then return unknown(ctx) end + if match(opat, "%|") then -- MMX/SSE variants depending on prefix. + local p + if ctx.rep then + p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)" + ctx.rep = false + elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false + else p = "^[^%|]*" end + opat = match(opat, p) + if not opat then return unknown(ctx) end +-- ctx.rep = false; ctx.o16 = false + --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi] + --XXX remove in branches? + end + if match(opat, "%$") then -- reg$mem variants. + local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end + opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)") + if opat == "" then return unknown(ctx) end + end + if opat == "" then return unknown(ctx) end + local name, pat = match(opat, "^([a-z0-9 ]*)(.*)") + if pat == "" and patgrp then pat = patgrp end + return map_act[sub(pat, 1, 1)](ctx, name, pat) +end + +-- Get a pattern from an opcode map and dispatch to handler. +local function dispatchmap(ctx, opcmap) + local pos = ctx.pos + local opat = opcmap[byte(ctx.code, pos, pos)] + pos = pos + 1 + ctx.pos = pos + return dispatch(ctx, opat) +end + +-- Map for action codes. The key is the first char after the name. +map_act = { + -- Simple opcodes without operands. 
+ [""] = function(ctx, name, pat) + return putop(ctx, name) + end, + + -- Operand size chars fall right through. + B = putpat, W = putpat, D = putpat, Q = putpat, + V = putpat, U = putpat, T = putpat, + M = putpat, X = putpat, P = putpat, + F = putpat, G = putpat, + + -- Collect prefixes. + [":"] = function(ctx, name, pat) + ctx[pat == ":" and name or sub(pat, 2)] = name + if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes. + end, + + -- Chain to special handler specified by name. + ["*"] = function(ctx, name, pat) + return map_act[name](ctx, name, sub(pat, 2)) + end, + + -- Use named subtable for opcode group. + ["!"] = function(ctx, name, pat) + local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end + return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2)) + end, + + -- o16,o32[,o64] variants. + sz = function(ctx, name, pat) + if ctx.o16 then ctx.o16 = false + else + pat = match(pat, ",(.*)") + if ctx.rexw then + local p = match(pat, ",(.*)") + if p then pat = p; ctx.rexw = false end + end + end + pat = match(pat, "^[^,]*") + return dispatch(ctx, pat) + end, + + -- Two-byte opcode dispatch. + opc2 = function(ctx, name, pat) + return dispatchmap(ctx, map_opc2) + end, + + -- Three-byte opcode dispatch. + opc3 = function(ctx, name, pat) + return dispatchmap(ctx, map_opc3[pat]) + end, + + -- VMX/SVM dispatch. + vm = function(ctx, name, pat) + return dispatch(ctx, map_opcvm[ctx.mrm]) + end, + + -- Floating point opcode dispatch. + fp = function(ctx, name, pat) + local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end + local rm = mrm%8 + local idx = pat*8 + ((mrm-rm)/8)%8 + if mrm >= 192 then idx = idx + 64 end + local opat = map_opcfp[idx] + if type(opat) == "table" then opat = opat[rm+1] end + return dispatch(ctx, opat) + end, + + -- REX prefix. + rex = function(ctx, name, pat) + if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed. + for p in gmatch(pat, ".") do ctx["rex"..p] = true end + ctx.rex = true + end, + + -- Special case for nop with REX prefix. + nop = function(ctx, name, pat) + return dispatch(ctx, ctx.rex and pat or "nop") + end, +} + +------------------------------------------------------------------------------ + +-- Disassemble a block of code. +local function disass_block(ctx, ofs, len) + if not ofs then ofs = 0 end + local stop = len and ofs+len or #ctx.code + ofs = ofs + 1 + ctx.start = ofs + ctx.pos = ofs + ctx.stop = stop + ctx.imm = nil + ctx.mrm = false + clearprefixes(ctx) + while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end + if ctx.pos ~= ctx.start then incomplete(ctx) end +end + +-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). +local function create(code, addr, out) + local ctx = {} + ctx.code = code + ctx.addr = (addr or 0) - 1 + ctx.out = out or io.write + ctx.symtab = {} + ctx.disass = disass_block + ctx.hexdump = 16 + ctx.x64 = false + ctx.map1 = map_opc1_32 + ctx.aregs = map_regs.D + return ctx +end + +local function create64(code, addr, out) + local ctx = create(code, addr, out) + ctx.x64 = true + ctx.map1 = map_opc1_64 + ctx.aregs = map_regs.Q + return ctx +end + +-- Simple API: disassemble code (a string) at address and output via out. +local function disass(code, addr, out) + create(code, addr, out):disass() +end + +local function disass64(code, addr, out) + create64(code, addr, out):disass() +end + +-- Return register name for RID. 
+local function regname(r) + if r < 8 then return map_regs.D[r+1] end + return map_regs.X[r-7] +end + +local function regname64(r) + if r < 16 then return map_regs.Q[r+1] end + return map_regs.X[r-15] +end + +-- Public module functions. +return { + create = create, + create64 = create64, + disass = disass, + disass64 = disass64, + regname = regname, + regname64 = regname64 +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/dump.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/dump.lua new file mode 100644 index 00000000000..5f858492bc4 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/dump.lua @@ -0,0 +1,706 @@ +---------------------------------------------------------------------------- +-- LuaJIT compiler dump module. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module can be used to debug the JIT compiler itself. It dumps the +-- code representations and structures used in various compiler stages. +-- +-- Example usage: +-- +-- luajit -jdump -e "local x=0; for i=1,1e6 do x=x+i end; print(x)" +-- luajit -jdump=im -e "for i=1,1000 do for j=1,1000 do end end" | less -R +-- luajit -jdump=is myapp.lua | less -R +-- luajit -jdump=-b myapp.lua +-- luajit -jdump=+aH,myapp.html myapp.lua +-- luajit -jdump=ixT,myapp.dump myapp.lua +-- +-- The first argument specifies the dump mode. The second argument gives +-- the output file name. Default output is to stdout, unless the environment +-- variable LUAJIT_DUMPFILE is set. The file is overwritten every time the +-- module is started. +-- +-- Different features can be turned on or off with the dump mode. If the +-- mode starts with a '+', the following features are added to the default +-- set of features; a '-' removes them. Otherwise the features are replaced. +-- +-- The following dump features are available (* marks the default): +-- +-- * t Print a line for each started, ended or aborted trace (see also -jv). +-- * b Dump the traced bytecode. +-- * i Dump the IR (intermediate representation). +-- r Augment the IR with register/stack slots. +-- s Dump the snapshot map. +-- * m Dump the generated machine code. +-- x Print each taken trace exit. +-- X Print each taken trace exit and the contents of all registers. +-- a Print the IR of aborted traces, too. +-- +-- The output format can be set with the following characters: +-- +-- T Plain text output. +-- A ANSI-colored text output +-- H Colorized HTML + CSS output. +-- +-- The default output format is plain text. It's set to ANSI-colored text +-- if the COLORTERM variable is set. Note: this is independent of any output +-- redirection, which is actually considered a feature. +-- +-- You probably want to use less -R to enjoy viewing ANSI-colored text from +-- a pipe or a file. Add this to your ~/.bashrc: export LESS="-R" +-- +------------------------------------------------------------------------------ + +-- Cache some library functions and objects. 
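As a hedged aside, the dump described in the header comment above does not have to be requested on the command line; the module's public on()/off() functions (exported at the bottom of this file) accept the same mode string plus an output file name. The file name here is an arbitrary example:

    -- Illustrative sketch only: enable -jdump style output from Lua code.
    local dump = require("jit.dump")
    dump.on("+rs", "myapp.dump")   -- add register/snapshot detail, write to a file
    -- ... run the code whose traces should be dumped ...
    dump.off()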
+local jit = require("jit") +assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") +local jutil = require("jit.util") +local vmdef = require("jit.vmdef") +local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc +local traceinfo, traceir, tracek = jutil.traceinfo, jutil.traceir, jutil.tracek +local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap +local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr +local bit = require("bit") +local band, shl, shr, tohex = bit.band, bit.lshift, bit.rshift, bit.tohex +local sub, gsub, format = string.sub, string.gsub, string.format +local byte, char, rep = string.byte, string.char, string.rep +local type, tostring = type, tostring +local stdout, stderr = io.stdout, io.stderr + +-- Load other modules on-demand. +local bcline, disass + +-- Active flag, output file handle and dump mode. +local active, out, dumpmode + +------------------------------------------------------------------------------ + +local symtabmt = { __index = false } +local symtab = {} +local nexitsym = 0 + +-- Fill nested symbol table with per-trace exit stub addresses. +local function fillsymtab_tr(tr, nexit) + local t = {} + symtabmt.__index = t + if jit.arch == "mips" or jit.arch == "mipsel" then + t[traceexitstub(tr, 0)] = "exit" + return + end + for i=0,nexit-1 do + local addr = traceexitstub(tr, i) + if addr < 0 then addr = addr + 2^32 end + t[addr] = tostring(i) + end + local addr = traceexitstub(tr, nexit) + if addr then t[addr] = "stack_check" end +end + +-- Fill symbol table with trace exit stub addresses. +local function fillsymtab(tr, nexit) + local t = symtab + if nexitsym == 0 then + local ircall = vmdef.ircall + for i=0,#ircall do + local addr = ircalladdr(i) + if addr ~= 0 then + if addr < 0 then addr = addr + 2^32 end + t[addr] = ircall[i] + end + end + end + if nexitsym == 1000000 then -- Per-trace exit stubs. + fillsymtab_tr(tr, nexit) + elseif nexit > nexitsym then -- Shared exit stubs. + for i=nexitsym,nexit-1 do + local addr = traceexitstub(i) + if addr == nil then -- Fall back to per-trace exit stubs. + fillsymtab_tr(tr, nexit) + setmetatable(symtab, symtabmt) + nexit = 1000000 + break + end + if addr < 0 then addr = addr + 2^32 end + t[addr] = tostring(i) + end + nexitsym = nexit + end + return t +end + +local function dumpwrite(s) + out:write(s) +end + +-- Disassemble machine code. 
+local function dump_mcode(tr) + local info = traceinfo(tr) + if not info then return end + local mcode, addr, loop = tracemc(tr) + if not mcode then return end + if not disass then disass = require("jit.dis_"..jit.arch) end + if addr < 0 then addr = addr + 2^32 end + out:write("---- TRACE ", tr, " mcode ", #mcode, "\n") + local ctx = disass.create(mcode, addr, dumpwrite) + ctx.hexdump = 0 + ctx.symtab = fillsymtab(tr, info.nexit) + if loop ~= 0 then + symtab[addr+loop] = "LOOP" + ctx:disass(0, loop) + out:write("->LOOP:\n") + ctx:disass(loop, #mcode-loop) + symtab[addr+loop] = nil + else + ctx:disass(0, #mcode) + end +end + +------------------------------------------------------------------------------ + +local irtype_text = { + [0] = "nil", + "fal", + "tru", + "lud", + "str", + "p32", + "thr", + "pro", + "fun", + "p64", + "cdt", + "tab", + "udt", + "flt", + "num", + "i8 ", + "u8 ", + "i16", + "u16", + "int", + "u32", + "i64", + "u64", + "sfp", +} + +local colortype_ansi = { + [0] = "%s", + "%s", + "%s", + "\027[36m%s\027[m", + "\027[32m%s\027[m", + "%s", + "\027[1m%s\027[m", + "%s", + "\027[1m%s\027[m", + "%s", + "\027[33m%s\027[m", + "\027[31m%s\027[m", + "\027[36m%s\027[m", + "\027[34m%s\027[m", + "\027[34m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", +} + +local function colorize_text(s, t) + return s +end + +local function colorize_ansi(s, t) + return format(colortype_ansi[t], s) +end + +local irtype_ansi = setmetatable({}, + { __index = function(tab, t) + local s = colorize_ansi(irtype_text[t], t); tab[t] = s; return s; end }) + +local html_escape = { ["<"] = "<", [">"] = ">", ["&"] = "&", } + +local function colorize_html(s, t) + s = gsub(s, "[<>&]", html_escape) + return format('%s', irtype_text[t], s) +end + +local irtype_html = setmetatable({}, + { __index = function(tab, t) + local s = colorize_html(irtype_text[t], t); tab[t] = s; return s; end }) + +local header_html = [[ + +]] + +local colorize, irtype + +-- Lookup tables to convert some literals into names. +local litname = { + ["SLOAD "] = setmetatable({}, { __index = function(t, mode) + local s = "" + if band(mode, 1) ~= 0 then s = s.."P" end + if band(mode, 2) ~= 0 then s = s.."F" end + if band(mode, 4) ~= 0 then s = s.."T" end + if band(mode, 8) ~= 0 then s = s.."C" end + if band(mode, 16) ~= 0 then s = s.."R" end + if band(mode, 32) ~= 0 then s = s.."I" end + t[mode] = s + return s + end}), + ["XLOAD "] = { [0] = "", "R", "V", "RV", "U", "RU", "VU", "RVU", }, + ["CONV "] = setmetatable({}, { __index = function(t, mode) + local s = irtype[band(mode, 31)] + s = irtype[band(shr(mode, 5), 31)].."."..s + if band(mode, 0x800) ~= 0 then s = s.." sext" end + local c = shr(mode, 14) + if c == 2 then s = s.." index" elseif c == 3 then s = s.." 
check" end + t[mode] = s + return s + end}), + ["FLOAD "] = vmdef.irfield, + ["FREF "] = vmdef.irfield, + ["FPMATH"] = vmdef.irfpm, + ["BUFHDR"] = { [0] = "RESET", "APPEND" }, + ["TOSTR "] = { [0] = "INT", "NUM", "CHAR" }, +} + +local function ctlsub(c) + if c == "\n" then return "\\n" + elseif c == "\r" then return "\\r" + elseif c == "\t" then return "\\t" + else return format("\\%03d", byte(c)) + end +end + +local function fmtfunc(func, pc) + local fi = funcinfo(func, pc) + if fi.loc then + return fi.loc + elseif fi.ffid then + return vmdef.ffnames[fi.ffid] + elseif fi.addr then + return format("C:%x", fi.addr) + else + return "(?)" + end +end + +local function formatk(tr, idx) + local k, t, slot = tracek(tr, idx) + local tn = type(k) + local s + if tn == "number" then + if k == 2^52+2^51 then + s = "bias" + else + s = format("%+.14g", k) + end + elseif tn == "string" then + s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub)) + elseif tn == "function" then + s = fmtfunc(k) + elseif tn == "table" then + s = format("{%p}", k) + elseif tn == "userdata" then + if t == 12 then + s = format("userdata:%p", k) + else + s = format("[%p]", k) + if s == "[0x00000000]" then s = "NULL" end + end + elseif t == 21 then -- int64_t + s = sub(tostring(k), 1, -3) + if sub(s, 1, 1) ~= "-" then s = "+"..s end + else + s = tostring(k) -- For primitives. + end + s = colorize(format("%-4s", s), t) + if slot then + s = format("%s @%d", s, slot) + end + return s +end + +local function printsnap(tr, snap) + local n = 2 + for s=0,snap[1]-1 do + local sn = snap[n] + if shr(sn, 24) == s then + n = n + 1 + local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS + if ref < 0 then + out:write(formatk(tr, ref)) + elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM + out:write(colorize(format("%04d/%04d", ref, ref+1), 14)) + else + local m, ot, op1, op2 = traceir(tr, ref) + out:write(colorize(format("%04d", ref), band(ot, 31))) + end + out:write(band(sn, 0x10000) == 0 and " " or "|") -- SNAP_FRAME + else + out:write("---- ") + end + end + out:write("]\n") +end + +-- Dump snapshots (not interleaved with IR). +local function dump_snap(tr) + out:write("---- TRACE ", tr, " snapshots\n") + for i=0,1000000000 do + local snap = tracesnap(tr, i) + if not snap then break end + out:write(format("#%-3d %04d [ ", i, snap[0])) + printsnap(tr, snap) + end +end + +-- Return a register name or stack slot for a rid/sp location. +local function ridsp_name(ridsp, ins) + if not disass then disass = require("jit.dis_"..jit.arch) end + local rid, slot = band(ridsp, 0xff), shr(ridsp, 8) + if rid == 253 or rid == 254 then + return (slot == 0 or slot == 255) and " {sink" or format(" {%04d", ins-slot) + end + if ridsp > 255 then return format("[%x]", slot*4) end + if rid < 128 then return disass.regname(rid) end + return "" +end + +-- Dump CALL* function ref and return optional ctype. +local function dumpcallfunc(tr, ins) + local ctype + if ins > 0 then + local m, ot, op1, op2 = traceir(tr, ins) + if band(ot, 31) == 0 then -- nil type means CARG(func, ctype). + ins = op1 + ctype = formatk(tr, op2) + end + end + if ins < 0 then + out:write(format("[0x%x](", tonumber((tracek(tr, ins))))) + else + out:write(format("%04d (", ins)) + end + return ctype +end + +-- Recursively gather CALL* args and dump them. 
+local function dumpcallargs(tr, ins) + if ins < 0 then + out:write(formatk(tr, ins)) + else + local m, ot, op1, op2 = traceir(tr, ins) + local oidx = 6*shr(ot, 8) + local op = sub(vmdef.irnames, oidx+1, oidx+6) + if op == "CARG " then + dumpcallargs(tr, op1) + if op2 < 0 then + out:write(" ", formatk(tr, op2)) + else + out:write(" ", format("%04d", op2)) + end + else + out:write(format("%04d", ins)) + end + end +end + +-- Dump IR and interleaved snapshots. +local function dump_ir(tr, dumpsnap, dumpreg) + local info = traceinfo(tr) + if not info then return end + local nins = info.nins + out:write("---- TRACE ", tr, " IR\n") + local irnames = vmdef.irnames + local snapref = 65536 + local snap, snapno + if dumpsnap then + snap = tracesnap(tr, 0) + snapref = snap[0] + snapno = 0 + end + for ins=1,nins do + if ins >= snapref then + if dumpreg then + out:write(format(".... SNAP #%-3d [ ", snapno)) + else + out:write(format(".... SNAP #%-3d [ ", snapno)) + end + printsnap(tr, snap) + snapno = snapno + 1 + snap = tracesnap(tr, snapno) + snapref = snap and snap[0] or 65536 + end + local m, ot, op1, op2, ridsp = traceir(tr, ins) + local oidx, t = 6*shr(ot, 8), band(ot, 31) + local op = sub(irnames, oidx+1, oidx+6) + if op == "LOOP " then + if dumpreg then + out:write(format("%04d ------------ LOOP ------------\n", ins)) + else + out:write(format("%04d ------ LOOP ------------\n", ins)) + end + elseif op ~= "NOP " and op ~= "CARG " and + (dumpreg or op ~= "RENAME") then + local rid = band(ridsp, 255) + if dumpreg then + out:write(format("%04d %-6s", ins, ridsp_name(ridsp, ins))) + else + out:write(format("%04d ", ins)) + end + out:write(format("%s%s %s %s ", + (rid == 254 or rid == 253) and "}" or + (band(ot, 128) == 0 and " " or ">"), + band(ot, 64) == 0 and " " or "+", + irtype[t], op)) + local m1, m2 = band(m, 3), band(m, 3*4) + if sub(op, 1, 4) == "CALL" then + local ctype + if m2 == 1*4 then -- op2 == IRMlit + out:write(format("%-10s (", vmdef.ircall[op2])) + else + ctype = dumpcallfunc(tr, op2) + end + if op1 ~= -1 then dumpcallargs(tr, op1) end + out:write(")") + if ctype then out:write(" ctype ", ctype) end + elseif op == "CNEW " and op2 == -1 then + out:write(formatk(tr, op1)) + elseif m1 ~= 3 then -- op1 != IRMnone + if op1 < 0 then + out:write(formatk(tr, op1)) + else + out:write(format(m1 == 0 and "%04d" or "#%-3d", op1)) + end + if m2 ~= 3*4 then -- op2 != IRMnone + if m2 == 1*4 then -- op2 == IRMlit + local litn = litname[op] + if litn and litn[op2] then + out:write(" ", litn[op2]) + elseif op == "UREFO " or op == "UREFC " then + out:write(format(" #%-3d", shr(op2, 8))) + else + out:write(format(" #%-3d", op2)) + end + elseif op2 < 0 then + out:write(" ", formatk(tr, op2)) + else + out:write(format(" %04d", op2)) + end + end + end + out:write("\n") + end + end + if snap then + if dumpreg then + out:write(format(".... SNAP #%-3d [ ", snapno)) + else + out:write(format(".... SNAP #%-3d [ ", snapno)) + end + printsnap(tr, snap) + end +end + +------------------------------------------------------------------------------ + +local recprefix = "" +local recdepth = 0 + +-- Format trace error message. +local function fmterr(err, info) + if type(err) == "number" then + if type(info) == "function" then info = fmtfunc(info) end + err = format(vmdef.traceerr[err], info) + end + return err +end + +-- Dump trace states. 
+local function dump_trace(what, tr, func, pc, otr, oex) + if what == "stop" or (what == "abort" and dumpmode.a) then + if dumpmode.i then dump_ir(tr, dumpmode.s, dumpmode.r and what == "stop") + elseif dumpmode.s then dump_snap(tr) end + if dumpmode.m then dump_mcode(tr) end + end + if what == "start" then + if dumpmode.H then out:write('
<pre class="ljdump">\n') end
+    out:write("---- TRACE ", tr, " ", what)
+    if otr then out:write(" ", otr, "/", oex) end
+    out:write(" ", fmtfunc(func, pc), "\n")
+  elseif what == "stop" or what == "abort" then
+    out:write("---- TRACE ", tr, " ", what)
+    if what == "abort" then
+      out:write(" ", fmtfunc(func, pc), " -- ", fmterr(otr, oex), "\n")
+    else
+      local info = traceinfo(tr)
+      local link, ltype = info.link, info.linktype
+      if link == tr or link == 0 then
+	out:write(" -> ", ltype, "\n")
+      elseif ltype == "root" then
+	out:write(" -> ", link, "\n")
+      else
+	out:write(" -> ", link, " ", ltype, "\n")
+      end
+    end
+    if dumpmode.H then out:write("
\n\n") else out:write("\n") end + else + out:write("---- TRACE ", what, "\n\n") + end + out:flush() +end + +-- Dump recorded bytecode. +local function dump_record(tr, func, pc, depth, callee) + if depth ~= recdepth then + recdepth = depth + recprefix = rep(" .", depth) + end + local line + if pc >= 0 then + line = bcline(func, pc, recprefix) + if dumpmode.H then line = gsub(line, "[<>&]", html_escape) end + else + line = "0000 "..recprefix.." FUNCC \n" + callee = func + end + if pc <= 0 then + out:write(sub(line, 1, -2), " ; ", fmtfunc(func), "\n") + else + out:write(line) + end + if pc >= 0 and band(funcbc(func, pc), 0xff) < 16 then -- ORDER BC + out:write(bcline(func, pc+1, recprefix)) -- Write JMP for cond. + end +end + +------------------------------------------------------------------------------ + +-- Dump taken trace exits. +local function dump_texit(tr, ex, ngpr, nfpr, ...) + out:write("---- TRACE ", tr, " exit ", ex, "\n") + if dumpmode.X then + local regs = {...} + if jit.arch == "x64" then + for i=1,ngpr do + out:write(format(" %016x", regs[i])) + if i % 4 == 0 then out:write("\n") end + end + else + for i=1,ngpr do + out:write(" ", tohex(regs[i])) + if i % 8 == 0 then out:write("\n") end + end + end + if jit.arch == "mips" or jit.arch == "mipsel" then + for i=1,nfpr,2 do + out:write(format(" %+17.14g", regs[ngpr+i])) + if i % 8 == 7 then out:write("\n") end + end + else + for i=1,nfpr do + out:write(format(" %+17.14g", regs[ngpr+i])) + if i % 4 == 0 then out:write("\n") end + end + end + end +end + +------------------------------------------------------------------------------ + +-- Detach dump handlers. +local function dumpoff() + if active then + active = false + jit.attach(dump_texit) + jit.attach(dump_record) + jit.attach(dump_trace) + if out and out ~= stdout and out ~= stderr then out:close() end + out = nil + end +end + +-- Open the output file and attach dump handlers. +local function dumpon(opt, outfile) + if active then dumpoff() end + + local colormode = os.getenv("COLORTERM") and "A" or "T" + if opt then + opt = gsub(opt, "[TAH]", function(mode) colormode = mode; return ""; end) + end + + local m = { t=true, b=true, i=true, m=true, } + if opt and opt ~= "" then + local o = sub(opt, 1, 1) + if o ~= "+" and o ~= "-" then m = {} end + for i=1,#opt do m[sub(opt, i, i)] = (o ~= "-") end + end + dumpmode = m + + if m.t or m.b or m.i or m.s or m.m then + jit.attach(dump_trace, "trace") + end + if m.b then + jit.attach(dump_record, "record") + if not bcline then bcline = require("jit.bc").line end + end + if m.x or m.X then + jit.attach(dump_texit, "texit") + end + + if not outfile then outfile = os.getenv("LUAJIT_DUMPFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stdout + end + + m[colormode] = true + if colormode == "A" then + colorize = colorize_ansi + irtype = irtype_ansi + elseif colormode == "H" then + colorize = colorize_html + irtype = irtype_html + out:write(header_html) + else + colorize = colorize_text + irtype = irtype_text + end + + active = true +end + +-- Public module functions. +return { + on = dumpon, + off = dumpoff, + start = dumpon -- For -j command line option. 
+} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/p.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/p.lua new file mode 100644 index 00000000000..97d4ccdf87c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/p.lua @@ -0,0 +1,310 @@ +---------------------------------------------------------------------------- +-- LuaJIT profiler. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module is a simple command line interface to the built-in +-- low-overhead profiler of LuaJIT. +-- +-- The lower-level API of the profiler is accessible via the "jit.profile" +-- module or the luaJIT_profile_* C API. +-- +-- Example usage: +-- +-- luajit -jp myapp.lua +-- luajit -jp=s myapp.lua +-- luajit -jp=-s myapp.lua +-- luajit -jp=vl myapp.lua +-- luajit -jp=G,profile.txt myapp.lua +-- +-- The following dump features are available: +-- +-- f Stack dump: function name, otherwise module:line. Default mode. +-- F Stack dump: ditto, but always prepend module. +-- l Stack dump: module:line. +-- stack dump depth (callee < caller). Default: 1. +-- - Inverse stack dump depth (caller > callee). +-- s Split stack dump after first stack level. Implies abs(depth) >= 2. +-- p Show full path for module names. +-- v Show VM states. Can be combined with stack dumps, e.g. vf or fv. +-- z Show zones. Can be combined with stack dumps, e.g. zf or fz. +-- r Show raw sample counts. Default: show percentages. +-- a Annotate excerpts from source code files. +-- A Annotate complete source code files. +-- G Produce raw output suitable for graphical tools (e.g. flame graphs). +-- m Minimum sample percentage to be shown. Default: 3. +-- i Sampling interval in milliseconds. Default: 10. +-- +---------------------------------------------------------------------------- + +-- Cache some library functions and objects. +local jit = require("jit") +assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") +local profile = require("jit.profile") +local vmdef = require("jit.vmdef") +local math = math +local pairs, ipairs, tonumber, floor = pairs, ipairs, tonumber, math.floor +local sort, format = table.sort, string.format +local stdout = io.stdout +local zone -- Load jit.zone module on demand. + +-- Output file handle. +local out + +------------------------------------------------------------------------------ + +local prof_ud +local prof_states, prof_split, prof_min, prof_raw, prof_fmt, prof_depth +local prof_ann, prof_count1, prof_count2, prof_samples + +local map_vmmode = { + N = "Compiled", + I = "Interpreted", + C = "C code", + G = "Garbage Collector", + J = "JIT Compiler", +} + +-- Profiler callback. +local function prof_cb(th, samples, vmmode) + prof_samples = prof_samples + samples + local key_stack, key_stack2, key_state + -- Collect keys for sample. + if prof_states then + if prof_states == "v" then + key_state = map_vmmode[vmmode] or vmmode + else + key_state = zone:get() or "(none)" + end + end + if prof_fmt then + key_stack = profile.dumpstack(th, prof_fmt, prof_depth) + key_stack = key_stack:gsub("%[builtin#(%d+)%]", function(x) + return vmdef.ffnames[tonumber(x)] + end) + if prof_split == 2 then + local k1, k2 = key_stack:match("(.-) [<>] (.*)") + if k2 then key_stack, key_stack2 = k1, k2 end + elseif prof_split == 3 then + key_stack2 = profile.dumpstack(th, "l", 1) + end + end + -- Order keys. 
+ local k1, k2 + if prof_split == 1 then + if key_state then + k1 = key_state + if key_stack then k2 = key_stack end + end + elseif key_stack then + k1 = key_stack + if key_stack2 then k2 = key_stack2 elseif key_state then k2 = key_state end + end + -- Coalesce samples in one or two levels. + if k1 then + local t1 = prof_count1 + t1[k1] = (t1[k1] or 0) + samples + if k2 then + local t2 = prof_count2 + local t3 = t2[k1] + if not t3 then t3 = {}; t2[k1] = t3 end + t3[k2] = (t3[k2] or 0) + samples + end + end +end + +------------------------------------------------------------------------------ + +-- Show top N list. +local function prof_top(count1, count2, samples, indent) + local t, n = {}, 0 + for k, v in pairs(count1) do + n = n + 1 + t[n] = k + end + sort(t, function(a, b) return count1[a] > count1[b] end) + for i=1,n do + local k = t[i] + local v = count1[k] + local pct = floor(v*100/samples + 0.5) + if pct < prof_min then break end + if not prof_raw then + out:write(format("%s%2d%% %s\n", indent, pct, k)) + elseif prof_raw == "r" then + out:write(format("%s%5d %s\n", indent, v, k)) + else + out:write(format("%s %d\n", k, v)) + end + if count2 then + local r = count2[k] + if r then + prof_top(r, nil, v, (prof_split == 3 or prof_split == 1) and " -- " or + (prof_depth < 0 and " -> " or " <- ")) + end + end + end +end + +-- Annotate source code +local function prof_annotate(count1, samples) + local files = {} + local ms = 0 + for k, v in pairs(count1) do + local pct = floor(v*100/samples + 0.5) + ms = math.max(ms, v) + if pct >= prof_min then + local file, line = k:match("^(.*):(%d+)$") + local fl = files[file] + if not fl then fl = {}; files[file] = fl; files[#files+1] = file end + line = tonumber(line) + fl[line] = prof_raw and v or pct + end + end + sort(files) + local fmtv, fmtn = " %3d%% | %s\n", " | %s\n" + if prof_raw then + local n = math.max(5, math.ceil(math.log10(ms))) + fmtv = "%"..n.."d | %s\n" + fmtn = (" "):rep(n).." | %s\n" + end + local ann = prof_ann + for _, file in ipairs(files) do + local f0 = file:byte() + if f0 == 40 or f0 == 91 then + out:write(format("\n====== %s ======\n[Cannot annotate non-file]\n", file)) + break + end + local fp, err = io.open(file) + if not fp then + out:write(format("====== ERROR: %s: %s\n", file, err)) + break + end + out:write(format("\n====== %s ======\n", file)) + local fl = files[file] + local n, show = 1, false + if ann ~= 0 then + for i=1,ann do + if fl[i] then show = true; out:write("@@ 1 @@\n"); break end + end + end + for line in fp:lines() do + if line:byte() == 27 then + out:write("[Cannot annotate bytecode file]\n") + break + end + local v = fl[n] + if ann ~= 0 then + local v2 = fl[n+ann] + if show then + if v2 then show = n+ann elseif v then show = n + elseif show+ann < n then show = false end + elseif v2 then + show = n+ann + out:write(format("@@ %d @@\n", n)) + end + if not show then goto next end + end + if v then + out:write(format(fmtv, v, line)) + else + out:write(format(fmtn, line)) + end + ::next:: + n = n + 1 + end + fp:close() + end +end + +------------------------------------------------------------------------------ + +-- Finish profiling and dump result. 
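prof_finish() just below stops sampling and prints the report; together with the start/stop wrappers exported at the end of this file, the profiler can also be driven from Lua code instead of the -jp option. The mode string follows the feature letters in the header comment; the output file name is an arbitrary example:

    -- Illustrative sketch only: run the profiler programmatically.
    local prof = require("jit.p")
    prof.start("vf", "profile.txt")   -- VM states plus function-level stacks
    -- ... workload to be profiled ...
    prof.stop()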
+local function prof_finish() + if prof_ud then + profile.stop() + local samples = prof_samples + if samples == 0 then + if prof_raw ~= true then out:write("[No samples collected]\n") end + return + end + if prof_ann then + prof_annotate(prof_count1, samples) + else + prof_top(prof_count1, prof_count2, samples, "") + end + prof_count1 = nil + prof_count2 = nil + prof_ud = nil + end +end + +-- Start profiling. +local function prof_start(mode) + local interval = "" + mode = mode:gsub("i%d*", function(s) interval = s; return "" end) + prof_min = 3 + mode = mode:gsub("m(%d+)", function(s) prof_min = tonumber(s); return "" end) + prof_depth = 1 + mode = mode:gsub("%-?%d+", function(s) prof_depth = tonumber(s); return "" end) + local m = {} + for c in mode:gmatch(".") do m[c] = c end + prof_states = m.z or m.v + if prof_states == "z" then zone = require("jit.zone") end + local scope = m.l or m.f or m.F or (prof_states and "" or "f") + local flags = (m.p or "") + prof_raw = m.r + if m.s then + prof_split = 2 + if prof_depth == -1 or m["-"] then prof_depth = -2 + elseif prof_depth == 1 then prof_depth = 2 end + elseif mode:find("[fF].*l") then + scope = "l" + prof_split = 3 + else + prof_split = (scope == "" or mode:find("[zv].*[lfF]")) and 1 or 0 + end + prof_ann = m.A and 0 or (m.a and 3) + if prof_ann then + scope = "l" + prof_fmt = "pl" + prof_split = 0 + prof_depth = 1 + elseif m.G and scope ~= "" then + prof_fmt = flags..scope.."Z;" + prof_depth = -100 + prof_raw = true + prof_min = 0 + elseif scope == "" then + prof_fmt = false + else + local sc = prof_split == 3 and m.f or m.F or scope + prof_fmt = flags..sc..(prof_depth >= 0 and "Z < " or "Z > ") + end + prof_count1 = {} + prof_count2 = {} + prof_samples = 0 + profile.start(scope:lower()..interval, prof_cb) + prof_ud = newproxy(true) + getmetatable(prof_ud).__gc = prof_finish +end + +------------------------------------------------------------------------------ + +local function start(mode, outfile) + if not outfile then outfile = os.getenv("LUAJIT_PROFILEFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stdout + end + prof_start(mode or "f") +end + +-- Public module functions. +return { + start = start, -- For -j command line option. + stop = prof_finish +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/v.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/v.lua new file mode 100644 index 00000000000..157c34bc643 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/v.lua @@ -0,0 +1,170 @@ +---------------------------------------------------------------------------- +-- Verbose mode of the LuaJIT compiler. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module shows verbose information about the progress of the +-- JIT compiler. It prints one line for each generated trace. This module +-- is useful to see which code has been compiled or where the compiler +-- punts and falls back to the interpreter. +-- +-- Example usage: +-- +-- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end" +-- luajit -jv=myapp.out myapp.lua +-- +-- Default output is to stderr. To redirect the output to a file, pass a +-- filename as an argument (use '-' for stdout) or set the environment +-- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the +-- module is started. 
+-- +-- The output from the first example should look like this: +-- +-- [TRACE 1 (command line):1 loop] +-- [TRACE 2 (1/3) (command line):1 -> 1] +-- +-- The first number in each line is the internal trace number. Next are +-- the file name ('(command line)') and the line number (':1') where the +-- trace has started. Side traces also show the parent trace number and +-- the exit number where they are attached to in parentheses ('(1/3)'). +-- An arrow at the end shows where the trace links to ('-> 1'), unless +-- it loops to itself. +-- +-- In this case the inner loop gets hot and is traced first, generating +-- a root trace. Then the last exit from the 1st trace gets hot, too, +-- and triggers generation of the 2nd trace. The side trace follows the +-- path along the outer loop and *around* the inner loop, back to its +-- start, and then links to the 1st trace. Yes, this may seem unusual, +-- if you know how traditional compilers work. Trace compilers are full +-- of surprises like this -- have fun! :-) +-- +-- Aborted traces are shown like this: +-- +-- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo:lua:50] +-- +-- Don't worry -- trace aborts are quite common, even in programs which +-- can be fully compiled. The compiler may retry several times until it +-- finds a suitable trace. +-- +-- Of course this doesn't work with features that are not-yet-implemented +-- (NYI error messages). The VM simply falls back to the interpreter. This +-- may not matter at all if the particular trace is not very high up in +-- the CPU usage profile. Oh, and the interpreter is quite fast, too. +-- +-- Also check out the -jdump module, which prints all the gory details. +-- +------------------------------------------------------------------------------ + +-- Cache some library functions and objects. +local jit = require("jit") +assert(jit.version_num == 20100, "LuaJIT core/library version mismatch") +local jutil = require("jit.util") +local vmdef = require("jit.vmdef") +local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo +local type, format = type, string.format +local stdout, stderr = io.stdout, io.stderr + +-- Active flag and output file handle. +local active, out + +------------------------------------------------------------------------------ + +local startloc, startex + +local function fmtfunc(func, pc) + local fi = funcinfo(func, pc) + if fi.loc then + return fi.loc + elseif fi.ffid then + return vmdef.ffnames[fi.ffid] + elseif fi.addr then + return format("C:%x", fi.addr) + else + return "(?)" + end +end + +-- Format trace error message. +local function fmterr(err, info) + if type(err) == "number" then + if type(info) == "function" then info = fmtfunc(info) end + err = format(vmdef.traceerr[err], info) + end + return err +end + +-- Dump trace states. 
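dump_trace() below emits the [TRACE ...] lines described in the header comment; as with the other modules in this patch, verbose mode can also be switched on from Lua code through the exported on()/off() functions:

    -- Illustrative sketch only: enable verbose trace reporting from Lua code.
    local verbose = require("jit.v")
    verbose.on("-")   -- '-' selects stdout; with no argument output goes to stderr
    -- ... run the code of interest ...
    verbose.off()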
+local function dump_trace(what, tr, func, pc, otr, oex) + if what == "start" then + startloc = fmtfunc(func, pc) + startex = otr and "("..otr.."/"..oex..") " or "" + else + if what == "abort" then + local loc = fmtfunc(func, pc) + if loc ~= startloc then + out:write(format("[TRACE --- %s%s -- %s at %s]\n", + startex, startloc, fmterr(otr, oex), loc)) + else + out:write(format("[TRACE --- %s%s -- %s]\n", + startex, startloc, fmterr(otr, oex))) + end + elseif what == "stop" then + local info = traceinfo(tr) + local link, ltype = info.link, info.linktype + if ltype == "interpreter" then + out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n", + tr, startex, startloc)) + elseif ltype == "stitch" then + out:write(format("[TRACE %3s %s%s %s %s]\n", + tr, startex, startloc, ltype, fmtfunc(func, pc))) + elseif link == tr or link == 0 then + out:write(format("[TRACE %3s %s%s %s]\n", + tr, startex, startloc, ltype)) + elseif ltype == "root" then + out:write(format("[TRACE %3s %s%s -> %d]\n", + tr, startex, startloc, link)) + else + out:write(format("[TRACE %3s %s%s -> %d %s]\n", + tr, startex, startloc, link, ltype)) + end + else + out:write(format("[TRACE %s]\n", what)) + end + out:flush() + end +end + +------------------------------------------------------------------------------ + +-- Detach dump handlers. +local function dumpoff() + if active then + active = false + jit.attach(dump_trace) + if out and out ~= stdout and out ~= stderr then out:close() end + out = nil + end +end + +-- Open the output file and attach dump handlers. +local function dumpon(outfile) + if active then dumpoff() end + if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stderr + end + jit.attach(dump_trace, "trace") + active = true +end + +-- Public module functions. +return { + on = dumpon, + off = dumpoff, + start = dumpon -- For -j command line option. +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/jit/zone.lua b/Build/source/libs/luajit/LuaJIT-src/src/jit/zone.lua new file mode 100644 index 00000000000..69f0f169147 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/jit/zone.lua @@ -0,0 +1,45 @@ +---------------------------------------------------------------------------- +-- LuaJIT profiler zones. +-- +-- Copyright (C) 2005-2015 Mike Pall. All rights reserved. +-- Released under the MIT license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module implements a simple hierarchical zone model. +-- +-- Example usage: +-- +-- local zone = require("jit.zone") +-- zone("AI") +-- ... +-- zone("A*") +-- ... +-- print(zone:get()) --> "A*" +-- ... +-- zone() +-- ... +-- print(zone:get()) --> "AI" +-- ... 
+-- zone() +-- +---------------------------------------------------------------------------- + +local remove = table.remove + +return setmetatable({ + flush = function(t) + for i=#t,1,-1 do t[i] = nil end + end, + get = function(t) + return t[#t] + end +}, { + __call = function(t, zone) + if zone then + t[#t+1] = zone + else + return (assert(remove(t), "empty zone stack")) + end + end +}) + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lauxlib.h b/Build/source/libs/luajit/LuaJIT-src/src/lauxlib.h new file mode 100644 index 00000000000..b6ed1a7d04a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lauxlib.h @@ -0,0 +1,193 @@ +/* +** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lauxlib_h +#define lauxlib_h + + +#include +#include + +#include "lua.h" + + +#define luaL_getn(L,i) ((int)lua_objlen(L, i)) +#define luaL_setn(L,i,j) ((void)0) /* no op! */ + +/* extra error code for `luaL_load' */ +#define LUA_ERRFILE (LUA_ERRERR+1) + +typedef struct luaL_Reg { + const char *name; + lua_CFunction func; +} luaL_Reg; + +LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname, + const luaL_Reg *l, int nup); +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l); +LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname); +LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg); +LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg, + size_t *l); +LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg, + const char *def, size_t *l); +LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg); +LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def); + +LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg); +LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg, + lua_Integer def); + +LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg); +LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t); +LUALIB_API void (luaL_checkany) (lua_State *L, int narg); + +LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname); +LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname); + +LUALIB_API void (luaL_where) (lua_State *L, int lvl); +LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...); + +LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, + const char *const lst[]); + +LUALIB_API int (luaL_ref) (lua_State *L, int t); +LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); + +LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename); +LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz, + const char *name); +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s); + +LUALIB_API lua_State *(luaL_newstate) (void); + + +LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p, + const char *r); + +LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx, + const char *fname, int szhint); + +/* From Lua 5.2. 
*/ +LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname); +LUALIB_API int luaL_execresult(lua_State *L, int stat); +LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename, + const char *mode); +LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz, + const char *name, const char *mode); +LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg, + int level); + + + +/* +** {====================================================== +** File handles for IO library +** ======================================================= +*/ + +/* +** A file handle is a userdata with metatable 'LUA_FILEHANDLE' and +** initial structure 'luaL_Stream' (it may contain other fields +** after that initial structure). +*/ + +#define LUA_FILEHANDLE "FILE*" + + +typedef struct luaL_Stream { + FILE *f; /* stream (NULL for incompletely created streams) */ + lua_CFunction closef; /* to close stream (NULL for closed streams) */ +} luaL_Stream; + +/* }====================================================== */ + + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define luaL_argcheck(L, cond,numarg,extramsg) \ + ((void)((cond) || luaL_argerror(L, (numarg), (extramsg)))) +#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL)) +#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL)) +#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n))) +#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d))) +#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n))) +#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d))) + +#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i))) + +#define luaL_dofile(L, fn) \ + (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_dostring(L, s) \ + (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n))) + +#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n))) + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + + +typedef struct luaL_Buffer { + char *p; /* current position in buffer */ + int lvl; /* number of strings in the stack (level) */ + lua_State *L; + char buffer[LUAL_BUFFERSIZE]; +} luaL_Buffer; + +#define luaL_addchar(B,c) \ + ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \ + (*(B)->p++ = (char)(c))) + +/* compatibility only */ +#define luaL_putchar(B,c) luaL_addchar(B,c) + +#define luaL_addsize(B,n) ((B)->p += (n)) + +LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B); +LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B); +LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l); +LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s); +LUALIB_API void (luaL_addvalue) (luaL_Buffer *B); +LUALIB_API void (luaL_pushresult) (luaL_Buffer *B); + + +/* }====================================================== */ + + +/* compatibility with ref system */ + +/* pre-defined references */ +#define LUA_NOREF (-2) +#define LUA_REFNIL (-1) + +#define lua_ref(L,lock) ((lock) ? 
luaL_ref(L, LUA_REGISTRYINDEX) : \ + (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0)) + +#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref)) + +#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref)) + + +#define luaL_reg luaL_Reg + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_aux.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_aux.c new file mode 100644 index 00000000000..4a1b70ddc09 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_aux.c @@ -0,0 +1,356 @@ +/* +** Auxiliary library for the Lua/C API. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major parts taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include +#include +#include + +#define lib_aux_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_state.h" +#include "lj_trace.h" +#include "lj_lib.h" + +#if LJ_TARGET_POSIX +#include +#endif + +/* -- I/O error handling -------------------------------------------------- */ + +LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname) +{ + if (stat) { + setboolV(L->top++, 1); + return 1; + } else { + int en = errno; /* Lua API calls may change this value. */ + setnilV(L->top++); + if (fname) + lua_pushfstring(L, "%s: %s", fname, strerror(en)); + else + lua_pushfstring(L, "%s", strerror(en)); + setintV(L->top++, en); + lj_trace_abort(G(L)); + return 3; + } +} + +LUALIB_API int luaL_execresult(lua_State *L, int stat) +{ + if (stat != -1) { +#if LJ_TARGET_POSIX + if (WIFSIGNALED(stat)) { + stat = WTERMSIG(stat); + setnilV(L->top++); + lua_pushliteral(L, "signal"); + } else { + if (WIFEXITED(stat)) + stat = WEXITSTATUS(stat); + if (stat == 0) + setboolV(L->top++, 1); + else + setnilV(L->top++); + lua_pushliteral(L, "exit"); + } +#else + if (stat == 0) + setboolV(L->top++, 1); + else + setnilV(L->top++); + lua_pushliteral(L, "exit"); +#endif + setintV(L->top++, stat); + return 3; + } + return luaL_fileresult(L, 0, NULL); +} + +/* -- Module registration ------------------------------------------------- */ + +LUALIB_API const char *luaL_findtable(lua_State *L, int idx, + const char *fname, int szhint) +{ + const char *e; + lua_pushvalue(L, idx); + do { + e = strchr(fname, '.'); + if (e == NULL) e = fname + strlen(fname); + lua_pushlstring(L, fname, (size_t)(e - fname)); + lua_rawget(L, -2); + if (lua_isnil(L, -1)) { /* no such field? */ + lua_pop(L, 1); /* remove this nil */ + lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */ + lua_pushlstring(L, fname, (size_t)(e - fname)); + lua_pushvalue(L, -2); + lua_settable(L, -4); /* set new table into field */ + } else if (!lua_istable(L, -1)) { /* field has a non-table value? 
*/ + lua_pop(L, 2); /* remove table and value */ + return fname; /* return problematic part of the name */ + } + lua_remove(L, -2); /* remove previous table */ + fname = e + 1; + } while (*e == '.'); + return NULL; +} + +static int libsize(const luaL_Reg *l) +{ + int size = 0; + for (; l->name; l++) size++; + return size; +} + +LUALIB_API void luaL_openlib(lua_State *L, const char *libname, + const luaL_Reg *l, int nup) +{ + lj_lib_checkfpu(L); + if (libname) { + int size = libsize(l); + /* check whether lib already exists */ + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); + lua_getfield(L, -1, libname); /* get _LOADED[libname] */ + if (!lua_istable(L, -1)) { /* not found? */ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL) + lj_err_callerv(L, LJ_ERR_BADMODN, libname); + lua_pushvalue(L, -1); + lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ + } + lua_remove(L, -2); /* remove _LOADED table */ + lua_insert(L, -(nup+1)); /* move library table to below upvalues */ + } + for (; l->name; l++) { + int i; + for (i = 0; i < nup; i++) /* copy upvalues to the top */ + lua_pushvalue(L, -nup); + lua_pushcclosure(L, l->func, nup); + lua_setfield(L, -(nup+2), l->name); + } + lua_pop(L, nup); /* remove upvalues */ +} + +LUALIB_API void luaL_register(lua_State *L, const char *libname, + const luaL_Reg *l) +{ + luaL_openlib(L, libname, l, 0); +} + +LUALIB_API const char *luaL_gsub(lua_State *L, const char *s, + const char *p, const char *r) +{ + const char *wild; + size_t l = strlen(p); + luaL_Buffer b; + luaL_buffinit(L, &b); + while ((wild = strstr(s, p)) != NULL) { + luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */ + luaL_addstring(&b, r); /* push replacement in place of pattern */ + s = wild + l; /* continue after `p' */ + } + luaL_addstring(&b, s); /* push last suffix */ + luaL_pushresult(&b); + return lua_tostring(L, -1); +} + +/* -- Buffer handling ----------------------------------------------------- */ + +#define bufflen(B) ((size_t)((B)->p - (B)->buffer)) +#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B))) + +static int emptybuffer(luaL_Buffer *B) +{ + size_t l = bufflen(B); + if (l == 0) + return 0; /* put nothing on stack */ + lua_pushlstring(B->L, B->buffer, l); + B->p = B->buffer; + B->lvl++; + return 1; +} + +static void adjuststack(luaL_Buffer *B) +{ + if (B->lvl > 1) { + lua_State *L = B->L; + int toget = 1; /* number of levels to concat */ + size_t toplen = lua_strlen(L, -1); + do { + size_t l = lua_strlen(L, -(toget+1)); + if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l)) + break; + toplen += l; + toget++; + } while (toget < B->lvl); + lua_concat(L, toget); + B->lvl = B->lvl - toget + 1; + } +} + +LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B) +{ + if (emptybuffer(B)) + adjuststack(B); + return B->buffer; +} + +LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l) +{ + while (l--) + luaL_addchar(B, *s++); +} + +LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s) +{ + luaL_addlstring(B, s, strlen(s)); +} + +LUALIB_API void luaL_pushresult(luaL_Buffer *B) +{ + emptybuffer(B); + lua_concat(B->L, B->lvl); + B->lvl = 1; +} + +LUALIB_API void luaL_addvalue(luaL_Buffer *B) +{ + lua_State *L = B->L; + size_t vl; + const char *s = lua_tolstring(L, -1, &vl); + if (vl <= bufffree(B)) { /* fit into buffer? 
*/ + memcpy(B->p, s, vl); /* put it there */ + B->p += vl; + lua_pop(L, 1); /* remove from stack */ + } else { + if (emptybuffer(B)) + lua_insert(L, -2); /* put buffer before new value */ + B->lvl++; /* add new value into B stack */ + adjuststack(B); + } +} + +LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B) +{ + B->L = L; + B->p = B->buffer; + B->lvl = 0; +} + +/* -- Reference management ------------------------------------------------ */ + +#define FREELIST_REF 0 + +/* Convert a stack index to an absolute index. */ +#define abs_index(L, i) \ + ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1) + +LUALIB_API int luaL_ref(lua_State *L, int t) +{ + int ref; + t = abs_index(L, t); + if (lua_isnil(L, -1)) { + lua_pop(L, 1); /* remove from stack */ + return LUA_REFNIL; /* `nil' has a unique fixed reference */ + } + lua_rawgeti(L, t, FREELIST_REF); /* get first free element */ + ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */ + lua_pop(L, 1); /* remove it from stack */ + if (ref != 0) { /* any free element? */ + lua_rawgeti(L, t, ref); /* remove it from list */ + lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */ + } else { /* no free elements */ + ref = (int)lua_objlen(L, t); + ref++; /* create new reference */ + } + lua_rawseti(L, t, ref); + return ref; +} + +LUALIB_API void luaL_unref(lua_State *L, int t, int ref) +{ + if (ref >= 0) { + t = abs_index(L, t); + lua_rawgeti(L, t, FREELIST_REF); + lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */ + lua_pushinteger(L, ref); + lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */ + } +} + +/* -- Default allocator and panic function -------------------------------- */ + +static int panic(lua_State *L) +{ + const char *s = lua_tostring(L, -1); + fputs("PANIC: unprotected error in call to Lua API (", stderr); + fputs(s ? s : "?", stderr); + fputc(')', stderr); fputc('\n', stderr); + fflush(stderr); + return 0; +} + +#ifdef LUAJIT_USE_SYSMALLOC + +#if LJ_64 && !defined(LUAJIT_USE_VALGRIND) +#error "Must use builtin allocator for 64 bit target" +#endif + +static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize) +{ + (void)ud; + (void)osize; + if (nsize == 0) { + free(ptr); + return NULL; + } else { + return realloc(ptr, nsize); + } +} + +LUALIB_API lua_State *luaL_newstate(void) +{ + lua_State *L = lua_newstate(mem_alloc, NULL); + if (L) G(L)->panic = panic; + return L; +} + +#else + +#include "lj_alloc.h" + +LUALIB_API lua_State *luaL_newstate(void) +{ + lua_State *L; + void *ud = lj_alloc_create(); + if (ud == NULL) return NULL; +#if LJ_64 + L = lj_state_newstate(lj_alloc_f, ud); +#else + L = lua_newstate(lj_alloc_f, ud); +#endif + if (L) G(L)->panic = panic; + return L; +} + +#if LJ_64 +LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) +{ + UNUSED(f); UNUSED(ud); + fputs("Must use luaL_newstate() for 64 bit target\n", stderr); + return NULL; +} +#endif + +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_base.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_base.c new file mode 100644 index 00000000000..887fea7a58a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_base.c @@ -0,0 +1,664 @@ +/* +** Base and coroutine library. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2011 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lib_base_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#endif
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_dispatch.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#include "lj_lib.h"
+
+/* -- Base library: checks ------------------------------------------------ */
+
+#define LJLIB_MODULE_base
+
+LJLIB_ASM(assert) LJLIB_REC(.)
+{
+ GCstr *s;
+ lj_lib_checkany(L, 1);
+ s = lj_lib_optstr(L, 2);
+ if (s)
+ lj_err_callermsg(L, strdata(s));
+ else
+ lj_err_caller(L, LJ_ERR_ASSERT);
+ return FFH_UNREACHABLE;
+}
+
+/* ORDER LJ_T */
+LJLIB_PUSH("nil")
+LJLIB_PUSH("boolean")
+LJLIB_PUSH(top-1) /* boolean */
+LJLIB_PUSH("userdata")
+LJLIB_PUSH("string")
+LJLIB_PUSH("upval")
+LJLIB_PUSH("thread")
+LJLIB_PUSH("proto")
+LJLIB_PUSH("function")
+LJLIB_PUSH("trace")
+LJLIB_PUSH("cdata")
+LJLIB_PUSH("table")
+LJLIB_PUSH(top-9) /* userdata */
+LJLIB_PUSH("number")
+LJLIB_ASM_(type) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+/* -- Base library: iterators --------------------------------------------- */
+
+/* This solves a circular dependency problem -- change FF_next_N as needed. */
+LJ_STATIC_ASSERT((int)FF_next == FF_next_N);
+
+LJLIB_ASM(next)
+{
+ lj_lib_checktab(L, 1);
+ return FFH_UNREACHABLE;
+}
+
+#if LJ_52 || LJ_HASFFI
+static int ffh_pairs(lua_State *L, MMS mm)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo = lj_meta_lookup(L, o, mm);
+ if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) {
+ L->top = o+1; /* Only keep one argument. */
+ copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ } else {
+ if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE);
+ if (LJ_FR2) { copyTV(L, o-1, o); o--; }
+ setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1)));
+ if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0);
+ return FFH_RES(3);
+ }
+}
+#else
+#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE)
+#endif
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(pairs) LJLIB_REC(xpairs 0)
+{
+ return ffh_pairs(L, MM_pairs);
+}
+
+LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkint(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(ipairs) LJLIB_REC(xpairs 1)
+{
+ return ffh_pairs(L, MM_ipairs);
+}
+
+/* -- Base library: getters and setters ----------------------------------- */
+
+LJLIB_ASM_(getmetatable) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+LJLIB_ASM(setmetatable) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCtab *mt = lj_lib_checktabornil(L, 2);
+ if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable)))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ setgcref(t->metatable, obj2gco(mt));
+ if (mt) { lj_gc_objbarriert(L, t, mt); }
+ settabV(L, L->base-1-LJ_FR2, t);
+ return FFH_RES(1);
+}
+
+LJLIB_CF(getfenv) LJLIB_REC(.)
+{
+ GCfunc *fn;
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_optint(L, 1, 1);
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ if (LJ_FR2) o--;
+ }
+ fn = &gcval(o)->fn;
+ settabV(L, L->top++, isluafunc(fn) ?
tabref(fn->l.env) : tabref(L->env)); + return 1; +} + +LJLIB_CF(setfenv) +{ + GCfunc *fn; + GCtab *t = lj_lib_checktab(L, 2); + cTValue *o = L->base; + if (!(o < L->top && tvisfunc(o))) { + int level = lj_lib_checkint(L, 1); + if (level == 0) { + /* NOBARRIER: A thread (i.e. L) is never black. */ + setgcref(L->env, obj2gco(t)); + return 0; + } + o = lj_debug_frame(L, level, &level); + if (o == NULL) + lj_err_arg(L, 1, LJ_ERR_INVLVL); + if (LJ_FR2) o--; + } + fn = &gcval(o)->fn; + if (!isluafunc(fn)) + lj_err_caller(L, LJ_ERR_SETFENV); + setgcref(fn->l.env, obj2gco(t)); + lj_gc_objbarrier(L, obj2gco(fn), t); + setfuncV(L, L->top++, fn); + return 1; +} + +LJLIB_ASM(rawget) LJLIB_REC(.) +{ + lj_lib_checktab(L, 1); + lj_lib_checkany(L, 2); + return FFH_UNREACHABLE; +} + +LJLIB_CF(rawset) LJLIB_REC(.) +{ + lj_lib_checktab(L, 1); + lj_lib_checkany(L, 2); + L->top = 1+lj_lib_checkany(L, 3); + lua_rawset(L, 1); + return 1; +} + +LJLIB_CF(rawequal) LJLIB_REC(.) +{ + cTValue *o1 = lj_lib_checkany(L, 1); + cTValue *o2 = lj_lib_checkany(L, 2); + setboolV(L->top-1, lj_obj_equal(o1, o2)); + return 1; +} + +#if LJ_52 +LJLIB_CF(rawlen) LJLIB_REC(.) +{ + cTValue *o = L->base; + int32_t len; + if (L->top > o && tvisstr(o)) + len = (int32_t)strV(o)->len; + else + len = (int32_t)lj_tab_len(lj_lib_checktab(L, 1)); + setintV(L->top-1, len); + return 1; +} +#endif + +LJLIB_CF(unpack) +{ + GCtab *t = lj_lib_checktab(L, 1); + int32_t n, i = lj_lib_optint(L, 2, 1); + int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ? + lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t); + if (i > e) return 0; + n = e - i + 1; + if (n <= 0 || !lua_checkstack(L, n)) + lj_err_caller(L, LJ_ERR_UNPACK); + do { + cTValue *tv = lj_tab_getint(t, i); + if (tv) { + copyTV(L, L->top++, tv); + } else { + setnilV(L->top++); + } + } while (i++ < e); + return n; +} + +LJLIB_CF(select) LJLIB_REC(.) +{ + int32_t n = (int32_t)(L->top - L->base); + if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') { + setintV(L->top-1, n-1); + return 1; + } else { + int32_t i = lj_lib_checkint(L, 1); + if (i < 0) i = n + i; else if (i > n) i = n; + if (i < 1) + lj_err_arg(L, 1, LJ_ERR_IDXRNG); + return n - i; + } +} + +/* -- Base library: conversions ------------------------------------------- */ + +LJLIB_ASM(tonumber) LJLIB_REC(.) 
+{ + int32_t base = lj_lib_optint(L, 2, 10); + if (base == 10) { + TValue *o = lj_lib_checkany(L, 1); + if (lj_strscan_numberobj(o)) { + copyTV(L, L->base-1-LJ_FR2, o); + return FFH_RES(1); + } +#if LJ_HASFFI + if (tviscdata(o)) { + CTState *cts = ctype_cts(L); + CType *ct = lj_ctype_rawref(cts, cdataV(o)->ctypeid); + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { + if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) && + ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) { + int32_t i; + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0); + setintV(L->base-1-LJ_FR2, i); + return FFH_RES(1); + } + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE), + (uint8_t *)&(L->base-1-LJ_FR2)->n, o, 0); + return FFH_RES(1); + } + } +#endif + } else { + const char *p = strdata(lj_lib_checkstr(L, 1)); + char *ep; + unsigned long ul; + if (base < 2 || base > 36) + lj_err_arg(L, 2, LJ_ERR_BASERNG); + ul = strtoul(p, &ep, base); + if (p != ep) { + while (lj_char_isspace((unsigned char)(*ep))) ep++; + if (*ep == '\0') { + if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u)) + setintV(L->base-1-LJ_FR2, (int32_t)ul); + else + setnumV(L->base-1-LJ_FR2, (lua_Number)ul); + return FFH_RES(1); + } + } + } + setnilV(L->base-1-LJ_FR2); + return FFH_RES(1); +} + +LJLIB_ASM(tostring) LJLIB_REC(.) +{ + TValue *o = lj_lib_checkany(L, 1); + cTValue *mo; + L->top = o+1; /* Only keep one argument. */ + if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { + copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */ + return FFH_TAILCALL; + } + lj_gc_check(L); + setstrV(L, L->base-1-LJ_FR2, lj_strfmt_obj(L, L->base)); + return FFH_RES(1); +} + +/* -- Base library: throw and catch errors -------------------------------- */ + +LJLIB_CF(error) +{ + int32_t level = lj_lib_optint(L, 2, 1); + lua_settop(L, 1); + if (lua_isstring(L, 1) && level > 0) { + luaL_where(L, level); + lua_pushvalue(L, 1); + lua_concat(L, 2); + } + return lua_error(L); +} + +LJLIB_ASM(pcall) LJLIB_REC(.) +{ + lj_lib_checkany(L, 1); + lj_lib_checkfunc(L, 2); /* For xpcall only. */ + return FFH_UNREACHABLE; +} +LJLIB_ASM_(xpcall) LJLIB_REC(.) + +/* -- Base library: load Lua code ----------------------------------------- */ + +static int load_aux(lua_State *L, int status, int envarg) +{ + if (status == 0) { + if (tvistab(L->base+envarg-1)) { + GCfunc *fn = funcV(L->top-1); + GCtab *t = tabV(L->base+envarg-1); + setgcref(fn->c.env, obj2gco(t)); + lj_gc_objbarrier(L, fn, t); + } + return 1; + } else { + setnilV(L->top-2); + return 2; + } +} + +LJLIB_CF(loadfile) +{ + GCstr *fname = lj_lib_optstr(L, 1); + GCstr *mode = lj_lib_optstr(L, 2); + int status; + lua_settop(L, 3); /* Ensure env arg exists. */ + status = luaL_loadfilex(L, fname ? strdata(fname) : NULL, + mode ? strdata(mode) : NULL); + return load_aux(L, status, 3); +} + +static const char *reader_func(lua_State *L, void *ud, size_t *size) +{ + UNUSED(ud); + luaL_checkstack(L, 2, "too many nested functions"); + copyTV(L, L->top++, L->base); + lua_call(L, 0, 1); /* Call user-supplied function. */ + L->top--; + if (tvisnil(L->top)) { + *size = 0; + return NULL; + } else if (tvisstr(L->top) || tvisnumber(L->top)) { + copyTV(L, L->base+4, L->top); /* Anchor string in reserved stack slot. 
*/ + return lua_tolstring(L, 5, size); + } else { + lj_err_caller(L, LJ_ERR_RDRSTR); + return NULL; + } +} + +LJLIB_CF(load) +{ + GCstr *name = lj_lib_optstr(L, 2); + GCstr *mode = lj_lib_optstr(L, 3); + int status; + if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base))) { + GCstr *s = lj_lib_checkstr(L, 1); + lua_settop(L, 4); /* Ensure env arg exists. */ + status = luaL_loadbufferx(L, strdata(s), s->len, strdata(name ? name : s), + mode ? strdata(mode) : NULL); + } else { + lj_lib_checkfunc(L, 1); + lua_settop(L, 5); /* Reserve a slot for the string from the reader. */ + status = lua_loadx(L, reader_func, NULL, name ? strdata(name) : "=(load)", + mode ? strdata(mode) : NULL); + } + return load_aux(L, status, 4); +} + +LJLIB_CF(loadstring) +{ + return lj_cf_load(L); +} + +LJLIB_CF(dofile) +{ + GCstr *fname = lj_lib_optstr(L, 1); + setnilV(L->top); + L->top = L->base+1; + if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0) + lua_error(L); + lua_call(L, 0, LUA_MULTRET); + return (int)(L->top - L->base) - 1; +} + +/* -- Base library: GC control -------------------------------------------- */ + +LJLIB_CF(gcinfo) +{ + setintV(L->top++, (int32_t)(G(L)->gc.total >> 10)); + return 1; +} + +LJLIB_CF(collectgarbage) +{ + int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */ + "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul"); + int32_t data = lj_lib_optint(L, 2, 0); + if (opt == LUA_GCCOUNT) { + setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0); + } else { + int res = lua_gc(L, opt, data); + if (opt == LUA_GCSTEP) + setboolV(L->top, res); + else + setintV(L->top, res); + } + L->top++; + return 1; +} + +/* -- Base library: miscellaneous functions ------------------------------- */ + +LJLIB_PUSH(top-2) /* Upvalue holds weak table. */ +LJLIB_CF(newproxy) +{ + lua_settop(L, 1); + lua_newuserdata(L, 0); + if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */ + return 1; + } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */ + lua_newtable(L); + lua_pushvalue(L, -1); + lua_pushboolean(L, 1); + lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */ + } else { /* newproxy(proxy): inherit metatable. 
*/ + int validproxy = 0; + if (lua_getmetatable(L, 1)) { + lua_rawget(L, lua_upvalueindex(1)); + validproxy = lua_toboolean(L, -1); + lua_pop(L, 1); + } + if (!validproxy) + lj_err_arg(L, 1, LJ_ERR_NOPROXY); + lua_getmetatable(L, 1); + } + lua_setmetatable(L, 2); + return 1; +} + +LJLIB_PUSH("tostring") +LJLIB_CF(print) +{ + ptrdiff_t i, nargs = L->top - L->base; + cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1))); + int shortcut; + if (tv && !tvisnil(tv)) { + copyTV(L, L->top++, tv); + } else { + setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1))); + lua_gettable(L, LUA_GLOBALSINDEX); + tv = L->top-1; + } + shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring); + for (i = 0; i < nargs; i++) { + cTValue *o = &L->base[i]; + char buf[STRFMT_MAXBUF_NUM]; + const char *str; + size_t size; + MSize len; + if (shortcut && (str = lj_strfmt_wstrnum(buf, o, &len)) != NULL) { + size = len; + } else { + copyTV(L, L->top+1, o); + copyTV(L, L->top, L->top-1); + L->top += 2; + lua_call(L, 1, 1); + str = lua_tolstring(L, -1, &size); + if (!str) + lj_err_caller(L, LJ_ERR_PRTOSTR); + L->top--; + } + if (i) + putchar('\t'); + fwrite(str, 1, size, stdout); + } + putchar('\n'); + return 0; +} + +LJLIB_PUSH(top-3) +LJLIB_SET(_VERSION) + +#include "lj_libdef.h" + +/* -- Coroutine library --------------------------------------------------- */ + +#define LJLIB_MODULE_coroutine + +LJLIB_CF(coroutine_status) +{ + const char *s; + lua_State *co; + if (!(L->top > L->base && tvisthread(L->base))) + lj_err_arg(L, 1, LJ_ERR_NOCORO); + co = threadV(L->base); + if (co == L) s = "running"; + else if (co->status == LUA_YIELD) s = "suspended"; + else if (co->status != 0) s = "dead"; + else if (co->base > tvref(co->stack)+1+LJ_FR2) s = "normal"; + else if (co->top == co->base) s = "dead"; + else s = "suspended"; + lua_pushstring(L, s); + return 1; +} + +LJLIB_CF(coroutine_running) +{ +#if LJ_52 + int ismain = lua_pushthread(L); + setboolV(L->top++, ismain); + return 2; +#else + if (lua_pushthread(L)) + setnilV(L->top++); + return 1; +#endif +} + +LJLIB_CF(coroutine_create) +{ + lua_State *L1; + if (!(L->base < L->top && tvisfunc(L->base))) + lj_err_argt(L, 1, LUA_TFUNCTION); + L1 = lua_newthread(L); + setfuncV(L, L1->top++, funcV(L->base)); + return 1; +} + +LJLIB_ASM(coroutine_yield) +{ + lj_err_caller(L, LJ_ERR_CYIELD); + return FFH_UNREACHABLE; +} + +static int ffh_resume(lua_State *L, lua_State *co, int wrap) +{ + if (co->cframe != NULL || co->status > LUA_YIELD || + (co->status == 0 && co->top == co->base)) { + ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD; + if (wrap) lj_err_caller(L, em); + setboolV(L->base-1-LJ_FR2, 0); + setstrV(L, L->base-LJ_FR2, lj_err_str(L, em)); + return FFH_RES(2); + } + lj_state_growstack(co, (MSize)(L->top - L->base)); + return FFH_RETRY; +} + +LJLIB_ASM(coroutine_resume) +{ + if (!(L->top > L->base && tvisthread(L->base))) + lj_err_arg(L, 1, LJ_ERR_NOCORO); + return ffh_resume(L, threadV(L->base), 0); +} + +LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux) +{ + return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1); +} + +/* Inline declarations. */ +LJ_ASMF void lj_ff_coroutine_wrap_aux(void); +#if !(LJ_TARGET_MIPS && defined(ljamalg_c)) +LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, + lua_State *co); +#endif + +/* Error handler, called from assembler VM. 
*/
+void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co)
+{
+ co->top--; copyTV(L, L->top, co->top); L->top++;
+ if (tvisstr(L->top-1))
+ lj_err_callermsg(L, strVdata(L->top-1));
+ else
+ lj_err_run(L);
+}
+
+/* Forward declaration. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn);
+
+LJLIB_CF(coroutine_wrap)
+{
+ GCfunc *fn;
+ lj_cf_coroutine_create(L);
+ fn = lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1);
+ setpc_wrap_aux(L, fn);
+ return 1;
+}
+
+#include "lj_libdef.h"
+
+/* Fix the PC of wrap_aux. Really ugly workaround. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn)
+{
+ setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void newproxy_weaktable(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "kv"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+}
+
+LUALIB_API int luaopen_base(lua_State *L)
+{
+ /* NOBARRIER: Table and value are the same. */
+ GCtab *env = tabref(L->env);
+ settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env);
+ lua_pushliteral(L, LUA_VERSION); /* top-3. */
+ newproxy_weaktable(L); /* top-2. */
+ LJ_LIB_REG(L, "_G", base);
+ LJ_LIB_REG(L, LUA_COLIBNAME, coroutine);
+ return 2;
+}
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_bit.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_bit.c
new file mode 100644
index 00000000000..55cb2a845a8
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_bit.c
@@ -0,0 +1,180 @@
+/*
+** Bit manipulation library.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_bit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#endif
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_bit
+
+#if LJ_HASFFI
+static int bit_result64(lua_State *L, CTypeID id, uint64_t x)
+{
+ GCcdata *cd = lj_cdata_new_(L, id, 8);
+ *(uint64_t *)cdataptr(cd) = x;
+ setcdataV(L, L->base-1-LJ_FR2, cd);
+ return FFH_RES(1);
+}
+#else
+static int32_t bit_checkbit(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2bit(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+#endif
+
+LJLIB_ASM(bit_tobit) LJLIB_REC(bit_tobit)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ setintV(L->base-1-LJ_FR2, (int32_t)lj_carith_check64(L, 1, &id));
+ return FFH_RES(1);
+#else
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+#endif
+}
+
+LJLIB_ASM(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ uint64_t x = lj_carith_check64(L, 1, &id);
+ return id ? bit_result64(L, id, ~x) : FFH_RETRY;
+#else
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+#endif
+}
+
+LJLIB_ASM(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ uint64_t x = lj_carith_check64(L, 1, &id);
+ return id ?
bit_result64(L, id, lj_bswap64(x)) : FFH_RETRY; +#else + lj_lib_checknumber(L, 1); + return FFH_RETRY; +#endif +} + +LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL) +{ +#if LJ_HASFFI + CTypeID id = 0, id2 = 0; + uint64_t x = lj_carith_check64(L, 1, &id); + int32_t sh = (int32_t)lj_carith_check64(L, 2, &id2); + if (id) { + x = lj_carith_shift64(x, sh, curr_func(L)->c.ffid - (int)FF_bit_lshift); + return bit_result64(L, id, x); + } + if (id2) setintV(L->base+1, sh); + return FFH_RETRY; +#else + lj_lib_checknumber(L, 1); + bit_checkbit(L, 2); + return FFH_RETRY; +#endif +} +LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR) +LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR) +LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL) +LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR) + +LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND) +{ +#if LJ_HASFFI + CTypeID id = 0; + TValue *o = L->base, *top = L->top; + int i = 0; + do { lj_carith_check64(L, ++i, &id); } while (++o < top); + if (id) { + CTState *cts = ctype_cts(L); + CType *ct = ctype_get(cts, id); + int op = curr_func(L)->c.ffid - (int)FF_bit_bor; + uint64_t x, y = op >= 0 ? 0 : ~(uint64_t)0; + o = L->base; + do { + lj_cconv_ct_tv(cts, ct, (uint8_t *)&x, o, 0); + if (op < 0) y &= x; else if (op == 0) y |= x; else y ^= x; + } while (++o < top); + return bit_result64(L, id, y); + } + return FFH_RETRY; +#else + int i = 0; + do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); + return FFH_RETRY; +#endif +} +LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR) +LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR) + +/* ------------------------------------------------------------------------ */ + +LJLIB_CF(bit_tohex) LJLIB_REC(.) +{ +#if LJ_HASFFI + CTypeID id = 0, id2 = 0; + uint64_t b = lj_carith_check64(L, 1, &id); + int32_t n = L->base+1>=L->top ? (id ? 16 : 8) : + (int32_t)lj_carith_check64(L, 2, &id2); +#else + uint32_t b = (uint32_t)bit_checkbit(L, 1); + int32_t n = L->base+1>=L->top ? 8 : bit_checkbit(L, 2); +#endif + SBuf *sb = lj_buf_tmp_(L); + SFormat sf = (STRFMT_UINT|STRFMT_T_HEX); + if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; } + sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC); +#if LJ_HASFFI + if (n < 16) b &= ((uint64_t)1 << 4*n)-1; +#else + if (n < 8) b &= (1u << 4*n)-1; +#endif + sb = lj_strfmt_putfxint(sb, sf, b); + setstrV(L, L->top-1, lj_buf_str(L, sb)); + lj_gc_check(L); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_bit(lua_State *L) +{ + LJ_LIB_REG(L, LUA_BITLIBNAME, bit); + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_debug.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_debug.c new file mode 100644 index 00000000000..b610fb4dcdd --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_debug.c @@ -0,0 +1,405 @@ +/* +** Debug library. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h +*/ + +#define lib_debug_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_debug + +LJLIB_CF(debug_getregistry) +{ + copyTV(L, L->top++, registry(L)); + return 1; +} + +LJLIB_CF(debug_getmetatable) LJLIB_REC(.) +{ + lj_lib_checkany(L, 1); + if (!lua_getmetatable(L, 1)) { + setnilV(L->top-1); + } + return 1; +} + +LJLIB_CF(debug_setmetatable) +{ + lj_lib_checktabornil(L, 2); + L->top = L->base+2; + lua_setmetatable(L, 1); +#if !LJ_52 + setboolV(L->top-1, 1); +#endif + return 1; +} + +LJLIB_CF(debug_getfenv) +{ + lj_lib_checkany(L, 1); + lua_getfenv(L, 1); + return 1; +} + +LJLIB_CF(debug_setfenv) +{ + lj_lib_checktab(L, 2); + L->top = L->base+2; + if (!lua_setfenv(L, 1)) + lj_err_caller(L, LJ_ERR_SETFENV); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void settabss(lua_State *L, const char *i, const char *v) +{ + lua_pushstring(L, v); + lua_setfield(L, -2, i); +} + +static void settabsi(lua_State *L, const char *i, int v) +{ + lua_pushinteger(L, v); + lua_setfield(L, -2, i); +} + +static void settabsb(lua_State *L, const char *i, int v) +{ + lua_pushboolean(L, v); + lua_setfield(L, -2, i); +} + +static lua_State *getthread(lua_State *L, int *arg) +{ + if (L->base < L->top && tvisthread(L->base)) { + *arg = 1; + return threadV(L->base); + } else { + *arg = 0; + return L; + } +} + +static void treatstackoption(lua_State *L, lua_State *L1, const char *fname) +{ + if (L == L1) { + lua_pushvalue(L, -2); + lua_remove(L, -3); + } + else + lua_xmove(L1, L, 1); + lua_setfield(L, -2, fname); +} + +LJLIB_CF(debug_getinfo) +{ + lj_Debug ar; + int arg, opt_f = 0, opt_L = 0; + lua_State *L1 = getthread(L, &arg); + const char *options = luaL_optstring(L, arg+2, "flnSu"); + if (lua_isnumber(L, arg+1)) { + if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), (lua_Debug *)&ar)) { + setnilV(L->top-1); + return 1; + } + } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) { + options = lua_pushfstring(L, ">%s", options); + setfuncV(L1, L1->top++, funcV(L->base+arg)); + } else { + lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL); + } + if (!lj_debug_getinfo(L1, options, &ar, 1)) + lj_err_arg(L, arg+2, LJ_ERR_INVOPT); + lua_createtable(L, 0, 16); /* Create result table. */ + for (; *options; options++) { + switch (*options) { + case 'S': + settabss(L, "source", ar.source); + settabss(L, "short_src", ar.short_src); + settabsi(L, "linedefined", ar.linedefined); + settabsi(L, "lastlinedefined", ar.lastlinedefined); + settabss(L, "what", ar.what); + break; + case 'l': + settabsi(L, "currentline", ar.currentline); + break; + case 'u': + settabsi(L, "nups", ar.nups); + settabsi(L, "nparams", ar.nparams); + settabsb(L, "isvararg", ar.isvararg); + break; + case 'n': + settabss(L, "name", ar.name); + settabss(L, "namewhat", ar.namewhat); + break; + case 'f': opt_f = 1; break; + case 'L': opt_L = 1; break; + default: break; + } + } + if (opt_L) treatstackoption(L, L1, "activelines"); + if (opt_f) treatstackoption(L, L1, "func"); + return 1; /* Return result table. 
*/ +} + +LJLIB_CF(debug_getlocal) +{ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + const char *name; + int slot = lj_lib_checkint(L, arg+2); + if (tvisfunc(L->base+arg)) { + L->top = L->base+arg+1; + lua_pushstring(L, lua_getlocal(L, NULL, slot)); + return 1; + } + if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar)) + lj_err_arg(L, arg+1, LJ_ERR_LVLRNG); + name = lua_getlocal(L1, &ar, slot); + if (name) { + lua_xmove(L1, L, 1); + lua_pushstring(L, name); + lua_pushvalue(L, -2); + return 2; + } else { + setnilV(L->top-1); + return 1; + } +} + +LJLIB_CF(debug_setlocal) +{ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + TValue *tv; + if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar)) + lj_err_arg(L, arg+1, LJ_ERR_LVLRNG); + tv = lj_lib_checkany(L, arg+3); + copyTV(L1, L1->top++, tv); + lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2))); + return 1; +} + +static int debug_getupvalue(lua_State *L, int get) +{ + int32_t n = lj_lib_checkint(L, 2); + const char *name; + lj_lib_checkfunc(L, 1); + name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n); + if (name) { + lua_pushstring(L, name); + if (!get) return 1; + copyTV(L, L->top, L->top-2); + L->top++; + return 2; + } + return 0; +} + +LJLIB_CF(debug_getupvalue) +{ + return debug_getupvalue(L, 1); +} + +LJLIB_CF(debug_setupvalue) +{ + lj_lib_checkany(L, 3); + return debug_getupvalue(L, 0); +} + +LJLIB_CF(debug_upvalueid) +{ + GCfunc *fn = lj_lib_checkfunc(L, 1); + int32_t n = lj_lib_checkint(L, 2) - 1; + if ((uint32_t)n >= fn->l.nupvalues) + lj_err_arg(L, 2, LJ_ERR_IDXRNG); + setlightudV(L->top-1, isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) : + (void *)&fn->c.upvalue[n]); + return 1; +} + +LJLIB_CF(debug_upvaluejoin) +{ + GCfunc *fn[2]; + GCRef *p[2]; + int i; + for (i = 0; i < 2; i++) { + int32_t n; + fn[i] = lj_lib_checkfunc(L, 2*i+1); + if (!isluafunc(fn[i])) + lj_err_arg(L, 2*i+1, LJ_ERR_NOLFUNC); + n = lj_lib_checkint(L, 2*i+2) - 1; + if ((uint32_t)n >= fn[i]->l.nupvalues) + lj_err_arg(L, 2*i+2, LJ_ERR_IDXRNG); + p[i] = &fn[i]->l.uvptr[n]; + } + setgcrefr(*p[0], *p[1]); + lj_gc_objbarrier(L, fn[0], gcref(*p[1])); + return 0; +} + +#if LJ_52 +LJLIB_CF(debug_getuservalue) +{ + TValue *o = L->base; + if (o < L->top && tvisudata(o)) + settabV(L, o, tabref(udataV(o)->env)); + else + setnilV(o); + L->top = o+1; + return 1; +} + +LJLIB_CF(debug_setuservalue) +{ + TValue *o = L->base; + if (!(o < L->top && tvisudata(o))) + lj_err_argt(L, 1, LUA_TUSERDATA); + if (!(o+1 < L->top && tvistab(o+1))) + lj_err_argt(L, 2, LUA_TTABLE); + L->top = o+2; + lua_setfenv(L, 1); + return 1; +} +#endif + +/* ------------------------------------------------------------------------ */ + +static const char KEY_HOOK = 'h'; + +static void hookf(lua_State *L, lua_Debug *ar) +{ + static const char *const hooknames[] = + {"call", "return", "line", "count", "tail return"}; + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); + if (lua_isfunction(L, -1)) { + lua_pushstring(L, hooknames[(int)ar->event]); + if (ar->currentline >= 0) + lua_pushinteger(L, ar->currentline); + else lua_pushnil(L); + lua_call(L, 2, 0); + } +} + +static int makemask(const char *smask, int count) +{ + int mask = 0; + if (strchr(smask, 'c')) mask |= LUA_MASKCALL; + if (strchr(smask, 'r')) mask |= LUA_MASKRET; + if (strchr(smask, 'l')) mask |= LUA_MASKLINE; + if (count > 0) mask |= LUA_MASKCOUNT; + return mask; +} + +static char *unmakemask(int mask, char *smask) +{ + int i = 0; + if (mask & 
LUA_MASKCALL) smask[i++] = 'c';
+ if (mask & LUA_MASKRET) smask[i++] = 'r';
+ if (mask & LUA_MASKLINE) smask[i++] = 'l';
+ smask[i] = '\0';
+ return smask;
+}
+
+LJLIB_CF(debug_sethook)
+{
+ int arg, mask, count;
+ lua_Hook func;
+ (void)getthread(L, &arg);
+ if (lua_isnoneornil(L, arg+1)) {
+ lua_settop(L, arg+1);
+ func = NULL; mask = 0; count = 0; /* turn off hooks */
+ } else {
+ const char *smask = luaL_checkstring(L, arg+2);
+ luaL_checktype(L, arg+1, LUA_TFUNCTION);
+ count = luaL_optint(L, arg+3, 0);
+ func = hookf; mask = makemask(smask, count);
+ }
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_pushvalue(L, arg+1);
+ lua_rawset(L, LUA_REGISTRYINDEX);
+ lua_sethook(L, func, mask, count);
+ return 0;
+}
+
+LJLIB_CF(debug_gethook)
+{
+ char buff[5];
+ int mask = lua_gethookmask(L);
+ lua_Hook hook = lua_gethook(L);
+ if (hook != NULL && hook != hookf) { /* external hook? */
+ lua_pushliteral(L, "external hook");
+ } else {
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
+ }
+ lua_pushstring(L, unmakemask(mask, buff));
+ lua_pushinteger(L, lua_gethookcount(L));
+ return 3;
+}
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(debug_debug)
+{
+ for (;;) {
+ char buffer[250];
+ fputs("lua_debug> ", stderr);
+ if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
+ strcmp(buffer, "cont\n") == 0)
+ return 0;
+ if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
+ lua_pcall(L, 0, 0, 0)) {
+ fputs(lua_tostring(L, -1), stderr);
+ fputs("\n", stderr);
+ }
+ lua_settop(L, 0); /* remove eventual returns */
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define LEVELS1 12 /* size of the first part of the stack */
+#define LEVELS2 10 /* size of the second part of the stack */
+
+LJLIB_CF(debug_traceback)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ const char *msg = lua_tostring(L, arg+1);
+ if (msg == NULL && L->top > L->base+arg)
+ L->top = L->base+arg+1;
+ else
+ luaL_traceback(L, L1, msg, lj_lib_optint(L, arg+2, (L == L1)));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_debug(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_DBLIBNAME, debug);
+ return 1;
+}
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_ffi.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_ffi.c
new file mode 100644
index 00000000000..b2b2d37ff7b
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_ffi.c
@@ -0,0 +1,872 @@
+/*
+** FFI library.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_ffi_c
+#define LUA_LIB
+
+#include <errno.h>
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ctype.h"
+#include "lj_cparse.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_clib.h"
+#include "lj_strfmt.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* -- C type checks ------------------------------------------------------- */
+
+/* Check first argument for a C type and returns its ID.
*/ +static CTypeID ffi_checkctype(lua_State *L, CTState *cts, TValue *param) +{ + TValue *o = L->base; + if (!(o < L->top)) { + err_argtype: + lj_err_argtype(L, 1, "C type"); + } + if (tvisstr(o)) { /* Parse an abstract C type declaration. */ + GCstr *s = strV(o); + CPState cp; + int errcode; + cp.L = L; + cp.cts = cts; + cp.srcname = strdata(s); + cp.p = strdata(s); + cp.param = param; + cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT; + errcode = lj_cparse(&cp); + if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */ + return cp.val.id; + } else { + GCcdata *cd; + if (!tviscdata(o)) goto err_argtype; + if (param && param < L->top) lj_err_arg(L, 1, LJ_ERR_FFI_NUMPARAM); + cd = cdataV(o); + return cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->ctypeid; + } +} + +/* Check argument for C data and return it. */ +static GCcdata *ffi_checkcdata(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && tviscdata(o))) + lj_err_argt(L, narg, LUA_TCDATA); + return cdataV(o); +} + +/* Convert argument to C pointer. */ +static void *ffi_checkptr(lua_State *L, int narg, CTypeID id) +{ + CTState *cts = ctype_cts(L); + TValue *o = L->base + narg-1; + void *p; + if (o >= L->top) + lj_err_arg(L, narg, LJ_ERR_NOVAL); + lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg)); + return p; +} + +/* Convert argument to int32_t. */ +static int32_t ffi_checkint(lua_State *L, int narg) +{ + CTState *cts = ctype_cts(L); + TValue *o = L->base + narg-1; + int32_t i; + if (o >= L->top) + lj_err_arg(L, narg, LJ_ERR_NOVAL); + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, + CCF_ARG(narg)); + return i; +} + +/* -- C type metamethods -------------------------------------------------- */ + +#define LJLIB_MODULE_ffi_meta + +/* Handle ctype __index/__newindex metamethods. */ +static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm) +{ + CTypeID id = ctype_typeid(cts, ct); + cTValue *tv = lj_ctype_meta(cts, id, mm); + TValue *base = L->base; + if (!tv) { + const char *s; + err_index: + s = strdata(lj_ctype_repr(L, id, NULL)); + if (tvisstr(L->base+1)) { + lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1)); + } else { + const char *key = tviscdata(L->base+1) ? + strdata(lj_ctype_repr(L, cdataV(L->base+1)->ctypeid, NULL)) : + lj_typename(L->base+1); + lj_err_callerv(L, LJ_ERR_FFI_BADIDXW, s, key); + } + } + if (!tvisfunc(tv)) { + if (mm == MM_index) { + cTValue *o = lj_meta_tget(L, tv, base+1); + if (o) { + if (tvisnil(o)) goto err_index; + copyTV(L, L->top-1, o); + return 1; + } + } else { + TValue *o = lj_meta_tset(L, tv, base+1); + if (o) { + copyTV(L, o, base+2); + return 0; + } + } + copyTV(L, base, L->top); + tv = L->top-1-LJ_FR2; + } + return lj_meta_tailcall(L, tv); +} + +LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0) +{ + CTState *cts = ctype_cts(L); + CTInfo qual = 0; + CType *ct; + uint8_t *p; + TValue *o = L->base; + if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. */ + lj_err_argt(L, 1, LUA_TCDATA); + ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual); + if ((qual & 1)) + return ffi_index_meta(L, cts, ct, MM_index); + if (lj_cdata_get(cts, ct, L->top-1, p)) + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1) +{ + CTState *cts = ctype_cts(L); + CTInfo qual = 0; + CType *ct; + uint8_t *p; + TValue *o = L->base; + if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. 
*/ + lj_err_argt(L, 1, LUA_TCDATA); + ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual); + if ((qual & 1)) { + if ((qual & CTF_CONST)) + lj_err_caller(L, LJ_ERR_FFI_WRCONST); + return ffi_index_meta(L, cts, ct, MM_newindex); + } + lj_cdata_set(cts, ct, p, o+2, qual); + return 0; +} + +/* Common handler for cdata arithmetic. */ +static int ffi_arith(lua_State *L) +{ + MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq); + return lj_carith_op(L, mm); +} + +/* The following functions must be in contiguous ORDER MM. */ +LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat) +{ + return ffi_arith(L); +} + +/* Forward declaration. */ +static int lj_cf_ffi_new(lua_State *L); + +LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call) +{ + CTState *cts = ctype_cts(L); + GCcdata *cd = ffi_checkcdata(L, 1); + CTypeID id = cd->ctypeid; + CType *ct; + cTValue *tv; + MMS mm = MM_call; + if (cd->ctypeid == CTID_CTYPEID) { + id = *(CTypeID *)cdataptr(cd); + mm = MM_new; + } else { + int ret = lj_ccall_func(L, cd); + if (ret >= 0) + return ret; + } + /* Handle ctype __call/__new metamethod. */ + ct = ctype_raw(cts, id); + if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); + tv = lj_ctype_meta(cts, id, mm); + if (tv) + return lj_meta_tailcall(L, tv); + else if (mm == MM_call) + lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL))); + return lj_cf_ffi_new(L); +} + +LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm) +{ + return ffi_arith(L); +} +/* End of contiguous ORDER MM. */ + +LJLIB_CF(ffi_meta___tostring) +{ + GCcdata *cd = ffi_checkcdata(L, 1); + const char *msg = "cdata<%s>: %p"; + CTypeID id = cd->ctypeid; + void *p = cdataptr(cd); + if (id == CTID_CTYPEID) { + msg = "ctype<%s>"; + id = *(CTypeID *)p; + } else { + CTState *cts = ctype_cts(L); + CType *ct = ctype_raw(cts, id); + if (ctype_isref(ct->info)) { + p = *(void **)p; + ct = ctype_rawchild(cts, ct); + } + if (ctype_iscomplex(ct->info)) { + setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size)); + goto checkgc; + } else if (ct->size == 8 && ctype_isinteger(ct->info)) { + setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd), + (ct->info & CTF_UNSIGNED))); + goto checkgc; + } else if (ctype_isfunc(ct->info)) { + p = *(void **)p; + } else if (ctype_isenum(ct->info)) { + msg = "cdata<%s>: %d"; + p = (void *)(uintptr_t)*(uint32_t **)p; + } else { + if (ctype_isptr(ct->info)) { + p = cdata_getptr(p, ct->size); + ct = ctype_rawchild(cts, ct); + } + if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) { + /* Handle ctype __tostring metamethod. 
*/ + cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring); + if (tv) + return lj_meta_tailcall(L, tv); + } + } + } + lj_strfmt_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p); +checkgc: + lj_gc_check(L); + return 1; +} + +static int ffi_pairs(lua_State *L, MMS mm) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkcdata(L, 1)->ctypeid; + CType *ct = ctype_raw(cts, id); + cTValue *tv; + if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); + tv = lj_ctype_meta(cts, id, mm); + if (!tv) + lj_err_callerv(L, LJ_ERR_FFI_BADMM, strdata(lj_ctype_repr(L, id, NULL)), + strdata(mmname_str(G(L), mm))); + return lj_meta_tailcall(L, tv); +} + +LJLIB_CF(ffi_meta___pairs) +{ + return ffi_pairs(L, MM_pairs); +} + +LJLIB_CF(ffi_meta___ipairs) +{ + return ffi_pairs(L, MM_ipairs); +} + +LJLIB_PUSH("ffi") LJLIB_SET(__metatable) + +#include "lj_libdef.h" + +/* -- C library metamethods ----------------------------------------------- */ + +#define LJLIB_MODULE_ffi_clib + +/* Index C library by a name. */ +static TValue *ffi_clib_index(lua_State *L) +{ + TValue *o = L->base; + CLibrary *cl; + if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)) + lj_err_argt(L, 1, LUA_TUSERDATA); + cl = (CLibrary *)uddata(udataV(o)); + if (!(o+1 < L->top && tvisstr(o+1))) + lj_err_argt(L, 2, LUA_TSTRING); + return lj_clib_index(L, cl, strV(o+1)); +} + +LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index 1) +{ + TValue *tv = ffi_clib_index(L); + if (tviscdata(tv)) { + CTState *cts = ctype_cts(L); + GCcdata *cd = cdataV(tv); + CType *s = ctype_get(cts, cd->ctypeid); + if (ctype_isextern(s->info)) { + CTypeID sid = ctype_cid(s->info); + void *sp = *(void **)cdataptr(cd); + CType *ct = ctype_raw(cts, sid); + if (lj_cconv_tv_ct(cts, ct, sid, L->top-1, sp)) + lj_gc_check(L); + return 1; + } + } + copyTV(L, L->top-1, tv); + return 1; +} + +LJLIB_CF(ffi_clib___newindex) LJLIB_REC(clib_index 0) +{ + TValue *tv = ffi_clib_index(L); + TValue *o = L->base+2; + if (o < L->top && tviscdata(tv)) { + CTState *cts = ctype_cts(L); + GCcdata *cd = cdataV(tv); + CType *d = ctype_get(cts, cd->ctypeid); + if (ctype_isextern(d->info)) { + CTInfo qual = 0; + for (;;) { /* Skip attributes and collect qualifiers. */ + d = ctype_child(cts, d); + if (!ctype_isattrib(d->info)) break; + if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size; + } + if (!((d->info|qual) & CTF_CONST)) { + lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0); + return 0; + } + } + } + lj_err_caller(L, LJ_ERR_FFI_WRCONST); + return 0; /* unreachable */ +} + +LJLIB_CF(ffi_clib___gc) +{ + TValue *o = L->base; + if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB) + lj_clib_unload((CLibrary *)uddata(udataV(o))); + return 0; +} + +#include "lj_libdef.h" + +/* -- Callback function metamethods --------------------------------------- */ + +#define LJLIB_MODULE_ffi_callback + +static int ffi_callback_set(lua_State *L, GCfunc *fn) +{ + GCcdata *cd = ffi_checkcdata(L, 1); + CTState *cts = ctype_cts(L); + CType *ct = ctype_raw(cts, cd->ctypeid); + if (ctype_isptr(ct->info) && (LJ_32 || ct->size == 8)) { + MSize slot = lj_ccallback_ptr2slot(cts, *(void **)cdataptr(cd)); + if (slot < cts->cb.sizeid && cts->cb.cbid[slot] != 0) { + GCtab *t = cts->miscmap; + TValue *tv = lj_tab_setint(L, t, (int32_t)slot); + if (fn) { + setfuncV(L, tv, fn); + lj_gc_anybarriert(L, t); + } else { + setnilV(tv); + cts->cb.cbid[slot] = 0; + cts->cb.topid = slot < cts->cb.topid ? 
slot : cts->cb.topid; + } + return 0; + } + } + lj_err_caller(L, LJ_ERR_FFI_BADCBACK); + return 0; +} + +LJLIB_CF(ffi_callback_free) +{ + return ffi_callback_set(L, NULL); +} + +LJLIB_CF(ffi_callback_set) +{ + GCfunc *fn = lj_lib_checkfunc(L, 2); + return ffi_callback_set(L, fn); +} + +LJLIB_PUSH(top-1) LJLIB_SET(__index) + +#include "lj_libdef.h" + +/* -- FFI library functions ----------------------------------------------- */ + +#define LJLIB_MODULE_ffi + +LJLIB_CF(ffi_cdef) +{ + GCstr *s = lj_lib_checkstr(L, 1); + CPState cp; + int errcode; + cp.L = L; + cp.cts = ctype_cts(L); + cp.srcname = strdata(s); + cp.p = strdata(s); + cp.param = L->base+1; + cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT; + errcode = lj_cparse(&cp); + if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */ + lj_gc_check(L); + return 0; +} + +LJLIB_CF(ffi_new) LJLIB_REC(.) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, NULL); + CType *ct = ctype_raw(cts, id); + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + TValue *o = L->base+1; + GCcdata *cd; + if ((info & CTF_VLA)) { + o++; + sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2)); + } + if (sz == CTSIZE_INVALID) + lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE); + if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN) + cd = lj_cdata_new(cts, id, sz); + else + cd = lj_cdata_newv(L, id, sz, ctype_align(info)); + setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */ + lj_cconv_ct_init(cts, ct, sz, cdataptr(cd), + o, (MSize)(L->top - o)); /* Initialize cdata. */ + if (ctype_isstruct(ct->info)) { + /* Handle ctype __gc metamethod. Use the fast lookup here. */ + cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)id); + if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) { + GCtab *t = cts->finalizer; + if (gcref(t->metatable)) { + /* Add to finalizer table, if still enabled. */ + copyTV(L, lj_tab_set(L, t, o-1), tv); + lj_gc_anybarriert(L, t); + cd->marked |= LJ_GC_CDATA_FIN; + } + } + } + L->top = o; /* Only return the cdata itself. */ + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, NULL); + CType *d = ctype_raw(cts, id); + TValue *o = lj_lib_checkany(L, 2); + L->top = o+1; /* Make sure this is the last item on the stack. */ + if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info))) + lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); + if (!(tviscdata(o) && cdataV(o)->ctypeid == id)) { + GCcdata *cd = lj_cdata_new(cts, id, d->size); + lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST); + setcdataV(L, o, cd); + lj_gc_check(L); + } + return 1; +} + +LJLIB_CF(ffi_typeof) LJLIB_REC(.) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, L->base+1); + GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4); + *(CTypeID *)cdataptr(cd) = id; + setcdataV(L, L->top-1, cd); + lj_gc_check(L); + return 1; +} + +/* Internal and unsupported API. */ +LJLIB_CF(ffi_typeinfo) +{ + CTState *cts = ctype_cts(L); + CTypeID id = (CTypeID)ffi_checkint(L, 1); + if (id > 0 && id < cts->top) { + CType *ct = ctype_get(cts, id); + GCtab *t; + lua_createtable(L, 0, 4); /* Increment hash size if fields are added. 
*/ + t = tabV(L->top-1); + setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "info")), (int32_t)ct->info); + if (ct->size != CTSIZE_INVALID) + setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "size")), (int32_t)ct->size); + if (ct->sib) + setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "sib")), (int32_t)ct->sib); + if (gcref(ct->name)) { + GCstr *s = gco2str(gcref(ct->name)); + setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "name")), s); + } + lj_gc_check(L); + return 1; + } + return 0; +} + +LJLIB_CF(ffi_istype) LJLIB_REC(.) +{ + CTState *cts = ctype_cts(L); + CTypeID id1 = ffi_checkctype(L, cts, NULL); + TValue *o = lj_lib_checkany(L, 2); + int b = 0; + if (tviscdata(o)) { + GCcdata *cd = cdataV(o); + CTypeID id2 = cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : + cd->ctypeid; + CType *ct1 = lj_ctype_rawref(cts, id1); + CType *ct2 = lj_ctype_rawref(cts, id2); + if (ct1 == ct2) { + b = 1; + } else if (ctype_type(ct1->info) == ctype_type(ct2->info) && + ct1->size == ct2->size) { + if (ctype_ispointer(ct1->info)) + b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL); + else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info)) + b = (((ct1->info ^ ct2->info) & ~(CTF_QUAL|CTF_LONG)) == 0); + } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) && + ct1 == ctype_rawchild(cts, ct2)) { + b = 1; + } + } + setboolV(L->top-1, b); + setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ + return 1; +} + +LJLIB_CF(ffi_sizeof) LJLIB_REC(ffi_xof FF_ffi_sizeof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, NULL); + CTSize sz; + if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) { + sz = cdatavlen(cdataV(L->base)); + } else { + CType *ct = lj_ctype_rawref(cts, id); + if (ctype_isvltype(ct->info)) + sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2)); + else + sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID; + if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) { + setnilV(L->top-1); + return 1; + } + } + setintV(L->top-1, (int32_t)sz); + return 1; +} + +LJLIB_CF(ffi_alignof) LJLIB_REC(ffi_xof FF_ffi_alignof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, NULL); + CTSize sz = 0; + CTInfo info = lj_ctype_info(cts, id, &sz); + setintV(L->top-1, 1 << ctype_align(info)); + return 1; +} + +LJLIB_CF(ffi_offsetof) LJLIB_REC(ffi_xof FF_ffi_offsetof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, NULL); + GCstr *name = lj_lib_checkstr(L, 2); + CType *ct = lj_ctype_rawref(cts, id); + CTSize ofs; + if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) { + CType *fct = lj_ctype_getfield(cts, ct, name, &ofs); + if (fct) { + setintV(L->top-1, ofs); + if (ctype_isfield(fct->info)) { + return 1; + } else if (ctype_isbitfield(fct->info)) { + setintV(L->top++, ctype_bitpos(fct->info)); + setintV(L->top++, ctype_bitbsz(fct->info)); + return 3; + } + } + } + return 0; +} + +LJLIB_CF(ffi_errno) LJLIB_REC(.) +{ + int err = errno; + if (L->top > L->base) + errno = ffi_checkint(L, 1); + setintV(L->top++, err); + return 1; +} + +LJLIB_CF(ffi_string) LJLIB_REC(.) +{ + CTState *cts = ctype_cts(L); + TValue *o = lj_lib_checkany(L, 1); + const char *p; + size_t len; + if (o+1 < L->top && !tvisnil(o+1)) { + len = (size_t)ffi_checkint(L, 2); + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o, + CCF_ARG(1)); + } else { + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o, + CCF_ARG(1)); + len = strlen(p); + } + L->top = o+1; /* Make sure this is the last item on the stack. 
*/ + setstrV(L, o, lj_str_new(L, p, len)); + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_copy) LJLIB_REC(.) +{ + void *dp = ffi_checkptr(L, 1, CTID_P_VOID); + void *sp = ffi_checkptr(L, 2, CTID_P_CVOID); + TValue *o = L->base+1; + CTSize len; + if (tvisstr(o) && o+1 >= L->top) + len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */ + else + len = (CTSize)ffi_checkint(L, 3); + memcpy(dp, sp, len); + return 0; +} + +LJLIB_CF(ffi_fill) LJLIB_REC(.) +{ + void *dp = ffi_checkptr(L, 1, CTID_P_VOID); + CTSize len = (CTSize)ffi_checkint(L, 2); + int32_t fill = 0; + if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3); + memset(dp, fill, len); + return 0; +} + +#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be) + +/* Test ABI string. */ +LJLIB_CF(ffi_abi) LJLIB_REC(.) +{ + GCstr *s = lj_lib_checkstr(L, 1); + int b = 0; + switch (s->hash) { +#if LJ_64 + case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */ +#else + case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */ +#endif +#if LJ_ARCH_HASFPU + case H_(e33ee463,e33ee463): b = 1; break; /* fpu */ +#endif +#if LJ_ABI_SOFTFP + case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */ +#else + case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */ +#endif +#if LJ_ABI_EABI + case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */ +#endif +#if LJ_ABI_WIN + case H_(4ab624a8,4ab624a8): b = 1; break; /* win */ +#endif + case H_(3af93066,1f001464): b = 1; break; /* le/be */ +#if LJ_GC64 + case H_(9e89d2c9,13c83c92): b = 1; break; /* gc64 */ +#endif + default: + break; + } + setboolV(L->top-1, b); + setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ + return 1; +} + +#undef H_ + +LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */ + +LJLIB_CF(ffi_metatype) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts, NULL); + GCtab *mt = lj_lib_checktab(L, 2); + GCtab *t = cts->miscmap; + CType *ct = ctype_get(cts, id); /* Only allow raw types. */ + TValue *tv; + GCcdata *cd; + if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) || + ctype_isvector(ct->info))) + lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); + tv = lj_tab_setinth(L, t, -(int32_t)id); + if (!tvisnil(tv)) + lj_err_caller(L, LJ_ERR_PROTMT); + settabV(L, tv, mt); + lj_gc_anybarriert(L, t); + cd = lj_cdata_new(cts, CTID_CTYPEID, 4); + *(CTypeID *)cdataptr(cd) = id; + setcdataV(L, L->top-1, cd); + lj_gc_check(L); + return 1; +} + +LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */ + +LJLIB_CF(ffi_gc) LJLIB_REC(.) +{ + GCcdata *cd = ffi_checkcdata(L, 1); + TValue *fin = lj_lib_checkany(L, 2); + CTState *cts = ctype_cts(L); + CType *ct = ctype_raw(cts, cd->ctypeid); + if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) || + ctype_isrefarray(ct->info))) + lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); + lj_cdata_setfin(L, cd, gcval(fin), itype(fin)); + L->top = L->base+1; /* Pass through the cdata object. */ + return 1; +} + +LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */ + +LJLIB_CF(ffi_load) +{ + GCstr *name = lj_lib_checkstr(L, 1); + int global = (L->base+1 < L->top && tvistruecond(L->base+1)); + lj_clib_load(L, tabref(curr_func(L)->c.env), name, global); + return 1; +} + +LJLIB_PUSH(top-4) LJLIB_SET(C) +LJLIB_PUSH(top-3) LJLIB_SET(os) +LJLIB_PUSH(top-2) LJLIB_SET(arch) + +#include "lj_libdef.h" + +/* ------------------------------------------------------------------------ */ + +/* Create special weak-keyed finalizer table. 
*/
+static GCtab *ffi_finalizer(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "K"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+ return t;
+}
+
+/* Register FFI module as loaded. */
+static void ffi_register_module(lua_State *L)
+{
+ cTValue *tmp = lj_tab_getstr(tabV(registry(L)), lj_str_newlit(L, "_LOADED"));
+ if (tmp && tvistab(tmp)) {
+ GCtab *t = tabV(tmp);
+ copyTV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "ffi")), L->top-1);
+ lj_gc_anybarriert(L, t);
+ }
+}
+
+LUALIB_API int luaopen_ffi(lua_State *L)
+{
+ CTState *cts = lj_ctype_init(L);
+ settabV(L, L->top++, (cts->miscmap = lj_tab_new(L, 0, 1)));
+ cts->finalizer = ffi_finalizer(L);
+ LJ_LIB_REG(L, NULL, ffi_meta);
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1)));
+ LJ_LIB_REG(L, NULL, ffi_clib);
+ LJ_LIB_REG(L, NULL, ffi_callback);
+ /* NOBARRIER: the key is new and lj_tab_newkey() handles the barrier. */
+ settabV(L, lj_tab_setstr(L, cts->miscmap, &cts->g->strempty), tabV(L->top-1));
+ L->top--;
+ lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */
+ ffi_register_module(L);
+ return 1;
+}
+
+#endif
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_init.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_init.c
new file mode 100644
index 00000000000..8e0d4428fd3
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_init.c
@@ -0,0 +1,56 @@
+/*
+** Library initialization.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major parts taken verbatim from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_init_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_arch.h"
+
+static const luaL_Reg lj_lib_load[] = {
+ { "", luaopen_base },
+ { LUA_LOADLIBNAME, luaopen_package },
+ { LUA_TABLIBNAME, luaopen_table },
+ { LUA_IOLIBNAME, luaopen_io },
+ { LUA_OSLIBNAME, luaopen_os },
+ { LUA_STRLIBNAME, luaopen_string },
+ { LUA_MATHLIBNAME, luaopen_math },
+ { LUA_DBLIBNAME, luaopen_debug },
+ { LUA_BITLIBNAME, luaopen_bit },
+ { LUA_JITLIBNAME, luaopen_jit },
+ { LUA_BITLIBNAME_32, luaopen_bit32 },
+ { NULL, NULL }
+};
+
+static const luaL_Reg lj_lib_preload[] = {
+#if LJ_HASFFI
+ { LUA_FFILIBNAME, luaopen_ffi },
+#endif
+ { NULL, NULL }
+};
+
+LUALIB_API void luaL_openlibs(lua_State *L)
+{
+ const luaL_Reg *lib;
+ for (lib = lj_lib_load; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_pushstring(L, lib->name);
+ lua_call(L, 1, 0);
+ }
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD",
+ sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1);
+ for (lib = lj_lib_preload; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_setfield(L, -2, lib->name);
+ }
+ lua_pop(L, 1);
+}
+
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_io.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_io.c
new file mode 100644
index 00000000000..2aa834746e1
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_io.c
@@ -0,0 +1,541 @@
+/*
+** I/O library.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio.
See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+
+#define lib_io_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_state.h"
+#include "lj_strfmt.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* Userdata payload for I/O file. */
+typedef struct IOFileUD {
+ FILE *fp; /* File handle. */
+ uint32_t type; /* File type. */
+} IOFileUD;
+
+#define IOFILE_TYPE_FILE 0 /* Regular file. */
+#define IOFILE_TYPE_PIPE 1 /* Pipe. */
+#define IOFILE_TYPE_STDF 2 /* Standard file handle. */
+#define IOFILE_TYPE_MASK 3
+
+#define IOFILE_FLAG_CLOSE 4 /* Close after io.lines() iterator. */
+
+#define IOSTDF_UD(L, id) (&gcref(G(L)->gcroot[(id)])->ud)
+#define IOSTDF_IOF(L, id) ((IOFileUD *)uddata(IOSTDF_UD(L, (id))))
+
+/* -- Open/close helpers -------------------------------------------------- */
+
+static IOFileUD *io_tofilep(lua_State *L)
+{
+ if (!(L->base < L->top && tvisudata(L->base) &&
+ udataV(L->base)->udtype == UDTYPE_IO_FILE))
+ lj_err_argtype(L, 1, "FILE*");
+ return (IOFileUD *)uddata(udataV(L->base));
+}
+
+static IOFileUD *io_tofile(lua_State *L)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOCLFL);
+ return iof;
+}
+
+static FILE *io_stdfile(lua_State *L, ptrdiff_t id)
+{
+ IOFileUD *iof = IOSTDF_IOF(L, id);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOSTDCL);
+ return iof->fp;
+}
+
+static IOFileUD *io_file_new(lua_State *L)
+{
+ IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD));
+ GCudata *ud = udataV(L->top-1);
+ ud->udtype = UDTYPE_IO_FILE;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcrefr(ud->metatable, curr_func(L)->c.env);
+ iof->fp = NULL;
+ iof->type = IOFILE_TYPE_FILE;
+ return iof;
+}
+
+static IOFileUD *io_file_open(lua_State *L, const char *mode)
+{
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ IOFileUD *iof = io_file_new(L);
+ iof->fp = fopen(fname, mode);
+ if (iof->fp == NULL)
+ luaL_argerror(L, 1, lj_strfmt_pushf(L, "%s: %s", fname, strerror(errno)));
+ return iof;
+}
+
+static int io_file_close(lua_State *L, IOFileUD *iof)
+{
+ int ok;
+ if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_FILE) {
+ ok = (fclose(iof->fp) == 0);
+ } else if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_PIPE) {
+ int stat = -1;
+#if LJ_TARGET_POSIX
+ stat = pclose(iof->fp);
+#elif LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE
+ stat = _pclose(iof->fp);
+#else
+ lua_assert(0);
+ return 0;
+#endif
+#if LJ_52
+ iof->fp = NULL;
+ return luaL_execresult(L, stat);
+#else
+ ok = (stat != -1);
+#endif
+ } else {
+ lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF);
+ setnilV(L->top++);
+ lua_pushliteral(L, "cannot close standard file");
+ return 2;
+ }
+ iof->fp = NULL;
+ return luaL_fileresult(L, ok, NULL);
+}
+
+/* -- Read/write helpers -------------------------------------------------- */
+
+static int io_file_readnum(lua_State *L, FILE *fp)
+{
+ lua_Number d;
+ if (fscanf(fp, LUA_NUMBER_SCAN, &d) == 1) {
+ if (LJ_DUALNUM) {
+ int32_t i = lj_num2int(d);
+ if (d == (lua_Number)i && !tvismzero((cTValue *)&d)) {
+ setintV(L->top++, i);
+ return 1;
+ }
+ }
+ setnumV(L->top++, d);
+ return 1;
+ } else {
+ setnilV(L->top++);
+ return 0;
+ }
+}
+
+static int io_file_readline(lua_State *L, FILE *fp, MSize chop)
+{
+ MSize m = LUAL_BUFFERSIZE, n = 0, ok = 0;
+ char *buf;
+ for (;;) {
+ buf = lj_buf_tmp(L, m);
+ if (fgets(buf+n, m-n, fp) == NULL) break;
+ n +=
(MSize)strlen(buf+n); + ok |= n; + if (n && buf[n-1] == '\n') { n -= chop; break; } + if (n >= m - 64) m += m; + } + setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); + lj_gc_check(L); + return (int)ok; +} + +static void io_file_readall(lua_State *L, FILE *fp) +{ + MSize m, n; + for (m = LUAL_BUFFERSIZE, n = 0; ; m += m) { + char *buf = lj_buf_tmp(L, m); + n += (MSize)fread(buf+n, 1, m-n, fp); + if (n != m) { + setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); + lj_gc_check(L); + return; + } + } +} + +static int io_file_readlen(lua_State *L, FILE *fp, MSize m) +{ + if (m) { + char *buf = lj_buf_tmp(L, m); + MSize n = (MSize)fread(buf, 1, m, fp); + setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); + lj_gc_check(L); + return (n > 0 || m == 0); + } else { + int c = getc(fp); + ungetc(c, fp); + setstrV(L, L->top++, &G(L)->strempty); + return (c != EOF); + } +} + +static int io_file_read(lua_State *L, FILE *fp, int start) +{ + int ok, n, nargs = (int)(L->top - L->base) - start; + clearerr(fp); + if (nargs == 0) { + ok = io_file_readline(L, fp, 1); + n = start+1; /* Return 1 result. */ + } else { + /* The results plus the buffers go on top of the args. */ + luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments"); + ok = 1; + for (n = start; nargs-- && ok; n++) { + if (tvisstr(L->base+n)) { + const char *p = strVdata(L->base+n); + if (p[0] != '*') + lj_err_arg(L, n+1, LJ_ERR_INVOPT); + if (p[1] == 'n') + ok = io_file_readnum(L, fp); + else if ((p[1] & ~0x20) == 'L') + ok = io_file_readline(L, fp, (p[1] == 'l')); + else if (p[1] == 'a') + io_file_readall(L, fp); + else + lj_err_arg(L, n+1, LJ_ERR_INVFMT); + } else if (tvisnumber(L->base+n)) { + ok = io_file_readlen(L, fp, (MSize)lj_lib_checkint(L, n+1)); + } else { + lj_err_arg(L, n+1, LJ_ERR_INVOPT); + } + } + } + if (ferror(fp)) + return luaL_fileresult(L, 0, NULL); + if (!ok) + setnilV(L->top-1); /* Replace last result with nil. */ + return n - start; +} + +static int io_file_write(lua_State *L, FILE *fp, int start) +{ + cTValue *tv; + int status = 1; + for (tv = L->base+start; tv < L->top; tv++) { + char buf[STRFMT_MAXBUF_NUM]; + MSize len; + const char *p = lj_strfmt_wstrnum(buf, tv, &len); + if (!p) + lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING); + status = status && (fwrite(p, 1, len, fp) == len); + } + if (LJ_52 && status) { + L->top = L->base+1; + if (start == 0) + setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_OUTPUT)); + return 1; + } + return luaL_fileresult(L, status, NULL); +} + +static int io_file_iter(lua_State *L) +{ + GCfunc *fn = curr_func(L); + IOFileUD *iof = uddata(udataV(&fn->c.upvalue[0])); + int n = fn->c.nupvalues - 1; + if (iof->fp == NULL) + lj_err_caller(L, LJ_ERR_IOCLFL); + L->top = L->base; + if (n) { /* Copy upvalues with options to stack. */ + if (n > LUAI_MAXCSTACK) + lj_err_caller(L, LJ_ERR_STKOV); + lj_state_checkstack(L, (MSize)n); + memcpy(L->top, &fn->c.upvalue[1], n*sizeof(TValue)); + L->top += n; + } + n = io_file_read(L, iof->fp, 0); + if (ferror(iof->fp)) + lj_err_callermsg(L, strVdata(L->top-2)); + if (tvisnil(L->base) && (iof->type & IOFILE_FLAG_CLOSE)) { + io_file_close(L, iof); /* Return values are ignored. 
*/ + return 0; + } + return n; +} + +static int io_file_lines(lua_State *L) +{ + int n = (int)(L->top - L->base); + if (n > LJ_MAX_UPVAL) + lj_err_caller(L, LJ_ERR_UNPACK); + lua_pushcclosure(L, io_file_iter, n); + return 1; +} + +/* -- I/O file methods ---------------------------------------------------- */ + +#define LJLIB_MODULE_io_method + +LJLIB_CF(io_method_close) +{ + IOFileUD *iof = L->base < L->top ? io_tofile(L) : + IOSTDF_IOF(L, GCROOT_IO_OUTPUT); + return io_file_close(L, iof); +} + +LJLIB_CF(io_method_read) +{ + return io_file_read(L, io_tofile(L)->fp, 1); +} + +LJLIB_CF(io_method_write) LJLIB_REC(io_write 0) +{ + return io_file_write(L, io_tofile(L)->fp, 1); +} + +LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0) +{ + return luaL_fileresult(L, fflush(io_tofile(L)->fp) == 0, NULL); +} + +LJLIB_CF(io_method_seek) +{ + FILE *fp = io_tofile(L)->fp; + int opt = lj_lib_checkopt(L, 2, 1, "\3set\3cur\3end"); + int64_t ofs = 0; + cTValue *o; + int res; + if (opt == 0) opt = SEEK_SET; + else if (opt == 1) opt = SEEK_CUR; + else if (opt == 2) opt = SEEK_END; + o = L->base+2; + if (o < L->top) { + if (tvisint(o)) + ofs = (int64_t)intV(o); + else if (tvisnum(o)) + ofs = (int64_t)numV(o); + else if (!tvisnil(o)) + lj_err_argt(L, 3, LUA_TNUMBER); + } +#if LJ_TARGET_POSIX + res = fseeko(fp, ofs, opt); +#elif _MSC_VER >= 1400 + res = _fseeki64(fp, ofs, opt); +#elif defined(__MINGW32__) + res = fseeko64(fp, ofs, opt); +#else + res = fseek(fp, (long)ofs, opt); +#endif + if (res) + return luaL_fileresult(L, 0, NULL); +#if LJ_TARGET_POSIX + ofs = ftello(fp); +#elif _MSC_VER >= 1400 + ofs = _ftelli64(fp); +#elif defined(__MINGW32__) + ofs = ftello64(fp); +#else + ofs = (int64_t)ftell(fp); +#endif + setint64V(L->top-1, ofs); + return 1; +} + +LJLIB_CF(io_method_setvbuf) +{ + FILE *fp = io_tofile(L)->fp; + int opt = lj_lib_checkopt(L, 2, -1, "\4full\4line\2no"); + size_t sz = (size_t)lj_lib_optint(L, 3, LUAL_BUFFERSIZE); + if (opt == 0) opt = _IOFBF; + else if (opt == 1) opt = _IOLBF; + else if (opt == 2) opt = _IONBF; + return luaL_fileresult(L, setvbuf(fp, NULL, opt, sz) == 0, NULL); +} + +LJLIB_CF(io_method_lines) +{ + io_tofile(L); + return io_file_lines(L); +} + +LJLIB_CF(io_method___gc) +{ + IOFileUD *iof = io_tofilep(L); + if (iof->fp != NULL && (iof->type & IOFILE_TYPE_MASK) != IOFILE_TYPE_STDF) + io_file_close(L, iof); + return 0; +} + +LJLIB_CF(io_method___tostring) +{ + IOFileUD *iof = io_tofilep(L); + if (iof->fp != NULL) + lua_pushfstring(L, "file (%p)", iof->fp); + else + lua_pushliteral(L, "file (closed)"); + return 1; +} + +LJLIB_PUSH(top-1) LJLIB_SET(__index) + +#include "lj_libdef.h" + +/* -- I/O library functions ----------------------------------------------- */ + +#define LJLIB_MODULE_io + +LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */ + +LJLIB_CF(io_open) +{ + const char *fname = strdata(lj_lib_checkstr(L, 1)); + GCstr *s = lj_lib_optstr(L, 2); + const char *mode = s ? strdata(s) : "r"; + IOFileUD *iof = io_file_new(L); + iof->fp = fopen(fname, mode); + return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, fname); +} + +LJLIB_CF(io_popen) +{ +#if LJ_TARGET_POSIX || (LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE) + const char *fname = strdata(lj_lib_checkstr(L, 1)); + GCstr *s = lj_lib_optstr(L, 2); + const char *mode = s ? strdata(s) : "r"; + IOFileUD *iof = io_file_new(L); + iof->type = IOFILE_TYPE_PIPE; +#if LJ_TARGET_POSIX + fflush(NULL); + iof->fp = popen(fname, mode); +#else + iof->fp = _popen(fname, mode); +#endif + return iof->fp != NULL ? 
1 : luaL_fileresult(L, 0, fname); +#else + return luaL_error(L, LUA_QL("popen") " not supported"); +#endif +} + +LJLIB_CF(io_tmpfile) +{ + IOFileUD *iof = io_file_new(L); +#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA + iof->fp = NULL; errno = ENOSYS; +#else + iof->fp = tmpfile(); +#endif + return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, NULL); +} + +LJLIB_CF(io_close) +{ + return lj_cf_io_method_close(L); +} + +LJLIB_CF(io_read) +{ + return io_file_read(L, io_stdfile(L, GCROOT_IO_INPUT), 0); +} + +LJLIB_CF(io_write) LJLIB_REC(io_write GCROOT_IO_OUTPUT) +{ + return io_file_write(L, io_stdfile(L, GCROOT_IO_OUTPUT), 0); +} + +LJLIB_CF(io_flush) LJLIB_REC(io_flush GCROOT_IO_OUTPUT) +{ + return luaL_fileresult(L, fflush(io_stdfile(L, GCROOT_IO_OUTPUT)) == 0, NULL); +} + +static int io_std_getset(lua_State *L, ptrdiff_t id, const char *mode) +{ + if (L->base < L->top && !tvisnil(L->base)) { + if (tvisudata(L->base)) { + io_tofile(L); + L->top = L->base+1; + } else { + io_file_open(L, mode); + } + /* NOBARRIER: The standard I/O handles are GC roots. */ + setgcref(G(L)->gcroot[id], gcV(L->top-1)); + } else { + setudataV(L, L->top++, IOSTDF_UD(L, id)); + } + return 1; +} + +LJLIB_CF(io_input) +{ + return io_std_getset(L, GCROOT_IO_INPUT, "r"); +} + +LJLIB_CF(io_output) +{ + return io_std_getset(L, GCROOT_IO_OUTPUT, "w"); +} + +LJLIB_CF(io_lines) +{ + if (L->base == L->top) setnilV(L->top++); + if (!tvisnil(L->base)) { /* io.lines(fname) */ + IOFileUD *iof = io_file_open(L, "r"); + iof->type = IOFILE_TYPE_FILE|IOFILE_FLAG_CLOSE; + L->top--; + setudataV(L, L->base, udataV(L->top)); + } else { /* io.lines() iterates over stdin. */ + setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_INPUT)); + } + return io_file_lines(L); +} + +LJLIB_CF(io_type) +{ + cTValue *o = lj_lib_checkany(L, 1); + if (!(tvisudata(o) && udataV(o)->udtype == UDTYPE_IO_FILE)) + setnilV(L->top++); + else if (((IOFileUD *)uddata(udataV(o)))->fp != NULL) + lua_pushliteral(L, "file"); + else + lua_pushliteral(L, "closed file"); + return 1; +} + +#include "lj_libdef.h" + +/* ------------------------------------------------------------------------ */ + +static GCobj *io_std_new(lua_State *L, FILE *fp, const char *name) +{ + IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); + GCudata *ud = udataV(L->top-1); + ud->udtype = UDTYPE_IO_FILE; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcref(ud->metatable, gcV(L->top-3)); + iof->fp = fp; + iof->type = IOFILE_TYPE_STDF; + lua_setfield(L, -2, name); + return obj2gco(ud); +} + +LUALIB_API int luaopen_io(lua_State *L) +{ + LJ_LIB_REG(L, NULL, io_method); + copyTV(L, L->top, L->top-1); L->top++; + lua_setfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE); + LJ_LIB_REG(L, LUA_IOLIBNAME, io); + setgcref(G(L)->gcroot[GCROOT_IO_INPUT], io_std_new(L, stdin, "stdin")); + setgcref(G(L)->gcroot[GCROOT_IO_OUTPUT], io_std_new(L, stdout, "stdout")); + io_std_new(L, stderr, "stderr"); + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_jit.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_jit.c new file mode 100644 index 00000000000..178ef249df3 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_jit.c @@ -0,0 +1,767 @@ +/* +** JIT library. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lib_jit_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#if LJ_HASJIT +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_target.h" +#endif +#include "lj_trace.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_vmevent.h" +#include "lj_lib.h" + +#include "luajit.h" + +/* -- jit.* functions ----------------------------------------------------- */ + +#define LJLIB_MODULE_jit + +static int setjitmode(lua_State *L, int mode) +{ + int idx = 0; + if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */ + mode |= LUAJIT_MODE_ENGINE; + } else { + /* jit.on/off/flush(func|proto, nil|true|false) */ + if (tvisfunc(L->base) || tvisproto(L->base)) + idx = 1; + else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */ + goto err; + if (L->base+1 < L->top && tvisbool(L->base+1)) + mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC; + else + mode |= LUAJIT_MODE_FUNC; + } + if (luaJIT_setmode(L, idx, mode) != 1) { + if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE) + lj_err_caller(L, LJ_ERR_NOJIT); + err: + lj_err_argt(L, 1, LUA_TFUNCTION); + } + return 0; +} + +LJLIB_CF(jit_on) +{ + return setjitmode(L, LUAJIT_MODE_ON); +} + +LJLIB_CF(jit_off) +{ + return setjitmode(L, LUAJIT_MODE_OFF); +} + +LJLIB_CF(jit_flush) +{ +#if LJ_HASJIT + if (L->base < L->top && tvisnumber(L->base)) { + int traceno = lj_lib_checkint(L, 1); + luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE); + return 0; + } +#endif + return setjitmode(L, LUAJIT_MODE_FLUSH); +} + +#if LJ_HASJIT +/* Push a string for every flag bit that is set. */ +static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base, + const char *str) +{ + for (; *str; base <<= 1, str += 1+*str) + if (flags & base) + setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str)); +} +#endif + +LJLIB_CF(jit_status) +{ +#if LJ_HASJIT + jit_State *J = L2J(L); + L->top = L->base; + setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0); + flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING); + flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING); + return (int)(L->top - L->base); +#else + setboolV(L->top++, 0); + return 1; +#endif +} + +LJLIB_CF(jit_attach) +{ +#ifdef LUAJIT_DISABLE_VMEVENT + luaL_error(L, "vmevent API disabled"); +#else + GCfunc *fn = lj_lib_checkfunc(L, 1); + GCstr *s = lj_lib_optstr(L, 2); + luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE); + if (s) { /* Attach to given event. */ + const uint8_t *p = (const uint8_t *)strdata(s); + uint32_t h = s->len; + while (*p) h = h ^ (lj_rol(h, 6) + *p++); + lua_pushvalue(L, 1); + lua_rawseti(L, -2, VMEVENT_HASHIDX(h)); + G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */ + } else { /* Detach if no event given. 
*/ + setnilV(L->top++); + while (lua_next(L, -2)) { + L->top--; + if (tvisfunc(L->top) && funcV(L->top) == fn) { + setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1)); + } + } + } +#endif + return 0; +} + +LJLIB_PUSH(top-5) LJLIB_SET(os) +LJLIB_PUSH(top-4) LJLIB_SET(arch) +LJLIB_PUSH(top-3) LJLIB_SET(version_num) +LJLIB_PUSH(top-2) LJLIB_SET(version) + +#include "lj_libdef.h" + +/* -- jit.util.* functions ------------------------------------------------ */ + +#define LJLIB_MODULE_jit_util + +/* -- Reflection API for Lua functions ------------------------------------ */ + +/* Return prototype of first argument (Lua function or prototype object) */ +static GCproto *check_Lproto(lua_State *L, int nolua) +{ + TValue *o = L->base; + if (L->top > o) { + if (tvisproto(o)) { + return protoV(o); + } else if (tvisfunc(o)) { + if (isluafunc(funcV(o))) + return funcproto(funcV(o)); + else if (nolua) + return NULL; + } + } + lj_err_argt(L, 1, LUA_TFUNCTION); + return NULL; /* unreachable */ +} + +static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val) +{ + setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val); +} + +/* local info = jit.util.funcinfo(func [,pc]) */ +LJLIB_CF(jit_util_funcinfo) +{ + GCproto *pt = check_Lproto(L, 1); + if (pt) { + BCPos pc = (BCPos)lj_lib_optint(L, 2, 0); + GCtab *t; + lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */ + t = tabV(L->top-1); + setintfield(L, t, "linedefined", pt->firstline); + setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline); + setintfield(L, t, "stackslots", pt->framesize); + setintfield(L, t, "params", pt->numparams); + setintfield(L, t, "bytecodes", (int32_t)pt->sizebc); + setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc); + setintfield(L, t, "nconsts", (int32_t)pt->sizekn); + setintfield(L, t, "upvalues", (int32_t)pt->sizeuv); + if (pc < pt->sizebc) + setintfield(L, t, "currentline", lj_debug_line(pt, pc)); + lua_pushboolean(L, (pt->flags & PROTO_VARARG)); + lua_setfield(L, -2, "isvararg"); + lua_pushboolean(L, (pt->flags & PROTO_CHILD)); + lua_setfield(L, -2, "children"); + setstrV(L, L->top++, proto_chunkname(pt)); + lua_setfield(L, -2, "source"); + lj_debug_pushloc(L, pt, pc); + lua_setfield(L, -2, "loc"); + } else { + GCfunc *fn = funcV(L->base); + GCtab *t; + lua_createtable(L, 0, 4); /* Increment hash size if fields are added. 
*/ + t = tabV(L->top-1); + if (!iscfunc(fn)) + setintfield(L, t, "ffid", fn->c.ffid); + setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")), + (intptr_t)(void *)fn->c.f); + setintfield(L, t, "upvalues", fn->c.nupvalues); + } + return 1; +} + +/* local ins, m = jit.util.funcbc(func, pc) */ +LJLIB_CF(jit_util_funcbc) +{ + GCproto *pt = check_Lproto(L, 0); + BCPos pc = (BCPos)lj_lib_checkint(L, 2); + if (pc < pt->sizebc) { + BCIns ins = proto_bc(pt)[pc]; + BCOp op = bc_op(ins); + lua_assert(op < BC__MAX); + setintV(L->top, ins); + setintV(L->top+1, lj_bc_mode[op]); + L->top += 2; + return 2; + } + return 0; +} + +/* local k = jit.util.funck(func, idx) */ +LJLIB_CF(jit_util_funck) +{ + GCproto *pt = check_Lproto(L, 0); + ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2); + if (idx >= 0) { + if (idx < (ptrdiff_t)pt->sizekn) { + copyTV(L, L->top-1, proto_knumtv(pt, idx)); + return 1; + } + } else { + if (~idx < (ptrdiff_t)pt->sizekgc) { + GCobj *gc = proto_kgc(pt, idx); + setgcV(L, L->top-1, gc, ~gc->gch.gct); + return 1; + } + } + return 0; +} + +/* local name = jit.util.funcuvname(func, idx) */ +LJLIB_CF(jit_util_funcuvname) +{ + GCproto *pt = check_Lproto(L, 0); + uint32_t idx = (uint32_t)lj_lib_checkint(L, 2); + if (idx < pt->sizeuv) { + setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx))); + return 1; + } + return 0; +} + +/* -- Reflection API for traces ------------------------------------------- */ + +#if LJ_HASJIT + +/* Check trace argument. Must not throw for non-existent trace numbers. */ +static GCtrace *jit_checktrace(lua_State *L) +{ + TraceNo tr = (TraceNo)lj_lib_checkint(L, 1); + jit_State *J = L2J(L); + if (tr > 0 && tr < J->sizetrace) + return traceref(J, tr); + return NULL; +} + +/* Names of link types. ORDER LJ_TRLINK */ +static const char *const jit_trlinkname[] = { + "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion", + "interpreter", "return", "stitch" +}; + +/* local info = jit.util.traceinfo(tr) */ +LJLIB_CF(jit_util_traceinfo) +{ + GCtrace *T = jit_checktrace(L); + if (T) { + GCtab *t; + lua_createtable(L, 0, 8); /* Increment hash size if fields are added. */ + t = tabV(L->top-1); + setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1); + setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk); + setintfield(L, t, "link", T->link); + setintfield(L, t, "nexit", T->nsnap); + setstrV(L, L->top++, lj_str_newz(L, jit_trlinkname[T->linktype])); + lua_setfield(L, -2, "linktype"); + /* There are many more fields. Add them only when needed. */ + return 1; + } + return 0; +} + +/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */ +LJLIB_CF(jit_util_traceir) +{ + GCtrace *T = jit_checktrace(L); + IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS; + if (T && ref >= REF_BIAS && ref < T->nins) { + IRIns *ir = &T->ir[ref]; + int32_t m = lj_ir_mode[ir->o]; + setintV(L->top-2, m); + setintV(L->top-1, ir->ot); + setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0)); + setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? 
REF_BIAS : 0)); + setintV(L->top++, ir->prev); + return 5; + } + return 0; +} + +/* local k, t [, slot] = jit.util.tracek(tr, idx) */ +LJLIB_CF(jit_util_tracek) +{ + GCtrace *T = jit_checktrace(L); + IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS; + if (T && ref >= T->nk && ref < REF_BIAS) { + IRIns *ir = &T->ir[ref]; + int32_t slot = -1; + if (ir->o == IR_KSLOT) { + slot = ir->op2; + ir = &T->ir[ir->op1]; + } +#if LJ_HASFFI + if (ir->o == IR_KINT64 && !ctype_ctsG(G(L))) { + ptrdiff_t oldtop = savestack(L, L->top); + luaopen_ffi(L); /* Load FFI library on-demand. */ + L->top = restorestack(L, oldtop); + } +#endif + lj_ir_kvalue(L, L->top-2, ir); + setintV(L->top-1, (int32_t)irt_type(ir->t)); + if (slot == -1) + return 2; + setintV(L->top++, slot); + return 3; + } + return 0; +} + +/* local snap = jit.util.tracesnap(tr, sn) */ +LJLIB_CF(jit_util_tracesnap) +{ + GCtrace *T = jit_checktrace(L); + SnapNo sn = (SnapNo)lj_lib_checkint(L, 2); + if (T && sn < T->nsnap) { + SnapShot *snap = &T->snap[sn]; + SnapEntry *map = &T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + GCtab *t; + lua_createtable(L, nent+2, 0); + t = tabV(L->top-1); + setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS); + setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots); + for (n = 0; n < nent; n++) + setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]); + setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0)); + return 1; + } + return 0; +} + +/* local mcode, addr, loop = jit.util.tracemc(tr) */ +LJLIB_CF(jit_util_tracemc) +{ + GCtrace *T = jit_checktrace(L); + if (T && T->mcode != NULL) { + setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode)); + setintptrV(L->top++, (intptr_t)(void *)T->mcode); + setintV(L->top++, T->mcloop); + return 3; + } + return 0; +} + +/* local addr = jit.util.traceexitstub([tr,] exitno) */ +LJLIB_CF(jit_util_traceexitstub) +{ +#ifdef EXITSTUBS_PER_GROUP + ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1); + jit_State *J = L2J(L); + if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) { + setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno)); + return 1; + } +#else + if (L->top > L->base+1) { /* Don't throw for one-argument variant. */ + GCtrace *T = jit_checktrace(L); + ExitNo exitno = (ExitNo)lj_lib_checkint(L, 2); + ExitNo maxexit = T->root ? T->nsnap+1 : T->nsnap; + if (T && T->mcode != NULL && exitno < maxexit) { + setintptrV(L->top-1, (intptr_t)(void *)exitstub_trace_addr(T, exitno)); + return 1; + } + } +#endif + return 0; +} + +/* local addr = jit.util.ircalladdr(idx) */ +LJLIB_CF(jit_util_ircalladdr) +{ + uint32_t idx = (uint32_t)lj_lib_checkint(L, 1); + if (idx < IRCALL__MAX) { + setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func); + return 1; + } + return 0; +} + +#endif + +#include "lj_libdef.h" + +static int luaopen_jit_util(lua_State *L) +{ + LJ_LIB_REG(L, NULL, jit_util); + return 1; +} + +/* -- jit.opt module ------------------------------------------------------ */ + +#if LJ_HASJIT + +#define LJLIB_MODULE_jit_opt + +/* Parse optimization level. */ +static int jitopt_level(jit_State *J, const char *str) +{ + if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') { + uint32_t flags; + if (str[0] == '0') flags = JIT_F_OPT_0; + else if (str[0] == '1') flags = JIT_F_OPT_1; + else if (str[0] == '2') flags = JIT_F_OPT_2; + else flags = JIT_F_OPT_3; + J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags; + return 1; /* Ok. */ + } + return 0; /* No match. */ +} + +/* Parse optimization flag. 
*/ +static int jitopt_flag(jit_State *J, const char *str) +{ + const char *lst = JIT_F_OPTSTRING; + uint32_t opt; + int set = 1; + if (str[0] == '+') { + str++; + } else if (str[0] == '-') { + str++; + set = 0; + } else if (str[0] == 'n' && str[1] == 'o') { + str += str[2] == '-' ? 3 : 2; + set = 0; + } + for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) { + size_t len = *(const uint8_t *)lst; + if (len == 0) + break; + if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') { + if (set) J->flags |= opt; else J->flags &= ~opt; + return 1; /* Ok. */ + } + lst += 1+len; + } + return 0; /* No match. */ +} + +/* Parse optimization parameter. */ +static int jitopt_param(jit_State *J, const char *str) +{ + const char *lst = JIT_P_STRING; + int i; + for (i = 0; i < JIT_P__MAX; i++) { + size_t len = *(const uint8_t *)lst; + lua_assert(len != 0); + if (strncmp(str, lst+1, len) == 0 && str[len] == '=') { + int32_t n = 0; + const char *p = &str[len+1]; + while (*p >= '0' && *p <= '9') + n = n*10 + (*p++ - '0'); + if (*p) return 0; /* Malformed number. */ + J->param[i] = n; + if (i == JIT_P_hotloop) + lj_dispatch_init_hotcount(J2G(J)); + return 1; /* Ok. */ + } + lst += 1+len; + } + return 0; /* No match. */ +} + +/* jit.opt.start(flags...) */ +LJLIB_CF(jit_opt_start) +{ + jit_State *J = L2J(L); + int nargs = (int)(L->top - L->base); + if (nargs == 0) { + J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT; + } else { + int i; + for (i = 1; i <= nargs; i++) { + const char *str = strdata(lj_lib_checkstr(L, i)); + if (!jitopt_level(J, str) && + !jitopt_flag(J, str) && + !jitopt_param(J, str)) + lj_err_callerv(L, LJ_ERR_JITOPT, str); + } + } + return 0; +} + +#include "lj_libdef.h" + +#endif + +/* -- jit.profile module -------------------------------------------------- */ + +#if LJ_HASPROFILE + +#define LJLIB_MODULE_jit_profile + +/* Not loaded by default, use: local profile = require("jit.profile") */ + +static const char KEY_PROFILE_THREAD = 't'; +static const char KEY_PROFILE_FUNC = 'f'; + +static void jit_profile_callback(lua_State *L2, lua_State *L, int samples, + int vmstate) +{ + TValue key; + cTValue *tv; + setlightudV(&key, (void *)&KEY_PROFILE_FUNC); + tv = lj_tab_get(L, tabV(registry(L)), &key); + if (tvisfunc(tv)) { + char vmst = (char)vmstate; + int status; + setfuncV(L2, L2->top++, funcV(tv)); + setthreadV(L2, L2->top++, L); + setintV(L2->top++, samples); + setstrV(L2, L2->top++, lj_str_new(L2, &vmst, 1)); + status = lua_pcall(L2, 3, 0, 0); /* callback(thread, samples, vmstate) */ + if (status) { + if (G(L2)->panic) G(L2)->panic(L2); + exit(EXIT_FAILURE); + } + lj_trace_abort(G(L2)); + } +} + +/* profile.start(mode, cb) */ +LJLIB_CF(jit_profile_start) +{ + GCtab *registry = tabV(registry(L)); + GCstr *mode = lj_lib_optstr(L, 1); + GCfunc *func = lj_lib_checkfunc(L, 2); + lua_State *L2 = lua_newthread(L); /* Thread that runs profiler callback. */ + TValue key; + /* Anchor thread and function in registry. */ + setlightudV(&key, (void *)&KEY_PROFILE_THREAD); + setthreadV(L, lj_tab_set(L, registry, &key), L2); + setlightudV(&key, (void *)&KEY_PROFILE_FUNC); + setfuncV(L, lj_tab_set(L, registry, &key), func); + lj_gc_anybarriert(L, registry); + luaJIT_profile_start(L, mode ? 
strdata(mode) : "", + (luaJIT_profile_callback)jit_profile_callback, L2); + return 0; +} + +/* profile.stop() */ +LJLIB_CF(jit_profile_stop) +{ + GCtab *registry; + TValue key; + luaJIT_profile_stop(L); + registry = tabV(registry(L)); + setlightudV(&key, (void *)&KEY_PROFILE_THREAD); + setnilV(lj_tab_set(L, registry, &key)); + setlightudV(&key, (void *)&KEY_PROFILE_FUNC); + setnilV(lj_tab_set(L, registry, &key)); + lj_gc_anybarriert(L, registry); + return 0; +} + +/* dump = profile.dumpstack([thread,] fmt, depth) */ +LJLIB_CF(jit_profile_dumpstack) +{ + lua_State *L2 = L; + int arg = 0; + size_t len; + int depth; + GCstr *fmt; + const char *p; + if (L->top > L->base && tvisthread(L->base)) { + L2 = threadV(L->base); + arg = 1; + } + fmt = lj_lib_checkstr(L, arg+1); + depth = lj_lib_checkint(L, arg+2); + p = luaJIT_profile_dumpstack(L2, strdata(fmt), depth, &len); + lua_pushlstring(L, p, len); + return 1; +} + +#include "lj_libdef.h" + +static int luaopen_jit_profile(lua_State *L) +{ + LJ_LIB_REG(L, NULL, jit_profile); + return 1; +} + +#endif + +/* -- JIT compiler initialization ----------------------------------------- */ + +#if LJ_HASJIT +/* Default values for JIT parameters. */ +static const int32_t jit_param_default[JIT_P__MAX+1] = { +#define JIT_PARAMINIT(len, name, value) (value), +JIT_PARAMDEF(JIT_PARAMINIT) +#undef JIT_PARAMINIT + 0 +}; +#endif + +#if LJ_TARGET_ARM && LJ_TARGET_LINUX +#include +#endif + +/* Arch-dependent CPU detection. */ +static uint32_t jit_cpudetect(lua_State *L) +{ + uint32_t flags = 0; +#if LJ_TARGET_X86ORX64 + uint32_t vendor[4]; + uint32_t features[4]; + if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) { +#if !LJ_HASJIT +#define JIT_F_SSE2 2 +#endif + flags |= ((features[3] >> 26)&1) * JIT_F_SSE2; +#if LJ_HASJIT + flags |= ((features[2] >> 0)&1) * JIT_F_SSE3; + flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1; + if (vendor[2] == 0x6c65746e) { /* Intel. */ + if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */ + flags |= JIT_F_LEA_AGU; + } else if (vendor[2] == 0x444d4163) { /* AMD. */ + uint32_t fam = (features[0] & 0x0ff00f00); + if (fam >= 0x00000f00) /* K8, K10. */ + flags |= JIT_F_PREFER_IMUL; + } +#endif + } + /* Check for required instruction set support on x86 (unnecessary on x64). */ +#if LJ_TARGET_X86 + if (!(flags & JIT_F_SSE2)) + luaL_error(L, "CPU with SSE2 required"); +#endif +#elif LJ_TARGET_ARM +#if LJ_HASJIT + int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */ +#if LJ_TARGET_LINUX + if (ver < 70) { /* Runtime ARM CPU detection. */ + struct utsname ut; + uname(&ut); + if (strncmp(ut.machine, "armv", 4) == 0) { + if (ut.machine[4] >= '7') + ver = 70; + else if (ut.machine[4] == '6') + ver = 60; + } + } +#endif + flags |= ver >= 70 ? JIT_F_ARMV7 : + ver >= 61 ? JIT_F_ARMV6T2_ : + ver >= 60 ? JIT_F_ARMV6_ : 0; + flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2; +#endif +#elif LJ_TARGET_ARM64 + /* No optional CPU features to detect (for now). */ +#elif LJ_TARGET_PPC +#if LJ_HASJIT +#if LJ_ARCH_SQRT + flags |= JIT_F_SQRT; +#endif +#if LJ_ARCH_ROUND + flags |= JIT_F_ROUND; +#endif +#endif +#elif LJ_TARGET_MIPS +#if LJ_HASJIT + /* Compile-time MIPS CPU detection. */ +#if LJ_ARCH_VERSION >= 20 + flags |= JIT_F_MIPS32R2; +#endif + /* Runtime MIPS CPU detection. */ +#if defined(__GNUC__) + if (!(flags & JIT_F_MIPS32R2)) { + int x; + /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. 
*/ + __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2"); + if (x) flags |= JIT_F_MIPS32R2; /* Either 0x80000000 (R2) or 0 (R1). */ + } +#endif +#endif +#else +#error "Missing CPU detection for this architecture" +#endif + UNUSED(L); + return flags; +} + +/* Initialize JIT compiler. */ +static void jit_init(lua_State *L) +{ + uint32_t flags = jit_cpudetect(L); +#if LJ_HASJIT + jit_State *J = L2J(L); + J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT; + memcpy(J->param, jit_param_default, sizeof(J->param)); + lj_dispatch_update(G(L)); +#else + UNUSED(flags); +#endif +} + +LUALIB_API int luaopen_jit(lua_State *L) +{ + jit_init(L); + lua_pushliteral(L, LJ_OS_NAME); + lua_pushliteral(L, LJ_ARCH_NAME); + lua_pushinteger(L, LUAJIT_VERSION_NUM); + lua_pushliteral(L, LUAJIT_VERSION); + LJ_LIB_REG(L, LUA_JITLIBNAME, jit); +#if LJ_HASPROFILE + lj_lib_prereg(L, LUA_JITLIBNAME ".profile", luaopen_jit_profile, + tabref(L->env)); +#endif +#ifndef LUAJIT_DISABLE_JITUTIL + lj_lib_prereg(L, LUA_JITLIBNAME ".util", luaopen_jit_util, tabref(L->env)); +#endif +#if LJ_HASJIT + LJ_LIB_REG(L, "jit.opt", jit_opt); +#endif + L->top -= 2; + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_math.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_math.c new file mode 100644 index 00000000000..78838fcd6c0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_math.c @@ -0,0 +1,230 @@ +/* +** Math library. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include + +#define lib_math_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_lib.h" +#include "lj_vm.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_math + +LJLIB_ASM(math_abs) LJLIB_REC(.) +{ + lj_lib_checknumber(L, 1); + return FFH_RETRY; +} +LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR) +LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL) + +LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT) +{ + lj_lib_checknum(L, 1); + return FFH_RETRY; +} +LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10) +LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP) +LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN) +LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS) +LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN) +LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin) +LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos) +LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan) +LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh) +LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh) +LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh) +LJLIB_ASM_(math_frexp) +LJLIB_ASM_(math_modf) LJLIB_REC(.) + +LJLIB_ASM(math_log) LJLIB_REC(math_log) +{ + double x = lj_lib_checknum(L, 1); + if (L->base+1 < L->top) { + double y = lj_lib_checknum(L, 2); +#ifdef LUAJIT_NO_LOG2 + x = log(x); y = 1.0 / log(y); +#else + x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y); +#endif + setnumV(L->base-1-LJ_FR2, x*y); /* Do NOT join the expression to x / y. */ + return FFH_RES(1); + } + return FFH_RETRY; +} + +LJLIB_LUA(math_deg) /* function(x) return x * 57.29577951308232 end */ +LJLIB_LUA(math_rad) /* function(x) return x * 0.017453292519943295 end */ + +LJLIB_ASM(math_atan2) LJLIB_REC(.) +{ + lj_lib_checknum(L, 1); + lj_lib_checknum(L, 2); + return FFH_RETRY; +} +LJLIB_ASM_(math_pow) LJLIB_REC(.) +LJLIB_ASM_(math_fmod) + +LJLIB_ASM(math_ldexp) LJLIB_REC(.) 
+{ + lj_lib_checknum(L, 1); +#if LJ_DUALNUM && !LJ_TARGET_X86ORX64 + lj_lib_checkint(L, 2); +#else + lj_lib_checknum(L, 2); +#endif + return FFH_RETRY; +} + +LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN) +{ + int i = 0; + do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); + return FFH_RETRY; +} +LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX) + +LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi) +LJLIB_PUSH(1e310) LJLIB_SET(huge) + +/* ------------------------------------------------------------------------ */ + +/* This implements a Tausworthe PRNG with period 2^223. Based on: +** Tables of maximally-equidistributed combined LFSR generators, +** Pierre L'Ecuyer, 1991, table 3, 1st entry. +** Full-period ME-CF generator with L=64, J=4, k=223, N1=49. +*/ + +/* PRNG state. */ +struct RandomState { + uint64_t gen[4]; /* State of the 4 LFSR generators. */ + int valid; /* State is valid. */ +}; + +/* Union needed for bit-pattern conversion between uint64_t and double. */ +typedef union { uint64_t u64; double d; } U64double; + +/* Update generator i and compute a running xor of all states. */ +#define TW223_GEN(i, k, q, s) \ + z = rs->gen[i]; \ + z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \ + r ^= z; rs->gen[i] = z; + +/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */ +LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs) +{ + uint64_t z, r = 0; + TW223_GEN(0, 63, 31, 18) + TW223_GEN(1, 58, 19, 28) + TW223_GEN(2, 55, 24, 7) + TW223_GEN(3, 47, 21, 8) + return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000); +} + +/* PRNG initialization function. */ +static void random_init(RandomState *rs, double d) +{ + uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */ + int i; + for (i = 0; i < 4; i++) { + U64double u; + uint32_t m = 1u << (r&255); + r >>= 8; + u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354; + if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */ + rs->gen[i] = u.u64; + } + rs->valid = 1; + for (i = 0; i < 10; i++) + lj_math_random_step(rs); +} + +/* PRNG extract function. */ +LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */ +LJLIB_CF(math_random) LJLIB_REC(.) +{ + int n = (int)(L->top - L->base); + RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); + U64double u; + double d; + if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0); + u.u64 = lj_math_random_step(rs); + d = u.d - 1.0; + if (n > 0) { +#if LJ_DUALNUM + int isint = 1; + double r1; + lj_lib_checknumber(L, 1); + if (tvisint(L->base)) { + r1 = (lua_Number)intV(L->base); + } else { + isint = 0; + r1 = numV(L->base); + } +#else + double r1 = lj_lib_checknum(L, 1); +#endif + if (n == 1) { + d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */ + } else { +#if LJ_DUALNUM + double r2; + lj_lib_checknumber(L, 2); + if (tvisint(L->base+1)) { + r2 = (lua_Number)intV(L->base+1); + } else { + isint = 0; + r2 = numV(L->base+1); + } +#else + double r2 = lj_lib_checknum(L, 2); +#endif + d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */ + } +#if LJ_DUALNUM + if (isint) { + setintV(L->top-1, lj_num2int(d)); + return 1; + } +#endif + } /* else: d is a double in range [0, 1] */ + setnumV(L->top++, d); + return 1; +} + +/* PRNG seed function. */ +LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState.
*/ +LJLIB_CF(math_randomseed) +{ + RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); + random_init(rs, lj_lib_checknum(L, 1)); + return 0; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_math(lua_State *L) +{ + RandomState *rs; + rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState)); + rs->valid = 0; /* Use lazy initialization to save some time on startup. */ + LJ_LIB_REG(L, LUA_MATHLIBNAME, math); +#if defined(LUA_COMPAT_MOD) && !LJ_52 + lua_getfield(L, -1, "fmod"); + lua_setfield(L, -2, "mod"); +#endif + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_os.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_os.c new file mode 100644 index 00000000000..7b5873a518a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_os.c @@ -0,0 +1,292 @@ +/* +** OS library. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include <errno.h> +#include <time.h> + +#define lib_os_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_lib.h" + +#if LJ_TARGET_POSIX +#include <unistd.h> +#else +#include <io.h> +#endif + +#if !LJ_TARGET_PSVITA +#include <locale.h> +#endif + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_os + +LJLIB_CF(os_execute) +{ +#if LJ_TARGET_CONSOLE +#if LJ_52 + errno = ENOSYS; + return luaL_fileresult(L, 0, NULL); +#else + lua_pushinteger(L, -1); + return 1; +#endif +#else + const char *cmd = luaL_optstring(L, 1, NULL); + int stat = system(cmd); +#if LJ_52 + if (cmd) + return luaL_execresult(L, stat); + setboolV(L->top++, 1); +#else + setintV(L->top++, stat); +#endif + return 1; +#endif +} + +LJLIB_CF(os_remove) +{ + const char *filename = luaL_checkstring(L, 1); + return luaL_fileresult(L, remove(filename) == 0, filename); +} + +LJLIB_CF(os_rename) +{ + const char *fromname = luaL_checkstring(L, 1); + const char *toname = luaL_checkstring(L, 2); + return luaL_fileresult(L, rename(fromname, toname) == 0, fromname); +} + +LJLIB_CF(os_tmpname) +{ +#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA + lj_err_caller(L, LJ_ERR_OSUNIQF); + return 0; +#else +#if LJ_TARGET_POSIX + char buf[15+1]; + int fp; + strcpy(buf, "/tmp/lua_XXXXXX"); + fp = mkstemp(buf); + if (fp != -1) + close(fp); + else + lj_err_caller(L, LJ_ERR_OSUNIQF); +#else + char buf[L_tmpnam]; + if (tmpnam(buf) == NULL) + lj_err_caller(L, LJ_ERR_OSUNIQF); +#endif + lua_pushstring(L, buf); + return 1; +#endif +} + +LJLIB_CF(os_getenv) +{ +#if LJ_TARGET_CONSOLE + lua_pushnil(L); +#else + lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */ +#endif + return 1; +} + +LJLIB_CF(os_exit) +{ + int status; + if (L->base < L->top && tvisbool(L->base)) + status = boolV(L->base) ? EXIT_SUCCESS : EXIT_FAILURE; + else + status = lj_lib_optint(L, 1, EXIT_SUCCESS); + if (L->base+1 < L->top && tvistruecond(L->base+1)) + lua_close(L); + exit(status); + return 0; /* Unreachable.
*/ +} + +LJLIB_CF(os_clock) +{ + setnumV(L->top++, ((lua_Number)clock())*(1.0/(lua_Number)CLOCKS_PER_SEC)); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void setfield(lua_State *L, const char *key, int value) +{ + lua_pushinteger(L, value); + lua_setfield(L, -2, key); +} + +static void setboolfield(lua_State *L, const char *key, int value) +{ + if (value < 0) /* undefined? */ + return; /* does not set field */ + lua_pushboolean(L, value); + lua_setfield(L, -2, key); +} + +static int getboolfield(lua_State *L, const char *key) +{ + int res; + lua_getfield(L, -1, key); + res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1); + lua_pop(L, 1); + return res; +} + +static int getfield(lua_State *L, const char *key, int d) +{ + int res; + lua_getfield(L, -1, key); + if (lua_isnumber(L, -1)) { + res = (int)lua_tointeger(L, -1); + } else { + if (d < 0) + lj_err_callerv(L, LJ_ERR_OSDATEF, key); + res = d; + } + lua_pop(L, 1); + return res; +} + +LJLIB_CF(os_date) +{ + const char *s = luaL_optstring(L, 1, "%c"); + time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL)); + struct tm *stm; +#if LJ_TARGET_POSIX + struct tm rtm; +#endif + if (*s == '!') { /* UTC? */ + s++; /* Skip '!' */ +#if LJ_TARGET_POSIX + stm = gmtime_r(&t, &rtm); +#else + stm = gmtime(&t); +#endif + } else { +#if LJ_TARGET_POSIX + stm = localtime_r(&t, &rtm); +#else + stm = localtime(&t); +#endif + } + if (stm == NULL) { /* Invalid date? */ + setnilV(L->top++); + } else if (strcmp(s, "*t") == 0) { + lua_createtable(L, 0, 9); /* 9 = number of fields */ + setfield(L, "sec", stm->tm_sec); + setfield(L, "min", stm->tm_min); + setfield(L, "hour", stm->tm_hour); + setfield(L, "day", stm->tm_mday); + setfield(L, "month", stm->tm_mon+1); + setfield(L, "year", stm->tm_year+1900); + setfield(L, "wday", stm->tm_wday+1); + setfield(L, "yday", stm->tm_yday+1); + setboolfield(L, "isdst", stm->tm_isdst); + } else if (*s) { + SBuf *sb = &G(L)->tmpbuf; + MSize sz = 0; + const char *q; + for (q = s; *q; q++) + sz += (*q == '%') ? 30 : 1; /* Overflow doesn't matter. */ + setsbufL(sb, L); + for (;;) { + char *buf = lj_buf_need(sb, sz); + size_t len = strftime(buf, sbufsz(sb), s, stm); + if (len) { + setstrV(L, L->top++, lj_str_new(L, buf, len)); + lj_gc_check(L); + break; + } + sz += (sz|1); + } + } else { + setstrV(L, L->top++, &G(L)->strempty); + } + return 1; +} + +LJLIB_CF(os_time) +{ + time_t t; + if (lua_isnoneornil(L, 1)) { /* called without args? */ + t = time(NULL); /* get current time */ + } else { + struct tm ts; + luaL_checktype(L, 1, LUA_TTABLE); + lua_settop(L, 1); /* make sure table is at the top */ + ts.tm_sec = getfield(L, "sec", 0); + ts.tm_min = getfield(L, "min", 0); + ts.tm_hour = getfield(L, "hour", 12); + ts.tm_mday = getfield(L, "day", -1); + ts.tm_mon = getfield(L, "month", -1) - 1; + ts.tm_year = getfield(L, "year", -1) - 1900; + ts.tm_isdst = getboolfield(L, "isdst"); + t = mktime(&ts); + } + if (t == (time_t)(-1)) + lua_pushnil(L); + else + lua_pushnumber(L, (lua_Number)t); + return 1; +} + +LJLIB_CF(os_difftime) +{ + lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)), + (time_t)(luaL_optnumber(L, 2, (lua_Number)0)))); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +LJLIB_CF(os_setlocale) +{ +#if LJ_TARGET_PSVITA + lua_pushliteral(L, "C"); +#else + GCstr *s = lj_lib_optstr(L, 1); + const char *str = s ? 
strdata(s) : NULL; + int opt = lj_lib_checkopt(L, 2, 6, + "\5ctype\7numeric\4time\7collate\10monetary\1\377\3all"); + if (opt == 0) opt = LC_CTYPE; + else if (opt == 1) opt = LC_NUMERIC; + else if (opt == 2) opt = LC_TIME; + else if (opt == 3) opt = LC_COLLATE; + else if (opt == 4) opt = LC_MONETARY; + else if (opt == 6) opt = LC_ALL; + lua_pushstring(L, setlocale(opt, str)); +#endif + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_os(lua_State *L) +{ + LJ_LIB_REG(L, LUA_OSLIBNAME, os); + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_package.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_package.c new file mode 100644 index 00000000000..f8d69bfea82 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_package.c @@ -0,0 +1,648 @@ +/* +** Package library. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2012 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lib_package_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +/* Error codes for ll_loadfunc. */ +#define PACKAGE_ERR_LIB 1 +#define PACKAGE_ERR_FUNC 2 +#define PACKAGE_ERR_LOAD 3 + +/* Redefined in platform specific part. */ +#define PACKAGE_LIB_FAIL "open" +#define setprogdir(L) ((void)0) + +/* Symbol name prefixes. */ +#define SYMPREFIX_CF "luaopen_%s" +#define SYMPREFIX_BC "luaJIT_BC_%s" + +#if LJ_TARGET_DLOPEN + +#include + +static void ll_unloadlib(void *lib) +{ + dlclose(lib); +} + +static void *ll_load(lua_State *L, const char *path, int gl) +{ + void *lib = dlopen(path, RTLD_NOW | (gl ? 
RTLD_GLOBAL : RTLD_LOCAL)); + if (lib == NULL) lua_pushstring(L, dlerror()); + return lib; +} + +static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) +{ + lua_CFunction f = (lua_CFunction)dlsym(lib, sym); + if (f == NULL) lua_pushstring(L, dlerror()); + return f; +} + +static const char *ll_bcsym(void *lib, const char *sym) +{ +#if defined(RTLD_DEFAULT) + if (lib == NULL) lib = RTLD_DEFAULT; +#elif LJ_TARGET_OSX || LJ_TARGET_BSD + if (lib == NULL) lib = (void *)(intptr_t)-2; +#endif + return (const char *)dlsym(lib, sym); +} + +#elif LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include + +#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS +#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4 +#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2 +BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*); +#endif + +#undef setprogdir + +static void setprogdir(lua_State *L) +{ + char buff[MAX_PATH + 1]; + char *lb; + DWORD nsize = sizeof(buff); + DWORD n = GetModuleFileNameA(NULL, buff, nsize); + if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) { + luaL_error(L, "unable to get ModuleFileName"); + } else { + *lb = '\0'; + luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff); + lua_remove(L, -2); /* remove original string */ + } +} + +static void pusherror(lua_State *L) +{ + DWORD error = GetLastError(); +#if LJ_TARGET_XBOXONE + wchar_t wbuffer[128]; + char buffer[128*2]; + if (FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, + NULL, error, 0, wbuffer, sizeof(wbuffer)/sizeof(wchar_t), NULL) && + WideCharToMultiByte(CP_ACP, 0, wbuffer, 128, buffer, 128*2, NULL, NULL)) +#else + char buffer[128]; + if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, + NULL, error, 0, buffer, sizeof(buffer), NULL)) +#endif + lua_pushstring(L, buffer); + else + lua_pushfstring(L, "system error %d\n", error); +} + +static void ll_unloadlib(void *lib) +{ + FreeLibrary((HINSTANCE)lib); +} + +static void *ll_load(lua_State *L, const char *path, int gl) +{ + HINSTANCE lib = LoadLibraryExA(path, NULL, 0); + if (lib == NULL) pusherror(L); + UNUSED(gl); + return lib; +} + +static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) +{ + lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym); + if (f == NULL) pusherror(L); + return f; +} + +static const char *ll_bcsym(void *lib, const char *sym) +{ + if (lib) { + return (const char *)GetProcAddress((HINSTANCE)lib, sym); + } else { + HINSTANCE h = GetModuleHandleA(NULL); + const char *p = (const char *)GetProcAddress(h, sym); + if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + (const char *)ll_bcsym, &h)) + p = (const char *)GetProcAddress(h, sym); + return p; + } +} + +#else + +#undef PACKAGE_LIB_FAIL +#define PACKAGE_LIB_FAIL "absent" + +#define DLMSG "dynamic libraries not enabled; no support for target OS" + +static void ll_unloadlib(void *lib) +{ + UNUSED(lib); +} + +static void *ll_load(lua_State *L, const char *path, int gl) +{ + UNUSED(path); UNUSED(gl); + lua_pushliteral(L, DLMSG); + return NULL; +} + +static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) +{ + UNUSED(lib); UNUSED(sym); + lua_pushliteral(L, DLMSG); + return NULL; +} + +static const char *ll_bcsym(void *lib, const char *sym) +{ + UNUSED(lib); UNUSED(sym); + return NULL; +} + +#endif + +/* ------------------------------------------------------------------------ */ + +static void **ll_register(lua_State *L, 
const char *path) +{ + void **plib; + lua_pushfstring(L, "LOADLIB: %s", path); + lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */ + if (!lua_isnil(L, -1)) { /* is there an entry? */ + plib = (void **)lua_touserdata(L, -1); + } else { /* no entry yet; create one */ + lua_pop(L, 1); + plib = (void **)lua_newuserdata(L, sizeof(void *)); + *plib = NULL; + luaL_getmetatable(L, "_LOADLIB"); + lua_setmetatable(L, -2); + lua_pushfstring(L, "LOADLIB: %s", path); + lua_pushvalue(L, -2); + lua_settable(L, LUA_REGISTRYINDEX); + } + return plib; +} + +static const char *mksymname(lua_State *L, const char *modname, + const char *prefix) +{ + const char *funcname; + const char *mark = strchr(modname, *LUA_IGMARK); + if (mark) modname = mark + 1; + funcname = luaL_gsub(L, modname, ".", "_"); + funcname = lua_pushfstring(L, prefix, funcname); + lua_remove(L, -2); /* remove 'gsub' result */ + return funcname; +} + +static int ll_loadfunc(lua_State *L, const char *path, const char *name, int r) +{ + void **reg = ll_register(L, path); + if (*reg == NULL) *reg = ll_load(L, path, (*name == '*')); + if (*reg == NULL) { + return PACKAGE_ERR_LIB; /* Unable to load library. */ + } else if (*name == '*') { /* Only load library into global namespace. */ + lua_pushboolean(L, 1); + return 0; + } else { + const char *sym = r ? name : mksymname(L, name, SYMPREFIX_CF); + lua_CFunction f = ll_sym(L, *reg, sym); + if (f) { + lua_pushcfunction(L, f); + return 0; + } + if (!r) { + const char *bcdata = ll_bcsym(*reg, mksymname(L, name, SYMPREFIX_BC)); + lua_pop(L, 1); + if (bcdata) { + if (luaL_loadbuffer(L, bcdata, LJ_MAX_BUF, name) != 0) + return PACKAGE_ERR_LOAD; + return 0; + } + } + return PACKAGE_ERR_FUNC; /* Unable to find function. */ + } +} + +static int lj_cf_package_loadlib(lua_State *L) +{ + const char *path = luaL_checkstring(L, 1); + const char *init = luaL_checkstring(L, 2); + int st = ll_loadfunc(L, path, init, 1); + if (st == 0) { /* no errors? */ + return 1; /* return the loaded function */ + } else { /* error; error message is on stack top */ + lua_pushnil(L); + lua_insert(L, -2); + lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init"); + return 3; /* return nil, error message, and where */ + } +} + +static int lj_cf_package_unloadlib(lua_State *L) +{ + void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB"); + if (*lib) ll_unloadlib(*lib); + *lib = NULL; /* mark library as closed */ + return 0; +} + +/* ------------------------------------------------------------------------ */ + +static int readable(const char *filename) +{ + FILE *f = fopen(filename, "r"); /* try to open file */ + if (f == NULL) return 0; /* open failed */ + fclose(f); + return 1; +} + +static const char *pushnexttemplate(lua_State *L, const char *path) +{ + const char *l; + while (*path == *LUA_PATHSEP) path++; /* skip separators */ + if (*path == '\0') return NULL; /* no more templates */ + l = strchr(path, *LUA_PATHSEP); /* find next separator */ + if (l == NULL) l = path + strlen(path); + lua_pushlstring(L, path, (size_t)(l - path)); /* template */ + return l; +} + +static const char *searchpath (lua_State *L, const char *name, + const char *path, const char *sep, + const char *dirsep) +{ + luaL_Buffer msg; /* to build error message */ + luaL_buffinit(L, &msg); + if (*sep != '\0') /* non-empty separator? 
*/ + name = luaL_gsub(L, name, sep, dirsep); /* replace it by 'dirsep' */ + while ((path = pushnexttemplate(L, path)) != NULL) { + const char *filename = luaL_gsub(L, lua_tostring(L, -1), + LUA_PATH_MARK, name); + lua_remove(L, -2); /* remove path template */ + if (readable(filename)) /* does file exist and is readable? */ + return filename; /* return that file name */ + lua_pushfstring(L, "\n\tno file " LUA_QS, filename); + lua_remove(L, -2); /* remove file name */ + luaL_addvalue(&msg); /* concatenate error msg. entry */ + } + luaL_pushresult(&msg); /* create error message */ + return NULL; /* not found */ +} + +static int lj_cf_package_searchpath(lua_State *L) +{ + const char *f = searchpath(L, luaL_checkstring(L, 1), + luaL_checkstring(L, 2), + luaL_optstring(L, 3, "."), + luaL_optstring(L, 4, LUA_DIRSEP)); + if (f != NULL) { + return 1; + } else { /* error message is on top of the stack */ + lua_pushnil(L); + lua_insert(L, -2); + return 2; /* return nil + error message */ + } +} + +static const char *findfile(lua_State *L, const char *name, + const char *pname) +{ + const char *path; + lua_getfield(L, LUA_ENVIRONINDEX, pname); + path = lua_tostring(L, -1); + if (path == NULL) + luaL_error(L, LUA_QL("package.%s") " must be a string", pname); + return searchpath(L, name, path, ".", LUA_DIRSEP); +} + +static void loaderror(lua_State *L, const char *filename) +{ + luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s", + lua_tostring(L, 1), filename, lua_tostring(L, -1)); +} + +static int lj_cf_package_loader_lua(lua_State *L) +{ + const char *filename; + const char *name = luaL_checkstring(L, 1); + filename = findfile(L, name, "path"); + if (filename == NULL) return 1; /* library not found in this path */ + if (luaL_loadfile(L, filename) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + +static int lj_cf_package_loader_c(lua_State *L) +{ + const char *name = luaL_checkstring(L, 1); + const char *filename = findfile(L, name, "cpath"); + if (filename == NULL) return 1; /* library not found in this path */ + if (ll_loadfunc(L, filename, name, 0) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + +#define LUA_POF "luaopen_" +#define LUA_OFSEP "_" +#define POF LUA_POF + +static const char *mkfuncname (lua_State *L, const char *modname) { + const char *funcname; + const char *mark = strchr(modname, *LUA_IGMARK); + if (mark) modname = mark + 1; + funcname = luaL_gsub(L, modname, ".", LUA_OFSEP); + funcname = lua_pushfstring(L, POF"%s", funcname); + lua_remove(L, -2); /* remove 'gsub' result */ + return funcname; +} + + +int loader_C_luatex (lua_State *L, const char *name, const char *filename) { + const char *funcname; + funcname = mkfuncname(L, name); + if (ll_loadfunc(L, filename, funcname,0) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + +static int lj_cf_package_loader_croot(lua_State *L) +{ + const char *filename; + const char *name = luaL_checkstring(L, 1); + const char *p = strchr(name, '.'); + int st; + if (p == NULL) return 0; /* is root */ + lua_pushlstring(L, name, (size_t)(p - name)); + filename = findfile(L, lua_tostring(L, -1), "cpath"); + if (filename == NULL) return 1; /* root not found */ + if ((st = ll_loadfunc(L, filename, name, 0)) != 0) { + if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ + lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, + name, filename); + return 1; /* function not found */ + } + return 1; +} + +int 
loader_Call_luatex (lua_State *L, const char *name, const char *filename) { + const char *funcname; + int stat; + if (filename == NULL) return 1; /* root not found */ + funcname = mkfuncname(L, name); + if ((stat = ll_loadfunc(L, filename, funcname,0)) != 0) { + if (stat != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ + lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, + name, filename); + return 1; /* function not found */ + } + return 1; /* library loaded successfully */ +} + + +static int lj_cf_package_loader_preload(lua_State *L) +{ + const char *name = luaL_checkstring(L, 1); + lua_getfield(L, LUA_ENVIRONINDEX, "preload"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.preload") " must be a table"); + lua_getfield(L, -1, name); + if (lua_isnil(L, -1)) { /* Not found? */ + const char *bcname = mksymname(L, name, SYMPREFIX_BC); + const char *bcdata = ll_bcsym(NULL, bcname); + if (bcdata == NULL || luaL_loadbuffer(L, bcdata, LJ_MAX_BUF, name) != 0) + lua_pushfstring(L, "\n\tno field package.preload['%s']", name); + } + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static const int sentinel_ = 0; +#define sentinel ((void *)&sentinel_) + +static int lj_cf_package_require(lua_State *L) +{ + const char *name = luaL_checkstring(L, 1); + int i; + lua_settop(L, 1); /* _LOADED table will be at index 2 */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, 2, name); + if (lua_toboolean(L, -1)) { /* is it there? */ + if (lua_touserdata(L, -1) == sentinel) /* check loops */ + luaL_error(L, "loop or previous error loading module " LUA_QS, name); + return 1; /* package is already loaded */ + } + /* else must load it; iterate over available loaders */ + lua_getfield(L, LUA_ENVIRONINDEX, "loaders"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.loaders") " must be a table"); + lua_pushliteral(L, ""); /* error message accumulator */ + for (i = 1; ; i++) { + lua_rawgeti(L, -2, i); /* get a loader */ + if (lua_isnil(L, -1)) + luaL_error(L, "module " LUA_QS " not found:%s", + name, lua_tostring(L, -2)); + lua_pushstring(L, name); + lua_call(L, 1, 1); /* call it */ + if (lua_isfunction(L, -1)) /* did it find module? */ + break; /* module loaded successfully */ + else if (lua_isstring(L, -1)) /* loader returned error message? */ + lua_concat(L, 2); /* accumulate it */ + else + lua_pop(L, 1); + } + lua_pushlightuserdata(L, sentinel); + lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */ + lua_pushstring(L, name); /* pass name as argument to module */ + lua_call(L, 1, 1); /* run loaded module */ + if (!lua_isnil(L, -1)) /* non-nil return? */ + lua_setfield(L, 2, name); /* _LOADED[name] = returned value */ + lua_getfield(L, 2, name); + if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? 
*/ + lua_pushboolean(L, 1); /* use true as result */ + lua_pushvalue(L, -1); /* extra copy to be returned */ + lua_setfield(L, 2, name); /* _LOADED[name] = true */ + } + lj_lib_checkfpu(L); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void setfenv(lua_State *L) +{ + lua_Debug ar; + if (lua_getstack(L, 1, &ar) == 0 || + lua_getinfo(L, "f", &ar) == 0 || /* get calling function */ + lua_iscfunction(L, -1)) + luaL_error(L, LUA_QL("module") " not called from a Lua function"); + lua_pushvalue(L, -2); + lua_setfenv(L, -2); + lua_pop(L, 1); +} + +static void dooptions(lua_State *L, int n) +{ + int i; + for (i = 2; i <= n; i++) { + lua_pushvalue(L, i); /* get option (a function) */ + lua_pushvalue(L, -2); /* module */ + lua_call(L, 1, 0); + } +} + +static void modinit(lua_State *L, const char *modname) +{ + const char *dot; + lua_pushvalue(L, -1); + lua_setfield(L, -2, "_M"); /* module._M = module */ + lua_pushstring(L, modname); + lua_setfield(L, -2, "_NAME"); + dot = strrchr(modname, '.'); /* look for last dot in module name */ + if (dot == NULL) dot = modname; else dot++; + /* set _PACKAGE as package name (full module name minus last part) */ + lua_pushlstring(L, modname, (size_t)(dot - modname)); + lua_setfield(L, -2, "_PACKAGE"); +} + +static int lj_cf_package_module(lua_State *L) +{ + const char *modname = luaL_checkstring(L, 1); + int loaded = lua_gettop(L) + 1; /* index of _LOADED table */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, loaded, modname); /* get _LOADED[modname] */ + if (!lua_istable(L, -1)) { /* not found? */ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL) + lj_err_callerv(L, LJ_ERR_BADMODN, modname); + lua_pushvalue(L, -1); + lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */ + } + /* check whether table already has a _NAME field */ + lua_getfield(L, -1, "_NAME"); + if (!lua_isnil(L, -1)) { /* is table an initialized module? 
*/ + lua_pop(L, 1); + } else { /* no; initialize it */ + lua_pop(L, 1); + modinit(L, modname); + } + lua_pushvalue(L, -1); + setfenv(L); + dooptions(L, loaded - 1); + return 0; +} + +static int lj_cf_package_seeall(lua_State *L) +{ + luaL_checktype(L, 1, LUA_TTABLE); + if (!lua_getmetatable(L, 1)) { + lua_createtable(L, 0, 1); /* create new metatable */ + lua_pushvalue(L, -1); + lua_setmetatable(L, 1); + } + lua_pushvalue(L, LUA_GLOBALSINDEX); + lua_setfield(L, -2, "__index"); /* mt.__index = _G */ + return 0; +} + +/* ------------------------------------------------------------------------ */ + +#define AUXMARK "\1" + +static void setpath(lua_State *L, const char *fieldname, const char *envname, + const char *def, int noenv) +{ +#if LJ_TARGET_CONSOLE + const char *path = NULL; + UNUSED(envname); +#else + const char *path = getenv(envname); +#endif + if (path == NULL || noenv) { + lua_pushstring(L, def); + } else { + path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP, + LUA_PATHSEP AUXMARK LUA_PATHSEP); + luaL_gsub(L, path, AUXMARK, def); + lua_remove(L, -2); + } + setprogdir(L); + lua_setfield(L, -2, fieldname); +} + +static const luaL_Reg package_lib[] = { + { "loadlib", lj_cf_package_loadlib }, + { "searchpath", lj_cf_package_searchpath }, + { "seeall", lj_cf_package_seeall }, + { NULL, NULL } +}; + +static const luaL_Reg package_global[] = { + { "module", lj_cf_package_module }, + { "require", lj_cf_package_require }, + { NULL, NULL } +}; + +static const lua_CFunction package_loaders[] = +{ + lj_cf_package_loader_preload, + lj_cf_package_loader_lua, + lj_cf_package_loader_c, + lj_cf_package_loader_croot, + NULL +}; + +LUALIB_API int luaopen_package(lua_State *L) +{ + int i; + int noenv; + luaL_newmetatable(L, "_LOADLIB"); + lj_lib_pushcf(L, lj_cf_package_unloadlib, 1); + lua_setfield(L, -2, "__gc"); + luaL_register(L, LUA_LOADLIBNAME, package_lib); + lua_pushvalue(L, -1); + lua_replace(L, LUA_ENVIRONINDEX); + lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0); + for (i = 0; package_loaders[i] != NULL; i++) { + lj_lib_pushcf(L, package_loaders[i], 1); + lua_rawseti(L, -2, i+1); + } + lua_setfield(L, -2, "loaders"); + lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV"); + noenv = lua_toboolean(L, -1); + lua_pop(L, 1); + setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT, noenv); + setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT, noenv); + lua_pushliteral(L, LUA_PATH_CONFIG); + lua_setfield(L, -2, "config"); + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); + lua_setfield(L, -2, "loaded"); + luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4); + lua_setfield(L, -2, "preload"); + lua_pushvalue(L, LUA_GLOBALSINDEX); + luaL_register(L, NULL, package_global); + lua_pop(L, 1); + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lib_string.c b/Build/source/libs/luajit/LuaJIT-src/src/lib_string.c new file mode 100644 index 00000000000..a6d9986ac63 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lib_string.c @@ -0,0 +1,752 @@ +/* +** String library. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
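
For illustration (not from the patch): the loaders array registered by luaopen_package() is consulted by require() in order -- preload, Lua path, C path, all-in-one C root. A minimal host-program sketch of hooking the first stage through the public Lua 5.1 C API that LuaJIT exposes; the module name "mymod" and both helper functions are made up for the example.

  #include <stdio.h>
  #include "lua.h"
  #include "lauxlib.h"
  #include "lualib.h"

  static int mymod_hello(lua_State *L)
  {
    lua_pushliteral(L, "hello from preload");
    return 1;
  }

  static int open_mymod(lua_State *L)  /* loader: its return value becomes the module */
  {
    lua_newtable(L);
    lua_pushcfunction(L, mymod_hello);
    lua_setfield(L, -2, "hello");
    return 1;  /* returned table ends up in _LOADED["mymod"] */
  }

  int main(void)
  {
    lua_State *L = luaL_newstate();
    luaL_openlibs(L);
    lua_getglobal(L, "package");       /* package.preload["mymod"] = open_mymod */
    lua_getfield(L, -1, "preload");
    lua_pushcfunction(L, open_mymod);
    lua_setfield(L, -2, "mymod");
    lua_pop(L, 2);
    if (luaL_dostring(L, "print(require('mymod').hello())"))
      fprintf(stderr, "%s\n", lua_tostring(L, -1));
    lua_close(L);
    return 0;
  }

Because the preload searcher runs first, require() never touches package.path or package.cpath for a module registered this way.
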
See Copyright Notice in lua.h +*/ + +#define lib_string_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_ff.h" +#include "lj_bcdump.h" +#include "lj_char.h" +#include "lj_strfmt.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_string + +LJLIB_LUA(string_len) /* + function(s) + CHECK_str(s) + return #s + end +*/ + +LJLIB_ASM(string_byte) LJLIB_REC(string_range 0) +{ + GCstr *s = lj_lib_checkstr(L, 1); + int32_t len = (int32_t)s->len; + int32_t start = lj_lib_optint(L, 2, 1); + int32_t stop = lj_lib_optint(L, 3, start); + int32_t n, i; + const unsigned char *p; + if (stop < 0) stop += len+1; + if (start < 0) start += len+1; + if (start <= 0) start = 1; + if (stop > len) stop = len; + if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */ + start--; + n = stop - start; + if ((uint32_t)n > LUAI_MAXCSTACK) + lj_err_caller(L, LJ_ERR_STRSLC); + lj_state_checkstack(L, (MSize)n); + p = (const unsigned char *)strdata(s) + start; + for (i = 0; i < n; i++) + setintV(L->base + i-1-LJ_FR2, p[i]); + return FFH_RES(n); +} + +LJLIB_ASM(string_char) LJLIB_REC(.) +{ + int i, nargs = (int)(L->top - L->base); + char *buf = lj_buf_tmp(L, (MSize)nargs); + for (i = 1; i <= nargs; i++) { + int32_t k = lj_lib_checkint(L, i); + if (!checku8(k)) + lj_err_arg(L, i, LJ_ERR_BADVAL); + buf[i-1] = (char)k; + } + setstrV(L, L->base-1-LJ_FR2, lj_str_new(L, buf, (size_t)nargs)); + return FFH_RES(1); +} + +LJLIB_ASM(string_sub) LJLIB_REC(string_range 1) +{ + lj_lib_checkstr(L, 1); + lj_lib_checkint(L, 2); + setintV(L->base+2, lj_lib_optint(L, 3, -1)); + return FFH_RETRY; +} + +LJLIB_CF(string_rep) LJLIB_REC(.) +{ + GCstr *s = lj_lib_checkstr(L, 1); + int32_t rep = lj_lib_checkint(L, 2); + GCstr *sep = lj_lib_optstr(L, 3); + SBuf *sb = lj_buf_tmp_(L); + if (sep && rep > 1) { + GCstr *s2 = lj_buf_cat2str(L, sep, s); + lj_buf_reset(sb); + lj_buf_putstr(sb, s); + s = s2; + rep--; + } + sb = lj_buf_putstr_rep(sb, s, rep); + setstrV(L, L->top-1, lj_buf_str(L, sb)); + lj_gc_check(L); + return 1; +} + +LJLIB_ASM(string_reverse) LJLIB_REC(string_op IRCALL_lj_buf_putstr_reverse) +{ + lj_lib_checkstr(L, 1); + return FFH_RETRY; +} +LJLIB_ASM_(string_lower) LJLIB_REC(string_op IRCALL_lj_buf_putstr_lower) +LJLIB_ASM_(string_upper) LJLIB_REC(string_op IRCALL_lj_buf_putstr_upper) + +/* ------------------------------------------------------------------------ */ + +static int writer_buf(lua_State *L, const void *p, size_t size, void *sb) +{ + lj_buf_putmem((SBuf *)sb, p, (MSize)size); + UNUSED(L); + return 0; +} + +LJLIB_CF(string_dump) +{ + GCfunc *fn = lj_lib_checkfunc(L, 1); + int strip = L->base+1 < L->top && tvistruecond(L->base+1); + SBuf *sb = lj_buf_tmp_(L); /* Assumes lj_bcwrite() doesn't use tmpbuf. 
*/ + L->top = L->base+1; + if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, sb, strip)) + lj_err_caller(L, LJ_ERR_STRDUMP); + setstrV(L, L->top-1, lj_buf_str(L, sb)); + lj_gc_check(L); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +/* macro to `unsign' a character */ +#define uchar(c) ((unsigned char)(c)) + +#define CAP_UNFINISHED (-1) +#define CAP_POSITION (-2) + +typedef struct MatchState { + const char *src_init; /* init of source string */ + const char *src_end; /* end (`\0') of source string */ + lua_State *L; + int level; /* total number of captures (finished or unfinished) */ + int depth; + struct { + const char *init; + ptrdiff_t len; + } capture[LUA_MAXCAPTURES]; +} MatchState; + +#define L_ESC '%' + +static int check_capture(MatchState *ms, int l) +{ + l -= '1'; + if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED) + lj_err_caller(ms->L, LJ_ERR_STRCAPI); + return l; +} + +static int capture_to_close(MatchState *ms) +{ + int level = ms->level; + for (level--; level>=0; level--) + if (ms->capture[level].len == CAP_UNFINISHED) return level; + lj_err_caller(ms->L, LJ_ERR_STRPATC); + return 0; /* unreachable */ +} + +static const char *classend(MatchState *ms, const char *p) +{ + switch (*p++) { + case L_ESC: + if (*p == '\0') + lj_err_caller(ms->L, LJ_ERR_STRPATE); + return p+1; + case '[': + if (*p == '^') p++; + do { /* look for a `]' */ + if (*p == '\0') + lj_err_caller(ms->L, LJ_ERR_STRPATM); + if (*(p++) == L_ESC && *p != '\0') + p++; /* skip escapes (e.g. `%]') */ + } while (*p != ']'); + return p+1; + default: + return p; + } +} + +static const unsigned char match_class_map[32] = { + 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0, + LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0, + LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0 +}; + +static int match_class(int c, int cl) +{ + if ((cl & 0xc0) == 0x40) { + int t = match_class_map[(cl&0x1f)]; + if (t) { + t = lj_char_isa(c, t); + return (cl & 0x20) ? 
t : !t; + } + if (cl == 'z') return c == 0; + if (cl == 'Z') return c != 0; + } + return (cl == c); +} + +static int matchbracketclass(int c, const char *p, const char *ec) +{ + int sig = 1; + if (*(p+1) == '^') { + sig = 0; + p++; /* skip the `^' */ + } + while (++p < ec) { + if (*p == L_ESC) { + p++; + if (match_class(c, uchar(*p))) + return sig; + } + else if ((*(p+1) == '-') && (p+2 < ec)) { + p+=2; + if (uchar(*(p-2)) <= c && c <= uchar(*p)) + return sig; + } + else if (uchar(*p) == c) return sig; + } + return !sig; +} + +static int singlematch(int c, const char *p, const char *ep) +{ + switch (*p) { + case '.': return 1; /* matches any char */ + case L_ESC: return match_class(c, uchar(*(p+1))); + case '[': return matchbracketclass(c, p, ep-1); + default: return (uchar(*p) == c); + } +} + +static const char *match(MatchState *ms, const char *s, const char *p); + +static const char *matchbalance(MatchState *ms, const char *s, const char *p) +{ + if (*p == 0 || *(p+1) == 0) + lj_err_caller(ms->L, LJ_ERR_STRPATU); + if (*s != *p) { + return NULL; + } else { + int b = *p; + int e = *(p+1); + int cont = 1; + while (++s < ms->src_end) { + if (*s == e) { + if (--cont == 0) return s+1; + } else if (*s == b) { + cont++; + } + } + } + return NULL; /* string ends out of balance */ +} + +static const char *max_expand(MatchState *ms, const char *s, + const char *p, const char *ep) +{ + ptrdiff_t i = 0; /* counts maximum expand for item */ + while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep)) + i++; + /* keeps trying to match with the maximum repetitions */ + while (i>=0) { + const char *res = match(ms, (s+i), ep+1); + if (res) return res; + i--; /* else didn't match; reduce 1 repetition to try again */ + } + return NULL; +} + +static const char *min_expand(MatchState *ms, const char *s, + const char *p, const char *ep) +{ + for (;;) { + const char *res = match(ms, s, ep+1); + if (res != NULL) + return res; + else if (s<ms->src_end && singlematch(uchar(*s), p, ep)) + s++; /* try with one more repetition */ + else + return NULL; + } +} + +static const char *start_capture(MatchState *ms, const char *s, + const char *p, int what) +{ + const char *res; + int level = ms->level; + if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN); + ms->capture[level].init = s; + ms->capture[level].len = what; + ms->level = level+1; + if ((res=match(ms, s, p)) == NULL) /* match failed? */ + ms->level--; /* undo capture */ + return res; +} + +static const char *end_capture(MatchState *ms, const char *s, + const char *p) +{ + int l = capture_to_close(ms); + const char *res; + ms->capture[l].len = s - ms->capture[l].init; /* close capture */ + if ((res = match(ms, s, p)) == NULL) /* match failed? */ + ms->capture[l].len = CAP_UNFINISHED; /* undo capture */ + return res; +} + +static const char *match_capture(MatchState *ms, const char *s, int l) +{ + size_t len; + l = check_capture(ms, l); + len = (size_t)ms->capture[l].len; + if ((size_t)(ms->src_end-s) >= len && + memcmp(ms->capture[l].init, s, len) == 0) + return s+len; + else + return NULL; +} + +static const char *match(MatchState *ms, const char *s, const char *p) +{ + if (++ms->depth > LJ_MAX_XLEVEL) + lj_err_caller(ms->L, LJ_ERR_STRPATX); + init: /* using goto's to optimize tail recursion */ + switch (*p) { + case '(': /* start capture */ + if (*(p+1) == ')') /* position capture? 
*/ + s = start_capture(ms, s, p+2, CAP_POSITION); + else + s = start_capture(ms, s, p+1, CAP_UNFINISHED); + break; + case ')': /* end capture */ + s = end_capture(ms, s, p+1); + break; + case L_ESC: + switch (*(p+1)) { + case 'b': /* balanced string? */ + s = matchbalance(ms, s, p+2); + if (s == NULL) break; + p+=4; + goto init; /* else s = match(ms, s, p+4); */ + case 'f': { /* frontier? */ + const char *ep; char previous; + p += 2; + if (*p != '[') + lj_err_caller(ms->L, LJ_ERR_STRPATB); + ep = classend(ms, p); /* points to what is next */ + previous = (s == ms->src_init) ? '\0' : *(s-1); + if (matchbracketclass(uchar(previous), p, ep-1) || + !matchbracketclass(uchar(*s), p, ep-1)) { s = NULL; break; } + p=ep; + goto init; /* else s = match(ms, s, ep); */ + } + default: + if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */ + s = match_capture(ms, s, uchar(*(p+1))); + if (s == NULL) break; + p+=2; + goto init; /* else s = match(ms, s, p+2) */ + } + goto dflt; /* case default */ + } + break; + case '\0': /* end of pattern */ + break; /* match succeeded */ + case '$': + /* is the `$' the last char in pattern? */ + if (*(p+1) != '\0') goto dflt; + if (s != ms->src_end) s = NULL; /* check end of string */ + break; + default: dflt: { /* it is a pattern item */ + const char *ep = classend(ms, p); /* points to what is next */ + int m = s<ms->src_end && singlematch(uchar(*s), p, ep); + switch (*ep) { + case '?': { /* optional */ + const char *res; + if (m && ((res=match(ms, s+1, ep+1)) != NULL)) { + s = res; + break; + } + p=ep+1; + goto init; /* else s = match(ms, s, ep+1); */ + } + case '*': /* 0 or more repetitions */ + s = max_expand(ms, s, p, ep); + break; + case '+': /* 1 or more repetitions */ + s = (m ? max_expand(ms, s+1, p, ep) : NULL); + break; + case '-': /* 0 or more repetitions (minimum) */ + s = min_expand(ms, s, p, ep); + break; + default: + if (m) { s++; p=ep; goto init; } /* else s = match(ms, s+1, ep); */ + s = NULL; + break; + } + break; + } + } + ms->depth--; + return s; +} + +static void push_onecapture(MatchState *ms, int i, const char *s, const char *e) +{ + if (i >= ms->level) { + if (i == 0) /* ms->level == 0, too */ + lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */ + else + lj_err_caller(ms->L, LJ_ERR_STRCAPI); + } else { + ptrdiff_t l = ms->capture[i].len; + if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU); + if (l == CAP_POSITION) + lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1); + else + lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l); + } +} + +static int push_captures(MatchState *ms, const char *s, const char *e) +{ + int i; + int nlevels = (ms->level == 0 && s) ? 1 : ms->level; + luaL_checkstack(ms->L, nlevels, "too many captures"); + for (i = 0; i < nlevels; i++) + push_onecapture(ms, i, s, e); + return nlevels; /* number of strings pushed */ +} + +static int str_find_aux(lua_State *L, int find) +{ + GCstr *s = lj_lib_checkstr(L, 1); + GCstr *p = lj_lib_checkstr(L, 2); + int32_t start = lj_lib_optint(L, 3, 1); + MSize st; + if (start < 0) start += (int32_t)s->len; else start--; + if (start < 0) start = 0; + st = (MSize)start; + if (st > s->len) { +#if LJ_52 + setnilV(L->top-1); + return 1; +#else + st = s->len; +#endif + } + if (find && ((L->base+3 < L->top && tvistruecond(L->base+3)) || + !lj_str_haspattern(p))) { /* Search for fixed string. 
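
For illustration (not from the patch): match() above implements the full Lua pattern language, including position captures '()', balanced matches '%b' and frontiers '%f'. A small sketch, assuming a host linked against this library, that exercises those cases through string.match/string.gmatch; the embedded chunk is illustrative only.

  #include <stdio.h>
  #include "lua.h"
  #include "lauxlib.h"
  #include "lualib.h"

  int main(void)
  {
    lua_State *L = luaL_newstate();
    luaL_openlibs(L);
    const char *chunk =
      "print(string.match('key = value', '(%a+)%s*=%s*(%a+)'))  -- two captures\n"
      "print(string.match('abc', '()b()'))                      -- position captures: 2 3\n"
      "print(string.match('f(x, g(y))', '%b()'))                -- balanced: (x, g(y))\n"
      "for w in string.gmatch('one two three', '%f[%w]%w+') do print(w) end\n";
    if (luaL_dostring(L, chunk))
      fprintf(stderr, "%s\n", lua_tostring(L, -1));
    lua_close(L);
    return 0;
  }
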
*/ + const char *q = lj_str_find(strdata(s)+st, strdata(p), s->len-st, p->len); + if (q) { + setintV(L->top-2, (int32_t)(q-strdata(s)) + 1); + setintV(L->top-1, (int32_t)(q-strdata(s)) + (int32_t)p->len); + return 2; + } + } else { /* Search for pattern. */ + MatchState ms; + const char *pstr = strdata(p); + const char *sstr = strdata(s) + st; + int anchor = 0; + if (*pstr == '^') { pstr++; anchor = 1; } + ms.L = L; + ms.src_init = strdata(s); + ms.src_end = strdata(s) + s->len; + do { /* Loop through string and try to match the pattern. */ + const char *q; + ms.level = ms.depth = 0; + q = match(&ms, sstr, pstr); + if (q) { + if (find) { + setintV(L->top++, (int32_t)(sstr-(strdata(s)-1))); + setintV(L->top++, (int32_t)(q-strdata(s))); + return push_captures(&ms, NULL, NULL) + 2; + } else { + return push_captures(&ms, sstr, q); + } + } + } while (sstr++ < ms.src_end && !anchor); + } + setnilV(L->top-1); /* Not found. */ + return 1; +} + +LJLIB_CF(string_find) LJLIB_REC(.) +{ + return str_find_aux(L, 1); +} + +LJLIB_CF(string_match) +{ + return str_find_aux(L, 0); +} + +LJLIB_NOREG LJLIB_CF(string_gmatch_aux) +{ + const char *p = strVdata(lj_lib_upvalue(L, 2)); + GCstr *str = strV(lj_lib_upvalue(L, 1)); + const char *s = strdata(str); + TValue *tvpos = lj_lib_upvalue(L, 3); + const char *src = s + tvpos->u32.lo; + MatchState ms; + ms.L = L; + ms.src_init = s; + ms.src_end = s + str->len; + for (; src <= ms.src_end; src++) { + const char *e; + ms.level = ms.depth = 0; + if ((e = match(&ms, src, p)) != NULL) { + int32_t pos = (int32_t)(e - s); + if (e == src) pos++; /* Ensure progress for empty match. */ + tvpos->u32.lo = (uint32_t)pos; + return push_captures(&ms, src, e); + } + } + return 0; /* not found */ +} + +LJLIB_CF(string_gmatch) +{ + lj_lib_checkstr(L, 1); + lj_lib_checkstr(L, 2); + L->top = L->base+3; + (L->top-1)->u64 = 0; + lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3); + return 1; +} + +static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e) +{ + size_t l, i; + const char *news = lua_tolstring(ms->L, 3, &l); + for (i = 0; i < l; i++) { + if (news[i] != L_ESC) { + luaL_addchar(b, news[i]); + } else { + i++; /* skip ESC */ + if (!lj_char_isdigit(uchar(news[i]))) { + luaL_addchar(b, news[i]); + } else if (news[i] == '0') { + luaL_addlstring(b, s, (size_t)(e - s)); + } else { + push_onecapture(ms, news[i] - '1', s, e); + luaL_addvalue(b); /* add capture to accumulated result */ + } + } + } +} + +static void add_value(MatchState *ms, luaL_Buffer *b, + const char *s, const char *e) +{ + lua_State *L = ms->L; + switch (lua_type(L, 3)) { + case LUA_TNUMBER: + case LUA_TSTRING: { + add_s(ms, b, s, e); + return; + } + case LUA_TFUNCTION: { + int n; + lua_pushvalue(L, 3); + n = push_captures(ms, s, e); + lua_call(L, n, 1); + break; + } + case LUA_TTABLE: { + push_onecapture(ms, 0, s, e); + lua_gettable(L, 3); + break; + } + } + if (!lua_toboolean(L, -1)) { /* nil or false? */ + lua_pop(L, 1); + lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */ + } else if (!lua_isstring(L, -1)) { + lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1)); + } + luaL_addvalue(b); /* add result to accumulator */ +} + +LJLIB_CF(string_gsub) +{ + size_t srcl; + const char *src = luaL_checklstring(L, 1, &srcl); + const char *p = luaL_checkstring(L, 2); + int tr = lua_type(L, 3); + int max_s = luaL_optint(L, 4, (int)(srcl+1)); + int anchor = (*p == '^') ? 
(p++, 1) : 0; + int n = 0; + MatchState ms; + luaL_Buffer b; + if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING || + tr == LUA_TFUNCTION || tr == LUA_TTABLE)) + lj_err_arg(L, 3, LJ_ERR_NOSFT); + luaL_buffinit(L, &b); + ms.L = L; + ms.src_init = src; + ms.src_end = src+srcl; + while (n < max_s) { + const char *e; + ms.level = ms.depth = 0; + e = match(&ms, src, p); + if (e) { + n++; + add_value(&ms, &b, src, e); + } + if (e && e>src) /* non empty match? */ + src = e; /* skip it */ + else if (src < ms.src_end) + luaL_addchar(&b, *src++); + else + break; + if (anchor) + break; + } + luaL_addlstring(&b, src, (size_t)(ms.src_end-src)); + luaL_pushresult(&b); + lua_pushinteger(L, n); /* number of substitutions */ + return 2; +} + +/* ------------------------------------------------------------------------ */ + +/* Emulate tostring() inline. */ +static GCstr *string_fmt_tostring(lua_State *L, int arg, int retry) +{ + TValue *o = L->base+arg-1; + cTValue *mo; + lua_assert(o < L->top); /* Caller already checks for existence. */ + if (LJ_LIKELY(tvisstr(o))) + return strV(o); + if (retry != 2 && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { + copyTV(L, L->top++, mo); + copyTV(L, L->top++, o); + lua_call(L, 1, 1); + copyTV(L, L->base+arg-1, --L->top); + return NULL; /* Buffer may be overwritten, retry. */ + } + return lj_strfmt_obj(L, o); +} + +LJLIB_CF(string_format) LJLIB_REC(.) +{ + int arg, top = (int)(L->top - L->base); + GCstr *fmt; + SBuf *sb; + FormatState fs; + SFormat sf; + int retry = 0; +again: + arg = 1; + sb = lj_buf_tmp_(L); + fmt = lj_lib_checkstr(L, arg); + lj_strfmt_init(&fs, strdata(fmt), fmt->len); + while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { + if (sf == STRFMT_LIT) { + lj_buf_putmem(sb, fs.str, fs.len); + } else if (sf == STRFMT_ERR) { + lj_err_callerv(L, LJ_ERR_STRFMT, strdata(lj_str_new(L, fs.str, fs.len))); + } else { + if (++arg > top) + luaL_argerror(L, arg, lj_obj_typename[0]); + switch (STRFMT_TYPE(sf)) { + case STRFMT_INT: + if (tvisint(L->base+arg-1)) { + int32_t k = intV(L->base+arg-1); + if (sf == STRFMT_INT) + lj_strfmt_putint(sb, k); /* Shortcut for plain %d. */ + else + lj_strfmt_putfxint(sb, sf, k); + } else { + lj_strfmt_putfnum_int(sb, sf, lj_lib_checknum(L, arg)); + } + break; + case STRFMT_UINT: + if (tvisint(L->base+arg-1)) + lj_strfmt_putfxint(sb, sf, intV(L->base+arg-1)); + else + lj_strfmt_putfnum_uint(sb, sf, lj_lib_checknum(L, arg)); + break; + case STRFMT_NUM: + lj_strfmt_putfnum(sb, sf, lj_lib_checknum(L, arg)); + break; + case STRFMT_STR: { + GCstr *str = string_fmt_tostring(L, arg, retry); + if (str == NULL) + retry = 1; + else if ((sf & STRFMT_T_QUOTED)) + lj_strfmt_putquoted(sb, str); /* No formatting. */ + else + lj_strfmt_putfstr(sb, sf, str); + break; + } + case STRFMT_CHAR: + lj_strfmt_putfchar(sb, sf, lj_lib_checkint(L, arg)); + break; + case STRFMT_PTR: /* No formatting. */ + lj_strfmt_putptr(sb, lj_obj_ptr(L->base+arg-1)); + break; + default: + lua_assert(0); + break; + } + } + } + if (retry++ == 1) goto again; + setstrV(L, L->top-1, lj_buf_str(L, sb)); + lj_gc_check(L); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_string(lua_State *L) +{ + GCtab *mt; + global_State *g; + LJ_LIB_REG(L, LUA_STRLIBNAME, string); +#if defined(LUA_COMPAT_GFIND) && !LJ_52 + lua_getfield(L, -1, "gmatch"); + lua_setfield(L, -2, "gfind"); +#endif + mt = lj_tab_new(L, 0, 1); + /* NOBARRIER: basemt is a GC root. 
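
For illustration (not from the patch): lj_strfmt_parse() classifies every conversion (STRFMT_INT, STRFMT_STR, STRFMT_CHAR, ...), and the %s case falls back to a __tostring metamethod through string_fmt_tostring() when the argument is not already a string. A brief sketch of that behaviour from a host program:

  #include <stdio.h>
  #include "lua.h"
  #include "lauxlib.h"
  #include "lualib.h"

  int main(void)
  {
    lua_State *L = luaL_newstate();
    luaL_openlibs(L);
    if (luaL_dostring(L,
        "print(string.format('%5d|%-5s|%q|%c', 42, 'ok', 'a\\nb', 65))\n"
        "local t = setmetatable({}, {__tostring = function() return 'T!' end})\n"
        "print(string.format('%s', t))  -- routed through __tostring\n"))
      fprintf(stderr, "%s\n", lua_tostring(L, -1));
    lua_close(L);
    return 0;
  }
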
*/ + g = G(L); + setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt)); + settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1)); + mt->nomm = (uint8_t)(~(1u<array); + Node *node; + lua_Number m = 0; + ptrdiff_t i; + for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--) + if (!tvisnil(&array[i])) { + m = (lua_Number)(int32_t)i; + break; + } + node = noderef(t->node); + for (i = (ptrdiff_t)t->hmask; i >= 0; i--) + if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) { + lua_Number n = numberVnum(&node[i].key); + if (n > m) m = n; + } + setnumV(L->top-1, m); + return 1; +} + +LJLIB_CF(table_insert) LJLIB_REC(.) +{ + GCtab *t = lj_lib_checktab(L, 1); + int32_t n, i = (int32_t)lj_tab_len(t) + 1; + int nargs = (int)((char *)L->top - (char *)L->base); + if (nargs != 2*sizeof(TValue)) { + if (nargs != 3*sizeof(TValue)) + lj_err_caller(L, LJ_ERR_TABINS); + /* NOBARRIER: This just moves existing elements around. */ + for (n = lj_lib_checkint(L, 2); i > n; i--) { + /* The set may invalidate the get pointer, so need to do it first! */ + TValue *dst = lj_tab_setint(L, t, i); + cTValue *src = lj_tab_getint(t, i-1); + if (src) { + copyTV(L, dst, src); + } else { + setnilV(dst); + } + } + i = n; + } + { + TValue *dst = lj_tab_setint(L, t, i); + copyTV(L, dst, L->top-1); /* Set new value. */ + lj_gc_barriert(L, t, dst); + } + return 0; +} + +LJLIB_LUA(table_remove) /* + function(t, pos) + CHECK_tab(t) + local len = #t + if pos == nil then + if len ~= 0 then + local old = t[len] + t[len] = nil + return old + end + else + CHECK_int(pos) + if pos >= 1 and pos <= len then + local old = t[pos] + for i=pos+1,len do + t[i-1] = t[i] + end + t[len] = nil + return old + end + end + end +*/ + +LJLIB_CF(table_concat) LJLIB_REC(.) +{ + GCtab *t = lj_lib_checktab(L, 1); + GCstr *sep = lj_lib_optstr(L, 2); + int32_t i = lj_lib_optint(L, 3, 1); + int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ? + lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t); + SBuf *sb = lj_buf_tmp_(L); + SBuf *sbx = lj_buf_puttab(sb, t, sep, i, e); + if (LJ_UNLIKELY(!sbx)) { /* Error: bad element type. */ + int32_t idx = (int32_t)(intptr_t)sbufP(sb); + cTValue *o = lj_tab_getint(t, idx); + lj_err_callerv(L, LJ_ERR_TABCAT, + lj_obj_itypename[o ? itypemap(o) : ~LJ_TNIL], idx); + } + setstrV(L, L->top-1, lj_buf_str(L, sbx)); + lj_gc_check(L); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void set2(lua_State *L, int i, int j) +{ + lua_rawseti(L, 1, i); + lua_rawseti(L, 1, j); +} + +static int sort_comp(lua_State *L, int a, int b) +{ + if (!lua_isnil(L, 2)) { /* function? */ + int res; + lua_pushvalue(L, 2); + lua_pushvalue(L, a-1); /* -1 to compensate function */ + lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */ + lua_call(L, 2, 1); + res = lua_toboolean(L, -1); + lua_pop(L, 1); + return res; + } else { /* a < b? */ + return lua_lessthan(L, a, b); + } +} + +static void auxsort(lua_State *L, int l, int u) +{ + while (l < u) { /* for tail recursion */ + int i, j; + /* sort elements a[l], a[(l+u)/2] and a[u] */ + lua_rawgeti(L, 1, l); + lua_rawgeti(L, 1, u); + if (sort_comp(L, -1, -2)) /* a[u] < a[l]? 
*/ + set2(L, l, u); /* swap a[l] - a[u] */ + else + lua_pop(L, 2); + if (u-l == 1) break; /* only 2 elements */ + i = (l+u)/2; + lua_rawgeti(L, 1, i); + lua_rawgeti(L, 1, l); + if (sort_comp(L, -2, -1)) { /* a[i]= P */ + while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) { + if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT); + lua_pop(L, 1); /* remove a[i] */ + } + /* repeat --j until a[j] <= P */ + while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) { + if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT); + lua_pop(L, 1); /* remove a[j] */ + } + if (jbase+1)) + lj_lib_checkfunc(L, 2); + auxsort(L, 1, n); + return 0; +} + +#if LJ_52 +LJLIB_PUSH("n") +LJLIB_CF(table_pack) +{ + TValue *array, *base = L->base; + MSize i, n = (uint32_t)(L->top - base); + GCtab *t = lj_tab_new(L, n ? n+1 : 0, 1); + /* NOBARRIER: The table is new (marked white). */ + setintV(lj_tab_setstr(L, t, strV(lj_lib_upvalue(L, 1))), (int32_t)n); + for (array = tvref(t->array) + 1, i = 0; i < n; i++) + copyTV(L, &array[i], &base[i]); + settabV(L, base, t); + L->top = base+1; + lj_gc_check(L); + return 1; +} +#endif + +LJLIB_NOREG LJLIB_CF(table_new) LJLIB_REC(.) +{ + int32_t a = lj_lib_checkint(L, 1); + int32_t h = lj_lib_checkint(L, 2); + lua_createtable(L, a, h); + return 1; +} + +LJLIB_NOREG LJLIB_CF(table_clear) LJLIB_REC(.) +{ + lj_tab_clear(lj_lib_checktab(L, 1)); + return 0; +} + +static int luaopen_table_new(lua_State *L) +{ + return lj_lib_postreg(L, lj_cf_table_new, FF_table_new, "new"); +} + +static int luaopen_table_clear(lua_State *L) +{ + return lj_lib_postreg(L, lj_cf_table_clear, FF_table_clear, "clear"); +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_table(lua_State *L) +{ + LJ_LIB_REG(L, LUA_TABLIBNAME, table); +#if LJ_52 + lua_getglobal(L, "unpack"); + lua_setfield(L, -2, "unpack"); +#endif + lj_lib_prereg(L, LUA_TABLIBNAME ".new", luaopen_table_new, tabV(L->top-1)); + lj_lib_prereg(L, LUA_TABLIBNAME ".clear", luaopen_table_clear, tabV(L->top-1)); + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj.supp b/Build/source/libs/luajit/LuaJIT-src/src/lj.supp new file mode 100644 index 00000000000..411f2617001 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj.supp @@ -0,0 +1,26 @@ +# Valgrind suppression file for LuaJIT 2.0. +{ + Optimized string compare + Memcheck:Addr4 + fun:lj_str_cmp +} +{ + Optimized string compare + Memcheck:Addr1 + fun:lj_str_cmp +} +{ + Optimized string compare + Memcheck:Addr4 + fun:lj_str_new +} +{ + Optimized string compare + Memcheck:Addr1 + fun:lj_str_new +} +{ + Optimized string compare + Memcheck:Cond + fun:lj_str_new +} diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.c new file mode 100644 index 00000000000..ddd50cae4f1 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.c @@ -0,0 +1,1398 @@ +/* +** Bundled memory allocator. +** +** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc. +** The original bears the following remark: +** +** This is a version (aka dlmalloc) of malloc/free/realloc written by +** Doug Lea and released to the public domain, as explained at +** http://creativecommons.org/licenses/publicdomain. +** +** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee) +** +** No additional copyright is claimed over the customizations. +** Please do NOT bother the original author about this version here! 
+** +** If you want to use dlmalloc in another project, you should get +** the original from: ftp://gee.cs.oswego.edu/pub/misc/ +** For thread-safe derivatives, take a look at: +** - ptmalloc: http://www.malloc.de/ +** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/ +*/ + +#define lj_alloc_c +#define LUA_CORE + +/* To get the mremap prototype. Must be defined before any system includes. */ +#if defined(__linux__) && !defined(_GNU_SOURCE) +#define _GNU_SOURCE +#endif + +#include "lj_def.h" +#include "lj_arch.h" +#include "lj_alloc.h" + +#ifndef LUAJIT_USE_SYSMALLOC + +#define MAX_SIZE_T (~(size_t)0) +#define MALLOC_ALIGNMENT ((size_t)8U) + +#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U) +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U) +#define MAX_RELEASE_CHECK_RATE 255 + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP support ------------------------------- */ + +#define MFAIL ((void *)(MAX_SIZE_T)) +#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ + +#define IS_DIRECT_BIT (SIZE_T_ONE) + +#if LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include + +#if LJ_64 && !LJ_GC64 + +/* Undocumented, but hey, that's what we all love so much about Windows. */ +typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits, + size_t *size, ULONG alloctype, ULONG prot); +static PNTAVM ntavm; + +/* Number of top bits of the lower 32 bits of an address that must be zero. +** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB. +*/ +#define NTAVM_ZEROBITS 1 + +static void INIT_MMAP(void) +{ + ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"), + "NtAllocateVirtualMemory"); +} + +/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = NULL; + long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size, + MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + SetLastError(olderr); + return st == 0 ? ptr : MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static LJ_AINLINE void *DIRECT_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = NULL; + long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size, + MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE); + SetLastError(olderr); + return st == 0 ? 
ptr : MFAIL; +} + +#else + +#define INIT_MMAP() ((void)0) + +/* Win32 MMAP via VirtualAlloc */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + SetLastError(olderr); + return ptr ? ptr : MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static LJ_AINLINE void *DIRECT_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_READWRITE); + SetLastError(olderr); + return ptr ? ptr : MFAIL; +} + +#endif + +/* This function supports releasing coalesed segments */ +static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) +{ + DWORD olderr = GetLastError(); + MEMORY_BASIC_INFORMATION minfo; + char *cptr = (char *)ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + SetLastError(olderr); + return 0; +} + +#else + +#include +#include + +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) + +#if LJ_64 && !LJ_GC64 +/* 64 bit mode with 32 bit pointers needs special support for allocating +** memory in the lower 2GB. +*/ + +#if defined(MAP_32BIT) + +#if defined(__sun__) +#define MMAP_REGION_START ((uintptr_t)0x1000) +#else +/* Actually this only gives us max. 1GB in current Linux kernels. */ +#define MMAP_REGION_START ((uintptr_t)0) +#endif + +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + int olderr = errno; + void *ptr = mmap((void *)MMAP_REGION_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0); + errno = olderr; + return ptr; +} + +#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) || defined(__CYGWIN__) + +/* OSX and FreeBSD mmap() use a naive first-fit linear search. +** That's perfect for us. Except that -pagezero_size must be set for OSX, +** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs +** to be reduced to 250MB on FreeBSD. +*/ +#if LJ_TARGET_OSX || defined(__DragonFly__) +#define MMAP_REGION_START ((uintptr_t)0x10000) +#elif LJ_TARGET_PS4 +#define MMAP_REGION_START ((uintptr_t)0x4000) +#else +#define MMAP_REGION_START ((uintptr_t)0x10000000) +#endif +#define MMAP_REGION_END ((uintptr_t)0x80000000) + +#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 +#include +#endif + +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + int olderr = errno; + /* Hint for next allocation. Doesn't need to be thread-safe. */ + static uintptr_t alloc_hint = MMAP_REGION_START; + int retry = 0; +#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 + static int rlimit_modified = 0; + if (LJ_UNLIKELY(rlimit_modified == 0)) { + struct rlimit rlim; + rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START; + setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail below. 
*/ + rlimit_modified = 1; + } +#endif + for (;;) { + void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0); + if ((uintptr_t)p >= MMAP_REGION_START && + (uintptr_t)p + size < MMAP_REGION_END) { + alloc_hint = (uintptr_t)p + size; + errno = olderr; + return p; + } + if (p != CMFAIL) munmap(p, size); +#if defined(__sun__) || defined(__DragonFly__) + alloc_hint += 0x1000000; /* Need near-exhaustive linear scan. */ + if (alloc_hint + size < MMAP_REGION_END) continue; +#endif + if (retry) break; + retry = 1; + alloc_hint = MMAP_REGION_START; + } + errno = olderr; + return CMFAIL; +} + +#else + +#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS" + +#endif + +#else + +/* 32 bit mode and GC64 mode is easy. */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + int olderr = errno; + void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0); + errno = olderr; + return ptr; +} + +#endif + +#define INIT_MMAP() ((void)0) +#define DIRECT_MMAP(s) CALL_MMAP(s) + +static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) +{ + int olderr = errno; + int ret = munmap(ptr, size); + errno = olderr; + return ret; +} + +#if LJ_TARGET_LINUX +/* Need to define _GNU_SOURCE to get the mremap prototype. */ +static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, + int flags) +{ + int olderr = errno; + ptr = mremap(ptr, osz, nsz, flags); + errno = olderr; + return ptr; +} + +#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv)) +#define CALL_MREMAP_NOMOVE 0 +#define CALL_MREMAP_MAYMOVE 1 +#if LJ_64 && !LJ_GC64 +#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE +#else +#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE +#endif +#endif + +#endif + +#ifndef CALL_MREMAP +#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL) +#endif + +/* ----------------------- Chunk representations ------------------------ */ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#define CHUNK_OVERHEAD (SIZE_T_SIZE) + +/* Direct chunks need a second word of overhead ... */ +#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. 
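
For illustration (not from the patch): the POSIX CALL_MMAP/CALL_MUNMAP wrappers above add just two things to plain mmap(2)/munmap(2) -- private anonymous mappings and saving/restoring errno so the allocator never disturbs it. A standalone sketch of the same idea, assuming a platform that defines MAP_ANONYMOUS (or MAP_ANON):

  #include <stdio.h>
  #include <errno.h>
  #include <sys/mman.h>

  /* Reserve one anonymous region and release it again, preserving errno. */
  static void *anon_map(size_t size)
  {
    int olderr = errno;
    void *p = mmap(NULL, size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    errno = olderr;
    return p == MAP_FAILED ? NULL : p;
  }

  int main(void)
  {
    size_t sz = 128 * 1024;  /* same granule as DEFAULT_GRANULARITY above */
    void *p = anon_map(sz);
    if (p == NULL) { fprintf(stderr, "mmap failed\n"); return 1; }
    ((char *)p)[0] = 1;      /* touch the first byte */
    munmap(p, sz);
    return 0;
  }
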
*/ +#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + +/* ------------------ Operations on head and foot fields ----------------- */ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_direct(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* ---------------------- Overlaid data structures ----------------------- */ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; + + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? 
(t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +struct malloc_segment { + char *base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ +}; + +typedef struct malloc_segment msegment; +typedef struct malloc_segment *msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + msegment seg; +}; + +typedef struct malloc_state *mstate; + +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* page-align a size */ +#define page_align(S)\ + (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\ + & ~(DEFAULT_GRANULARITY - SIZE_T_ONE)) + +#if LJ_TARGET_WINDOWS +#define mmap_align(S) granularity_align(S) +#else +#define mmap_align(S) page_align(S) +#endif + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char *addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) +{ + msegmentptr sp = &m->seg; + for (;;) { + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. 
See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#define compute_tree_index(S, I)\ +{\ + unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\ + if (X == 0) {\ + I = 0;\ + } else if (X > 0xFFFF) {\ + I = NTREEBINS-1;\ + } else {\ + unsigned int K = lj_fls(X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | (~(x<<1)+1)) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else\ + F = B->fd;\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + if (F == B) {\ + clear_smallmap(M, I);\ + } else {\ + F->bk = B;\ + B->fd = F;\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + if (B == F) {\ + clear_smallmap(M, I);\ + } else {\ + B->fd = F;\ + F->bk = B;\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert 
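
For illustration (not from the patch): the macros above split requests into 32 small bins spaced 8 bytes apart (everything below 256 bytes) and 32 tree bins whose boundaries grow geometrically above that. A standalone re-derivation of the index arithmetic for a few sizes, using ordinary functions instead of the macros; highest_bit() is a portable stand-in for lj_fls(), not the real helper.

  #include <stdio.h>
  #include <stddef.h>

  #define SMALLBIN_SHIFT 3
  #define NSMALLBINS     32
  #define TREEBIN_SHIFT  8
  #define NTREEBINS      32

  static unsigned highest_bit(unsigned x)  /* index of the most significant set bit */
  {
    unsigned r = 0;
    while (x >>= 1) r++;
    return r;
  }

  static unsigned tree_index(size_t s)  /* mirrors compute_tree_index() */
  {
    unsigned x = (unsigned)(s >> TREEBIN_SHIFT);
    if (x == 0) return 0;
    if (x > 0xFFFF) return NTREEBINS-1;
    unsigned k = highest_bit(x);
    return (k << 1) + ((s >> (k + (TREEBIN_SHIFT-1))) & 1);
  }

  int main(void)
  {
    size_t sizes[] = { 24, 200, 256, 300, 1000, 5000, 100000 };
    for (size_t i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
      size_t s = sizes[i];
      if ((s >> SMALLBIN_SHIFT) < NSMALLBINS)   /* same test as is_small() */
        printf("%7lu bytes -> small bin %lu\n",
               (unsigned long)s, (unsigned long)(s >> SMALLBIN_SHIFT));
      else
        printf("%7lu bytes -> tree bin %u\n", (unsigned long)s, tree_index(s));
    }
    return 0;
  }
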
chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr *H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + } else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0) {\ + T = *C;\ + } else {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + } else {\ + tchunkptr F = T->fd;\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + }\ + }\ +} + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + F->bk = R;\ + R->fd = F;\ + } else {\ + tchunkptr *RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr *CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + *RP = 0;\ + }\ + }\ + if (XP != 0) {\ + tbinptr *H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + } else {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + if (R != 0) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + if ((C1 = X->child[1]) != 0) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) { insert_small_chunk(M, P, S)\ + } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) { unlink_small_chunk(M, P, S)\ + } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +static void *direct_alloc(size_t nb) +{ + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */ + char *mm = (char *)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - DIRECT_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_DIRECT_BIT; + p->head = psize|CINUSE_BIT; + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + return chunk2mem(p); + } + } + return NULL; +} + +static mchunkptr direct_resize(mchunkptr oldp, size_t nb) +{ + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink direct regions below small size */ + return NULL; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) { + return oldp; + } else { + size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT; + size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD; + size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + char *cp = (char *)CALL_MREMAP((char *)oldp - offset, + oldmmsize, newmmsize, CALL_MREMAP_MV); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - DIRECT_FOOT_PAD; + newp->head = psize|CINUSE_BIT; + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + 
chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + return newp; + } + } + return NULL; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) +{ + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char *)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) +{ + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; i++) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +/* Allocate chunk and prepend remainder with chunk in successor base. */ +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) +{ + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (size_t)((char *)oldfirst - (char *)p); + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + } + + return chunk2mem(p); +} + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char *tbase, size_t tsize) +{ + /* Determine locations and sizes of segment, fenceposts, old top */ + char *old_top = (char *)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char *old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char *asp = rawsp + offset; + char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + if ((char *)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = (size_t)(csp - old_top); + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } +} + +/* -------------------------- System allocation -------------------------- */ + +static void *alloc_sys(mstate m, size_t nb) +{ + char *tbase = CMFAIL; + size_t tsize = 0; + + /* Directly map large chunks */ + if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) { + void *mem = direct_alloc(nb); + if (mem != 0) + return mem; + } + + { + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */ + char *mp = (char *)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + } + } + } + + if (tbase != CMFAIL) { + msegmentptr sp = &m->seg; + /* Try to merge with an existing segment */ + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } else { + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0) { + char *oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } else { + add_segment(m, tbase, tsize); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + return chunk2mem(p); + } + } + + return NULL; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) +{ + size_t released = 0; + size_t nsegs = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char *base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + nsegs++; + { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + /* Reset check counter */ + m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ? 
+ nsegs : MAX_RELEASE_CHECK_RATE; + return released; +} + +static int alloc_trim(mstate m, size_t pad) +{ + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = DEFAULT_GRANULARITY; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); + + if (sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + + if (released != 0) { + sp->size -= released; + init_top(m, m->top, m->topsize - released); + } + } + + /* Unmap any unused mmapped segments */ + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void *tmalloc_large(mstate m, size_t nb) +{ + tchunkptr v = 0; + size_t rsize = ~nb+1; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) + t = *treebin_at(m, lj_ffs(leftbits)); + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return NULL so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + mchunkptr r = chunk_plus_offset(v, nb); + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) { + set_inuse_and_pinuse(m, v, (rsize + nb)); + } else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + return NULL; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void *tmalloc_small(mstate m, size_t nb) +{ + tchunkptr t, v; + mchunkptr r; + size_t rsize; + bindex_t i = lj_ffs(m->treemap); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + r = chunk_plus_offset(v, nb); + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) { + set_inuse_and_pinuse(m, v, (rsize + nb)); + } 
else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); +} + +/* ----------------------------------------------------------------------- */ + +void *lj_alloc_create(void) +{ + size_t tsize = DEFAULT_GRANULARITY; + char *tbase; + INIT_MMAP(); + tbase = (char *)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = tbase; + m->seg.size = tsize; + m->release_checks = MAX_RELEASE_CHECK_RATE; + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + return m; + } + return NULL; +} + +void lj_alloc_destroy(void *msp) +{ + mstate ms = (mstate)msp; + msegmentptr sp = &ms->seg; + while (sp != 0) { + char *base = sp->base; + size_t size = sp->size; + sp = sp->next; + CALL_MUNMAP(base, size); + } +} + +static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize) +{ + mstate ms = (mstate)msp; + void *mem; + size_t nb; + if (nsize <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + return mem; + } else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + bindex_t i = lj_ffs(leftbits); + b = smallbin_at(ms, i); + p = b->fd; + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) { + set_inuse_and_pinuse(ms, p, small_index2size(i)); + } else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + return mem; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + return mem; + } + } + } else if (nsize >= MAX_REQUEST) { + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + } else { + nb = pad_request(nsize); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + return mem; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + return mem; + } else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + return mem; + } + return alloc_sys(ms, nb); +} + +static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr) +{ + if (ptr != 0) { + mchunkptr p = mem2chunk(ptr); + mstate fm = (mstate)msp; + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_DIRECT_BIT) != 0) { + prevsize &= ~IS_DIRECT_BIT; + psize += prevsize + DIRECT_FOOT_PAD; + CALL_MUNMAP((char *)p - prevsize, psize); + return NULL; + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + return NULL; + } + } + } + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (tsize > fm->trim_check) + alloc_trim(fm, 0); + return NULL; + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + return NULL; + } else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + return NULL; + } + } + } else { + set_free_with_pinuse(p, psize, next); + } + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + } else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + } + return NULL; +} + +static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize) +{ + if (nsize >= MAX_REQUEST) { + return NULL; + } else { + mstate m = (mstate)msp; + mchunkptr oldp = mem2chunk(ptr); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + size_t nb = request2size(nsize); + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + if (is_direct(oldp)) { + newp = direct_resize(oldp, nb); /* this may return NULL. 
*/ + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr rem = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, rem, rsize); + lj_alloc_free(m, chunk2mem(rem)); + } + } else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + + if (newp != 0) { + return chunk2mem(newp); + } else { + void *newmem = lj_alloc_malloc(m, nsize); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, ptr, oc < nsize ? oc : nsize); + lj_alloc_free(m, ptr); + } + return newmem; + } + } +} + +void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize) +{ + (void)osize; + if (nsize == 0) { + return lj_alloc_free(msp, ptr); + } else if (ptr == NULL) { + return lj_alloc_malloc(msp, nsize); + } else { + return lj_alloc_realloc(msp, ptr, nsize); + } +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.h new file mode 100644 index 00000000000..f87a7cf3422 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_alloc.h @@ -0,0 +1,17 @@ +/* +** Bundled memory allocator. +** Donated to the public domain. +*/ + +#ifndef _LJ_ALLOC_H +#define _LJ_ALLOC_H + +#include "lj_def.h" + +#ifndef LUAJIT_USE_SYSMALLOC +LJ_FUNC void *lj_alloc_create(void); +LJ_FUNC void lj_alloc_destroy(void *msp); +LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_api.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_api.c new file mode 100644 index 00000000000..1f09284f991 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_api.c @@ -0,0 +1,1210 @@ +/* +** Public Lua/C API. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_api_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_udata.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_bc.h" +#include "lj_frame.h" +#include "lj_trace.h" +#include "lj_vm.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" + +/* -- Common helper functions --------------------------------------------- */ + +#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) +#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L)) + +static TValue *index2adr(lua_State *L, int idx) +{ + if (idx > 0) { + TValue *o = L->base + (idx - 1); + return o < L->top ? 
o : niltv(L); + } else if (idx > LUA_REGISTRYINDEX) { + api_check(L, idx != 0 && -idx <= L->top - L->base); + return L->top + idx; + } else if (idx == LUA_GLOBALSINDEX) { + TValue *o = &G(L)->tmptv; + settabV(L, o, tabref(L->env)); + return o; + } else if (idx == LUA_REGISTRYINDEX) { + return registry(L); + } else { + GCfunc *fn = curr_func(L); + api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn)); + if (idx == LUA_ENVIRONINDEX) { + TValue *o = &G(L)->tmptv; + settabV(L, o, tabref(fn->c.env)); + return o; + } else { + idx = LUA_GLOBALSINDEX - idx; + return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L); + } + } +} + +static TValue *stkindex2adr(lua_State *L, int idx) +{ + if (idx > 0) { + TValue *o = L->base + (idx - 1); + return o < L->top ? o : niltv(L); + } else { + api_check(L, idx != 0 && -idx <= L->top - L->base); + return L->top + idx; + } +} + +static GCtab *getcurrenv(lua_State *L) +{ + GCfunc *fn = curr_func(L); + return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env); +} + +/* -- Miscellaneous API functions ----------------------------------------- */ + +LUA_API int lua_status(lua_State *L) +{ + return L->status; +} + +LUA_API int lua_checkstack(lua_State *L, int size) +{ + if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) { + return 0; /* Stack overflow. */ + } else if (size > 0) { + lj_state_checkstack(L, (MSize)size); + } + return 1; +} + +LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg) +{ + if (!lua_checkstack(L, size)) + lj_err_callerv(L, LJ_ERR_STKOVM, msg); +} + +LUA_API void lua_xmove(lua_State *from, lua_State *to, int n) +{ + TValue *f, *t; + if (from == to) return; + api_checknelems(from, n); + api_check(from, G(from) == G(to)); + lj_state_checkstack(to, (MSize)n); + f = from->top; + t = to->top = to->top + n; + while (--n >= 0) copyTV(to, --t, --f); + from->top = f; +} + +/* -- Stack manipulation -------------------------------------------------- */ + +LUA_API int lua_gettop(lua_State *L) +{ + return (int)(L->top - L->base); +} + +LUA_API void lua_settop(lua_State *L, int idx) +{ + if (idx >= 0) { + api_check(L, idx <= tvref(L->maxstack) - L->base); + if (L->base + idx > L->top) { + if (L->base + idx >= tvref(L->maxstack)) + lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base)); + do { setnilV(L->top++); } while (L->top < L->base + idx); + } else { + L->top = L->base + idx; + } + } else { + api_check(L, -(idx+1) <= (L->top - L->base)); + L->top += idx+1; /* Shrinks top (idx < 0). */ + } +} + +LUA_API void lua_remove(lua_State *L, int idx) +{ + TValue *p = stkindex2adr(L, idx); + api_checkvalidindex(L, p); + while (++p < L->top) copyTV(L, p-1, p); + L->top--; +} + +LUA_API void lua_insert(lua_State *L, int idx) +{ + TValue *q, *p = stkindex2adr(L, idx); + api_checkvalidindex(L, p); + for (q = L->top; q > p; q--) copyTV(L, q, q-1); + copyTV(L, p, L->top); +} + +LUA_API void lua_replace(lua_State *L, int idx) +{ + api_checknelems(L, 1); + if (idx == LUA_GLOBALSINDEX) { + api_check(L, tvistab(L->top-1)); + /* NOBARRIER: A thread (i.e. L) is never black. 
*/ + setgcref(L->env, obj2gco(tabV(L->top-1))); + } else if (idx == LUA_ENVIRONINDEX) { + GCfunc *fn = curr_func(L); + if (fn->c.gct != ~LJ_TFUNC) + lj_err_msg(L, LJ_ERR_NOENV); + api_check(L, tvistab(L->top-1)); + setgcref(fn->c.env, obj2gco(tabV(L->top-1))); + lj_gc_barrier(L, fn, L->top-1); + } else { + TValue *o = index2adr(L, idx); + api_checkvalidindex(L, o); + copyTV(L, o, L->top-1); + if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */ + lj_gc_barrier(L, curr_func(L), L->top-1); + } + L->top--; +} + +LUA_API void lua_pushvalue(lua_State *L, int idx) +{ + copyTV(L, L->top, index2adr(L, idx)); + incr_top(L); +} + +/* -- Stack getters ------------------------------------------------------- */ + +LUA_API int lua_type(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisnumber(o)) { + return LUA_TNUMBER; +#if LJ_64 && !LJ_GC64 + } else if (tvislightud(o)) { + return LUA_TLIGHTUSERDATA; +#endif + } else if (o == niltv(L)) { + return LUA_TNONE; + } else { /* Magic internal/external tag conversion. ORDER LJ_T */ + uint32_t t = ~itype(o); +#if LJ_64 + int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u); +#else + int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u); +#endif + lua_assert(tt != LUA_TNIL || tvisnil(o)); + return tt; + } +} + +LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt) +{ + if (lua_type(L, idx) != tt) + lj_err_argt(L, idx, tt); +} + +LUALIB_API void luaL_checkany(lua_State *L, int idx) +{ + if (index2adr(L, idx) == niltv(L)) + lj_err_arg(L, idx, LJ_ERR_NOVAL); +} + +LUA_API const char *lua_typename(lua_State *L, int t) +{ + UNUSED(L); + return lj_obj_typename[t+1]; +} + +LUA_API int lua_iscfunction(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return tvisfunc(o) && !isluafunc(funcV(o)); +} + +LUA_API int lua_isnumber(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + return (tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), &tmp))); +} + +LUA_API int lua_isstring(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return (tvisstr(o) || tvisnumber(o)); +} + +LUA_API int lua_isuserdata(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return (tvisudata(o) || tvislightud(o)); +} + +LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2) +{ + cTValue *o1 = index2adr(L, idx1); + cTValue *o2 = index2adr(L, idx2); + return (o1 == niltv(L) || o2 == niltv(L)) ? 
0 : lj_obj_equal(o1, o2); +} + +LUA_API int lua_equal(lua_State *L, int idx1, int idx2) +{ + cTValue *o1 = index2adr(L, idx1); + cTValue *o2 = index2adr(L, idx2); + if (tvisint(o1) && tvisint(o2)) { + return intV(o1) == intV(o2); + } else if (tvisnumber(o1) && tvisnumber(o2)) { + return numberVnum(o1) == numberVnum(o2); + } else if (itype(o1) != itype(o2)) { + return 0; + } else if (tvispri(o1)) { + return o1 != niltv(L) && o2 != niltv(L); +#if LJ_64 && !LJ_GC64 + } else if (tvislightud(o1)) { + return o1->u64 == o2->u64; +#endif + } else if (gcrefeq(o1->gcr, o2->gcr)) { + return 1; + } else if (!tvistabud(o1)) { + return 0; + } else { + TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0); + if ((uintptr_t)base <= 1) { + return (int)(uintptr_t)base; + } else { + L->top = base+2; + lj_vm_call(L, base, 1+1); + L->top -= 2+LJ_FR2; + return tvistruecond(L->top+1+LJ_FR2); + } + } +} + +LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2) +{ + cTValue *o1 = index2adr(L, idx1); + cTValue *o2 = index2adr(L, idx2); + if (o1 == niltv(L) || o2 == niltv(L)) { + return 0; + } else if (tvisint(o1) && tvisint(o2)) { + return intV(o1) < intV(o2); + } else if (tvisnumber(o1) && tvisnumber(o2)) { + return numberVnum(o1) < numberVnum(o2); + } else { + TValue *base = lj_meta_comp(L, o1, o2, 0); + if ((uintptr_t)base <= 1) { + return (int)(uintptr_t)base; + } else { + L->top = base+2; + lj_vm_call(L, base, 1+1); + L->top -= 2+LJ_FR2; + return tvistruecond(L->top+1+LJ_FR2); + } + } +} + +LUA_API lua_Number lua_tonumber(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + if (LJ_LIKELY(tvisnumber(o))) + return numberVnum(o); + else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp)) + return numV(&tmp); + else + return 0; +} + +LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + if (LJ_LIKELY(tvisnumber(o))) + return numberVnum(o); + else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + return numV(&tmp); +} + +LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + if (LJ_LIKELY(tvisnumber(o))) + return numberVnum(o); + else if (tvisnil(o)) + return def; + else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + return numV(&tmp); +} + +LUA_API lua_Integer lua_tointeger(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + lua_Number n; + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else if (LJ_LIKELY(tvisnum(o))) { + n = numV(o); + } else { + if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) + return 0; + if (tvisint(&tmp)) + return (lua_Integer)intV(&tmp); + n = numV(&tmp); + } +#if LJ_64 + return (lua_Integer)n; +#else + return lj_num2int(n); +#endif +} + +LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + lua_Number n; + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else if (LJ_LIKELY(tvisnum(o))) { + n = numV(o); + } else { + if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + if (tvisint(&tmp)) + return (lua_Integer)intV(&tmp); + n = numV(&tmp); + } +#if LJ_64 + return (lua_Integer)n; +#else + return lj_num2int(n); +#endif +} + +LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + lua_Number n; + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } 
else if (LJ_LIKELY(tvisnum(o))) { + n = numV(o); + } else if (tvisnil(o)) { + return def; + } else { + if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + if (tvisint(&tmp)) + return (lua_Integer)intV(&tmp); + n = numV(&tmp); + } +#if LJ_64 + return (lua_Integer)n; +#else + return lj_num2int(n); +#endif +} + +LUA_API int lua_toboolean(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return tvistruecond(o); +} + +LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len) +{ + TValue *o = index2adr(L, idx); + GCstr *s; + if (LJ_LIKELY(tvisstr(o))) { + s = strV(o); + } else if (tvisnumber(o)) { + lj_gc_check(L); + o = index2adr(L, idx); /* GC may move the stack. */ + s = lj_strfmt_number(L, o); + setstrV(L, o, s); + } else { + if (len != NULL) *len = 0; + return NULL; + } + if (len != NULL) *len = s->len; + return strdata(s); +} + +LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len) +{ + TValue *o = index2adr(L, idx); + GCstr *s; + if (LJ_LIKELY(tvisstr(o))) { + s = strV(o); + } else if (tvisnumber(o)) { + lj_gc_check(L); + o = index2adr(L, idx); /* GC may move the stack. */ + s = lj_strfmt_number(L, o); + setstrV(L, o, s); + } else { + lj_err_argt(L, idx, LUA_TSTRING); + } + if (len != NULL) *len = s->len; + return strdata(s); +} + +LUALIB_API const char *luaL_optlstring(lua_State *L, int idx, + const char *def, size_t *len) +{ + TValue *o = index2adr(L, idx); + GCstr *s; + if (LJ_LIKELY(tvisstr(o))) { + s = strV(o); + } else if (tvisnil(o)) { + if (len != NULL) *len = def ? strlen(def) : 0; + return def; + } else if (tvisnumber(o)) { + lj_gc_check(L); + o = index2adr(L, idx); /* GC may move the stack. */ + s = lj_strfmt_number(L, o); + setstrV(L, o, s); + } else { + lj_err_argt(L, idx, LUA_TSTRING); + } + if (len != NULL) *len = s->len; + return strdata(s); +} + +LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def, + const char *const lst[]) +{ + ptrdiff_t i; + const char *s = lua_tolstring(L, idx, NULL); + if (s == NULL && (s = def) == NULL) + lj_err_argt(L, idx, LUA_TSTRING); + for (i = 0; lst[i]; i++) + if (strcmp(lst[i], s) == 0) + return (int)i; + lj_err_argv(L, idx, LJ_ERR_INVOPTM, s); +} + +LUA_API size_t lua_objlen(lua_State *L, int idx) +{ + TValue *o = index2adr(L, idx); + if (tvisstr(o)) { + return strV(o)->len; + } else if (tvistab(o)) { + return (size_t)lj_tab_len(tabV(o)); + } else if (tvisudata(o)) { + return udataV(o)->len; + } else if (tvisnumber(o)) { + GCstr *s = lj_strfmt_number(L, o); + setstrV(L, o, s); + return s->len; + } else { + return 0; + } +} + +LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisfunc(o)) { + BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns)); + if (op == BC_FUNCC || op == BC_FUNCCW) + return funcV(o)->c.f; + } + return NULL; +} + +LUA_API void *lua_touserdata(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisudata(o)) + return uddata(udataV(o)); + else if (tvislightud(o)) + return lightudV(o); + else + return NULL; +} + +LUA_API lua_State *lua_tothread(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return (!tvisthread(o)) ? 
NULL : threadV(o); +} + +LUA_API const void *lua_topointer(lua_State *L, int idx) +{ + return lj_obj_ptr(index2adr(L, idx)); +} + +/* -- Stack setters (object creation) ------------------------------------- */ + +LUA_API void lua_pushnil(lua_State *L) +{ + setnilV(L->top); + incr_top(L); +} + +LUA_API void lua_pushnumber(lua_State *L, lua_Number n) +{ + setnumV(L->top, n); + if (LJ_UNLIKELY(tvisnan(L->top))) + setnanV(L->top); /* Canonicalize injected NaNs. */ + incr_top(L); +} + +LUA_API void lua_pushinteger(lua_State *L, lua_Integer n) +{ + setintptrV(L->top, n); + incr_top(L); +} + +LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len) +{ + GCstr *s; + lj_gc_check(L); + s = lj_str_new(L, str, len); + setstrV(L, L->top, s); + incr_top(L); +} + +LUA_API void lua_pushstring(lua_State *L, const char *str) +{ + if (str == NULL) { + setnilV(L->top); + } else { + GCstr *s; + lj_gc_check(L); + s = lj_str_newz(L, str); + setstrV(L, L->top, s); + } + incr_top(L); +} + +LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt, + va_list argp) +{ + lj_gc_check(L); + return lj_strfmt_pushvf(L, fmt, argp); +} + +LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...) +{ + const char *ret; + va_list argp; + lj_gc_check(L); + va_start(argp, fmt); + ret = lj_strfmt_pushvf(L, fmt, argp); + va_end(argp); + return ret; +} + +LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n) +{ + GCfunc *fn; + lj_gc_check(L); + api_checknelems(L, n); + fn = lj_func_newC(L, (MSize)n, getcurrenv(L)); + fn->c.f = f; + L->top -= n; + while (n--) + copyTV(L, &fn->c.upvalue[n], L->top+n); + setfuncV(L, L->top, fn); + lua_assert(iswhite(obj2gco(fn))); + incr_top(L); +} + +LUA_API void lua_pushboolean(lua_State *L, int b) +{ + setboolV(L->top, (b != 0)); + incr_top(L); +} + +LUA_API void lua_pushlightuserdata(lua_State *L, void *p) +{ + setlightudV(L->top, checklightudptr(L, p)); + incr_top(L); +} + +LUA_API void lua_createtable(lua_State *L, int narray, int nrec) +{ + lj_gc_check(L); + settabV(L, L->top, lj_tab_new_ah(L, narray, nrec)); + incr_top(L); +} + +LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname) +{ + GCtab *regt = tabV(registry(L)); + TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname)); + if (tvisnil(tv)) { + GCtab *mt = lj_tab_new(L, 0, 1); + settabV(L, tv, mt); + settabV(L, L->top++, mt); + lj_gc_anybarriert(L, regt); + return 1; + } else { + copyTV(L, L->top++, tv); + return 0; + } +} + +LUA_API int lua_pushthread(lua_State *L) +{ + setthreadV(L, L->top, L); + incr_top(L); + return (mainthread(G(L)) == L); +} + +LUA_API lua_State *lua_newthread(lua_State *L) +{ + lua_State *L1; + lj_gc_check(L); + L1 = lj_state_new(L); + setthreadV(L, L->top, L1); + incr_top(L); + return L1; +} + +LUA_API void *lua_newuserdata(lua_State *L, size_t size) +{ + GCudata *ud; + lj_gc_check(L); + if (size > LJ_MAX_UDATA) + lj_err_msg(L, LJ_ERR_UDATAOV); + ud = lj_udata_new(L, (MSize)size, getcurrenv(L)); + setudataV(L, L->top, ud); + incr_top(L); + return uddata(ud); +} + +LUA_API void lua_concat(lua_State *L, int n) +{ + api_checknelems(L, n); + if (n >= 2) { + n--; + do { + TValue *top = lj_meta_cat(L, L->top-1, -n); + if (top == NULL) { + L->top -= n; + break; + } + n -= (int)(L->top - top); + L->top = top+2; + lj_vm_call(L, top, 1+1); + L->top -= 1+LJ_FR2; + copyTV(L, L->top-1, L->top+LJ_FR2); + } while (--n > 0); + } else if (n == 0) { /* Push empty string. */ + setstrV(L, L->top, &G(L)->strempty); + incr_top(L); + } + /* else n == 1: nothing to do. 
*/ +} + +/* -- Object getters ------------------------------------------------------ */ + +LUA_API void lua_gettable(lua_State *L, int idx) +{ + cTValue *v, *t = index2adr(L, idx); + api_checkvalidindex(L, t); + v = lj_meta_tget(L, t, L->top-1); + if (v == NULL) { + L->top += 2; + lj_vm_call(L, L->top-2, 1+1); + L->top -= 2+LJ_FR2; + v = L->top+1+LJ_FR2; + } + copyTV(L, L->top-1, v); +} + +LUA_API void lua_getfield(lua_State *L, int idx, const char *k) +{ + cTValue *v, *t = index2adr(L, idx); + TValue key; + api_checkvalidindex(L, t); + setstrV(L, &key, lj_str_newz(L, k)); + v = lj_meta_tget(L, t, &key); + if (v == NULL) { + L->top += 2; + lj_vm_call(L, L->top-2, 1+1); + L->top -= 2+LJ_FR2; + v = L->top+1+LJ_FR2; + } + copyTV(L, L->top, v); + incr_top(L); +} + +LUA_API void lua_rawget(lua_State *L, int idx) +{ + cTValue *t = index2adr(L, idx); + api_check(L, tvistab(t)); + copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1)); +} + +LUA_API void lua_rawgeti(lua_State *L, int idx, int n) +{ + cTValue *v, *t = index2adr(L, idx); + api_check(L, tvistab(t)); + v = lj_tab_getint(tabV(t), n); + if (v) { + copyTV(L, L->top, v); + } else { + setnilV(L->top); + } + incr_top(L); +} + +LUA_API int lua_getmetatable(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + GCtab *mt = NULL; + if (tvistab(o)) + mt = tabref(tabV(o)->metatable); + else if (tvisudata(o)) + mt = tabref(udataV(o)->metatable); + else + mt = tabref(basemt_obj(G(L), o)); + if (mt == NULL) + return 0; + settabV(L, L->top, mt); + incr_top(L); + return 1; +} + +LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field) +{ + if (lua_getmetatable(L, idx)) { + cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field)); + if (tv && !tvisnil(tv)) { + copyTV(L, L->top-1, tv); + return 1; + } + L->top--; + } + return 0; +} + +LUA_API void lua_getfenv(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + api_checkvalidindex(L, o); + if (tvisfunc(o)) { + settabV(L, L->top, tabref(funcV(o)->c.env)); + } else if (tvisudata(o)) { + settabV(L, L->top, tabref(udataV(o)->env)); + } else if (tvisthread(o)) { + settabV(L, L->top, tabref(threadV(o)->env)); + } else { + setnilV(L->top); + } + incr_top(L); +} + +LUA_API int lua_next(lua_State *L, int idx) +{ + cTValue *t = index2adr(L, idx); + int more; + api_check(L, tvistab(t)); + more = lj_tab_next(L, tabV(t), L->top-1); + if (more) { + incr_top(L); /* Return new key and value slot. */ + } else { /* End of traversal. */ + L->top--; /* Remove key slot. */ + } + return more; +} + +LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n) +{ + TValue *val; + const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val); + if (name) { + copyTV(L, L->top, val); + incr_top(L); + } + return name; +} + +LUA_API void *lua_upvalueid(lua_State *L, int idx, int n) +{ + GCfunc *fn = funcV(index2adr(L, idx)); + n--; + api_check(L, (uint32_t)n < fn->l.nupvalues); + return isluafunc(fn) ? 
(void *)gcref(fn->l.uvptr[n]) : + (void *)&fn->c.upvalue[n]; +} + +LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2) +{ + GCfunc *fn1 = funcV(index2adr(L, idx1)); + GCfunc *fn2 = funcV(index2adr(L, idx2)); + n1--; n2--; + api_check(L, isluafunc(fn1) && (uint32_t)n1 < fn1->l.nupvalues); + api_check(L, isluafunc(fn2) && (uint32_t)n2 < fn2->l.nupvalues); + setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]); + lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1])); +} + +LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname) +{ + cTValue *o = index2adr(L, idx); + if (tvisudata(o)) { + GCudata *ud = udataV(o); + cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname)); + if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable)) + return uddata(ud); + } + lj_err_argtype(L, idx, tname); + return NULL; /* unreachable */ +} + +/* -- Object setters ------------------------------------------------------ */ + +LUA_API void lua_settable(lua_State *L, int idx) +{ + TValue *o; + cTValue *t = index2adr(L, idx); + api_checknelems(L, 2); + api_checkvalidindex(L, t); + o = lj_meta_tset(L, t, L->top-2); + if (o) { + /* NOBARRIER: lj_meta_tset ensures the table is not black. */ + L->top -= 2; + copyTV(L, o, L->top+1); + } else { + TValue *base = L->top; + copyTV(L, base+2, base-3-2*LJ_FR2); + L->top = base+3; + lj_vm_call(L, base, 0+1); + L->top -= 3+LJ_FR2; + } +} + +LUA_API void lua_setfield(lua_State *L, int idx, const char *k) +{ + TValue *o; + TValue key; + cTValue *t = index2adr(L, idx); + api_checknelems(L, 1); + api_checkvalidindex(L, t); + setstrV(L, &key, lj_str_newz(L, k)); + o = lj_meta_tset(L, t, &key); + if (o) { + /* NOBARRIER: lj_meta_tset ensures the table is not black. */ + copyTV(L, o, --L->top); + } else { + TValue *base = L->top; + copyTV(L, base+2, base-3-2*LJ_FR2); + L->top = base+3; + lj_vm_call(L, base, 0+1); + L->top -= 2+LJ_FR2; + } +} + +LUA_API void lua_rawset(lua_State *L, int idx) +{ + GCtab *t = tabV(index2adr(L, idx)); + TValue *dst, *key; + api_checknelems(L, 2); + key = L->top-2; + dst = lj_tab_set(L, t, key); + copyTV(L, dst, key+1); + lj_gc_anybarriert(L, t); + L->top = key; +} + +LUA_API void lua_rawseti(lua_State *L, int idx, int n) +{ + GCtab *t = tabV(index2adr(L, idx)); + TValue *dst, *src; + api_checknelems(L, 1); + dst = lj_tab_setint(L, t, n); + src = L->top-1; + copyTV(L, dst, src); + lj_gc_barriert(L, t, dst); + L->top = src; +} + +LUA_API int lua_setmetatable(lua_State *L, int idx) +{ + global_State *g; + GCtab *mt; + cTValue *o = index2adr(L, idx); + api_checknelems(L, 1); + api_checkvalidindex(L, o); + if (tvisnil(L->top-1)) { + mt = NULL; + } else { + api_check(L, tvistab(L->top-1)); + mt = tabV(L->top-1); + } + g = G(L); + if (tvistab(o)) { + setgcref(tabV(o)->metatable, obj2gco(mt)); + if (mt) + lj_gc_objbarriert(L, tabV(o), mt); + } else if (tvisudata(o)) { + setgcref(udataV(o)->metatable, obj2gco(mt)); + if (mt) + lj_gc_objbarrier(L, udataV(o), mt); + } else { + /* Flush cache, since traces specialize to basemt. But not during __gc. */ + if (lj_trace_flushall(L)) + lj_err_caller(L, LJ_ERR_NOGCMM); + if (tvisbool(o)) { + /* NOBARRIER: basemt is a GC root. */ + setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt)); + setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt)); + } else { + /* NOBARRIER: basemt is a GC root. 
*/ + setgcref(basemt_obj(g, o), obj2gco(mt)); + } + } + L->top--; + return 1; +} + +LUA_API int lua_setfenv(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + GCtab *t; + api_checknelems(L, 1); + api_checkvalidindex(L, o); + api_check(L, tvistab(L->top-1)); + t = tabV(L->top-1); + if (tvisfunc(o)) { + setgcref(funcV(o)->c.env, obj2gco(t)); + } else if (tvisudata(o)) { + setgcref(udataV(o)->env, obj2gco(t)); + } else if (tvisthread(o)) { + setgcref(threadV(o)->env, obj2gco(t)); + } else { + L->top--; + return 0; + } + lj_gc_objbarrier(L, gcV(o), t); + L->top--; + return 1; +} + +LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n) +{ + cTValue *f = index2adr(L, idx); + TValue *val; + const char *name; + api_checknelems(L, 1); + name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val); + if (name) { + L->top--; + copyTV(L, val, L->top); + lj_gc_barrier(L, funcV(f), L->top); + } + return name; +} + +/* -- Calls --------------------------------------------------------------- */ + +#if LJ_FR2 +static TValue *api_call_base(lua_State *L, int nargs) +{ + TValue *o = L->top, *base = o - nargs; + L->top = o+1; + for (; o > base; o--) copyTV(L, o, o-1); + setnilV(o); + return o+1; +} +#else +#define api_call_base(L, nargs) (L->top - (nargs)) +#endif + +LUA_API void lua_call(lua_State *L, int nargs, int nresults) +{ + api_check(L, L->status == 0 || L->status == LUA_ERRERR); + api_checknelems(L, nargs+1); + lj_vm_call(L, api_call_base(L, nargs), nresults+1); +} + +LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc) +{ + global_State *g = G(L); + uint8_t oldh = hook_save(g); + ptrdiff_t ef; + int status; + api_check(L, L->status == 0 || L->status == LUA_ERRERR); + api_checknelems(L, nargs+1); + if (errfunc == 0) { + ef = 0; + } else { + cTValue *o = stkindex2adr(L, errfunc); + api_checkvalidindex(L, o); + ef = savestack(L, o); + } + status = lj_vm_pcall(L, api_call_base(L, nargs), nresults+1, ef); + if (status) hook_restore(g, oldh); + return status; +} + +static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud) +{ + GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L)); + TValue *top = L->top; + fn->c.f = func; + setfuncV(L, top++, fn); + if (LJ_FR2) setnilV(top++); + setlightudV(top++, checklightudptr(L, ud)); + cframe_nres(L->cframe) = 1+0; /* Zero results. */ + L->top = top; + return top-1; /* Now call the newly allocated C function. */ +} + +LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud) +{ + global_State *g = G(L); + uint8_t oldh = hook_save(g); + int status; + api_check(L, L->status == 0 || L->status == LUA_ERRERR); + status = lj_vm_cpcall(L, func, ud, cpcall); + if (status) hook_restore(g, oldh); + return status; +} + +LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field) +{ + if (luaL_getmetafield(L, idx, field)) { + TValue *top = L->top--; + if (LJ_FR2) setnilV(top++); + copyTV(L, top++, index2adr(L, idx)); + L->top = top; + lj_vm_call(L, top-1, 1+1); + return 1; + } + return 0; +} + +/* -- Coroutine yield and resume ------------------------------------------ */ + +LUA_API int lua_yield(lua_State *L, int nresults) +{ + void *cf = L->cframe; + global_State *g = G(L); + if (cframe_canyield(cf)) { + cf = cframe_raw(cf); + if (!hook_active(g)) { /* Regular yield: move results down if needed. 
*/ + cTValue *f = L->top - nresults; + if (f > L->base) { + TValue *t = L->base; + while (--nresults >= 0) copyTV(L, t++, f++); + L->top = t; + } + L->cframe = NULL; + L->status = LUA_YIELD; + return -1; + } else { /* Yield from hook: add a pseudo-frame. */ + TValue *top = L->top; + hook_leave(g); + (top++)->u64 = cframe_multres(cf); + setcont(top, lj_cont_hook); + if (LJ_FR2) top++; + setframe_pc(top, cframe_pc(cf)-1); + if (LJ_FR2) top++; + setframe_gc(top, obj2gco(L), LJ_TTHREAD); + setframe_ftsz(top, ((char *)(top+1)-(char *)L->base)+FRAME_CONT); + L->top = L->base = top+1; +#if LJ_TARGET_X64 + lj_err_throw(L, LUA_YIELD); +#else + L->cframe = NULL; + L->status = LUA_YIELD; + lj_vm_unwind_c(cf, LUA_YIELD); +#endif + } + } + lj_err_msg(L, LJ_ERR_CYIELD); + return 0; /* unreachable */ +} + +LUA_API int lua_resume(lua_State *L, int nargs) +{ + if (L->cframe == NULL && L->status <= LUA_YIELD) + return lj_vm_resume(L, + L->status == 0 ? api_call_base(L, nargs) : L->top - nargs, + 0, 0); + L->top = L->base; + setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP)); + incr_top(L); + return LUA_ERRRUN; +} + +/* -- GC and memory management -------------------------------------------- */ + +LUA_API int lua_gc(lua_State *L, int what, int data) +{ + global_State *g = G(L); + int res = 0; + switch (what) { + case LUA_GCSTOP: + g->gc.threshold = LJ_MAX_MEM; + break; + case LUA_GCRESTART: + g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total; + break; + case LUA_GCCOLLECT: + lj_gc_fullgc(L); + break; + case LUA_GCCOUNT: + res = (int)(g->gc.total >> 10); + break; + case LUA_GCCOUNTB: + res = (int)(g->gc.total & 0x3ff); + break; + case LUA_GCSTEP: { + GCSize a = (GCSize)data << 10; + g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0; + while (g->gc.total >= g->gc.threshold) + if (lj_gc_step(L) > 0) { + res = 1; + break; + } + break; + } + case LUA_GCSETPAUSE: + res = (int)(g->gc.pause); + g->gc.pause = (MSize)data; + break; + case LUA_GCSETSTEPMUL: + res = (int)(g->gc.stepmul); + g->gc.stepmul = (MSize)data; + break; + default: + res = -1; /* Invalid option. */ + } + return res; +} + +LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud) +{ + global_State *g = G(L); + if (ud) *ud = g->allocd; + return g->allocf; +} + +LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud) +{ + global_State *g = G(L); + g->allocd = ud; + g->allocf = f; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_arch.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_arch.h new file mode 100644 index 00000000000..6760111ef5e --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_arch.h @@ -0,0 +1,513 @@ +/* +** Target architecture selection. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_ARCH_H +#define _LJ_ARCH_H + +#include "lua.h" + +/* Target endianess. */ +#define LUAJIT_LE 0 +#define LUAJIT_BE 1 + +/* Target architectures. */ +#define LUAJIT_ARCH_X86 1 +#define LUAJIT_ARCH_x86 1 +#define LUAJIT_ARCH_X64 2 +#define LUAJIT_ARCH_x64 2 +#define LUAJIT_ARCH_ARM 3 +#define LUAJIT_ARCH_arm 3 +#define LUAJIT_ARCH_ARM64 4 +#define LUAJIT_ARCH_arm64 4 +#define LUAJIT_ARCH_PPC 5 +#define LUAJIT_ARCH_ppc 5 +#define LUAJIT_ARCH_MIPS 6 +#define LUAJIT_ARCH_mips 6 + +/* Target OS. */ +#define LUAJIT_OS_OTHER 0 +#define LUAJIT_OS_WINDOWS 1 +#define LUAJIT_OS_LINUX 2 +#define LUAJIT_OS_OSX 3 +#define LUAJIT_OS_BSD 4 +#define LUAJIT_OS_POSIX 5 + +/* Select native target if no target defined. 
*/ +#ifndef LUAJIT_TARGET + +#if defined(__i386) || defined(__i386__) || defined(_M_IX86) +#define LUAJIT_TARGET LUAJIT_ARCH_X86 +#elif defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) +#define LUAJIT_TARGET LUAJIT_ARCH_X64 +#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM) +#define LUAJIT_TARGET LUAJIT_ARCH_ARM +#elif defined(__aarch64__) +#define LUAJIT_TARGET LUAJIT_ARCH_ARM64 +#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC) +#define LUAJIT_TARGET LUAJIT_ARCH_PPC +#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS) +#define LUAJIT_TARGET LUAJIT_ARCH_MIPS +#else +#error "No support for this architecture (yet)" +#endif + +#endif + +/* Select native OS if no target OS defined. */ +#ifndef LUAJIT_OS + +#if defined(_WIN32) && !defined(_XBOX_VER) +#define LUAJIT_OS LUAJIT_OS_WINDOWS +#elif defined(__linux__) +#define LUAJIT_OS LUAJIT_OS_LINUX +#elif defined(__MACH__) && defined(__APPLE__) +#define LUAJIT_OS LUAJIT_OS_OSX +#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ + defined(__NetBSD__) || defined(__OpenBSD__) || \ + defined(__DragonFly__)) && !defined(__ORBIS__) +#define LUAJIT_OS LUAJIT_OS_BSD +#elif (defined(__sun__) && defined(__svr4__)) || defined(__CYGWIN__) +#define LUAJIT_OS LUAJIT_OS_POSIX +#else +#define LUAJIT_OS LUAJIT_OS_OTHER +#endif + +#endif + +/* Set target OS properties. */ +#if LUAJIT_OS == LUAJIT_OS_WINDOWS +#define LJ_OS_NAME "Windows" +#elif LUAJIT_OS == LUAJIT_OS_LINUX +#define LJ_OS_NAME "Linux" +#elif LUAJIT_OS == LUAJIT_OS_OSX +#define LJ_OS_NAME "OSX" +#elif LUAJIT_OS == LUAJIT_OS_BSD +#define LJ_OS_NAME "BSD" +#elif LUAJIT_OS == LUAJIT_OS_POSIX +#define LJ_OS_NAME "POSIX" +#else +#define LJ_OS_NAME "Other" +#endif + +#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS) +#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX) +#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX) +#define LJ_TARGET_IOS (LJ_TARGET_OSX && (LUAJIT_TARGET == LUAJIT_ARCH_ARM || LUAJIT_TARGET == LUAJIT_ARCH_ARM64)) +#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS) +#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX + +#ifdef __CELLOS_LV2__ +#define LJ_TARGET_PS3 1 +#define LJ_TARGET_CONSOLE 1 +#endif + +#ifdef __ORBIS__ +#define LJ_TARGET_PS4 1 +#define LJ_TARGET_CONSOLE 1 +#undef NULL +#define NULL ((void*)0) +#endif + +#ifdef __psp2__ +#define LJ_TARGET_PSVITA 1 +#define LJ_TARGET_CONSOLE 1 +#endif + +#if _XBOX_VER >= 200 +#define LJ_TARGET_XBOX360 1 +#define LJ_TARGET_CONSOLE 1 +#endif + +#ifdef _DURANGO +#define LJ_TARGET_XBOXONE 1 +#define LJ_TARGET_CONSOLE 1 +#define LJ_TARGET_GC64 1 +#endif + +#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */ +#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */ +#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */ +#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */ + +/* Set target architecture properties. 
*/ +#if LUAJIT_TARGET == LUAJIT_ARCH_X86 + +#define LJ_ARCH_NAME "x86" +#define LJ_ARCH_BITS 32 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#if LJ_TARGET_WINDOWS || __CYGWIN__ +#define LJ_ABI_WIN 1 +#else +#define LJ_ABI_WIN 0 +#endif +#define LJ_TARGET_X86 1 +#define LJ_TARGET_X86ORX64 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_MASKSHIFT 1 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNALIGNED 1 +#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL + +#elif LUAJIT_TARGET == LUAJIT_ARCH_X64 + +#define LJ_ARCH_NAME "x64" +#define LJ_ARCH_BITS 64 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#if LJ_TARGET_WINDOWS || __CYGWIN__ +#define LJ_ABI_WIN 1 +#else +#define LJ_ABI_WIN 0 +#endif +#define LJ_TARGET_X64 1 +#define LJ_TARGET_X86ORX64 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */ +#define LJ_TARGET_MASKSHIFT 1 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNALIGNED 1 +#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL +#ifdef LUAJIT_ENABLE_GC64 +#define LJ_TARGET_GC64 1 +#endif + +#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM + +#define LJ_ARCH_NAME "arm" +#define LJ_ARCH_BITS 32 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#if !defined(LJ_ARCH_HASFPU) && __SOFTFP__ +#define LJ_ARCH_HASFPU 0 +#endif +#if !defined(LJ_ABI_SOFTFP) && !__ARM_PCS_VFP +#define LJ_ABI_SOFTFP 1 +#endif +#define LJ_ABI_EABI 1 +#define LJ_TARGET_ARM 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ +#define LJ_TARGET_MASKSHIFT 0 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ +#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL + +#if __ARM_ARCH____ARM_ARCH_8__ || __ARM_ARCH_8A__ +#define LJ_ARCH_VERSION 80 +#elif __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__ +#define LJ_ARCH_VERSION 70 +#elif __ARM_ARCH_6T2__ +#define LJ_ARCH_VERSION 61 +#elif __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6K__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__ +#define LJ_ARCH_VERSION 60 +#else +#define LJ_ARCH_VERSION 50 +#endif + +#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM64 + +#define LJ_ARCH_NAME "arm64" +#define LJ_ARCH_BITS 64 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#define LJ_TARGET_ARM64 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_JUMPRANGE 27 /* +-2^27 = +-128MB */ +#define LJ_TARGET_MASKSHIFT 1 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ +#define LJ_TARGET_GC64 1 +#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL +#define LJ_ARCH_NOJIT 1 /* NYI */ + +#define LJ_ARCH_VERSION 80 + +#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC + +#ifndef LJ_ARCH_ENDIAN +#if __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ +#define LJ_ARCH_ENDIAN LUAJIT_LE +#else +#define LJ_ARCH_ENDIAN LUAJIT_BE +#endif +#endif + +#if _LP64 +#define LJ_ARCH_BITS 64 +#if LJ_ARCH_ENDIAN == LUAJIT_LE +#define LJ_ARCH_NAME "ppc64le" +#else +#define LJ_ARCH_NAME "ppc64" +#endif +#else +#define LJ_ARCH_BITS 32 +#define LJ_ARCH_NAME "ppc" +#endif + +#define LJ_TARGET_PPC 1 +#define LJ_TARGET_EHRETREG 3 +#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ +#define LJ_TARGET_MASKSHIFT 0 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. 
*/ +#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE + +#if LJ_TARGET_CONSOLE +#define LJ_ARCH_PPC32ON64 1 +#define LJ_ARCH_NOFFI 1 +#elif LJ_ARCH_BITS == 64 +#define LJ_ARCH_PPC64 1 +#define LJ_TARGET_GC64 1 +#define LJ_ARCH_NOJIT 1 /* NYI */ +#endif + +#if _ARCH_PWR7 +#define LJ_ARCH_VERSION 70 +#elif _ARCH_PWR6 +#define LJ_ARCH_VERSION 60 +#elif _ARCH_PWR5X +#define LJ_ARCH_VERSION 51 +#elif _ARCH_PWR5 +#define LJ_ARCH_VERSION 50 +#elif _ARCH_PWR4 +#define LJ_ARCH_VERSION 40 +#else +#define LJ_ARCH_VERSION 0 +#endif +#if _ARCH_PPCSQ +#define LJ_ARCH_SQRT 1 +#endif +#if _ARCH_PWR5X +#define LJ_ARCH_ROUND 1 +#endif +#if __PPU__ +#define LJ_ARCH_CELL 1 +#endif +#if LJ_TARGET_XBOX360 +#define LJ_ARCH_XENON 1 +#endif + +#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS + +#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) +#define LJ_ARCH_NAME "mipsel" +#define LJ_ARCH_ENDIAN LUAJIT_LE +#else +#define LJ_ARCH_NAME "mips" +#define LJ_ARCH_ENDIAN LUAJIT_BE +#endif +#define LJ_ARCH_BITS 32 +#define LJ_TARGET_MIPS 1 +#define LJ_TARGET_EHRETREG 4 +#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */ +#define LJ_TARGET_MASKSHIFT 1 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ +#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE + +#if _MIPS_ARCH_MIPS32R2 +#define LJ_ARCH_VERSION 20 +#else +#define LJ_ARCH_VERSION 10 +#endif + +#else +#error "No target architecture defined" +#endif + +#ifndef LJ_PAGESIZE +#define LJ_PAGESIZE 4096 +#endif + +/* Check for minimum required compiler versions. */ +#if defined(__GNUC__) +#if LJ_TARGET_X86 +#if (__GNUC__ < 3) || ((__GNUC__ == 3) && __GNUC_MINOR__ < 4) +#error "Need at least GCC 3.4 or newer" +#endif +#elif LJ_TARGET_X64 +#if __GNUC__ < 4 +#error "Need at least GCC 4.0 or newer" +#endif +#elif LJ_TARGET_ARM || LJ_TARGET_PPC +#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2) +#error "Need at least GCC 4.2 or newer" +#endif +#elif LJ_TARGET_ARM64 +#if __clang__ +#if (__clang_major__ < 3) || ((__clang_major__ == 3) && __clang_minor__ < 5) +#error "Need at least Clang 3.5 or newer" +#endif +#else +#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 8) +#error "Need at least GCC 4.8 or newer" +#endif +#endif +#elif !LJ_TARGET_PS3 +#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3) +#error "Need at least GCC 4.3 or newer" +#endif +#endif +#endif + +/* Check target-specific constraints. 
*/ +#ifndef _BUILDVM_H +#if LJ_TARGET_X64 +#if __USING_SJLJ_EXCEPTIONS__ +#error "Need a C compiler with native exception handling on x64" +#endif +#elif LJ_TARGET_ARM +#if defined(__ARMEB__) +#error "No support for big-endian ARM" +#endif +#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ +#error "No support for Cortex-M CPUs" +#endif +#if !(__ARM_EABI__ || LJ_TARGET_IOS) +#error "Only ARM EABI or iOS 3.0+ ABI is supported" +#endif +#elif LJ_TARGET_ARM64 +#if defined(__AARCH64EB__) +#error "No support for big-endian ARM64" +#endif +#if defined(_ILP32) +#error "No support for ILP32 model on ARM64" +#endif +#elif LJ_TARGET_PPC +#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE) +#error "No support for PowerPC CPUs without double-precision FPU" +#endif +#if !LJ_ARCH_PPC64 && LJ_ARCH_ENDIAN == LUAJIT_LE +#error "No support for little-endian PPC32" +#endif +#if LJ_ARCH_PPC64 +#error "No support for PowerPC 64 bit mode (yet)" +#endif +#ifdef __NO_FPRS__ +#error "No support for PPC/e500 anymore (use LuaJIT 2.0)" +#endif +#elif LJ_TARGET_MIPS +#if defined(__mips_soft_float) +#error "No support for MIPS CPUs without FPU" +#endif +#if defined(_LP64) +#error "No support for MIPS64" +#endif +#endif +#endif + +/* Enable or disable the dual-number mode for the VM. */ +#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \ + (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL && LUAJIT_NUMMODE == 1) +#error "No support for this number mode on this architecture" +#endif +#if LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL || \ + (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL_SINGLE && LUAJIT_NUMMODE != 1) || \ + (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE_DUAL && LUAJIT_NUMMODE == 2) +#define LJ_DUALNUM 1 +#else +#define LJ_DUALNUM 0 +#endif + +#if LJ_TARGET_IOS || LJ_TARGET_CONSOLE +/* Runtime code generation is restricted on iOS. Complain to Apple, not me. */ +/* Ditto for the consoles. Complain to Sony or MS, not me. */ +#ifndef LUAJIT_ENABLE_JIT +#define LJ_OS_NOJIT 1 +#endif +#endif + +/* 64 bit GC references. */ +#if LJ_TARGET_GC64 +#define LJ_GC64 1 +#else +#define LJ_GC64 0 +#endif + +/* 2-slot frame info. */ +#if LJ_GC64 +#define LJ_FR2 1 +#else +#define LJ_FR2 0 +#endif + +/* Disable or enable the JIT compiler. */ +#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) || defined(LJ_OS_NOJIT) || LJ_FR2 || LJ_GC64 +#define LJ_HASJIT 0 +#else +#define LJ_HASJIT 1 +#endif + +/* Disable or enable the FFI extension. 
*/ +#if defined(LUAJIT_DISABLE_FFI) || defined(LJ_ARCH_NOFFI) +#define LJ_HASFFI 0 +#else +#define LJ_HASFFI 1 +#endif + +#if defined(LUAJIT_DISABLE_PROFILE) +#define LJ_HASPROFILE 0 +#elif LJ_TARGET_POSIX +#define LJ_HASPROFILE 1 +#define LJ_PROFILE_SIGPROF 1 +#elif LJ_TARGET_PS3 +#define LJ_HASPROFILE 1 +#define LJ_PROFILE_PTHREAD 1 +#elif LJ_TARGET_WINDOWS || LJ_TARGET_XBOX360 +#define LJ_HASPROFILE 1 +#define LJ_PROFILE_WTHREAD 1 +#else +#define LJ_HASPROFILE 0 +#endif + +#ifndef LJ_ARCH_HASFPU +#define LJ_ARCH_HASFPU 1 +#endif +#ifndef LJ_ABI_SOFTFP +#define LJ_ABI_SOFTFP 0 +#endif +#define LJ_SOFTFP (!LJ_ARCH_HASFPU) + +#if LJ_ARCH_ENDIAN == LUAJIT_BE +#define LJ_LE 0 +#define LJ_BE 1 +#define LJ_ENDIAN_SELECT(le, be) be +#define LJ_ENDIAN_LOHI(lo, hi) hi lo +#else +#define LJ_LE 1 +#define LJ_BE 0 +#define LJ_ENDIAN_SELECT(le, be) le +#define LJ_ENDIAN_LOHI(lo, hi) lo hi +#endif + +#if LJ_ARCH_BITS == 32 +#define LJ_32 1 +#define LJ_64 0 +#else +#define LJ_32 0 +#define LJ_64 1 +#endif + +#ifndef LJ_TARGET_UNALIGNED +#define LJ_TARGET_UNALIGNED 0 +#endif + +/* Various workarounds for embedded operating systems or weak C runtimes. */ +#if (defined(__ANDROID__) && !defined(LJ_TARGET_X86ORX64)) || defined(__symbian__) || LJ_TARGET_XBOX360 || LJ_TARGET_WINDOWS +#define LUAJIT_NO_LOG2 +#endif +#if defined(__symbian__) || LJ_TARGET_WINDOWS +#define LUAJIT_NO_EXP2 +#endif + +#if defined(LUAJIT_NO_UNWIND) || defined(__symbian__) || LJ_TARGET_IOS || LJ_TARGET_PS3 || LJ_TARGET_PS4 +#define LJ_NO_UNWIND 1 +#endif + +/* Compatibility with Lua 5.1 vs. 5.2. */ +#ifdef LUAJIT_ENABLE_LUA52COMPAT +#define LJ_52 1 +#else +#define LJ_52 0 +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_asm.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm.c new file mode 100644 index 00000000000..9db950a2bd1 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm.c @@ -0,0 +1,2278 @@ +/* +** IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_asm_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_mcode.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_asm.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_target.h" + +#ifdef LUA_USE_ASSERT +#include <stdio.h> +#endif + +/* -- Assembler state and common macros ----------------------------------- */ + +/* Assembler state. */ +typedef struct ASMState { + RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */ + + MCode *mcp; /* Current MCode pointer (grows down). */ + MCode *mclim; /* Lower limit for MCode memory + red zone. */ +#ifdef LUA_USE_ASSERT + MCode *mcp_prev; /* Red zone overflow check. */ +#endif + + IRIns *ir; /* Copy of pointer to IR instructions/constants. */ + jit_State *J; /* JIT compiler state. */ + +#if LJ_TARGET_X86ORX64 + x86ModRM mrm; /* Fused x86 address operand. */ +#endif + + RegSet freeset; /* Set of free registers. */ + RegSet modset; /* Set of registers modified inside the loop. */ + RegSet weakset; /* Set of weakly referenced registers. */ + RegSet phiset; /* Set of PHI registers. */ + + uint32_t flags; /* Copy of JIT compiler flags. */ + int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). 
*/ + + int32_t evenspill; /* Next even spill slot. */ + int32_t oddspill; /* Next odd spill slot (or 0). */ + + IRRef curins; /* Reference of current instruction. */ + IRRef stopins; /* Stop assembly before hitting this instruction. */ + IRRef orignins; /* Original T->nins. */ + + IRRef snapref; /* Current snapshot is active after this reference. */ + IRRef snaprename; /* Rename highwater mark for snapshot check. */ + SnapNo snapno; /* Current snapshot number. */ + SnapNo loopsnapno; /* Loop snapshot number. */ + + IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */ + IRRef sectref; /* Section base reference (loopref or 0). */ + IRRef loopref; /* Reference of LOOP instruction (or 0). */ + + BCReg topslot; /* Number of slots for stack check (unless 0). */ + int32_t gcsteps; /* Accumulated number of GC steps (per section). */ + + GCtrace *T; /* Trace to assemble. */ + GCtrace *parent; /* Parent trace (or NULL). */ + + MCode *mcbot; /* Bottom of reserved MCode. */ + MCode *mctop; /* Top of generated MCode. */ + MCode *mcloop; /* Pointer to loop MCode (or NULL). */ + MCode *invmcp; /* Points to invertible loop branch (or NULL). */ + MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */ + MCode *realign; /* Realign loop if not NULL. */ + +#ifdef RID_NUM_KREF + int32_t krefk[RID_NUM_KREF]; +#endif + IRRef1 phireg[RID_MAX]; /* PHI register references. */ + uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */ +} ASMState; + +#define IR(ref) (&as->ir[(ref)]) + +#define ASMREF_TMP1 REF_TRUE /* Temp. register. */ +#define ASMREF_TMP2 REF_FALSE /* Temp. register. */ +#define ASMREF_L REF_NIL /* Stores register for L. */ + +/* Check for variant to invariant references. */ +#define iscrossref(as, ref) ((ref) < as->sectref) + +/* Inhibit memory op fusion from variant to invariant references. */ +#define FUSE_DISABLED (~(IRRef)0) +#define mayfuse(as, ref) ((ref) > as->fuseref) +#define neverfuse(as) (as->fuseref == FUSE_DISABLED) +#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t)) +#define opisfusableload(o) \ + ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \ + (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD) + +/* Sparse limit checks using a red zone before the actual limit. */ +#define MCLIM_REDZONE 64 + +static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as) +{ + lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE)); +} + +static LJ_AINLINE void checkmclim(ASMState *as) +{ +#ifdef LUA_USE_ASSERT + if (as->mcp + MCLIM_REDZONE < as->mcp_prev) { + IRIns *ir = IR(as->curins+1); + fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp, + as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS); + lua_assert(0); + } +#endif + if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as); +#ifdef LUA_USE_ASSERT + as->mcp_prev = as->mcp; +#endif +} + +#ifdef RID_NUM_KREF +#define ra_iskref(ref) ((ref) < RID_NUM_KREF) +#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref))) +#define ra_krefk(as, ref) (as->krefk[(ref)]) + +static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k) +{ + IRRef ref = (IRRef)(r - RID_MIN_KREF); + as->krefk[ref] = k; + as->cost[r] = REGCOST(ref, ref); +} + +#else +#define ra_iskref(ref) 0 +#define ra_krefreg(ref) RID_MIN_GPR +#define ra_krefk(as, ref) 0 +#endif + +/* Arch-specific field offsets. 
*/ +static const uint8_t field_ofs[IRFL__MAX+1] = { +#define FLOFS(name, ofs) (uint8_t)(ofs), +IRFLDEF(FLOFS) +#undef FLOFS + 0 +}; + +/* -- Target-specific instruction emitter --------------------------------- */ + +#if LJ_TARGET_X86ORX64 +#include "lj_emit_x86.h" +#elif LJ_TARGET_ARM +#include "lj_emit_arm.h" +#elif LJ_TARGET_PPC +#include "lj_emit_ppc.h" +#elif LJ_TARGET_MIPS +#include "lj_emit_mips.h" +#else +#error "Missing instruction emitter for target CPU" +#endif + +/* Generic load/store of register from/to stack slot. */ +#define emit_spload(as, ir, r, ofs) \ + emit_loadofs(as, ir, (r), RID_SP, (ofs)) +#define emit_spstore(as, ir, r, ofs) \ + emit_storeofs(as, ir, (r), RID_SP, (ofs)) + +/* -- Register allocator debugging ---------------------------------------- */ + +/* #define LUAJIT_DEBUG_RA */ + +#ifdef LUAJIT_DEBUG_RA + +#include +#include + +#define RIDNAME(name) #name, +static const char *const ra_regname[] = { + GPRDEF(RIDNAME) + FPRDEF(RIDNAME) + VRIDDEF(RIDNAME) + NULL +}; +#undef RIDNAME + +static char ra_dbg_buf[65536]; +static char *ra_dbg_p; +static char *ra_dbg_merge; +static MCode *ra_dbg_mcp; + +static void ra_dstart(void) +{ + ra_dbg_p = ra_dbg_buf; + ra_dbg_merge = NULL; + ra_dbg_mcp = NULL; +} + +static void ra_dflush(void) +{ + fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout); + ra_dstart(); +} + +static void ra_dprintf(ASMState *as, const char *fmt, ...) +{ + char *p; + va_list argp; + va_start(argp, fmt); + p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p; + ra_dbg_mcp = NULL; + p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS); + for (;;) { + const char *e = strchr(fmt, '$'); + if (e == NULL) break; + memcpy(p, fmt, (size_t)(e-fmt)); + p += e-fmt; + if (e[1] == 'r') { + Reg r = va_arg(argp, Reg) & RID_MASK; + if (r <= RID_MAX) { + const char *q; + for (q = ra_regname[r]; *q; q++) + *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q; + } else { + *p++ = '?'; + lua_assert(0); + } + } else if (e[1] == 'f' || e[1] == 'i') { + IRRef ref; + if (e[1] == 'f') + ref = va_arg(argp, IRRef); + else + ref = va_arg(argp, IRIns *) - as->ir; + if (ref >= REF_BIAS) + p += sprintf(p, "%04d", ref - REF_BIAS); + else + p += sprintf(p, "K%03d", REF_BIAS - ref); + } else if (e[1] == 's') { + uint32_t slot = va_arg(argp, uint32_t); + p += sprintf(p, "[sp+0x%x]", sps_scale(slot)); + } else if (e[1] == 'x') { + p += sprintf(p, "%08x", va_arg(argp, int32_t)); + } else { + lua_assert(0); + } + fmt = e+2; + } + va_end(argp); + while (*fmt) + *p++ = *fmt++; + *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n'; + if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) { + fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout); + p = ra_dbg_buf; + } + ra_dbg_p = p; +} + +#define RA_DBG_START() ra_dstart() +#define RA_DBG_FLUSH() ra_dflush() +#define RA_DBG_REF() \ + do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \ + ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0) +#define RA_DBGX(x) ra_dprintf x + +#else +#define RA_DBG_START() ((void)0) +#define RA_DBG_FLUSH() ((void)0) +#define RA_DBG_REF() ((void)0) +#define RA_DBGX(x) ((void)0) +#endif + +/* -- Register allocator -------------------------------------------------- */ + +#define ra_free(as, r) rset_set(as->freeset, (r)) +#define ra_modified(as, r) rset_set(as->modset, (r)) +#define ra_weak(as, r) rset_set(as->weakset, (r)) +#define ra_noweak(as, r) rset_clear(as->weakset, (r)) + +#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s)) + +/* Setup register allocator. 
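+** All registers start out free with no owning reference: the RegCost word
+** roughly packs the owning IR reference in its low half (recovered with
+** regcost_ref()) and a blended allocation cost in its high half, cf.
+** REGCOST() in lj_target.h.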
*/ +static void ra_setup(ASMState *as) +{ + Reg r; + /* Initially all regs (except the stack pointer) are free for use. */ + as->freeset = RSET_INIT; + as->modset = RSET_EMPTY; + as->weakset = RSET_EMPTY; + as->phiset = RSET_EMPTY; + memset(as->phireg, 0, sizeof(as->phireg)); + for (r = RID_MIN_GPR; r < RID_MAX; r++) + as->cost[r] = REGCOST(~0u, 0u); +} + +/* Rematerialize constants. */ +static Reg ra_rematk(ASMState *as, IRRef ref) +{ + IRIns *ir; + Reg r; + if (ra_iskref(ref)) { + r = ra_krefreg(ref); + lua_assert(!rset_test(as->freeset, r)); + ra_free(as, r); + ra_modified(as, r); + emit_loadi(as, r, ra_krefk(as, ref)); + return r; + } + ir = IR(ref); + r = ir->r; + lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); + ra_free(as, r); + ra_modified(as, r); + ir->r = RID_INIT; /* Do not keep any hint. */ + RA_DBGX((as, "remat $i $r", ir, r)); +#if !LJ_SOFTFP + if (ir->o == IR_KNUM) { + emit_loadn(as, r, ir_knum(ir)); + } else +#endif + if (emit_canremat(REF_BASE) && ir->o == IR_BASE) { + ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */ + emit_getgl(as, r, jit_base); + } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) { + lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */ + emit_getgl(as, r, cur_L); +#if LJ_64 + } else if (ir->o == IR_KINT64) { + emit_loadu64(as, r, ir_kint64(ir)->u64); +#endif + } else { + lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || + ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); + emit_loadi(as, r, ir->i); + } + return r; +} + +/* Force a spill. Allocate a new spill slot if needed. */ +static int32_t ra_spill(ASMState *as, IRIns *ir) +{ + int32_t slot = ir->s; + lua_assert(ir >= as->ir + REF_TRUE); + if (!ra_hasspill(slot)) { + if (irt_is64(ir->t)) { + slot = as->evenspill; + as->evenspill += 2; + } else if (as->oddspill) { + slot = as->oddspill; + as->oddspill = 0; + } else { + slot = as->evenspill; + as->oddspill = slot+1; + as->evenspill += 2; + } + if (as->evenspill > 256) + lj_trace_err(as->J, LJ_TRERR_SPILLOV); + ir->s = (uint8_t)slot; + } + return sps_scale(slot); +} + +/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */ +static Reg ra_releasetmp(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + Reg r = ir->r; + lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); + ra_free(as, r); + ra_modified(as, r); + ir->r = RID_INIT; + return r; +} + +/* Restore a register (marked as free). Rematerialize or force a spill. */ +static Reg ra_restore(ASMState *as, IRRef ref) +{ + if (emit_canremat(ref)) { + return ra_rematk(as, ref); + } else { + IRIns *ir = IR(ref); + int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */ + Reg r = ir->r; + lua_assert(ra_hasreg(r)); + ra_sethint(ir->r, r); /* Keep hint. */ + ra_free(as, r); + if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */ + ra_modified(as, r); + RA_DBGX((as, "restore $i $r", ir, r)); + emit_spload(as, ir, r, ofs); + } + return r; + } +} + +/* Save a register to a spill slot. */ +static void ra_save(ASMState *as, IRIns *ir, Reg r) +{ + RA_DBGX((as, "save $i $r", ir, r)); + emit_spstore(as, ir, r, sps_scale(ir->s)); +} + +#define MINCOST(name) \ + if (rset_test(RSET_ALL, RID_##name) && \ + LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \ + cost = as->cost[RID_##name]; + +/* Evict the register with the lowest cost, forcing a restore. 
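+** MINCOST is expanded once per register via GPRDEF()/FPRDEF(), so the scan
+** below is an unrolled minimum search over one register file; the victim is
+** then handed to ra_restore(), which rematerializes a constant or reloads
+** the value from its (possibly newly assigned) spill slot.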
*/ +static Reg ra_evict(ASMState *as, RegSet allow) +{ + IRRef ref; + RegCost cost = ~(RegCost)0; + lua_assert(allow != RSET_EMPTY); + if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) { + GPRDEF(MINCOST) + } else { + FPRDEF(MINCOST) + } + ref = regcost_ref(cost); + lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins)); + /* Preferably pick any weak ref instead of a non-weak, non-const ref. */ + if (!irref_isk(ref) && (as->weakset & allow)) { + IRIns *ir = IR(ref); + if (!rset_test(as->weakset, ir->r)) + ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]); + } + return ra_restore(as, ref); +} + +/* Pick any register (marked as free). Evict on-demand. */ +static Reg ra_pick(ASMState *as, RegSet allow) +{ + RegSet pick = as->freeset & allow; + if (!pick) + return ra_evict(as, allow); + else + return rset_picktop(pick); +} + +/* Get a scratch register (marked as free). */ +static Reg ra_scratch(ASMState *as, RegSet allow) +{ + Reg r = ra_pick(as, allow); + ra_modified(as, r); + RA_DBGX((as, "scratch $r", r)); + return r; +} + +/* Evict all registers from a set (if not free). */ +static void ra_evictset(ASMState *as, RegSet drop) +{ + RegSet work; + as->modset |= drop; +#if !LJ_SOFTFP + work = (drop & ~as->freeset) & RSET_FPR; + while (work) { + Reg r = rset_pickbot(work); + ra_restore(as, regcost_ref(as->cost[r])); + rset_clear(work, r); + checkmclim(as); + } +#endif + work = (drop & ~as->freeset); + while (work) { + Reg r = rset_pickbot(work); + ra_restore(as, regcost_ref(as->cost[r])); + rset_clear(work, r); + checkmclim(as); + } +} + +/* Evict (rematerialize) all registers allocated to constants. */ +static void ra_evictk(ASMState *as) +{ + RegSet work; +#if !LJ_SOFTFP + work = ~as->freeset & RSET_FPR; + while (work) { + Reg r = rset_pickbot(work); + IRRef ref = regcost_ref(as->cost[r]); + if (emit_canremat(ref) && irref_isk(ref)) { + ra_rematk(as, ref); + checkmclim(as); + } + rset_clear(work, r); + } +#endif + work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_pickbot(work); + IRRef ref = regcost_ref(as->cost[r]); + if (emit_canremat(ref) && irref_isk(ref)) { + ra_rematk(as, ref); + checkmclim(as); + } + rset_clear(work, r); + } +} + +#ifdef RID_NUM_KREF +/* Allocate a register for a constant. */ +static Reg ra_allock(ASMState *as, int32_t k, RegSet allow) +{ + /* First try to find a register which already holds the same constant. */ + RegSet pick, work = ~as->freeset & RSET_GPR; + Reg r; + while (work) { + IRRef ref; + r = rset_pickbot(work); + ref = regcost_ref(as->cost[r]); + if (ref < ASMREF_L && + k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i)) + return r; + rset_clear(work, r); + } + pick = as->freeset & allow; + if (pick) { + /* Constants should preferably get unmodified registers. */ + if ((pick & ~as->modset)) + pick &= ~as->modset; + r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */ + } else { + r = ra_evict(as, allow); + } + RA_DBGX((as, "allock $x $r", k, r)); + ra_setkref(as, r, k); + rset_clear(as->freeset, r); + ra_noweak(as, r); + return r; +} + +/* Allocate a specific register for a constant. */ +static void ra_allockreg(ASMState *as, int32_t k, Reg r) +{ + Reg kr = ra_allock(as, k, RID2RSET(r)); + if (kr != r) { + IRIns irdummy; + irdummy.t.irt = IRT_INT; + ra_scratch(as, RID2RSET(r)); + emit_movrr(as, &irdummy, r, kr); + } +} +#else +#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k)) +#endif + +/* Allocate a register for ref from the allowed set of registers. 
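+** Callers normally go through ra_alloc1() below, which boils down to:
+**   Reg r = IR(ref)->r;
+**   if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
+**   ra_noweak(as, r);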
+** Note: this function assumes the ref does NOT have a register yet! +** Picks an optimal register, sets the cost and marks the register as non-free. +*/ +static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + RegSet pick = as->freeset & allow; + Reg r; + lua_assert(ra_noreg(ir->r)); + if (pick) { + /* First check register hint from propagation or PHI. */ + if (ra_hashint(ir->r)) { + r = ra_gethint(ir->r); + if (rset_test(pick, r)) /* Use hint register if possible. */ + goto found; + /* Rematerialization is cheaper than missing a hint. */ + if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) { + ra_rematk(as, regcost_ref(as->cost[r])); + goto found; + } + RA_DBGX((as, "hintmiss $f $r", ref, r)); + } + /* Invariants should preferably get unmodified registers. */ + if (ref < as->loopref && !irt_isphi(ir->t)) { + if ((pick & ~as->modset)) + pick &= ~as->modset; + r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */ + } else { + /* We've got plenty of regs, so get callee-save regs if possible. */ + if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH)) + pick &= ~RSET_SCRATCH; + r = rset_picktop(pick); + } + } else { + r = ra_evict(as, allow); + } +found: + RA_DBGX((as, "alloc $f $r", ref, r)); + ir->r = (uint8_t)r; + rset_clear(as->freeset, r); + ra_noweak(as, r); + as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t)); + return r; +} + +/* Allocate a register on-demand. */ +static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow) +{ + Reg r = IR(ref)->r; + /* Note: allow is ignored if the register is already allocated. */ + if (ra_noreg(r)) r = ra_allocref(as, ref, allow); + ra_noweak(as, r); + return r; +} + +/* Rename register allocation and emit move. */ +static void ra_rename(ASMState *as, Reg down, Reg up) +{ + IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]); + IRIns *ir = IR(ref); + ir->r = (uint8_t)up; + as->cost[down] = 0; + lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR)); + lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up)); + ra_free(as, down); /* 'down' is free ... */ + ra_modified(as, down); + rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */ + ra_noweak(as, up); + RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up)); + emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */ + if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */ + lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno); + ren = tref_ref(lj_ir_emit(as->J)); + as->ir = as->T->ir; /* The IR may have been reallocated. */ + IR(ren)->r = (uint8_t)down; + IR(ren)->s = SPS_NONE; + } +} + +/* Pick a destination register (marked as free). +** Caveat: allow is ignored if there's already a destination register. +** Use ra_destreg() to get a specific register. +*/ +static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow) +{ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + } else { + if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) { + dest = ra_gethint(dest); + ra_modified(as, dest); + RA_DBGX((as, "dest $r", dest)); + } else { + dest = ra_scratch(as, allow); + } + ir->r = dest; + } + if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest); + return dest; +} + +/* Force a specific destination register (marked as free). 
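+** Used when an instruction has to leave its result in one particular
+** register (for instance a call result that the calling convention fixes to
+** RID_RET); if the value was assigned a different register, an extra
+** register-to-register move is emitted via emit_movrr().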
*/ +static void ra_destreg(ASMState *as, IRIns *ir, Reg r) +{ + Reg dest = ra_dest(as, ir, RID2RSET(r)); + if (dest != r) { + lua_assert(rset_test(as->freeset, r)); + ra_modified(as, r); + emit_movrr(as, ir, dest, r); + } +} + +#if LJ_TARGET_X86ORX64 +/* Propagate dest register to left reference. Emit moves as needed. +** This is a required fixup step for all 2-operand machine instructions. +*/ +static void ra_left(ASMState *as, Reg dest, IRRef lref) +{ + IRIns *ir = IR(lref); + Reg left = ir->r; + if (ra_noreg(left)) { + if (irref_isk(lref)) { + if (ir->o == IR_KNUM) { + cTValue *tv = ir_knum(ir); + /* FP remat needs a load except for +0. Still better than eviction. */ + if (tvispzero(tv) || !(as->freeset & RSET_FPR)) { + emit_loadn(as, dest, tv); + return; + } +#if LJ_64 + } else if (ir->o == IR_KINT64) { + emit_loadu64(as, dest, ir_kint64(ir)->u64); + return; +#endif + } else if (ir->o != IR_KPRI) { + lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || + ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); + emit_loadi(as, dest, ir->i); + return; + } + } + if (!ra_hashint(left) && !iscrossref(as, lref)) + ra_sethint(ir->r, dest); /* Propagate register hint. */ + left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR); + } + ra_noweak(as, left); + /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */ + if (dest != left) { + /* Use register renaming if dest is the PHI reg. */ + if (irt_isphi(ir->t) && as->phireg[dest] == lref) { + ra_modified(as, left); + ra_rename(as, left, dest); + } else { + emit_movrr(as, ir, dest, left); + } + } +} +#else +/* Similar to ra_left, except we override any hints. */ +static void ra_leftov(ASMState *as, Reg dest, IRRef lref) +{ + IRIns *ir = IR(lref); + Reg left = ir->r; + if (ra_noreg(left)) { + ra_sethint(ir->r, dest); /* Propagate register hint. */ + left = ra_allocref(as, lref, + (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR); + } + ra_noweak(as, left); + if (dest != left) { + /* Use register renaming if dest is the PHI reg. */ + if (irt_isphi(ir->t) && as->phireg[dest] == lref) { + ra_modified(as, left); + ra_rename(as, left, dest); + } else { + emit_movrr(as, ir, dest, left); + } + } +} +#endif + +#if !LJ_64 +/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */ +static void ra_destpair(ASMState *as, IRIns *ir) +{ + Reg destlo = ir->r, desthi = (ir+1)->r; + /* First spill unrelated refs blocking the destination registers. */ + if (!rset_test(as->freeset, RID_RETLO) && + destlo != RID_RETLO && desthi != RID_RETLO) + ra_restore(as, regcost_ref(as->cost[RID_RETLO])); + if (!rset_test(as->freeset, RID_RETHI) && + destlo != RID_RETHI && desthi != RID_RETHI) + ra_restore(as, regcost_ref(as->cost[RID_RETHI])); + /* Next free the destination registers (if any). */ + if (ra_hasreg(destlo)) { + ra_free(as, destlo); + ra_modified(as, destlo); + } else { + destlo = RID_RETLO; + } + if (ra_hasreg(desthi)) { + ra_free(as, desthi); + ra_modified(as, desthi); + } else { + desthi = RID_RETHI; + } + /* Check for conflicts and shuffle the registers as needed. 
*/ + if (destlo == RID_RETHI) { + if (desthi == RID_RETLO) { +#if LJ_TARGET_X86 + *--as->mcp = XI_XCHGa + RID_RETHI; +#else + emit_movrr(as, ir, RID_RETHI, RID_TMP); + emit_movrr(as, ir, RID_RETLO, RID_RETHI); + emit_movrr(as, ir, RID_TMP, RID_RETLO); +#endif + } else { + emit_movrr(as, ir, RID_RETHI, RID_RETLO); + if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI); + } + } else if (desthi == RID_RETLO) { + emit_movrr(as, ir, RID_RETLO, RID_RETHI); + if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO); + } else { + if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI); + if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO); + } + /* Restore spill slots (if any). */ + if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI); + if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO); +} +#endif + +/* -- Snapshot handling --------- ----------------------------------------- */ + +/* Can we rematerialize a KNUM instead of forcing a spill? */ +static int asm_snap_canremat(ASMState *as) +{ + Reg r; + for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++) + if (irref_isk(regcost_ref(as->cost[r]))) + return 1; + return 0; +} + +/* Check whether a sunk store corresponds to an allocation. */ +static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs) +{ + if (irs->s == 255) { + if (irs->o == IR_ASTORE || irs->o == IR_HSTORE || + irs->o == IR_FSTORE || irs->o == IR_XSTORE) { + IRIns *irk = IR(irs->op1); + if (irk->o == IR_AREF || irk->o == IR_HREFK) + irk = IR(irk->op1); + return (IR(irk->op1) == ira); + } + return 0; + } else { + return (ira + irs->s == irs); /* Quick check. */ + } +} + +/* Allocate register or spill slot for a ref that escapes to a snapshot. */ +static void asm_snap_alloc1(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) { + if (ir->r == RID_SINK) { + ir->r = RID_SUNK; +#if LJ_HASFFI + if (ir->o == IR_CNEWI) { /* Allocate CNEWI value. */ + asm_snap_alloc1(as, ir->op2); + if (LJ_32 && (ir+1)->o == IR_HIOP) + asm_snap_alloc1(as, (ir+1)->op2); + } else +#endif + { /* Allocate stored values for TNEW, TDUP and CNEW. */ + IRIns *irs; + lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW); + for (irs = IR(as->snapref-1); irs > ir; irs--) + if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) { + lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || + irs->o == IR_FSTORE || irs->o == IR_XSTORE); + asm_snap_alloc1(as, irs->op2); + if (LJ_32 && (irs+1)->o == IR_HIOP) + asm_snap_alloc1(as, (irs+1)->op2); + } + } + } else { + RegSet allow; + if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) { + IRIns *irc; + for (irc = IR(as->curins); irc > ir; irc--) + if ((irc->op1 == ref || irc->op2 == ref) && + !(irc->r == RID_SINK || irc->r == RID_SUNK)) + goto nosink; /* Don't sink conversion if result is used. */ + asm_snap_alloc1(as, ir->op1); + return; + } + nosink: + allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR; + if ((as->freeset & allow) || + (allow == RSET_FPR && asm_snap_canremat(as))) { + /* Get a weak register if we have a free one or can rematerialize. */ + Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */ + if (!irt_isphi(ir->t)) + ra_weak(as, r); /* But mark it as weakly referenced. */ + checkmclim(as); + RA_DBGX((as, "snapreg $f $r", ref, ir->r)); + } else { + ra_spill(as, ir); /* Otherwise force a spill slot. */ + RA_DBGX((as, "snapspill $f $s", ref, ir->s)); + } + } + } +} + +/* Allocate refs escaping to a snapshot. 
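+** Snapshots describe how to rebuild the Lua stack when a guard exits to the
+** interpreter, so every non-constant ref named in the snapshot map must end
+** up either in a register or in a spill slot before the guard is emitted.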
*/ +static void asm_snap_alloc(ASMState *as) +{ + SnapShot *snap = &as->T->snap[as->snapno]; + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + if (!irref_isk(ref)) { + asm_snap_alloc1(as, ref); + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { + lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP); + asm_snap_alloc1(as, ref+1); + } + } + } +} + +/* All guards for a snapshot use the same exitno. This is currently the +** same as the snapshot number. Since the exact origin of the exit cannot +** be determined, all guards for the same snapshot must exit with the same +** RegSP mapping. +** A renamed ref which has been used in a prior guard for the same snapshot +** would cause an inconsistency. The easy way out is to force a spill slot. +*/ +static int asm_snap_checkrename(ASMState *as, IRRef ren) +{ + SnapShot *snap = &as->T->snap[as->snapno]; + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) { + IRIns *ir = IR(ref); + ra_spill(as, ir); /* Register renamed, so force a spill slot. */ + RA_DBGX((as, "snaprensp $f $s", ref, ir->s)); + return 1; /* Found. */ + } + } + return 0; /* Not found. */ +} + +/* Prepare snapshot for next guard instruction. */ +static void asm_snap_prep(ASMState *as) +{ + if (as->curins < as->snapref) { + do { + if (as->snapno == 0) return; /* Called by sunk stores before snap #0. */ + as->snapno--; + as->snapref = as->T->snap[as->snapno].ref; + } while (as->curins < as->snapref); + asm_snap_alloc(as); + as->snaprename = as->T->nins; + } else { + /* Process any renames above the highwater mark. */ + for (; as->snaprename < as->T->nins; as->snaprename++) { + IRIns *ir = IR(as->snaprename); + if (asm_snap_checkrename(as, ir->op1)) + ir->op2 = REF_BIAS-1; /* Kill rename. */ + } + } +} + +/* -- Miscellaneous helpers ----------------------------------------------- */ + +/* Calculate stack adjustment. */ +static int32_t asm_stack_adjust(ASMState *as) +{ + if (as->evenspill <= SPS_FIXED) + return 0; + return sps_scale(sps_align(as->evenspill)); +} + +/* Must match with hash*() in lj_tab.c. 
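+** The hash for a constant table key is computed once at assembly time, so
+** the generated lookup code can address the hash chain anchor directly;
+** hashrot() is the same final mix the runtime uses.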
*/ +static uint32_t ir_khash(IRIns *ir) +{ + uint32_t lo, hi; + if (irt_isstr(ir->t)) { + return ir_kstr(ir)->hash; + } else if (irt_isnum(ir->t)) { + lo = ir_knum(ir)->u32.lo; + hi = ir_knum(ir)->u32.hi << 1; + } else if (irt_ispri(ir->t)) { + lua_assert(!irt_isnil(ir->t)); + return irt_type(ir->t)-IRT_FALSE; + } else { + lua_assert(irt_isgcv(ir->t)); + lo = u32ptr(ir_kgc(ir)); + hi = lo + HASH_BIAS; + } + return hashrot(lo, hi); +} + +/* -- Allocations --------------------------------------------------------- */ + +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args); +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci); + +static void asm_snew(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new]; + IRRef args[3]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* const char *str */ + args[2] = ir->op2; /* size_t len */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); +} + +static void asm_tnew(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1]; + IRRef args[2]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* uint32_t ahsize */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCtab * */ + asm_gencall(as, ci, args); + ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1)); +} + +static void asm_tdup(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup]; + IRRef args[2]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* const GCtab *kt */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCtab * */ + asm_gencall(as, ci, args); +} + +static void asm_gc_check(ASMState *as); + +/* Explicit GC step. */ +static void asm_gcstep(ASMState *as, IRIns *ir) +{ + IRIns *ira; + for (ira = IR(as->stopins+1); ira < ir; ira++) + if ((ira->o == IR_TNEW || ira->o == IR_TDUP || + (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) && + ra_used(ira)) + as->gcsteps++; + if (as->gcsteps) + asm_gc_check(as); + as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */ +} + +/* -- Buffer operations --------------------------------------------------- */ + +static void asm_tvptr(ASMState *as, Reg dest, IRRef ref); + +static void asm_bufhdr(ASMState *as, IRIns *ir) +{ + Reg sb = ra_dest(as, ir, RSET_GPR); + if ((ir->op2 & IRBUFHDR_APPEND)) { + /* Rematerialize const buffer pointer instead of likely spill. */ + IRIns *irp = IR(ir->op1); + if (!(ra_hasreg(irp->r) || irp == ir-1 || + (irp == ir-2 && !ra_used(ir-1)))) { + while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND))) + irp = IR(irp->op1); + if (irref_isk(irp->op1)) { + ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR)); + ir = irp; + } + } + } else { + Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb)); + /* Passing ir isn't strictly correct, but it's an IRT_P32, too. */ + emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p)); + emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b)); + } +#if LJ_TARGET_X86ORX64 + ra_left(as, sb, ir->op1); +#else + ra_leftov(as, sb, ir->op1); +#endif +} + +static void asm_bufput(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr]; + IRRef args[3]; + IRIns *irs; + int kchar = -1; + args[0] = ir->op1; /* SBuf * */ + args[1] = ir->op2; /* GCstr * */ + irs = IR(ir->op2); + lua_assert(irt_isstr(irs->t)); + if (irs->o == IR_KGC) { + GCstr *s = ir_kstr(irs); + if (s->len == 1) { /* Optimize put of single-char string constant. 
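+    ** E.g. appending a one-character string constant becomes a call through
+    ** IRCALL_lj_buf_putchar with the character passed as an immediate,
+    ** instead of a generic lj_buf_putstr() on the interned string.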
*/ + kchar = strdata(s)[0]; + args[1] = ASMREF_TMP1; /* int, truncated to char */ + ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar]; + } + } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) { + if (irs->o == IR_TOSTR) { /* Fuse number to string conversions. */ + if (irs->op2 == IRTOSTR_NUM) { + args[1] = ASMREF_TMP1; /* TValue * */ + ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum]; + } else { + lua_assert(irt_isinteger(IR(irs->op1)->t)); + args[1] = irs->op1; /* int */ + if (irs->op2 == IRTOSTR_INT) + ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint]; + else + ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar]; + } + } else if (irs->o == IR_SNEW) { /* Fuse string allocation. */ + args[1] = irs->op1; /* const void * */ + args[2] = irs->op2; /* MSize */ + ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem]; + } + } + asm_setupresult(as, ir, ci); /* SBuf * */ + asm_gencall(as, ci, args); + if (args[1] == ASMREF_TMP1) { + Reg tmp = ra_releasetmp(as, ASMREF_TMP1); + if (kchar == -1) + asm_tvptr(as, tmp, irs->op1); + else + ra_allockreg(as, kchar, tmp); + } +} + +static void asm_bufstr(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr]; + IRRef args[1]; + args[0] = ir->op1; /* SBuf *sb */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); +} + +/* -- Type conversions ---------------------------------------------------- */ + +static void asm_tostr(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci; + IRRef args[2]; + args[0] = ASMREF_L; + as->gcsteps++; + if (ir->op2 == IRTOSTR_NUM) { + args[1] = ASMREF_TMP1; /* cTValue * */ + ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num]; + } else { + args[1] = ir->op1; /* int32_t k */ + if (ir->op2 == IRTOSTR_INT) + ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int]; + else + ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char]; + } + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); + if (ir->op2 == IRTOSTR_NUM) + asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1); +} + +#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86 +static void asm_conv64(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); + IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); + IRCallID id; + IRRef args[2]; + lua_assert((ir-1)->o == IR_CONV && ir->o == IR_HIOP); + args[LJ_BE] = (ir-1)->op1; + args[LJ_LE] = ir->op1; + if (st == IRT_NUM || st == IRT_FLOAT) { + id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64); + ir--; + } else { + id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64); + } + { +#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP + CCallInfo cim = lj_ir_callinfo[id], *ci = &cim; + cim.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! 
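+    ** The 64 bit conversion helpers follow soft-float conventions even on a
+    ** hard-float build, so the call is marked CCI_VARARG here, presumably
+    ** because the AAPCS passes variadic arguments in core registers only.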
*/ +#else + const CCallInfo *ci = &lj_ir_callinfo[id]; +#endif + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); + } +} +#endif + +/* -- Memory references --------------------------------------------------- */ + +static void asm_newref(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; + IRRef args[3]; + if (ir->r == RID_SINK) + return; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* GCtab *t */ + args[2] = ASMREF_TMP1; /* cTValue *key */ + asm_setupresult(as, ir, ci); /* TValue * */ + asm_gencall(as, ci, args); + asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2); +} + +static void asm_lref(ASMState *as, IRIns *ir) +{ + Reg r = ra_dest(as, ir, RSET_GPR); +#if LJ_TARGET_X86ORX64 + ra_left(as, r, ASMREF_L); +#else + ra_leftov(as, r, ASMREF_L); +#endif +} + +/* -- Calls --------------------------------------------------------------- */ + +/* Collect arguments from CALL* and CARG instructions. */ +static void asm_collectargs(ASMState *as, IRIns *ir, + const CCallInfo *ci, IRRef *args) +{ + uint32_t n = CCI_XNARGS(ci); + lua_assert(n <= CCI_NARGS_MAX*2); /* Account for split args. */ + if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; } + while (n-- > 1) { + ir = IR(ir->op1); + lua_assert(ir->o == IR_CARG); + args[n] = ir->op2 == REF_NIL ? 0 : ir->op2; + } + args[0] = ir->op1 == REF_NIL ? 0 : ir->op1; + lua_assert(IR(ir->op1)->o != IR_CARG); +} + +/* Reconstruct CCallInfo flags for CALLX*. */ +static uint32_t asm_callx_flags(ASMState *as, IRIns *ir) +{ + uint32_t nargs = 0; + if (ir->op1 != REF_NIL) { /* Count number of arguments first. */ + IRIns *ira = IR(ir->op1); + nargs++; + while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); } + } +#if LJ_HASFFI + if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */ + CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i; + CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id); + nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0); +#if LJ_TARGET_X86 + nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT); +#endif + } +#endif + return (nargs | (ir->t.irt << CCI_OTSHIFT)); +} + +static void asm_callid(ASMState *as, IRIns *ir, IRCallID id) +{ + const CCallInfo *ci = &lj_ir_callinfo[id]; + IRRef args[2]; + args[0] = ir->op1; + args[1] = ir->op2; + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +static void asm_call(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX]; + const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; + asm_collectargs(as, ir, ci, args); + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +#if !LJ_SOFTFP +static void asm_fppow(ASMState *as, IRIns *ir, IRRef lref, IRRef rref) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow]; + IRRef args[2]; + args[0] = lref; + args[1] = rref; + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +static int asm_fpjoin_pow(ASMState *as, IRIns *ir) +{ + IRIns *irp = IR(ir->op1); + if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { + IRIns *irpp = IR(irp->op1); + if (irpp == ir-2 && irpp->o == IR_FPMATH && + irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { + asm_fppow(as, ir, irpp->op1, irp->op2); + return 1; + } + } + return 0; +} +#endif + +/* -- PHI and loop handling ----------------------------------------------- */ + +/* Break a PHI cycle by renaming to a free register (evict if needed). */ +static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby, + RegSet allow) +{ + RegSet candidates = blocked & allow; + if (candidates) { /* If this register file has candidates. 
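+			** A cycle looks like two (or more) PHIs whose left
+			** values sit in each other's PHI registers; renaming
+			** one of them to a free register turns the cycle into
+			** a chain that asm_phi_shuffle() can then finish.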
*/ + /* Note: the set for ra_pick cannot be empty, since each register file + ** has some registers never allocated to PHIs. + */ + Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */ + if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */ + candidates = candidates & ~blockedby; + down = rset_picktop(candidates); /* Pick candidate PHI register. */ + ra_rename(as, down, up); /* And rename it to the free register. */ + } +} + +/* PHI register shuffling. +** +** The allocator tries hard to preserve PHI register assignments across +** the loop body. Most of the time this loop does nothing, since there +** are no register mismatches. +** +** If a register mismatch is detected and ... +** - the register is currently free: rename it. +** - the register is blocked by an invariant: restore/remat and rename it. +** - Otherwise the register is used by another PHI, so mark it as blocked. +** +** The renames are order-sensitive, so just retry the loop if a register +** is marked as blocked, but has been freed in the meantime. A cycle is +** detected if all of the blocked registers are allocated. To break the +** cycle rename one of them to a free register and retry. +** +** Note that PHI spill slots are kept in sync and don't need to be shuffled. +*/ +static void asm_phi_shuffle(ASMState *as) +{ + RegSet work; + + /* Find and resolve PHI register mismatches. */ + for (;;) { + RegSet blocked = RSET_EMPTY; + RegSet blockedby = RSET_EMPTY; + RegSet phiset = as->phiset; + while (phiset) { /* Check all left PHI operand registers. */ + Reg r = rset_pickbot(phiset); + IRIns *irl = IR(as->phireg[r]); + Reg left = irl->r; + if (r != left) { /* Mismatch? */ + if (!rset_test(as->freeset, r)) { /* PHI register blocked? */ + IRRef ref = regcost_ref(as->cost[r]); + /* Blocked by other PHI (w/reg)? */ + if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) { + rset_set(blocked, r); + if (ra_hasreg(left)) + rset_set(blockedby, left); + left = RID_NONE; + } else { /* Otherwise grab register from invariant. */ + ra_restore(as, ref); + checkmclim(as); + } + } + if (ra_hasreg(left)) { + ra_rename(as, left, r); + checkmclim(as); + } + } + rset_clear(phiset, r); + } + if (!blocked) break; /* Finished. */ + if (!(as->freeset & blocked)) { /* Break cycles if none are free. */ + asm_phi_break(as, blocked, blockedby, RSET_GPR); + if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR); + checkmclim(as); + } /* Else retry some more renames. */ + } + + /* Restore/remat invariants whose registers are modified inside the loop. */ +#if !LJ_SOFTFP + work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR; + while (work) { + Reg r = rset_pickbot(work); + ra_restore(as, regcost_ref(as->cost[r])); + rset_clear(work, r); + checkmclim(as); + } +#endif + work = as->modset & ~(as->freeset | as->phiset); + while (work) { + Reg r = rset_pickbot(work); + ra_restore(as, regcost_ref(as->cost[r])); + rset_clear(work, r); + checkmclim(as); + } + + /* Allocate and save all unsaved PHI regs and clear marks. */ + work = as->phiset; + while (work) { + Reg r = rset_picktop(work); + IRRef lref = as->phireg[r]; + IRIns *ir = IR(lref); + if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */ + irt_clearmark(ir->t); /* Handled here, so clear marker now. */ + ra_alloc1(as, lref, RID2RSET(r)); + ra_save(as, ir, r); /* Save to spill slot inside the loop. */ + checkmclim(as); + } + rset_clear(work, r); + } +} + +/* Copy unsynced left/right PHI spill slots. Rarely needed. 
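+** If both the left (invariant) and the right (variant) PHI value ended up
+** with spill slots, the loop tail must copy the right slot into the left
+** one through a temporary register, which is what the code below emits.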
*/ +static void asm_phi_copyspill(ASMState *as) +{ + int need = 0; + IRIns *ir; + for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) + if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s)) + need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */ + if ((need & 1)) { /* Copy integer spill slots. */ +#if !LJ_TARGET_X86ORX64 + Reg r = RID_TMP; +#else + Reg r = RID_RET; + if ((as->freeset & RSET_GPR)) + r = rset_pickbot((as->freeset & RSET_GPR)); + else + emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); +#endif + for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) { + if (ra_hasspill(ir->s)) { + IRIns *irl = IR(ir->op1); + if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) { + emit_spstore(as, irl, r, sps_scale(irl->s)); + emit_spload(as, ir, r, sps_scale(ir->s)); + checkmclim(as); + } + } + } +#if LJ_TARGET_X86ORX64 + if (!rset_test(as->freeset, r)) + emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); +#endif + } +#if !LJ_SOFTFP + if ((need & 2)) { /* Copy FP spill slots. */ +#if LJ_TARGET_X86 + Reg r = RID_XMM0; +#else + Reg r = RID_FPRET; +#endif + if ((as->freeset & RSET_FPR)) + r = rset_pickbot((as->freeset & RSET_FPR)); + if (!rset_test(as->freeset, r)) + emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); + for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) { + if (ra_hasspill(ir->s)) { + IRIns *irl = IR(ir->op1); + if (ra_hasspill(irl->s) && irt_isfp(ir->t)) { + emit_spstore(as, irl, r, sps_scale(irl->s)); + emit_spload(as, ir, r, sps_scale(ir->s)); + checkmclim(as); + } + } + } + if (!rset_test(as->freeset, r)) + emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP); + } +#endif +} + +/* Emit renames for left PHIs which are only spilled outside the loop. */ +static void asm_phi_fixup(ASMState *as) +{ + RegSet work = as->phiset; + while (work) { + Reg r = rset_picktop(work); + IRRef lref = as->phireg[r]; + IRIns *ir = IR(lref); + if (irt_ismarked(ir->t)) { + irt_clearmark(ir->t); + /* Left PHI gained a spill slot before the loop? */ + if (ra_hasspill(ir->s)) { + IRRef ren; + lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno); + ren = tref_ref(lj_ir_emit(as->J)); + as->ir = as->T->ir; /* The IR may have been reallocated. */ + IR(ren)->r = (uint8_t)r; + IR(ren)->s = SPS_NONE; + } + } + rset_clear(work, r); + } +} + +/* Setup right PHI reference. */ +static void asm_phi(ASMState *as, IRIns *ir) +{ + RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & + ~as->phiset; + RegSet afree = (as->freeset & allow); + IRIns *irl = IR(ir->op1); + IRIns *irr = IR(ir->op2); + if (ir->r == RID_SINK) /* Sink PHI. */ + return; + /* Spill slot shuffling is not implemented yet (but rarely needed). */ + if (ra_hasspill(irl->s) || ra_hasspill(irr->s)) + lj_trace_err(as->J, LJ_TRERR_NYIPHI); + /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */ + if ((afree & (afree-1))) { /* Two or more free registers? */ + Reg r; + if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */ + r = ra_allocref(as, ir->op2, allow); + } else { /* Duplicate right PHI, need a copy (rare). */ + r = ra_scratch(as, allow); + emit_movrr(as, irr, r, irr->r); + } + ir->r = (uint8_t)r; + rset_set(as->phiset, r); + as->phireg[r] = (IRRef1)ir->op1; + irt_setmark(irl->t); /* Marks left PHIs _with_ register. */ + if (ra_noreg(irl->r)) + ra_sethint(irl->r, r); /* Set register hint for left PHI. */ + } else { /* Otherwise allocate a spill slot. */ + /* This is overly restrictive, but it triggers only on synthetic code. 
*/ + if (ra_hasreg(irl->r) || ra_hasreg(irr->r)) + lj_trace_err(as->J, LJ_TRERR_NYIPHI); + ra_spill(as, ir); + irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */ + } +} + +static void asm_loop_fixup(ASMState *as); + +/* Middle part of a loop. */ +static void asm_loop(ASMState *as) +{ + MCode *mcspill; + /* LOOP is a guard, so the snapno is up to date. */ + as->loopsnapno = as->snapno; + if (as->gcsteps) + asm_gc_check(as); + /* LOOP marks the transition from the variant to the invariant part. */ + as->flagmcp = as->invmcp = NULL; + as->sectref = 0; + if (!neverfuse(as)) as->fuseref = 0; + asm_phi_shuffle(as); + mcspill = as->mcp; + asm_phi_copyspill(as); + asm_loop_fixup(as); + as->mcloop = as->mcp; + RA_DBGX((as, "===== LOOP =====")); + if (!as->realign) RA_DBG_FLUSH(); + if (as->mcp != mcspill) + emit_jmp(as, mcspill); +} + +/* -- Target-specific assembler ------------------------------------------- */ + +#if LJ_TARGET_X86ORX64 +#include "lj_asm_x86.h" +#elif LJ_TARGET_ARM +#include "lj_asm_arm.h" +#elif LJ_TARGET_PPC +#include "lj_asm_ppc.h" +#elif LJ_TARGET_MIPS +#include "lj_asm_mips.h" +#else +#error "Missing assembler for target CPU" +#endif + +/* -- Instruction dispatch ------------------------------------------------ */ + +/* Assemble a single instruction. */ +static void asm_ir(ASMState *as, IRIns *ir) +{ + switch ((IROp)ir->o) { + /* Miscellaneous ops. */ + case IR_LOOP: asm_loop(as); break; + case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; + case IR_USE: + ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; + case IR_PHI: asm_phi(as, ir); break; + case IR_HIOP: asm_hiop(as, ir); break; + case IR_GCSTEP: asm_gcstep(as, ir); break; + case IR_PROF: asm_prof(as, ir); break; + + /* Guarded assertions. */ + case IR_LT: case IR_GE: case IR_LE: case IR_GT: + case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: + case IR_ABC: + asm_comp(as, ir); + break; + case IR_EQ: case IR_NE: + if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) { + as->curins--; + asm_href(as, ir-1, (IROp)ir->o); + } else { + asm_equal(as, ir); + } + break; + + case IR_RETF: asm_retf(as, ir); break; + + /* Bit ops. */ + case IR_BNOT: asm_bnot(as, ir); break; + case IR_BSWAP: asm_bswap(as, ir); break; + case IR_BAND: asm_band(as, ir); break; + case IR_BOR: asm_bor(as, ir); break; + case IR_BXOR: asm_bxor(as, ir); break; + case IR_BSHL: asm_bshl(as, ir); break; + case IR_BSHR: asm_bshr(as, ir); break; + case IR_BSAR: asm_bsar(as, ir); break; + case IR_BROL: asm_brol(as, ir); break; + case IR_BROR: asm_bror(as, ir); break; + + /* Arithmetic ops. */ + case IR_ADD: asm_add(as, ir); break; + case IR_SUB: asm_sub(as, ir); break; + case IR_MUL: asm_mul(as, ir); break; + case IR_DIV: asm_div(as, ir); break; + case IR_MOD: asm_mod(as, ir); break; + case IR_POW: asm_pow(as, ir); break; + case IR_NEG: asm_neg(as, ir); break; + case IR_ABS: asm_abs(as, ir); break; + case IR_ATAN2: asm_atan2(as, ir); break; + case IR_LDEXP: asm_ldexp(as, ir); break; + case IR_MIN: asm_min(as, ir); break; + case IR_MAX: asm_max(as, ir); break; + case IR_FPMATH: asm_fpmath(as, ir); break; + + /* Overflow-checking arithmetic ops. */ + case IR_ADDOV: asm_addov(as, ir); break; + case IR_SUBOV: asm_subov(as, ir); break; + case IR_MULOV: asm_mulov(as, ir); break; + + /* Memory references. 
*/ + case IR_AREF: asm_aref(as, ir); break; + case IR_HREF: asm_href(as, ir, 0); break; + case IR_HREFK: asm_hrefk(as, ir); break; + case IR_NEWREF: asm_newref(as, ir); break; + case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; + case IR_FREF: asm_fref(as, ir); break; + case IR_STRREF: asm_strref(as, ir); break; + case IR_LREF: asm_lref(as, ir); break; + + /* Loads and stores. */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + asm_ahuvload(as, ir); + break; + case IR_FLOAD: asm_fload(as, ir); break; + case IR_XLOAD: asm_xload(as, ir); break; + case IR_SLOAD: asm_sload(as, ir); break; + + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; + case IR_FSTORE: asm_fstore(as, ir); break; + case IR_XSTORE: asm_xstore(as, ir); break; + + /* Allocations. */ + case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; + case IR_TNEW: asm_tnew(as, ir); break; + case IR_TDUP: asm_tdup(as, ir); break; + case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; + + /* Buffer operations. */ + case IR_BUFHDR: asm_bufhdr(as, ir); break; + case IR_BUFPUT: asm_bufput(as, ir); break; + case IR_BUFSTR: asm_bufstr(as, ir); break; + + /* Write barriers. */ + case IR_TBAR: asm_tbar(as, ir); break; + case IR_OBAR: asm_obar(as, ir); break; + + /* Type conversions. */ + case IR_TOBIT: asm_tobit(as, ir); break; + case IR_CONV: asm_conv(as, ir); break; + case IR_TOSTR: asm_tostr(as, ir); break; + case IR_STRTO: asm_strto(as, ir); break; + + /* Calls. */ + case IR_CALLA: + as->gcsteps++; + /* fallthrough */ + case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; + case IR_CALLXS: asm_callx(as, ir); break; + case IR_CARG: break; + + default: + setintV(&as->J->errinfo, ir->o); + lj_trace_err_info(as->J, LJ_TRERR_NYIIR); + break; + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Head of a root trace. */ +static void asm_head_root(ASMState *as) +{ + int32_t spadj; + asm_head_root_base(as); + emit_setvmstate(as, (int32_t)as->T->traceno); + spadj = asm_stack_adjust(as); + as->T->spadjust = (uint16_t)spadj; + emit_spsub(as, spadj); + /* Root traces assume a checked stack for the starting proto. */ + as->T->topslot = gcref(as->T->startpt)->pt.framesize; +} + +/* Head of a side trace. +** +** The current simplistic algorithm requires that all slots inherited +** from the parent are live in a register between pass 2 and pass 3. This +** avoids the complexity of stack slot shuffling. But of course this may +** overflow the register set in some cases and cause the dreaded error: +** "NYI: register coalescing too complex". A refined algorithm is needed. +*/ +static void asm_head_side(ASMState *as) +{ + IRRef1 sloadins[RID_MAX]; + RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */ + RegSet live = RSET_EMPTY; /* Live parent registers. */ + IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */ + int32_t spadj, spdelta; + int pass2 = 0; + int pass3 = 0; + IRRef i; + + if (as->snapno && as->topslot > as->parent->topslot) { + /* Force snap #0 alloc to prevent register overwrite in stack check. */ + as->snapno = 0; + asm_snap_alloc(as); + } + allow = asm_head_side_base(as, irp, allow); + + /* Scan all parent SLOADs and collect register dependencies. 
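+  ** A side trace re-imports the parent's live slots as SLOADs carrying
+  ** IRSLOAD_PARENT (or as PVAL/HIOP refs); as->parentmap[] holds the
+  ** parent's RegSP for each of them, which drives the coalescing below.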
*/ + for (i = as->stopins; i > REF_BASE; i--) { + IRIns *ir = IR(i); + RegSP rs; + lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) || + (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL); + rs = as->parentmap[i - REF_FIRST]; + if (ra_hasreg(ir->r)) { + rset_clear(allow, ir->r); + if (ra_hasspill(ir->s)) { + ra_save(as, ir, ir->r); + checkmclim(as); + } + } else if (ra_hasspill(ir->s)) { + irt_setmark(ir->t); + pass2 = 1; + } + if (ir->r == rs) { /* Coalesce matching registers right now. */ + ra_free(as, ir->r); + } else if (ra_hasspill(regsp_spill(rs))) { + if (ra_hasreg(ir->r)) + pass3 = 1; + } else if (ra_used(ir)) { + sloadins[rs] = (IRRef1)i; + rset_set(live, rs); /* Block live parent register. */ + } + } + + /* Calculate stack frame adjustment. */ + spadj = asm_stack_adjust(as); + spdelta = spadj - (int32_t)as->parent->spadjust; + if (spdelta < 0) { /* Don't shrink the stack frame. */ + spadj = (int32_t)as->parent->spadjust; + spdelta = 0; + } + as->T->spadjust = (uint16_t)spadj; + + /* Reload spilled target registers. */ + if (pass2) { + for (i = as->stopins; i > REF_BASE; i--) { + IRIns *ir = IR(i); + if (irt_ismarked(ir->t)) { + RegSet mask; + Reg r; + RegSP rs; + irt_clearmark(ir->t); + rs = as->parentmap[i - REF_FIRST]; + if (!ra_hasspill(regsp_spill(rs))) + ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */ + else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s)) + continue; /* Same spill slot, do nothing. */ + mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow; + if (mask == RSET_EMPTY) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + r = ra_allocref(as, i, mask); + ra_save(as, ir, r); + rset_clear(allow, r); + if (r == rs) { /* Coalesce matching registers right now. */ + ra_free(as, r); + rset_clear(live, r); + } else if (ra_hasspill(regsp_spill(rs))) { + pass3 = 1; + } + checkmclim(as); + } + } + } + + /* Store trace number and adjust stack frame relative to the parent. */ + emit_setvmstate(as, (int32_t)as->T->traceno); + emit_spsub(as, spdelta); + +#if !LJ_TARGET_X86ORX64 + /* Restore BASE register from parent spill slot. */ + if (ra_hasspill(irp->s)) + emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s)); +#endif + + /* Restore target registers from parent spill slots. */ + if (pass3) { + RegSet work = ~as->freeset & RSET_ALL; + while (work) { + Reg r = rset_pickbot(work); + IRRef ref = regcost_ref(as->cost[r]); + RegSP rs = as->parentmap[ref - REF_FIRST]; + rset_clear(work, r); + if (ra_hasspill(regsp_spill(rs))) { + int32_t ofs = sps_scale(regsp_spill(rs)); + ra_free(as, r); + emit_spload(as, IR(ref), r, ofs); + checkmclim(as); + } + } + } + + /* Shuffle registers to match up target regs with parent regs. */ + for (;;) { + RegSet work; + + /* Repeatedly coalesce free live registers by moving to their target. */ + while ((work = as->freeset & live) != RSET_EMPTY) { + Reg rp = rset_pickbot(work); + IRIns *ir = IR(sloadins[rp]); + rset_clear(live, rp); + rset_clear(allow, rp); + ra_free(as, ir->r); + emit_movrr(as, ir, ir->r, rp); + checkmclim(as); + } + + /* We're done if no live registers remain. */ + if (live == RSET_EMPTY) + break; + + /* Break cycles by renaming one target to a temp. register. 
*/ + if (live & RSET_GPR) { + RegSet tmpset = as->freeset & ~live & allow & RSET_GPR; + if (tmpset == RSET_EMPTY) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset)); + } + if (!LJ_SOFTFP && (live & RSET_FPR)) { + RegSet tmpset = as->freeset & ~live & allow & RSET_FPR; + if (tmpset == RSET_EMPTY) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset)); + } + checkmclim(as); + /* Continue with coalescing to fix up the broken cycle(s). */ + } + + /* Inherit top stack slot already checked by parent trace. */ + as->T->topslot = as->parent->topslot; + if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */ +#ifdef EXITSTATE_CHECKEXIT + /* Highest exit + 1 indicates stack check. */ + ExitNo exitno = as->T->nsnap; +#else + /* Reuse the parent exit in the context of the parent trace. */ + ExitNo exitno = as->J->exitno; +#endif + as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */ + asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno); + } +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Get base slot for a snapshot. */ +static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n; + for (n = snap->nent; n > 0; n--) { + SnapEntry sn = map[n-1]; + if ((sn & SNAP_FRAME)) { + *gotframe = 1; + return snap_slot(sn); + } + } + return 0; +} + +/* Link to another trace. */ +static void asm_tail_link(ASMState *as) +{ + SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */ + SnapShot *snap = &as->T->snap[snapno]; + int gotframe = 0; + BCReg baseslot = asm_baseslot(as, snap, &gotframe); + + as->topslot = snap->topslot; + checkmclim(as); + ra_allocref(as, REF_BASE, RID2RSET(RID_BASE)); + + if (as->T->link == 0) { + /* Setup fixed registers for exit to interpreter. */ + const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]); + int32_t mres; + if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */ + BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins; + if (bc_isret(bc_op(*retpc))) + pc = retpc; + } + ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH); + ra_allockreg(as, i32ptr(pc), RID_LPC); + mres = (int32_t)(snap->nslots - baseslot); + switch (bc_op(*pc)) { + case BC_CALLM: case BC_CALLMT: + mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break; + case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break; + case BC_TSETM: mres -= (int32_t)bc_a(*pc); break; + default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break; + } + ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */ + } else if (baseslot) { + /* Save modified BASE for linking to trace with higher start frame. */ + emit_setgl(as, RID_BASE, jit_base); + } + emit_addptr(as, RID_BASE, 8*(int32_t)baseslot); + + /* Sync the interpreter state with the on-trace state. */ + asm_stack_restore(as, snap); + + /* Root traces that add frames need to check the stack at the end. */ + if (!as->parent && gotframe) + asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno); +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Clear reg/sp for all instructions and add register hints. 
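+** The 16 bit prev field of an IRIns effectively overlays its r/s byte pair,
+** so writing e.g. ir->prev = REGSP_HINT(RID_RET) below seeds the register
+** hint (and REGSP_INIT resets hint and spill slot) that the main assembly
+** loop starts from.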
*/ +static void asm_setup_regsp(ASMState *as) +{ + GCtrace *T = as->T; + int sink = T->sinktags; + IRRef nins = T->nins; + IRIns *ir, *lastir; + int inloop; +#if LJ_TARGET_ARM + uint32_t rload = 0xa6402a64; +#endif + + ra_setup(as); + + /* Clear reg/sp for constants. */ + for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) + ir->prev = REGSP_INIT; + + /* REF_BASE is used for implicit references to the BASE register. */ + lastir->prev = REGSP_HINT(RID_BASE); + + ir = IR(nins-1); + if (ir->o == IR_RENAME) { + do { ir--; nins--; } while (ir->o == IR_RENAME); + T->nins = nins; /* Remove any renames left over from ASM restart. */ + } + as->snaprename = nins; + as->snapref = nins; + as->snapno = T->nsnap; + + as->stopins = REF_BASE; + as->orignins = nins; + as->curins = nins; + + /* Setup register hints for parent link instructions. */ + ir = IR(REF_FIRST); + if (as->parent) { + uint16_t *p; + lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir); + if (lastir - ir > LJ_MAX_JSLOTS) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + as->stopins = (IRRef)((lastir-1) - as->ir); + for (p = as->parentmap; ir < lastir; ir++) { + RegSP rs = ir->prev; + *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */ + if (!ra_hasspill(regsp_spill(rs))) + ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs)); + else + ir->prev = REGSP_INIT; + } + } + + inloop = 0; + as->evenspill = SPS_FIRST; + for (lastir = IR(nins); ir < lastir; ir++) { + if (sink) { + if (ir->r == RID_SINK) + continue; + if (ir->r == RID_SUNK) { /* Revert after ASM restart. */ + ir->r = RID_SINK; + continue; + } + } + switch (ir->o) { + case IR_LOOP: + inloop = 1; + break; +#if LJ_TARGET_ARM + case IR_SLOAD: + if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP)) + break; + /* fallthrough */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + if (!LJ_SOFTFP && irt_isnum(ir->t)) break; + ir->prev = (uint16_t)REGSP_HINT((rload & 15)); + rload = lj_ror(rload, 4); + continue; +#endif + case IR_CALLXS: { + CCallInfo ci; + ci.flags = asm_callx_flags(as, ir); + ir->prev = asm_setup_call_slots(as, ir, &ci); + if (inloop) + as->modset |= RSET_SCRATCH; + continue; + } + case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: { + const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; + ir->prev = asm_setup_call_slots(as, ir, ci); + if (inloop) + as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ? + (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH; + continue; + } +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) + case IR_HIOP: + switch ((ir-1)->o) { +#if LJ_SOFTFP && LJ_TARGET_ARM + case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + if (ra_hashint((ir-1)->r)) { + ir->prev = (ir-1)->prev + 1; + continue; + } + break; +#endif +#if !LJ_SOFTFP && LJ_NEED_FP64 + case IR_CONV: + if (irt_isfp((ir-1)->t)) { + ir->prev = REGSP_HINT(RID_FPRET); + continue; + } + /* fallthrough */ +#endif + case IR_CALLN: case IR_CALLXS: +#if LJ_SOFTFP + case IR_MIN: case IR_MAX: +#endif + (ir-1)->prev = REGSP_HINT(RID_RETLO); + ir->prev = REGSP_HINT(RID_RETHI); + continue; + default: + break; + } + break; +#endif +#if LJ_SOFTFP + case IR_MIN: case IR_MAX: + if ((ir+1)->o != IR_HIOP) break; + /* fallthrough */ +#endif + /* C calls evict all scratch regs and return results in RID_RET. */ + case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT: + if (REGARG_NUMGPR < 3 && as->evenspill < 3) + as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. 
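+	** On targets that pass fewer than three call arguments in registers
+	** the remaining arguments go to outgoing stack slots, so room for
+	** them is reserved in the spill area up front (same idea for the
+	** 4-argument cases below).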
*/ +#if LJ_TARGET_X86 && LJ_HASFFI + if (0) { + case IR_CNEW: + if (ir->op2 != REF_NIL && as->evenspill < 4) + as->evenspill = 4; /* lj_cdata_newv needs 4 args. */ + } +#else + case IR_CNEW: +#endif + case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR: + case IR_BUFSTR: + ir->prev = REGSP_HINT(RID_RET); + if (inloop) + as->modset = RSET_SCRATCH; + continue; + case IR_STRTO: case IR_OBAR: + if (inloop) + as->modset = RSET_SCRATCH; + break; +#if !LJ_SOFTFP + case IR_ATAN2: +#if LJ_TARGET_X86 + if (as->evenspill < 4) /* Leave room to call atan2(). */ + as->evenspill = 4; +#endif +#if !LJ_TARGET_X86ORX64 + case IR_LDEXP: +#endif +#endif + case IR_POW: + if (!LJ_SOFTFP && irt_isnum(ir->t)) { + if (inloop) + as->modset |= RSET_SCRATCH; +#if LJ_TARGET_X86 + break; +#else + ir->prev = REGSP_HINT(RID_FPRET); + continue; +#endif + } + /* fallthrough for integer POW */ + case IR_DIV: case IR_MOD: + if (!irt_isnum(ir->t)) { + ir->prev = REGSP_HINT(RID_RET); + if (inloop) + as->modset |= (RSET_SCRATCH & RSET_GPR); + continue; + } + break; + case IR_FPMATH: +#if LJ_TARGET_X86ORX64 + if (ir->op2 <= IRFPM_TRUNC) { + if (!(as->flags & JIT_F_SSE4_1)) { + ir->prev = REGSP_HINT(RID_XMM0); + if (inloop) + as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); + continue; + } + break; + } else if (ir->op2 == IRFPM_EXP2 && !LJ_64) { + if (as->evenspill < 4) /* Leave room to call pow(). */ + as->evenspill = 4; + } +#endif + if (inloop) + as->modset |= RSET_SCRATCH; +#if LJ_TARGET_X86 + break; +#else + ir->prev = REGSP_HINT(RID_FPRET); + continue; +#endif +#if LJ_TARGET_X86ORX64 + /* Non-constant shift counts need to be in RID_ECX on x86/x64. */ + case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR: + if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) { + IR(ir->op2)->r = REGSP_HINT(RID_ECX); + if (inloop) + rset_set(as->modset, RID_ECX); + } + break; +#endif + /* Do not propagate hints across type conversions or loads. */ + case IR_TOBIT: + case IR_XLOAD: +#if !LJ_TARGET_ARM + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: +#endif + break; + case IR_CONV: + if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM || + (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT) + break; + /* fallthrough */ + default: + /* Propagate hints across likely 'op reg, imm' or 'op reg'. */ + if (irref_isk(ir->op2) && !irref_isk(ir->op1) && + ra_hashint(regsp_reg(IR(ir->op1)->prev))) { + ir->prev = IR(ir->op1)->prev; + continue; + } + break; + } + ir->prev = REGSP_INIT; + } + if ((as->evenspill & 1)) + as->oddspill = as->evenspill++; + else + as->oddspill = 0; +} + +/* -- Assembler core ------------------------------------------------------ */ + +/* Assemble a trace. */ +void lj_asm_trace(jit_State *J, GCtrace *T) +{ + ASMState as_; + ASMState *as = &as_; + MCode *origtop; + + /* Ensure an initialized instruction beyond the last one for HIOP checks. */ + J->cur.nins = lj_ir_nextins(J); + J->cur.ir[J->cur.nins].o = IR_NOP; + + /* Setup initial state. Copy some fields to reduce indirections. */ + as->J = J; + as->T = T; + as->ir = T->ir; + as->flags = J->flags; + as->loopref = J->loopref; + as->realign = NULL; + as->loopinv = 0; + as->parent = J->parent ? traceref(J, J->parent) : NULL; + + /* Reserve MCode memory. 
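+  ** Machine code is generated backwards: as->mcp starts at the top of the
+  ** reserved area and grows down towards as->mcbot, with MCLIM_REDZONE
+  ** bytes held back so checkmclim() can catch an overflow early.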
*/ + as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot); + as->mcp = as->mctop; + as->mclim = as->mcbot + MCLIM_REDZONE; + asm_setup_target(as); + + do { + as->mcp = as->mctop; +#ifdef LUA_USE_ASSERT + as->mcp_prev = as->mcp; +#endif + as->curins = T->nins; + RA_DBG_START(); + RA_DBGX((as, "===== STOP =====")); + + /* General trace setup. Emit tail of trace. */ + asm_tail_prep(as); + as->mcloop = NULL; + as->flagmcp = NULL; + as->topslot = 0; + as->gcsteps = 0; + as->sectref = as->loopref; + as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED; + asm_setup_regsp(as); + if (!as->loopref) + asm_tail_link(as); + + /* Assemble a trace in linear backwards order. */ + for (as->curins--; as->curins > as->stopins; as->curins--) { + IRIns *ir = IR(as->curins); + lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */ + if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE)) + continue; /* Dead-code elimination can be soooo easy. */ + if (irt_isguard(ir->t)) + asm_snap_prep(as); + RA_DBG_REF(); + checkmclim(as); + asm_ir(as, ir); + } + } while (as->realign); /* Retry in case the MCode needs to be realigned. */ + + /* Emit head of trace. */ + RA_DBG_REF(); + checkmclim(as); + if (as->gcsteps > 0) { + as->curins = as->T->snap[0].ref; + asm_snap_prep(as); /* The GC check is a guard. */ + asm_gc_check(as); + } + ra_evictk(as); + if (as->parent) + asm_head_side(as); + else + asm_head_root(as); + asm_phi_fixup(as); + + RA_DBGX((as, "===== START ====")); + RA_DBG_FLUSH(); + if (as->freeset != RSET_ALL) + lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */ + + /* Set trace entry point before fixing up tail to allow link to self. */ + T->mcode = as->mcp; + T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0; + if (!as->loopref) + asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */ + T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp); + lj_mcode_sync(T->mcode, origtop); +} + +#undef IR + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_asm.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm.h new file mode 100644 index 00000000000..85f29763220 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm.h @@ -0,0 +1,17 @@ +/* +** IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_ASM_H +#define _LJ_ASM_H + +#include "lj_jit.h" + +#if LJ_HASJIT +LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T); +LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, + MCode *target); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_arm.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_arm.h new file mode 100644 index 00000000000..81843caf96a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_arm.h @@ -0,0 +1,2217 @@ +/* +** ARM IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Register allocator extensions --------------------------------------- */ + +/* Allocate a register with a hint. */ +static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) +{ + Reg r = IR(ref)->r; + if (ra_noreg(r)) { + if (!ra_hashint(r) && !iscrossref(as, ref)) + ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ + r = ra_allocref(as, ref, allow); + } + ra_noweak(as, r); + return r; +} + +/* Allocate a scratch register pair. 
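+** The result must be an even register r with r+1 allocated alongside,
+** because the 64-bit LDRD/STRD accesses below (e.g. the Node key load
+** in asm_href) need an even/odd register pair. Roughly, the search
+** works on the free set:
+**   pick2 = free & (free >> 1) & RSET_GPREVEN
+** which has bit r set exactly when r is even and both r and r+1 are
+** free; failing that, one half of a suitable pair is restored or
+** evicted to complete the pair.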
*/ +static Reg ra_scratchpair(ASMState *as, RegSet allow) +{ + RegSet pick1 = as->freeset & allow; + RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN; + Reg r; + if (pick2) { + r = rset_picktop(pick2); + } else { + RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN; + if (pick) { + r = rset_picktop(pick); + ra_restore(as, regcost_ref(as->cost[r+1])); + } else { + pick = pick1 & (allow << 1) & RSET_GPRODD; + if (pick) { + r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1])); + } else { + r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN); + ra_restore(as, regcost_ref(as->cost[r+1])); + } + } + } + lua_assert(rset_test(RSET_GPREVEN, r)); + ra_modified(as, r); + ra_modified(as, r+1); + RA_DBGX((as, "scratchpair $r $r", r, r+1)); + return r; +} + +#if !LJ_SOFTFP +/* Allocate two source registers for three-operand instructions. */ +static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); + Reg left = irl->r, right = irr->r; + if (ra_hasreg(left)) { + ra_noweak(as, left); + if (ra_noreg(right)) + right = ra_allocref(as, ir->op2, rset_exclude(allow, left)); + else + ra_noweak(as, right); + } else if (ra_hasreg(right)) { + ra_noweak(as, right); + left = ra_allocref(as, ir->op1, rset_exclude(allow, right)); + } else if (ra_hashint(right)) { + right = ra_allocref(as, ir->op2, allow); + left = ra_alloc1(as, ir->op1, rset_exclude(allow, right)); + } else { + left = ra_allocref(as, ir->op1, allow); + right = ra_alloc1(as, ir->op2, rset_exclude(allow, left)); + } + return left | (right << 8); +} +#endif + +/* -- Guard handling ------------------------------------------------------ */ + +/* Generate an exit stub group at the bottom of the reserved MCode memory. */ +static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) +{ + MCode *mxp = as->mcbot; + int i; + if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop) + asm_mclimit(as); + /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */ + *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP); + *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu); + mxp++; + *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */ + *mxp++ = group*EXITSTUBS_PER_GROUP; + for (i = 0; i < EXITSTUBS_PER_GROUP; i++) + *mxp++ = ARMI_B|((-6-i)&0x00ffffffu); + lj_mcode_sync(as->mcbot, mxp); + lj_mcode_commitbot(as->J, mxp); + as->mcbot = mxp; + as->mclim = as->mcbot + MCLIM_REDZONE; + return mxp - EXITSTUBS_PER_GROUP; +} + +/* Setup all needed exit stubs. */ +static void asm_exitstub_setup(ASMState *as, ExitNo nexits) +{ + ExitNo i; + if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) + lj_trace_err(as->J, LJ_TRERR_SNAPOV); + for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) + if (as->J->exitstubgroup[i] == NULL) + as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); +} + +/* Emit conditional branch to exit for guard. */ +static void asm_guardcc(ASMState *as, ARMCC cc) +{ + MCode *target = exitstub_addr(as->J, as->snapno); + MCode *p = as->mcp; + if (LJ_UNLIKELY(p == as->invmcp)) { + as->loopinv = 1; + *p = ARMI_BL | ((target-p-2) & 0x00ffffffu); + emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1); + return; + } + emit_branch(as, ARMF_CC(ARMI_BL, cc), target); +} + +/* -- Operand fusion ------------------------------------------------------ */ + +/* Limit linear search to this distance. Avoids O(n^2) behavior. */ +#define CONFLICT_SEARCH_LIM 31 + +/* Check if there's no conflicting instruction between curins and ref. 
*/ +static int noconflict(ASMState *as, IRRef ref, IROp conflict) +{ + IRIns *ir = as->ir; + IRRef i = as->curins; + if (i > ref + CONFLICT_SEARCH_LIM) + return 0; /* Give up, ref is too far away. */ + while (--i > ref) + if (ir[i].o == conflict) + return 0; /* Conflict found. */ + return 1; /* Ok, no conflict. */ +} + +/* Fuse the array base of colocated arrays. */ +static int32_t asm_fuseabase(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && + !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) + return (int32_t)sizeof(GCtab); + return 0; +} + +/* Fuse array/hash/upvalue reference into register+offset operand. */ +static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow, + int lim) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r)) { + if (ir->o == IR_AREF) { + if (mayfuse(as, ref)) { + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + ofs += 8*IR(ir->op2)->i; + if (ofs > -lim && ofs < lim) { + *ofsp = ofs; + return ra_alloc1(as, refa, allow); + } + } + } + } else if (ir->o == IR_HREFK) { + if (mayfuse(as, ref)) { + int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); + if (ofs < lim) { + *ofsp = ofs; + return ra_alloc1(as, ir->op1, allow); + } + } + } else if (ir->o == IR_UREFC) { + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); + *ofsp = (ofs & 255); /* Mask out less bits to allow LDRD. */ + return ra_allock(as, (ofs & ~255), allow); + } + } + } + *ofsp = 0; + return ra_alloc1(as, ref, allow); +} + +/* Fuse m operand into arithmetic/logic instructions. */ +static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_hasreg(ir->r)) { + ra_noweak(as, ir->r); + return ARMF_M(ir->r); + } else if (irref_isk(ref)) { + uint32_t k = emit_isk12(ai, ir->i); + if (k) + return k; + } else if (mayfuse(as, ref)) { + if (ir->o >= IR_BSHL && ir->o <= IR_BROR) { + Reg m = ra_alloc1(as, ir->op1, allow); + ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL : + ir->o == IR_BSHR ? ARMSH_LSR : + ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR; + if (irref_isk(ir->op2)) { + return m | ARMF_SH(sh, (IR(ir->op2)->i & 31)); + } else { + Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m)); + return m | ARMF_RSH(sh, s); + } + } else if (ir->o == IR_ADD && ir->op1 == ir->op2) { + Reg m = ra_alloc1(as, ir->op1, allow); + return m | ARMF_SH(ARMSH_LSL, 1); + } + } + return ra_allocref(as, ref, allow); +} + +/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */ +static IRRef asm_fuselsl2(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL && + irref_isk(ir->op2) && IR(ir->op2)->i == 2) + return ir->op1; + return 0; /* No fusion. */ +} + +/* Fuse XLOAD/XSTORE reference into load/store operand. */ +static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref, + RegSet allow, int32_t ofs) +{ + IRIns *ir = IR(ref); + Reg base; + if (ra_noreg(ir->r) && canfuse(as, ir)) { + int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 : + (ai & 0x04000000) ? 
4096 : 256; + if (ir->o == IR_ADD) { + int32_t ofs2; + if (irref_isk(ir->op2) && + (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim && + (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) { + ofs = ofs2; + ref = ir->op1; + } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) { + IRRef lref = ir->op1, rref = ir->op2; + Reg rn, rm; + if ((ai & 0x04000000)) { + IRRef sref = asm_fuselsl2(as, rref); + if (sref) { + rref = sref; + ai |= ARMF_SH(ARMSH_LSL, 2); + } else if ((sref = asm_fuselsl2(as, lref)) != 0) { + lref = rref; + rref = sref; + ai |= ARMF_SH(ARMSH_LSL, 2); + } + } + rn = ra_alloc1(as, lref, allow); + rm = ra_alloc1(as, rref, rset_exclude(allow, rn)); + if ((ai & 0x04000000)) ai |= ARMI_LS_R; + emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm); + return; + } + } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) { + lua_assert(ofs == 0); + ofs = (int32_t)sizeof(GCstr); + if (irref_isk(ir->op2)) { + ofs += IR(ir->op2)->i; + ref = ir->op1; + } else if (irref_isk(ir->op1)) { + ofs += IR(ir->op1)->i; + ref = ir->op2; + } else { + /* NYI: Fuse ADD with constant. */ + Reg rn = ra_alloc1(as, ir->op1, allow); + uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn)); + if ((ai & 0x04000000)) + emit_lso(as, ai, rd, rd, ofs); + else + emit_lsox(as, ai, rd, rd, ofs); + emit_dn(as, ARMI_ADD^m, rd, rn); + return; + } + if (ofs <= -lim || ofs >= lim) { + Reg rn = ra_alloc1(as, ref, allow); + Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn)); + if ((ai & 0x04000000)) ai |= ARMI_LS_R; + emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm); + return; + } + } + } + base = ra_alloc1(as, ref, allow); +#if !LJ_SOFTFP + if ((ai & 0x08000000)) + emit_vlso(as, ai, rd, base, ofs); + else +#endif + if ((ai & 0x04000000)) + emit_lso(as, ai, rd, base, ofs); + else + emit_lsox(as, ai, rd, base, ofs); +} + +#if !LJ_SOFTFP +/* Fuse to multiply-add/sub instruction. */ +static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air) +{ + IRRef lref = ir->op1, rref = ir->op2; + IRIns *irm; + if (lref != rref && + ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) && + ra_noreg(irm->r)) || + (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) && + (rref = lref, ai = air, ra_noreg(irm->r))))) { + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg add = ra_hintalloc(as, rref, dest, RSET_FPR); + Reg right, left = ra_alloc2(as, irm, + rset_exclude(rset_exclude(RSET_FPR, dest), add)); + right = (left >> 8); left &= 255; + emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15)); + if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15)); + return 1; + } + return 0; +} +#endif + +/* -- Calls --------------------------------------------------------------- */ + +/* Generate a call to a C function. */ +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t n, nargs = CCI_XNARGS(ci); + int32_t ofs = 0; +#if LJ_SOFTFP + Reg gpr = REGARG_FIRSTGPR; +#else + Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0; +#endif + if ((void *)ci->func) + emit_call(as, (void *)ci->func); +#if !LJ_SOFTFP + for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++) + as->cost[gpr] = REGCOST(~0u, ASMREF_L); + gpr = REGARG_FIRSTGPR; +#endif + for (n = 0; n < nargs; n++) { /* Setup args. 
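+** Argument marshalling follows the ARM conventions used by this port:
+** with a hard-float ABI, FP arguments go into the VFP argument
+** registers (singles may back-fill the odd half of a partially used
+** pair via fprodd); with a soft-float ABI or a vararg call, a double
+** is passed in an even/odd GPR pair, hence gpr = (gpr+1) & ~1u, or in
+** an 8-byte aligned stack slot. Remaining arguments spill to the
+** outgoing stack area at ofs.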
*/ + IRRef ref = args[n]; + IRIns *ir = IR(ref); +#if !LJ_SOFTFP + if (ref && irt_isfp(ir->t)) { + RegSet of = as->freeset; + Reg src; + if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) { + if (irt_isnum(ir->t)) { + if (fpr <= REGARG_LASTFPR) { + ra_leftov(as, fpr, ref); + fpr++; + continue; + } + } else if (fprodd) { /* Ick. */ + src = ra_alloc1(as, ref, RSET_FPR); + emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000); + fprodd = 0; + continue; + } else if (fpr <= REGARG_LASTFPR) { + ra_leftov(as, fpr, ref); + fprodd = fpr++; + continue; + } + /* Workaround to protect argument GPRs from being used for remat. */ + as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); + src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */ + as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); + fprodd = 0; + goto stackfp; + } + /* Workaround to protect argument GPRs from being used for remat. */ + as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); + src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */ + as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); + if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u; + if (gpr <= REGARG_LASTGPR) { + lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ + if (irt_isnum(ir->t)) { + lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */ + emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15)); + gpr += 2; + } else { + emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15)); + gpr++; + } + } else { + stackfp: + if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; + emit_spstore(as, ir, src, ofs); + ofs += irt_isnum(ir->t) ? 8 : 4; + } + } else +#endif + { + if (gpr <= REGARG_LASTGPR) { + lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ + if (ref) ra_leftov(as, gpr, ref); + gpr++; + } else { + if (ref) { + Reg r = ra_alloc1(as, ref, RSET_GPR); + emit_spstore(as, ir, r, ofs); + } + ofs += 4; + } + } + } +} + +/* Setup result reg/sp for call. Evict scratch regs. */ +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + RegSet drop = RSET_SCRATCH; + int hiop = ((ir+1)->o == IR_HIOP); + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + if (hiop && ra_hasreg((ir+1)->r)) + rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ + ra_evictset(as, drop); /* Evictions must be performed first. */ + if (ra_used(ir)) { + lua_assert(!irt_ispri(ir->t)); + if (!LJ_SOFTFP && irt_isfp(ir->t)) { + if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) { + Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); + if (irt_isnum(ir->t)) + emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest); + else + emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest); + } else { + ra_destreg(as, ir, RID_FPRET); + } + } else if (hiop) { + ra_destpair(as, ir); + } else { + ra_destreg(as, ir, RID_RET); + } + } + UNUSED(ci); +} + +static void asm_callx(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX*2]; + CCallInfo ci; + IRRef func; + IRIns *irf; + ci.flags = asm_callx_flags(as, ir); + asm_collectargs(as, ir, &ci, args); + asm_setupresult(as, ir, &ci); + func = ir->op2; irf = IR(func); + if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } + if (irref_isk(func)) { /* Call to constant address. */ + ci.func = (ASMFunction)(void *)(irf->i); + } else { /* Need a non-argument register for indirect calls. 
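+** The target address must live in a register outside r0-r3: the
+** argument setup generated by asm_gencall() below executes before the
+** BLX at run time and is free to overwrite the argument registers, so
+** restricting the allocation to RSET_RANGE(RID_R4, RID_R12+1) keeps
+** the call target intact.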
*/ + Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1)); + emit_m(as, ARMI_BLXr, freg); + ci.func = (ASMFunction)(void *)0; + } + asm_gencall(as, &ci, args); +} + +/* -- Returns ------------------------------------------------------------- */ + +/* Return to lower frame. Guard that it goes to the right spot. */ +static void asm_retf(ASMState *as, IRIns *ir) +{ + Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); + void *pc = ir_kptr(IR(ir->op2)); + int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); + as->topslot -= (BCReg)delta; + if ((int32_t)as->topslot < 0) as->topslot = 0; + irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ + /* Need to force a spill on REF_BASE now to update the stack slot. */ + emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE))); + emit_setgl(as, base, jit_base); + emit_addptr(as, base, -8*delta); + asm_guardcc(as, CC_NE); + emit_nm(as, ARMI_CMP, RID_TMP, + ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); + emit_lso(as, ARMI_LDR, RID_TMP, base, -4); +} + +/* -- Type conversions ---------------------------------------------------- */ + +#if !LJ_SOFTFP +static void asm_tointg(ASMState *as, IRIns *ir, Reg left) +{ + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_guardcc(as, CC_NE); + emit_d(as, ARMI_VMRS, 0); + emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15)); + emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15)); + emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); + emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15)); +} + +static void asm_tobit(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_FPR; + Reg left = ra_alloc1(as, ir->op1, allow); + Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); + Reg tmp = ra_scratch(as, rset_clear(allow, right)); + Reg dest = ra_dest(as, ir, RSET_GPR); + emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); + emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15)); +} +#else +#define asm_tobit(as, ir) lua_assert(0) +#endif + +static void asm_conv(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); +#if !LJ_SOFTFP + int stfp = (st == IRT_NUM || st == IRT_FLOAT); +#endif + IRRef lref = ir->op1; + /* 64 bit integer conversions are handled by SPLIT. */ + lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64)); +#if LJ_SOFTFP + /* FP conversions are handled by SPLIT. */ + lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); + /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ +#else + lua_assert(irt_type(ir->t) != st); + if (irt_isfp(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + if (stfp) { /* FP to FP conversion. */ + emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32, + (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15)); + } else { /* Integer to FP conversion. */ + Reg left = ra_alloc1(as, lref, RSET_GPR); + ARMIns ai = irt_isfloat(ir->t) ? + (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) : + (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32); + emit_dm(as, ai, (dest & 15), (dest & 15)); + emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15)); + } + } else if (stfp) { /* FP to integer conversion. */ + if (irt_isguard(ir->t)) { + /* Checked conversions are only supported from number to int. 
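+** A checked conversion truncates to an integer, converts the result
+** back to a double and compares it with the original value (see
+** asm_tointg() above); any difference, including NaN or out-of-range
+** input, fails the guard.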
*/ + lua_assert(irt_isint(ir->t) && st == IRT_NUM); + asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); + } else { + Reg left = ra_alloc1(as, lref, RSET_FPR); + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + Reg dest = ra_dest(as, ir, RSET_GPR); + ARMIns ai; + emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); + ai = irt_isint(ir->t) ? + (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) : + (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32); + emit_dm(as, ai, (tmp & 15), (left & 15)); + } + } else +#endif + { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ + Reg left = ra_alloc1(as, lref, RSET_GPR); + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); + if ((as->flags & JIT_F_ARMV6)) { + ARMIns ai = st == IRT_I8 ? ARMI_SXTB : + st == IRT_U8 ? ARMI_UXTB : + st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH; + emit_dm(as, ai, dest, left); + } else if (st == IRT_U8) { + emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left); + } else { + uint32_t shift = st == IRT_I8 ? 24 : 16; + ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR; + emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP); + emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left); + } + } else { /* Handle 32/32 bit no-op (cast). */ + ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ + } + } +} + +static void asm_strto(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; + IRRef args[2]; + Reg rlo = 0, rhi = 0, tmp; + int destused = ra_used(ir); + int32_t ofs = 0; + ra_evictset(as, RSET_SCRATCH); +#if LJ_SOFTFP + if (destused) { + if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) && + (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) { + int i; + for (i = 0; i < 2; i++) { + Reg r = (ir+i)->r; + if (ra_hasreg(r)) { + ra_free(as, r); + ra_modified(as, r); + emit_spload(as, ir+i, r, sps_scale((ir+i)->s)); + } + } + ofs = sps_scale(ir->s); + destused = 0; + } else { + rhi = ra_dest(as, ir+1, RSET_GPR); + rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi)); + } + } + asm_guardcc(as, CC_EQ); + if (destused) { + emit_lso(as, ARMI_LDR, rhi, RID_SP, 4); + emit_lso(as, ARMI_LDR, rlo, RID_SP, 0); + } +#else + UNUSED(rhi); + if (destused) { + if (ra_hasspill(ir->s)) { + ofs = sps_scale(ir->s); + destused = 0; + if (ra_hasreg(ir->r)) { + ra_free(as, ir->r); + ra_modified(as, ir->r); + emit_spload(as, ir, ir->r, ofs); + } + } else { + rlo = ra_dest(as, ir, RSET_FPR); + } + } + asm_guardcc(as, CC_EQ); + if (destused) + emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0); +#endif + emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */ + args[0] = ir->op1; /* GCstr *str */ + args[1] = ASMREF_TMP1; /* TValue *n */ + asm_gencall(as, ci, args); + tmp = ra_releasetmp(as, ASMREF_TMP1); + if (ofs == 0) + emit_dm(as, ARMI_MOV, tmp, RID_SP); + else + emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR); +} + +/* -- Memory references --------------------------------------------------- */ + +/* Get pointer to TValue. */ +static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) +{ + IRIns *ir = IR(ref); + if (irt_isnum(ir->t)) { + if (irref_isk(ref)) { + /* Use the number constant itself as a TValue. */ + ra_allockreg(as, i32ptr(ir_knum(ir)), dest); + } else { +#if LJ_SOFTFP + lua_assert(0); +#else + /* Otherwise force a spill and use the spill slot. */ + emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR); +#endif + } + } else { + /* Otherwise use [sp] and [sp+4] to hold the TValue. 
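+** The value word is stored at [sp] and the type tag at [sp+4], which
+** matches the in-memory TValue layout of this 32-bit port, and dest is
+** simply set to sp so the callee sees a pointer to a complete TValue.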
*/ + RegSet allow = rset_exclude(RSET_GPR, dest); + Reg type; + emit_dm(as, ARMI_MOV, dest, RID_SP); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, allow); + emit_lso(as, ARMI_STR, src, RID_SP, 0); + } + if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) + type = ra_alloc1(as, ref+1, allow); + else + type = ra_allock(as, irt_toitype(ir->t), allow); + emit_lso(as, ARMI_STR, type, RID_SP, 4); + } +} + +static void asm_aref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx, base; + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i); + if (k) { + base = ra_alloc1(as, refa, RSET_GPR); + emit_dn(as, ARMI_ADD^k, dest, base); + return; + } + } + base = ra_alloc1(as, ir->op1, RSET_GPR); + idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); + emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx); +} + +/* Inlined hash lookup. Specialized for key type and for const keys. +** The equivalent C code is: +** Node *n = hashkey(t, key); +** do { +** if (lj_obj_equal(&n->key, key)) return &n->val; +** } while ((n = nextnode(n))); +** return niltv(L); +*/ +static void asm_href(ASMState *as, IRIns *ir, IROp merge) +{ + RegSet allow = RSET_GPR; + int destused = ra_used(ir); + Reg dest = ra_dest(as, ir, allow); + Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); + Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP; + IRRef refkey = ir->op2; + IRIns *irkey = IR(refkey); + IRType1 kt = irkey->t; + int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt)); + uint32_t khash; + MCLabel l_end, l_loop; + rset_clear(allow, tab); + if (!irref_isk(refkey) || irt_isstr(kt)) { +#if LJ_SOFTFP + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + if (irkey[1].o == IR_HIOP) { + if (ra_hasreg((irkey+1)->r)) { + keynumhi = (irkey+1)->r; + keyhi = RID_TMP; + ra_noweak(as, keynumhi); + } else { + keyhi = keynumhi = ra_allocref(as, refkey+1, allow); + } + rset_clear(allow, keynumhi); + khi = 0; + } +#else + if (irt_isnum(kt)) { + key = ra_scratch(as, allow); + rset_clear(allow, key); + keyhi = keynumhi = ra_scratch(as, allow); + rset_clear(allow, keyhi); + khi = 0; + } else { + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + } +#endif + } else if (irt_isnum(kt)) { + int32_t val = (int32_t)ir_knum(irkey)->u32.lo; + k = emit_isk12(ARMI_CMP, val); + if (!k) { + key = ra_allock(as, val, allow); + rset_clear(allow, key); + } + val = (int32_t)ir_knum(irkey)->u32.hi; + khi = emit_isk12(ARMI_CMP, val); + if (!khi) { + keyhi = ra_allock(as, val, allow); + rset_clear(allow, keyhi); + } + } else if (!irt_ispri(kt)) { + k = emit_isk12(ARMI_CMP, irkey->i); + if (!k) { + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + } + } + if (!irt_ispri(kt)) + tmp = ra_scratchpair(as, allow); + + /* Key not found in chain: jump to exit (if merged) or load niltv. */ + l_end = emit_label(as); + as->invmcp = NULL; + if (merge == IR_NE) + asm_guardcc(as, CC_AL); + else if (destused) + emit_loada(as, dest, niltvg(J2G(as->J))); + + /* Follow hash chain until the end. */ + l_loop = --as->mcp; + emit_n(as, ARMI_CMP|ARMI_K12|0, dest); + emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next)); + + /* Type and value comparison. 
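+** With a scratch pair the Node key is loaded with a single LDRD into
+** tmp/tmp+1. At run time the high word (type tag or hiword of a number
+** key) is compared first and the low-word compare is predicated on EQ,
+** so one conditional branch decides a full 64-bit key match.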
*/ + if (merge == IR_EQ) + asm_guardcc(as, CC_EQ); + else + emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); + if (!irt_ispri(kt)) { + emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key); + emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi); + emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key)); + } else { + emit_n(as, ARMI_CMP^khi, tmp); + emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it)); + } + *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu); + + /* Load main position relative to tab->node into dest. */ + khash = irref_isk(refkey) ? ir_khash(irkey) : 1; + if (khash == 0) { + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + } else { + emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp); + emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp); + if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */ + emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP); + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash)); + emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); + } else if (irref_isk(refkey)) { + emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash, + rset_exclude(rset_exclude(RSET_GPR, tab), dest)); + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); + } else { /* Must match with hash*() in lj_tab.c. */ + if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */ + if (keyhi == RID_TMP) + emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi); + emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi); + } + emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP); + emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1); + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)), + tmp, tmp+1, tmp); + emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); + emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp); + if (ra_hasreg(keynumhi)) { + emit_dnm(as, ARMI_EOR, tmp+1, tmp, key); + emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */ + emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi); +#if !LJ_SOFTFP + emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi, + (ra_alloc1(as, refkey, RSET_FPR) & 15)); +#endif + } else { + emit_dnm(as, ARMI_EOR, tmp+1, tmp, key); + emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS, + rset_exclude(rset_exclude(RSET_GPR, tab), key)); + } + } + } +} + +static void asm_hrefk(ASMState *as, IRIns *ir) +{ + IRIns *kslot = IR(ir->op2); + IRIns *irkey = IR(kslot->op1); + int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); + int32_t kofs = ofs + (int32_t)offsetof(Node, key); + Reg dest = (ra_used(ir) || ofs > 4095) ? 
ra_dest(as, ir, RSET_GPR) : RID_NONE; + Reg node = ra_alloc1(as, ir->op1, RSET_GPR); + Reg key = RID_NONE, type = RID_TMP, idx = node; + RegSet allow = rset_exclude(RSET_GPR, node); + lua_assert(ofs % sizeof(Node) == 0); + if (ofs > 4095) { + idx = dest; + rset_clear(allow, dest); + kofs = (int32_t)offsetof(Node, key); + } else if (ra_hasreg(dest)) { + emit_opk(as, ARMI_ADD, dest, node, ofs, allow); + } + asm_guardcc(as, CC_NE); + if (!irt_ispri(irkey->t)) { + RegSet even = (as->freeset & allow); + even = even & (even >> 1) & RSET_GPREVEN; + if (even) { + key = ra_scratch(as, even); + if (rset_test(as->freeset, key+1)) { + type = key+1; + ra_modified(as, type); + } + } else { + key = ra_scratch(as, allow); + } + rset_clear(allow, key); + } + rset_clear(allow, type); + if (irt_isnum(irkey->t)) { + emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type, + (int32_t)ir_knum(irkey)->u32.hi, allow); + emit_opk(as, ARMI_CMP, 0, key, + (int32_t)ir_knum(irkey)->u32.lo, allow); + } else { + if (ra_hasreg(key)) + emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow); + emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type); + } + emit_lso(as, ARMI_LDR, type, idx, kofs+4); + if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs); + if (ofs > 4095) + emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR); +} + +static void asm_uref(ASMState *as, IRIns *ir) +{ + /* NYI: Check that UREFO is still open and not aliasing a slot. */ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; + emit_lsptr(as, ARMI_LDR, dest, v); + } else { + Reg uv = ra_scratch(as, RSET_GPR); + Reg func = ra_alloc1(as, ir->op1, RSET_GPR); + if (ir->o == IR_UREFC) { + asm_guardcc(as, CC_NE); + emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP); + emit_opk(as, ARMI_ADD, dest, uv, + (int32_t)offsetof(GCupval, tv), RSET_GPR); + emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); + } else { + emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v)); + } + emit_lso(as, ARMI_LDR, uv, func, + (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); + } +} + +static void asm_fref(ASMState *as, IRIns *ir) +{ + UNUSED(as); UNUSED(ir); + lua_assert(!ra_used(ir)); +} + +static void asm_strref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + IRRef ref = ir->op2, refk = ir->op1; + Reg r; + if (irref_isk(ref)) { + IRRef tmp = refk; refk = ref; ref = tmp; + } else if (!irref_isk(refk)) { + uint32_t k, m = ARMI_K12|sizeof(GCstr); + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + IRIns *irr = IR(ir->op2); + if (ra_hasreg(irr->r)) { + ra_noweak(as, irr->r); + right = irr->r; + } else if (mayfuse(as, irr->op2) && + irr->o == IR_ADD && irref_isk(irr->op2) && + (k = emit_isk12(ARMI_ADD, + (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) { + m = k; + right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); + } else { + right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + emit_dn(as, ARMI_ADD^m, dest, dest); + emit_dnm(as, ARMI_ADD, dest, left, right); + return; + } + r = ra_alloc1(as, ref, RSET_GPR); + emit_opk(as, ARMI_ADD, dest, r, + sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r)); +} + +/* -- Loads and stores ---------------------------------------------------- */ + +static ARMIns asm_fxloadins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: return ARMI_LDRSB; + case IRT_U8: return ARMI_LDRB; + case IRT_I16: return ARMI_LDRSH; + case IRT_U16: return 
ARMI_LDRH; + case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D; + case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; + default: return ARMI_LDR; + } +} + +static ARMIns asm_fxstoreins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: case IRT_U8: return ARMI_STRB; + case IRT_I16: case IRT_U16: return ARMI_STRH; + case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D; + case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; + default: return ARMI_STR; + } +} + +static void asm_fload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); + ARMIns ai = asm_fxloadins(ir); + int32_t ofs; + if (ir->op2 == IRFL_TAB_ARRAY) { + ofs = asm_fuseabase(as, ir->op1); + if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ + emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx); + return; + } + } + ofs = field_ofs[ir->op2]; + if ((ai & 0x04000000)) + emit_lso(as, ai, dest, idx, ofs); + else + emit_lsox(as, ai, dest, idx, ofs); +} + +static void asm_fstore(ASMState *as, IRIns *ir) +{ + if (ir->r != RID_SINK) { + Reg src = ra_alloc1(as, ir->op2, RSET_GPR); + IRIns *irf = IR(ir->op1); + Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); + int32_t ofs = field_ofs[irf->op2]; + ARMIns ai = asm_fxstoreins(ir); + if ((ai & 0x04000000)) + emit_lso(as, ai, src, idx, ofs); + else + emit_lsox(as, ai, src, idx, ofs); + } +} + +static void asm_xload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, + (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); + lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); + asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); +} + +static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) +{ + if (ir->r != RID_SINK) { + Reg src = ra_alloc1(as, ir->op2, + (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); + asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, + rset_exclude(RSET_GPR, src), ofs); + } +} + +#define asm_xstore(as, ir) asm_xstore_(as, ir, 0) + +static void asm_ahuvload(ASMState *as, IRIns *ir) +{ + int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); + IRType t = hiop ? IRT_NUM : irt_type(ir->t); + Reg dest = RID_NONE, type = RID_NONE, idx; + RegSet allow = RSET_GPR; + int32_t ofs = 0; + if (hiop && ra_used(ir+1)) { + type = ra_dest(as, ir+1, allow); + rset_clear(allow, type); + } + if (ra_used(ir)) { + lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || + irt_isint(ir->t) || irt_isaddr(ir->t)); + dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); + rset_clear(allow, dest); + } + idx = asm_fuseahuref(as, ir->op1, &ofs, allow, + (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096); + if (!hiop || type == RID_NONE) { + rset_clear(allow, idx); + if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 && + rset_test((as->freeset & allow), dest+1)) { + type = dest+1; + ra_modified(as, type); + } else { + type = RID_TMP; + } + } + asm_guardcc(as, t == IRT_NUM ? 
CC_HS : CC_NE); + emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type); + if (ra_hasreg(dest)) { +#if !LJ_SOFTFP + if (t == IRT_NUM) + emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs); + else +#endif + emit_lso(as, ARMI_LDR, dest, idx, ofs); + } + emit_lso(as, ARMI_LDR, type, idx, ofs+4); +} + +static void asm_ahustore(ASMState *as, IRIns *ir) +{ + if (ir->r != RID_SINK) { + RegSet allow = RSET_GPR; + Reg idx, src = RID_NONE, type = RID_NONE; + int32_t ofs = 0; +#if !LJ_SOFTFP + if (irt_isnum(ir->t)) { + src = ra_alloc1(as, ir->op2, RSET_FPR); + idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024); + emit_vlso(as, ARMI_VSTR_D, src, idx, ofs); + } else +#endif + { + int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); + if (!irt_ispri(ir->t)) { + src = ra_alloc1(as, ir->op2, allow); + rset_clear(allow, src); + } + if (hiop) + type = ra_alloc1(as, (ir+1)->op2, allow); + else + type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); + idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096); + if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs); + emit_lso(as, ARMI_STR, type, idx, ofs+4); + } + } +} + +static void asm_sload(ASMState *as, IRIns *ir) +{ + int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); + int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); + IRType t = hiop ? IRT_NUM : irt_type(ir->t); + Reg dest = RID_NONE, type = RID_NONE, base; + RegSet allow = RSET_GPR; + lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ + lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); +#if LJ_SOFTFP + lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ + if (hiop && ra_used(ir+1)) { + type = ra_dest(as, ir+1, allow); + rset_clear(allow, type); + } +#else + if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) { + dest = ra_scratch(as, RSET_FPR); + asm_tointg(as, ir, dest); + t = IRT_NUM; /* Continue with a regular number type check. */ + } else +#endif + if (ra_used(ir)) { + Reg tmp = RID_NONE; + if ((ir->op2 & IRSLOAD_CONVERT)) + tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR); + lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || + irt_isint(ir->t) || irt_isaddr(ir->t)); + dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); + rset_clear(allow, dest); + base = ra_alloc1(as, REF_BASE, allow); + if ((ir->op2 & IRSLOAD_CONVERT)) { + if (t == IRT_INT) { + emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15)); + emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15)); + t = IRT_NUM; /* Check for original type. */ + } else { + emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15)); + emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15)); + t = IRT_INT; /* Check for original type. */ + } + dest = tmp; + } + goto dotypecheck; + } + base = ra_alloc1(as, REF_BASE, allow); +dotypecheck: + rset_clear(allow, base); + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + if (ra_noreg(type)) { + if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 && + rset_test((as->freeset & allow), dest+1)) { + type = dest+1; + ra_modified(as, type); + } else { + type = RID_TMP; + } + } + asm_guardcc(as, t == IRT_NUM ? 
CC_HS : CC_NE); + emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type); + } + if (ra_hasreg(dest)) { +#if !LJ_SOFTFP + if (t == IRT_NUM) { + if (ofs < 1024) { + emit_vlso(as, ARMI_VLDR_D, dest, base, ofs); + } else { + if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4); + emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0); + emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow); + return; + } + } else +#endif + emit_lso(as, ARMI_LDR, dest, base, ofs); + } + if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4); +} + +/* -- Allocations --------------------------------------------------------- */ + +#if LJ_HASFFI +static void asm_cnew(ASMState *as, IRIns *ir) +{ + CTState *cts = ctype_ctsG(J2G(as->J)); + CTypeID id = (CTypeID)IR(ir->op1)->i; + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; + IRRef args[4]; + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); + RegSet drop = RSET_SCRATCH; + lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); + + as->gcsteps++; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + if (ra_used(ir)) + ra_destreg(as, ir, RID_RET); /* GCcdata * */ + + /* Initialize immutable cdata object. */ + if (ir->o == IR_CNEWI) { + int32_t ofs = sizeof(GCcdata); + lua_assert(sz == 4 || sz == 8); + if (sz == 8) { + ofs += 4; ir++; + lua_assert(ir->o == IR_HIOP); + } + for (;;) { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_lso(as, ARMI_STR, r, RID_RET, ofs); + rset_clear(allow, r); + if (ofs == sizeof(GCcdata)) break; + ofs -= 4; ir--; + } + } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ + ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* CTypeID id */ + args[2] = ir->op2; /* CTSize sz */ + args[3] = ASMREF_TMP1; /* CTSize align */ + asm_gencall(as, ci, args); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); + return; + } + + /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ + { + uint32_t k = emit_isk12(ARMI_MOV, id); + Reg r = k ? 
RID_R1 : ra_allock(as, id, allow); + emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct)); + emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid)); + emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP); + if (k) emit_d(as, ARMI_MOV^k, RID_R1); + } + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* MSize size */ + asm_gencall(as, ci, args); + ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), + ra_releasetmp(as, ASMREF_TMP1)); +} +#else +#define asm_cnew(as, ir) ((void)0) +#endif + +/* -- Write barriers ------------------------------------------------------ */ + +static void asm_tbar(ASMState *as, IRIns *ir) +{ + Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); + Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab)); + Reg gr = ra_allock(as, i32ptr(J2G(as->J)), + rset_exclude(rset_exclude(RSET_GPR, tab), link)); + Reg mark = RID_TMP; + MCLabel l_end = emit_label(as); + emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist)); + emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked)); + emit_lso(as, ARMI_STR, tab, gr, + (int32_t)offsetof(global_State, gc.grayagain)); + emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark); + emit_lso(as, ARMI_LDR, link, gr, + (int32_t)offsetof(global_State, gc.grayagain)); + emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); + emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark); + emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked)); +} + +static void asm_obar(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; + IRRef args[2]; + MCLabel l_end; + Reg obj, val, tmp; + /* No need for other object barriers (yet). */ + lua_assert(IR(ir->op1)->o == IR_UREFC); + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ir->op1; /* TValue *tv */ + asm_gencall(as, ci, args); + if ((l_end[-1] >> 28) == CC_AL) + l_end[-1] = ARMF_CC(l_end[-1], CC_NE); + else + emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); + ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1)); + obj = IR(ir->op1)->r; + tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); + emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp); + emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP); + val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); + emit_lso(as, ARMI_LDRB, tmp, obj, + (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); + emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked)); +} + +/* -- Arithmetic and logic operations ------------------------------------- */ + +#if !LJ_SOFTFP +static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai) +{ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15)); +} + +static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai) +{ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); + emit_dm(as, ai, (dest & 15), (left & 15)); +} + +static void asm_callround(ASMState *as, IRIns *ir, int id) +{ + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)| + RID2RSET(RID_R3)|RID2RSET(RID_R12); + RegSet of; + Reg dest, src; + ra_evictset(as, drop); + dest = ra_dest(as, ir, RSET_FPR); + emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15)); + emit_call(as, id == IRFPM_FLOOR ? 
(void *)lj_vm_floor_sf : + id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf : + (void *)lj_vm_trunc_sf); + /* Workaround to protect argument GPRs from being used for remat. */ + of = as->freeset; + as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1); + as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L); + src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */ + as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1)); + emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15)); +} + +static void asm_fpmath(ASMState *as, IRIns *ir) +{ + if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) + return; + if (ir->op2 <= IRFPM_TRUNC) + asm_callround(as, ir, ir->op2); + else if (ir->op2 == IRFPM_SQRT) + asm_fpunary(as, ir, ARMI_VSQRT_D); + else + asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); +} +#else +#define asm_fpmath(as, ir) lua_assert(0) +#endif + +static int asm_swapops(ASMState *as, IRRef lref, IRRef rref) +{ + IRIns *ir; + if (irref_isk(rref)) + return 0; /* Don't swap constants to the left. */ + if (irref_isk(lref)) + return 1; /* But swap constants to the right. */ + ir = IR(rref); + if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) || + (ir->o == IR_ADD && ir->op1 == ir->op2)) + return 0; /* Don't swap fusable operands to the left. */ + ir = IR(lref); + if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) || + (ir->o == IR_ADD && ir->op1 == ir->op2)) + return 1; /* But swap fusable operands to the right. */ + return 0; /* Otherwise don't swap. */ +} + +static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai) +{ + IRRef lref = ir->op1, rref = ir->op2; + Reg left, dest = ra_dest(as, ir, RSET_GPR); + uint32_t m; + if (asm_swapops(as, lref, rref)) { + IRRef tmp = lref; lref = rref; rref = tmp; + if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC) + ai ^= (ARMI_SUB^ARMI_RSB); + } + left = ra_hintalloc(as, lref, dest, RSET_GPR); + m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left)); + if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */ + asm_guardcc(as, CC_VS); + ai |= ARMI_S; + } + emit_dn(as, ai^m, dest, left); +} + +static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai) +{ + if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */ + as->flagmcp = NULL; + as->mcp++; + ai |= ARMI_S; + } + asm_intop(as, ir, ai); +} + +static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + emit_dn(as, ai|ARMI_K12|0, dest, left); +} + +/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */ +static void asm_intmul(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest)); + Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + Reg tmp = RID_NONE; + /* ARMv5 restriction: dest != left and dest_hi != left. */ + if (dest == left && left != right) { left = right; right = dest; } + if (irt_isguard(ir->t)) { /* IR_MULOV */ + if (!(as->flags & JIT_F_ARMV6) && dest == left) + tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left)); + asm_guardcc(as, CC_NE); + emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest); + emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left); + } else { + if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP; + emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left); + } + /* Only need this for the dest == left == right case. 
*/ + if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right); +} + +static void asm_add(ASMState *as, IRIns *ir) +{ +#if !LJ_SOFTFP + if (irt_isnum(ir->t)) { + if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D)) + asm_fparith(as, ir, ARMI_VADD_D); + return; + } +#endif + asm_intop_s(as, ir, ARMI_ADD); +} + +static void asm_sub(ASMState *as, IRIns *ir) +{ +#if !LJ_SOFTFP + if (irt_isnum(ir->t)) { + if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D)) + asm_fparith(as, ir, ARMI_VSUB_D); + return; + } +#endif + asm_intop_s(as, ir, ARMI_SUB); +} + +static void asm_mul(ASMState *as, IRIns *ir) +{ +#if !LJ_SOFTFP + if (irt_isnum(ir->t)) { + asm_fparith(as, ir, ARMI_VMUL_D); + return; + } +#endif + asm_intmul(as, ir); +} + +#define asm_addov(as, ir) asm_add(as, ir) +#define asm_subov(as, ir) asm_sub(as, ir) +#define asm_mulov(as, ir) asm_mul(as, ir) + +#if LJ_SOFTFP +#define asm_div(as, ir) lua_assert(0) +#define asm_pow(as, ir) lua_assert(0) +#define asm_abs(as, ir) lua_assert(0) +#define asm_atan2(as, ir) lua_assert(0) +#define asm_ldexp(as, ir) lua_assert(0) +#else +#define asm_div(as, ir) asm_fparith(as, ir, ARMI_VDIV_D) +#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi) +#define asm_abs(as, ir) asm_fpunary(as, ir, ARMI_VABS_D) +#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) +#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) +#endif + +#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi) + +static void asm_neg(ASMState *as, IRIns *ir) +{ +#if !LJ_SOFTFP + if (irt_isnum(ir->t)) { + asm_fpunary(as, ir, ARMI_VNEG_D); + return; + } +#endif + asm_intneg(as, ir, ARMI_RSB); +} + +static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai) +{ + if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */ + uint32_t cc = (as->mcp[1] >> 28); + as->flagmcp = NULL; + if (cc <= CC_NE) { + as->mcp++; + ai |= ARMI_S; + } else if (cc == CC_GE) { + *++as->mcp ^= ((CC_GE^CC_PL) << 28); + ai |= ARMI_S; + } else if (cc == CC_LT) { + *++as->mcp ^= ((CC_LT^CC_MI) << 28); + ai |= ARMI_S; + } /* else: other conds don't work with bit ops. */ + } + if (ir->op2 == 0) { + Reg dest = ra_dest(as, ir, RSET_GPR); + uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR); + emit_d(as, ai^m, dest); + } else { + /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */ + asm_intop(as, ir, ai); + } +} + +#define asm_bnot(as, ir) asm_bitop(as, ir, ARMI_MVN) + +static void asm_bswap(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + if ((as->flags & JIT_F_ARMV6)) { + emit_dm(as, ARMI_REV, dest, left); + } else { + Reg tmp2 = dest; + if (tmp2 == left) + tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left)); + emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP); + emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left); + emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP); + emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left); + } +} + +#define asm_band(as, ir) asm_bitop(as, ir, ARMI_AND) +#define asm_bor(as, ir) asm_bitop(as, ir, ARMI_ORR) +#define asm_bxor(as, ir) asm_bitop(as, ir, ARMI_EOR) + +static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh) +{ + if (irref_isk(ir->op2)) { /* Constant shifts. */ + /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */ + /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. 
*/ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + int32_t shift = (IR(ir->op2)->i & 31); + emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left); + } +} + +#define asm_bshl(as, ir) asm_bitshift(as, ir, ARMSH_LSL) +#define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR) +#define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR) +#define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR) +#define asm_brol(as, ir) lua_assert(0) + +static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) +{ + uint32_t kcmp = 0, kmov = 0; + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + Reg right = 0; + if (irref_isk(ir->op2)) { + kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i); + if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i); + } + if (!kmov) { + kcmp = 0; + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + if (kmov || dest != right) { + emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right); + cc ^= 1; /* Must use opposite conditions for paired moves. */ + } else { + cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */ + } + if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left); + emit_nm(as, ARMI_CMP^kcmp, left, right); +} + +#if LJ_SOFTFP +static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; + RegSet drop = RSET_SCRATCH; + Reg r; + IRRef args[4]; + args[0] = ir->op1; args[1] = (ir+1)->op1; + args[2] = ir->op2; args[3] = (ir+1)->op2; + /* __aeabi_cdcmple preserves r0-r3. */ + if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); + if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r); + if (!rset_test(as->freeset, RID_R2) && + regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2); + if (!rset_test(as->freeset, RID_R3) && + regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3); + ra_evictset(as, drop); + ra_destpair(as, ir); + emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3); + emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2); + emit_call(as, (void *)ci->func); + for (r = RID_R0; r <= RID_R3; r++) + ra_leftov(as, r, args[r-RID_R0]); +} +#else +static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc) +{ + Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = ((left >> 8) & 15); left &= 15; + if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left); + if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right); + emit_d(as, ARMI_VMRS, 0); + emit_dm(as, ARMI_VCMP_D, left, right); +} +#endif + +static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc) +{ +#if LJ_SOFTFP + UNUSED(fcc); +#else + if (irt_isnum(ir->t)) + asm_fpmin_max(as, ir, fcc); + else +#endif + asm_intmin_max(as, ir, cc); +} + +#define asm_min(as, ir) asm_min_max(as, ir, CC_GT, CC_HI) +#define asm_max(as, ir) asm_min_max(as, ir, CC_LT, CC_LO) + +/* -- Comparisons --------------------------------------------------------- */ + +/* Map of comparisons to flags. ORDER IR. 
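+** Each entry packs two condition codes: the low nibble guards integer
+** compares, the high nibble guards FP compares, and both hold the
+** negated comparison (the guard branches out when the comparison
+** fails). Rows marked with an x in the column comment have their FP
+** operands swapped by asm_fpcomp()/asm_sfpcomp() before the compare.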
*/ +static const uint8_t asm_compmap[IR_ABC+1] = { + /* op FP swp int cc FP cc */ + /* LT */ CC_GE + (CC_HS << 4), + /* GE x */ CC_LT + (CC_HI << 4), + /* LE */ CC_GT + (CC_HI << 4), + /* GT x */ CC_LE + (CC_HS << 4), + /* ULT x */ CC_HS + (CC_LS << 4), + /* UGE */ CC_LO + (CC_LO << 4), + /* ULE x */ CC_HI + (CC_LO << 4), + /* UGT */ CC_LS + (CC_LS << 4), + /* EQ */ CC_NE + (CC_NE << 4), + /* NE */ CC_EQ + (CC_EQ << 4), + /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */ +}; + +#if LJ_SOFTFP +/* FP comparisons. */ +static void asm_sfpcomp(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; + RegSet drop = RSET_SCRATCH; + Reg r; + IRRef args[4]; + int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1); + args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1; + args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2; + /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */ + for (r = RID_R0; r <= RID_R3; r++) + if (!rset_test(as->freeset, r) && + regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r); + ra_evictset(as, drop); + asm_guardcc(as, (asm_compmap[ir->o] >> 4)); + emit_call(as, (void *)ci->func); + for (r = RID_R0; r <= RID_R3; r++) + ra_leftov(as, r, args[r-RID_R0]); +} +#else +/* FP comparisons. */ +static void asm_fpcomp(ASMState *as, IRIns *ir) +{ + Reg left, right; + ARMIns ai; + int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1); + if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) { + left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15); + right = 0; + ai = ARMI_VCMPZ_D; + } else { + left = ra_alloc2(as, ir, RSET_FPR); + if (swp) { + right = (left & 15); left = ((left >> 8) & 15); + } else { + right = ((left >> 8) & 15); left &= 15; + } + ai = ARMI_VCMP_D; + } + asm_guardcc(as, (asm_compmap[ir->o] >> 4)); + emit_d(as, ARMI_VMRS, 0); + emit_dm(as, ai, left, right); +} +#endif + +/* Integer comparisons. */ +static void asm_intcomp(ASMState *as, IRIns *ir) +{ + ARMCC cc = (asm_compmap[ir->o] & 15); + IRRef lref = ir->op1, rref = ir->op2; + Reg left; + uint32_t m; + int cmpprev0 = 0; + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); + if (asm_swapops(as, lref, rref)) { + Reg tmp = lref; lref = rref; rref = tmp; + if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ + else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */ + } + if (irref_isk(rref) && IR(rref)->i == 0) { + IRIns *irl = IR(lref); + cmpprev0 = (irl+1 == ir); + /* Combine comp(BAND(left, right), 0) into tst left, right. */ + if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) { + IRRef blref = irl->op1, brref = irl->op2; + uint32_t m2 = 0; + Reg bleft; + if (asm_swapops(as, blref, brref)) { + Reg tmp = blref; blref = brref; brref = tmp; + } + if (irref_isk(brref)) { + m2 = emit_isk12(ARMI_AND, IR(brref)->i); + if ((m2 & (ARMI_AND^ARMI_BIC))) + goto notst; /* Not beneficial if we miss a constant operand. */ + } + if (cc == CC_GE) cc = CC_PL; + else if (cc == CC_LT) cc = CC_MI; + else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */ + bleft = ra_alloc1(as, blref, RSET_GPR); + if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft)); + asm_guardcc(as, cc); + emit_n(as, ARMI_TST^m2, bleft); + return; + } + } +notst: + left = ra_alloc1(as, lref, RSET_GPR); + m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left)); + asm_guardcc(as, cc); + emit_n(as, ARMI_CMP^m, left); + /* Signed comparison with zero and referencing previous ins? 
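+** If the zero-compared operand is produced by the immediately
+** preceding instruction, as->flagmcp remembers this CMP so that
+** asm_intop_s() or asm_bitop() can later set the S bit on that ALU op
+** and drop the explicit cmp r, #0 altogether.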
*/ + if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE)) + as->flagmcp = as->mcp; /* Allow elimination of the compare. */ +} + +static void asm_comp(ASMState *as, IRIns *ir) +{ +#if !LJ_SOFTFP + if (irt_isnum(ir->t)) + asm_fpcomp(as, ir); + else +#endif + asm_intcomp(as, ir); +} + +#define asm_equal(as, ir) asm_comp(as, ir) + +#if LJ_HASFFI +/* 64 bit integer comparisons. */ +static void asm_int64comp(ASMState *as, IRIns *ir) +{ + int signedcomp = (ir->o <= IR_GT); + ARMCC cclo, cchi; + Reg leftlo, lefthi; + uint32_t mlo, mhi; + RegSet allow = RSET_GPR, oldfree; + + /* Always use unsigned comparison for loword. */ + cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15; + leftlo = ra_alloc1(as, ir->op1, allow); + oldfree = as->freeset; + mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo)); + allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */ + + /* Use signed or unsigned comparison for hiword. */ + cchi = asm_compmap[ir->o] & 15; + lefthi = ra_alloc1(as, (ir+1)->op1, allow); + mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi)); + + /* All register allocations must be performed _before_ this point. */ + if (signedcomp) { + MCLabel l_around = emit_label(as); + asm_guardcc(as, cclo); + emit_n(as, ARMI_CMP^mlo, leftlo); + emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around); + if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */ + asm_guardcc(as, cchi); + } else { + asm_guardcc(as, cclo); + emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo); + } + emit_n(as, ARMI_CMP^mhi, lefthi); +} +#endif + +/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ + +/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ +static void asm_hiop(ASMState *as, IRIns *ir) +{ +#if LJ_HASFFI || LJ_SOFTFP + /* HIOP is marked as a store because it needs its own DCE logic. */ + int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ + if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; + if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */ + as->curins--; /* Always skip the loword comparison. */ +#if LJ_SOFTFP + if (!irt_isint(ir->t)) { + asm_sfpcomp(as, ir-1); + return; + } +#endif +#if LJ_HASFFI + asm_int64comp(as, ir-1); +#endif + return; +#if LJ_SOFTFP + } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) { + as->curins--; /* Always skip the loword min/max. */ + if (uselo || usehi) + asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO); + return; +#elif LJ_HASFFI + } else if ((ir-1)->o == IR_CONV) { + as->curins--; /* Always skip the CONV. */ + if (usehi || uselo) + asm_conv64(as, ir); + return; +#endif + } else if ((ir-1)->o == IR_XSTORE) { + if ((ir-1)->r != RID_SINK) + asm_xstore_(as, ir, 4); + return; + } + if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ + switch ((ir-1)->o) { +#if LJ_HASFFI + case IR_ADD: + as->curins--; + asm_intop(as, ir, ARMI_ADC); + asm_intop(as, ir-1, ARMI_ADD|ARMI_S); + break; + case IR_SUB: + as->curins--; + asm_intop(as, ir, ARMI_SBC); + asm_intop(as, ir-1, ARMI_SUB|ARMI_S); + break; + case IR_NEG: + as->curins--; + asm_intneg(as, ir, ARMI_RSC); + asm_intneg(as, ir-1, ARMI_RSB|ARMI_S); + break; +#endif +#if LJ_SOFTFP + case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + case IR_STRTO: + if (!uselo) + ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. 
*/ + break; +#endif + case IR_CALLN: + case IR_CALLS: + case IR_CALLXS: + if (!uselo) + ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ + break; +#if LJ_SOFTFP + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: +#endif + case IR_CNEWI: + /* Nothing to do here. Handled by lo op itself. */ + break; + default: lua_assert(0); break; + } +#else + UNUSED(as); UNUSED(ir); lua_assert(0); +#endif +} + +/* -- Profiling ----------------------------------------------------------- */ + +static void asm_prof(ASMState *as, IRIns *ir) +{ + UNUSED(ir); + asm_guardcc(as, CC_NE); + emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP); + emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask); +} + +/* -- Stack handling ------------------------------------------------------ */ + +/* Check Lua stack size for overflow. Use exit handler as fallback. */ +static void asm_stack_check(ASMState *as, BCReg topslot, + IRIns *irp, RegSet allow, ExitNo exitno) +{ + Reg pbase; + uint32_t k; + if (irp) { + if (!ra_hasspill(irp->s)) { + pbase = irp->r; + lua_assert(ra_hasreg(pbase)); + } else if (allow) { + pbase = rset_pickbot(allow); + } else { + pbase = RID_RET; + emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */ + } + } else { + pbase = RID_BASE; + } + emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno)); + k = emit_isk12(0, (int32_t)(8*topslot)); + lua_assert(k); + emit_n(as, ARMI_CMP^k, RID_TMP); + emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase); + emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, + (int32_t)offsetof(lua_State, maxstack)); + if (irp) { /* Must not spill arbitrary registers in head of side trace. */ + int32_t i = i32ptr(&J2G(as->J)->cur_L); + if (ra_hasspill(irp->s)) + emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s)); + emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095)); + if (ra_hasspill(irp->s) && !allow) + emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */ + emit_loadi(as, RID_TMP, (i & ~4095)); + } else { + emit_getgl(as, RID_TMP, cur_L); + } +} + +/* Restore Lua stack from on-trace state. */ +static void asm_stack_restore(ASMState *as, SnapShot *snap) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; + MSize n, nent = snap->nent; + /* Store the value of all modified slots to the Lua stack. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + int32_t ofs = 8*((int32_t)s-1); + IRRef ref = snap_ref(sn); + IRIns *ir = IR(ref); + if ((sn & SNAP_NORESTORE)) + continue; + if (irt_isnum(ir->t)) { +#if LJ_SOFTFP + RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); + Reg tmp; + lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. 
*/ + tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, + rset_exclude(RSET_GPREVEN, RID_BASE)); + emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs); + if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1); + tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd); + emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4); +#else + Reg src = ra_alloc1(as, ref, RSET_FPR); + emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs); +#endif + } else { + RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); + Reg type; + lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE)); + emit_lso(as, ARMI_STR, src, RID_BASE, ofs); + if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1); + } + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + if (s == 0) continue; /* Do not overwrite link to previous frame. */ + type = ra_allock(as, (int32_t)(*flinks--), odd); +#if LJ_SOFTFP + } else if ((sn & SNAP_SOFTFPNUM)) { + type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE)); +#endif + } else { + type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd); + } + emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4); + } + checkmclim(as); + } + lua_assert(map + nent == flinks); +} + +/* -- GC handling --------------------------------------------------------- */ + +/* Check GC threshold and do one or more GC steps. */ +static void asm_gc_check(ASMState *as) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; + IRRef args[2]; + MCLabel l_end; + Reg tmp1, tmp2; + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ + asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ + emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ASMREF_TMP2; /* MSize steps */ + asm_gencall(as, ci, args); + tmp1 = ra_releasetmp(as, ASMREF_TMP1); + tmp2 = ra_releasetmp(as, ASMREF_TMP2); + emit_loadi(as, tmp2, as->gcsteps); + /* Jump around GC step if GC total < GC threshold. */ + emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end); + emit_nm(as, ARMI_CMP, RID_TMP, tmp2); + emit_lso(as, ARMI_LDR, tmp2, tmp1, + (int32_t)offsetof(global_State, gc.threshold)); + emit_lso(as, ARMI_LDR, RID_TMP, tmp1, + (int32_t)offsetof(global_State, gc.total)); + ra_allockreg(as, i32ptr(J2G(as->J)), tmp1); + as->gcsteps = 0; + checkmclim(as); +} + +/* -- Loop handling ------------------------------------------------------- */ + +/* Fixup the loop branch. */ +static void asm_loop_fixup(ASMState *as) +{ + MCode *p = as->mctop; + MCode *target = as->mcp; + if (as->loopinv) { /* Inverted loop branch? */ + /* asm_guardcc already inverted the bcc and patched the final bl. */ + p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu); + } else { + p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu); + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Reload L register from g->cur_L. */ +static void asm_head_lreg(ASMState *as) +{ + IRIns *ir = IR(ASMREF_L); + if (ra_used(ir)) { + Reg r = ra_dest(as, ir, RSET_GPR); + emit_getgl(as, r, cur_L); + ra_evictk(as); + } +} + +/* Coalesce BASE register for a root trace. */ +static void asm_head_root_base(ASMState *as) +{ + IRIns *ir; + asm_head_lreg(as); + ir = IR(REF_BASE); + if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t))) + ra_spill(as, ir); + ra_destreg(as, ir, RID_BASE); +} + +/* Coalesce BASE register for a side trace. 
*/ +static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) +{ + IRIns *ir; + asm_head_lreg(as); + ir = IR(REF_BASE); + if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t))) + ra_spill(as, ir); + if (ra_hasspill(irp->s)) { + rset_clear(allow, ra_dest(as, ir, allow)); + } else { + Reg r = irp->r; + lua_assert(ra_hasreg(r)); + rset_clear(allow, r); + if (r != ir->r && !rset_test(as->freeset, r)) + ra_restore(as, regcost_ref(as->cost[r])); + ra_destreg(as, ir, r); + } + return allow; +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Fixup the tail code. */ +static void asm_tail_fixup(ASMState *as, TraceNo lnk) +{ + MCode *p = as->mctop; + MCode *target; + int32_t spadj = as->T->spadjust; + if (spadj == 0) { + as->mctop = --p; + } else { + /* Patch stack adjustment. */ + uint32_t k = emit_isk12(ARMI_ADD, spadj); + lua_assert(k); + p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP); + } + /* Patch exit branch. */ + target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; + p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu); +} + +/* Prepare tail of code. */ +static void asm_tail_prep(ASMState *as) +{ + MCode *p = as->mctop - 1; /* Leave room for exit branch. */ + if (as->loopref) { + as->invmcp = as->mcp = p; + } else { + as->mcp = p-1; /* Leave room for stack pointer adjustment. */ + as->invmcp = NULL; + } + *p = 0; /* Prevent load/store merging. */ +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Ensure there are enough stack slots for call arguments. */ +static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + IRRef args[CCI_NARGS_MAX*2]; + uint32_t i, nargs = CCI_XNARGS(ci); + int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0; + asm_collectargs(as, ir, ci, args); + for (i = 0; i < nargs; i++) { + if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) { + if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) { + if (irt_isnum(IR(args[i])->t)) { + if (nfpr > 0) nfpr--; + else fprodd = 0, nslots = (nslots + 3) & ~1; + } else { + if (fprodd) fprodd--; + else if (nfpr > 0) fprodd = 1, nfpr--; + else nslots++; + } + } else if (irt_isnum(IR(args[i])->t)) { + ngpr &= ~1; + if (ngpr > 0) ngpr -= 2; else nslots += 2; + } else { + if (ngpr > 0) ngpr--; else nslots++; + } + } else { + if (ngpr > 0) ngpr--; else nslots++; + } + } + if (nslots > as->evenspill) /* Leave room for args in stack slots. */ + as->evenspill = nslots; + return REGSP_HINT(RID_RET); +} + +static void asm_setup_target(ASMState *as) +{ + /* May need extra exit for asm_stack_check on side traces. */ + asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0)); +} + +/* -- Trace patching ------------------------------------------------------ */ + +/* Patch exit jumps of existing machine code to a new target. */ +void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) +{ + MCode *p = T->mcode; + MCode *pe = (MCode *)((char *)p + T->szmcode); + MCode *cstart = NULL, *cend = p; + MCode *mcarea = lj_mcode_patch(J, p, 0); + MCode *px = exitstub_addr(J, exitno) - 2; + for (; p < pe; p++) { + /* Look for bl_cc exitstub, replace with b_cc target. 
*/ + uint32_t ins = *p; + if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u && + ((ins ^ (px-p)) & 0x00ffffffu) == 0) { + *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu); + cend = p+1; + if (!cstart) cstart = p; + } + } + lua_assert(cstart != NULL); + lj_mcode_sync(cstart, cend); + lj_mcode_patch(J, mcarea, 1); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_mips.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_mips.h new file mode 100644 index 00000000000..adea0e32367 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_mips.h @@ -0,0 +1,1833 @@ +/* +** MIPS IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Register allocator extensions --------------------------------------- */ + +/* Allocate a register with a hint. */ +static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) +{ + Reg r = IR(ref)->r; + if (ra_noreg(r)) { + if (!ra_hashint(r) && !iscrossref(as, ref)) + ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ + r = ra_allocref(as, ref, allow); + } + ra_noweak(as, r); + return r; +} + +/* Allocate a register or RID_ZERO. */ +static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow) +{ + Reg r = IR(ref)->r; + if (ra_noreg(r)) { + if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0) + return RID_ZERO; + r = ra_allocref(as, ref, allow); + } else { + ra_noweak(as, r); + } + return r; +} + +/* Allocate two source registers for three-operand instructions. */ +static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); + Reg left = irl->r, right = irr->r; + if (ra_hasreg(left)) { + ra_noweak(as, left); + if (ra_noreg(right)) + right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left)); + else + ra_noweak(as, right); + } else if (ra_hasreg(right)) { + ra_noweak(as, right); + left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right)); + } else if (ra_hashint(right)) { + right = ra_alloc1z(as, ir->op2, allow); + left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right)); + } else { + left = ra_alloc1z(as, ir->op1, allow); + right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left)); + } + return left | (right << 8); +} + +/* -- Guard handling ------------------------------------------------------ */ + +/* Need some spare long-range jump slots, for out-of-range branches. */ +#define MIPS_SPAREJUMP 4 + +/* Setup spare long-range jump slots per mcarea. */ +static void asm_sparejump_setup(ASMState *as) +{ + MCode *mxp = as->mcbot; + /* Assumes sizeof(MCLink) == 8. */ + if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) { + lua_assert(MIPSI_NOP == 0); + memset(mxp+2, 0, MIPS_SPAREJUMP*8); + mxp += MIPS_SPAREJUMP*2; + lua_assert(mxp < as->mctop); + lj_mcode_sync(as->mcbot, mxp); + lj_mcode_commitbot(as->J, mxp); + as->mcbot = mxp; + as->mclim = as->mcbot + MCLIM_REDZONE; + } +} + +/* Setup exit stub after the end of each trace. */ +static void asm_exitstub_setup(ASMState *as) +{ + MCode *mxp = as->mctop; + /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */ + *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno; + *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu); + lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0); + *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0; + as->mctop = mxp; +} + +/* Keep this in-sync with exitstub_trace_addr(). 
*/ +#define asm_exitstub_addr(as) ((as)->mctop) + +/* Emit conditional branch to exit for guard. */ +static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt) +{ + MCode *target = asm_exitstub_addr(as); + MCode *p = as->mcp; + if (LJ_UNLIKELY(p == as->invmcp)) { + as->invmcp = NULL; + as->loopinv = 1; + as->mcp = p+1; + mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */ + target = p; /* Patch target later in asm_loop_fixup. */ + } + emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); + emit_branch(as, mi, rs, rt, target); +} + +/* -- Operand fusion ------------------------------------------------------ */ + +/* Limit linear search to this distance. Avoids O(n^2) behavior. */ +#define CONFLICT_SEARCH_LIM 31 + +/* Check if there's no conflicting instruction between curins and ref. */ +static int noconflict(ASMState *as, IRRef ref, IROp conflict) +{ + IRIns *ir = as->ir; + IRRef i = as->curins; + if (i > ref + CONFLICT_SEARCH_LIM) + return 0; /* Give up, ref is too far away. */ + while (--i > ref) + if (ir[i].o == conflict) + return 0; /* Conflict found. */ + return 1; /* Ok, no conflict. */ +} + +/* Fuse the array base of colocated arrays. */ +static int32_t asm_fuseabase(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && + !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) + return (int32_t)sizeof(GCtab); + return 0; +} + +/* Fuse array/hash/upvalue reference into register+offset operand. */ +static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r)) { + if (ir->o == IR_AREF) { + if (mayfuse(as, ref)) { + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + ofs += 8*IR(ir->op2)->i; + if (checki16(ofs)) { + *ofsp = ofs; + return ra_alloc1(as, refa, allow); + } + } + } + } else if (ir->o == IR_HREFK) { + if (mayfuse(as, ref)) { + int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); + if (checki16(ofs)) { + *ofsp = ofs; + return ra_alloc1(as, ir->op1, allow); + } + } + } else if (ir->o == IR_UREFC) { + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); + int32_t jgl = (intptr_t)J2G(as->J); + if ((uint32_t)(ofs-jgl) < 65536) { + *ofsp = ofs-jgl-32768; + return RID_JGL; + } else { + *ofsp = (int16_t)ofs; + return ra_allock(as, ofs-(int16_t)ofs, allow); + } + } + } + } + *ofsp = 0; + return ra_alloc1(as, ref, allow); +} + +/* Fuse XLOAD/XSTORE reference into load/store operand. */ +static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref, + RegSet allow, int32_t ofs) +{ + IRIns *ir = IR(ref); + Reg base; + if (ra_noreg(ir->r) && canfuse(as, ir)) { + if (ir->o == IR_ADD) { + int32_t ofs2; + if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) { + ref = ir->op1; + ofs = ofs2; + } + } else if (ir->o == IR_STRREF) { + int32_t ofs2 = 65536; + lua_assert(ofs == 0); + ofs = (int32_t)sizeof(GCstr); + if (irref_isk(ir->op2)) { + ofs2 = ofs + IR(ir->op2)->i; + ref = ir->op1; + } else if (irref_isk(ir->op1)) { + ofs2 = ofs + IR(ir->op1)->i; + ref = ir->op2; + } + if (!checki16(ofs2)) { + /* NYI: Fuse ADD with constant. 
*/ + Reg right, left = ra_alloc2(as, ir, allow); + right = (left >> 8); left &= 255; + emit_hsi(as, mi, rt, RID_TMP, ofs); + emit_dst(as, MIPSI_ADDU, RID_TMP, left, right); + return; + } + ofs = ofs2; + } + } + base = ra_alloc1(as, ref, allow); + emit_hsi(as, mi, rt, base, ofs); +} + +/* -- Calls --------------------------------------------------------------- */ + +/* Generate a call to a C function. */ +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t n, nargs = CCI_XNARGS(ci); + int32_t ofs = 16; + Reg gpr, fpr = REGARG_FIRSTFPR; + if ((void *)ci->func) + emit_call(as, (void *)ci->func); + for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++) + as->cost[gpr] = REGCOST(~0u, ASMREF_L); + gpr = REGARG_FIRSTGPR; + for (n = 0; n < nargs; n++) { /* Setup args. */ + IRRef ref = args[n]; + if (ref) { + IRIns *ir = IR(ref); + if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR && + !(ci->flags & CCI_VARARG)) { + lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ + ra_leftov(as, fpr, ref); + fpr += 2; + gpr += irt_isnum(ir->t) ? 2 : 1; + } else { + fpr = REGARG_LASTFPR+1; + if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1; + if (gpr <= REGARG_LASTGPR) { + lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ + if (irt_isfp(ir->t)) { + RegSet of = as->freeset; + Reg r; + /* Workaround to protect argument GPRs from being used for remat. */ + as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); + r = ra_alloc1(as, ref, RSET_FPR); + as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); + if (irt_isnum(ir->t)) { + emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1); + emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r); + lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */ + gpr += 2; + } else if (irt_isfloat(ir->t)) { + emit_tg(as, MIPSI_MFC1, gpr, r); + gpr++; + } + } else { + ra_leftov(as, gpr, ref); + gpr++; + } + } else { + Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); + if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; + emit_spstore(as, ir, r, ofs); + ofs += irt_isnum(ir->t) ? 8 : 4; + } + } + } else { + fpr = REGARG_LASTFPR+1; + if (gpr <= REGARG_LASTGPR) + gpr++; + else + ofs += 4; + } + checkmclim(as); + } +} + +/* Setup result reg/sp for call. Evict scratch regs. */ +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + RegSet drop = RSET_SCRATCH; + int hiop = ((ir+1)->o == IR_HIOP); + if ((ci->flags & CCI_NOFPRCLOBBER)) + drop &= ~RSET_FPR; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + if (hiop && ra_hasreg((ir+1)->r)) + rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ + ra_evictset(as, drop); /* Evictions must be performed first. 
*/ + if (ra_used(ir)) { + lua_assert(!irt_ispri(ir->t)); + if (irt_isfp(ir->t)) { + if ((ci->flags & CCI_CASTU64)) { + int32_t ofs = sps_scale(ir->s); + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1); + emit_tg(as, MIPSI_MTC1, RID_RETLO, dest); + } + if (ofs) { + emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0)); + emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4)); + } + } else { + ra_destreg(as, ir, RID_FPRET); + } + } else if (hiop) { + ra_destpair(as, ir); + } else { + ra_destreg(as, ir, RID_RET); + } + } +} + +static void asm_callx(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX*2]; + CCallInfo ci; + IRRef func; + IRIns *irf; + ci.flags = asm_callx_flags(as, ir); + asm_collectargs(as, ir, &ci, args); + asm_setupresult(as, ir, &ci); + func = ir->op2; irf = IR(func); + if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } + if (irref_isk(func)) { /* Call to constant address. */ + ci.func = (ASMFunction)(void *)(irf->i); + } else { /* Need specific register for indirect calls. */ + Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR)); + MCode *p = as->mcp; + if (r == RID_CFUNCADDR) + *--p = MIPSI_NOP; + else + *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r); + *--p = MIPSI_JALR | MIPSF_S(r); + as->mcp = p; + ci.func = (ASMFunction)(void *)0; + } + asm_gencall(as, &ci, args); +} + +static void asm_callround(ASMState *as, IRIns *ir, IRCallID id) +{ + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)| + RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR); + if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); + ra_evictset(as, drop); + ra_destreg(as, ir, RID_FPRET); + emit_call(as, (void *)lj_ir_callinfo[id].func); + ra_leftov(as, REGARG_FIRSTFPR, ir->op1); +} + +/* -- Returns ------------------------------------------------------------- */ + +/* Return to lower frame. Guard that it goes to the right spot. */ +static void asm_retf(ASMState *as, IRIns *ir) +{ + Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); + void *pc = ir_kptr(IR(ir->op2)); + int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); + as->topslot -= (BCReg)delta; + if ((int32_t)as->topslot < 0) as->topslot = 0; + irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. 
*/ + emit_setgl(as, base, jit_base); + emit_addptr(as, base, -8*delta); + asm_guard(as, MIPSI_BNE, RID_TMP, + ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); + emit_tsi(as, MIPSI_LW, RID_TMP, base, -8); +} + +/* -- Type conversions ---------------------------------------------------- */ + +static void asm_tointg(ASMState *as, IRIns *ir, Reg left) +{ + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_guard(as, MIPSI_BC1F, 0, 0); + emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left); + emit_fg(as, MIPSI_CVT_D_W, tmp, tmp); + emit_tg(as, MIPSI_MFC1, dest, tmp); + emit_fg(as, MIPSI_CVT_W_D, tmp, left); +} + +static void asm_tobit(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_FPR; + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, allow); + Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); + Reg tmp = ra_scratch(as, rset_clear(allow, right)); + emit_tg(as, MIPSI_MFC1, dest, tmp); + emit_fgh(as, MIPSI_ADD_D, tmp, left, right); +} + +static void asm_conv(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); + int stfp = (st == IRT_NUM || st == IRT_FLOAT); + IRRef lref = ir->op1; + lua_assert(irt_type(ir->t) != st); + lua_assert(!(irt_isint64(ir->t) || + (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ + if (irt_isfp(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + if (stfp) { /* FP to FP conversion. */ + emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S, + dest, ra_alloc1(as, lref, RSET_FPR)); + } else if (st == IRT_U32) { /* U32 to FP conversion. */ + /* y = (x ^ 0x8000000) + 2147483648.0 */ + Reg left = ra_alloc1(as, lref, RSET_GPR); + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest)); + emit_fgh(as, irt_isfloat(ir->t) ? MIPSI_ADD_S : MIPSI_ADD_D, + dest, dest, tmp); + emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W, + dest, dest); + if (irt_isfloat(ir->t)) + emit_lsptr(as, MIPSI_LWC1, (tmp & 31), + (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)), + RSET_GPR); + else + emit_lsptr(as, MIPSI_LDC1, (tmp & 31), + (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)), + RSET_GPR); + emit_tg(as, MIPSI_MTC1, RID_TMP, dest); + emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left); + emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); + } else { /* Integer to FP conversion. */ + Reg left = ra_alloc1(as, lref, RSET_GPR); + emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W, + dest, dest); + emit_tg(as, MIPSI_MTC1, left, dest); + } + } else if (stfp) { /* FP to integer conversion. */ + if (irt_isguard(ir->t)) { + /* Checked conversions are only supported from number to int. */ + lua_assert(irt_isint(ir->t) && st == IRT_NUM); + asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, lref, RSET_FPR); + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + if (irt_isu32(ir->t)) { + /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */ + emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP); + emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); + emit_tg(as, MIPSI_MFC1, dest, tmp); + emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D, + tmp, tmp); + emit_fgh(as, st == IRT_FLOAT ? 
MIPSI_SUB_S : MIPSI_SUB_D, + tmp, left, tmp); + if (st == IRT_FLOAT) + emit_lsptr(as, MIPSI_LWC1, (tmp & 31), + (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)), + RSET_GPR); + else + emit_lsptr(as, MIPSI_LDC1, (tmp & 31), + (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)), + RSET_GPR); + } else { + emit_tg(as, MIPSI_MFC1, dest, tmp); + emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D, + tmp, left); + } + } + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); + if ((ir->op2 & IRCONV_SEXT)) { + if ((as->flags & JIT_F_MIPS32R2)) { + emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left); + } else { + uint32_t shift = st == IRT_I8 ? 24 : 16; + emit_dta(as, MIPSI_SRA, dest, dest, shift); + emit_dta(as, MIPSI_SLL, dest, left, shift); + } + } else { + emit_tsi(as, MIPSI_ANDI, dest, left, + (int32_t)(st == IRT_U8 ? 0xff : 0xffff)); + } + } else { /* 32/64 bit integer conversions. */ + /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */ + ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ + } + } +} + +static void asm_strto(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; + IRRef args[2]; + RegSet drop = RSET_SCRATCH; + if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ + ra_evictset(as, drop); + asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */ + args[0] = ir->op1; /* GCstr *str */ + args[1] = ASMREF_TMP1; /* TValue *n */ + asm_gencall(as, ci, args); + /* Store the result to the spill slot or temp slots. */ + emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), + RID_SP, sps_scale(ir->s)); +} + +/* -- Memory references --------------------------------------------------- */ + +/* Get pointer to TValue. */ +static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) +{ + IRIns *ir = IR(ref); + if (irt_isnum(ir->t)) { + if (irref_isk(ref)) /* Use the number constant itself as a TValue. */ + ra_allockreg(as, i32ptr(ir_knum(ir)), dest); + else /* Otherwise force a spill and use the spill slot. */ + emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir)); + } else { + /* Otherwise use g->tmptv to hold the TValue. */ + RegSet allow = rset_exclude(RSET_GPR, dest); + Reg type; + emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, allow); + emit_setgl(as, src, tmptv.gcr); + } + type = ra_allock(as, irt_toitype(ir->t), allow); + emit_setgl(as, type, tmptv.it); + } +} + +static void asm_aref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx, base; + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + ofs += 8*IR(ir->op2)->i; + if (checki16(ofs)) { + base = ra_alloc1(as, refa, RSET_GPR); + emit_tsi(as, MIPSI_ADDIU, dest, base, ofs); + return; + } + } + base = ra_alloc1(as, ir->op1, RSET_GPR); + idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); + emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base); + emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3); +} + +/* Inlined hash lookup. Specialized for key type and for const keys. 
+** The equivalent C code is: +** Node *n = hashkey(t, key); +** do { +** if (lj_obj_equal(&n->key, key)) return &n->val; +** } while ((n = nextnode(n))); +** return niltv(L); +*/ +static void asm_href(ASMState *as, IRIns *ir, IROp merge) +{ + RegSet allow = RSET_GPR; + int destused = ra_used(ir); + Reg dest = ra_dest(as, ir, allow); + Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); + Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2; + IRRef refkey = ir->op2; + IRIns *irkey = IR(refkey); + IRType1 kt = irkey->t; + uint32_t khash; + MCLabel l_end, l_loop, l_next; + + rset_clear(allow, tab); + if (irt_isnum(kt)) { + key = ra_alloc1(as, refkey, RSET_FPR); + tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); + } else if (!irt_ispri(kt)) { + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + type = ra_allock(as, irt_toitype(irkey->t), allow); + rset_clear(allow, type); + } + tmp2 = ra_scratch(as, allow); + rset_clear(allow, tmp2); + + /* Key not found in chain: jump to exit (if merged) or load niltv. */ + l_end = emit_label(as); + as->invmcp = NULL; + if (merge == IR_NE) + asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO); + else if (destused) + emit_loada(as, dest, niltvg(J2G(as->J))); + /* Follow hash chain until the end. */ + emit_move(as, dest, tmp2); + l_loop = --as->mcp; + emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, next)); + l_next = emit_label(as); + + /* Type and value comparison. */ + if (merge == IR_EQ) { /* Must match asm_guard(). */ + emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); + l_end = asm_exitstub_addr(as); + } + if (irt_isnum(kt)) { + emit_branch(as, MIPSI_BC1T, 0, 0, l_end); + emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key); + *--as->mcp = MIPSI_NOP; /* Avoid NaN comparison overhead. */ + emit_branch(as, MIPSI_BEQ, tmp2, RID_ZERO, l_next); + emit_tsi(as, MIPSI_SLTIU, tmp2, tmp2, (int32_t)LJ_TISNUM); + emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n)); + } else { + if (irt_ispri(kt)) { + emit_branch(as, MIPSI_BEQ, tmp2, type, l_end); + } else { + emit_branch(as, MIPSI_BEQ, tmp1, key, l_end); + emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.gcr)); + emit_branch(as, MIPSI_BNE, tmp2, type, l_next); + } + } + emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.it)); + *l_loop = MIPSI_BNE | MIPSF_S(tmp2) | ((as->mcp-l_loop-1) & 0xffffu); + + /* Load main position relative to tab->node into dest. */ + khash = irref_isk(refkey) ? ir_khash(irkey) : 1; + if (khash == 0) { + emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node)); + } else { + Reg tmphash = tmp1; + if (irref_isk(refkey)) + tmphash = ra_allock(as, khash, allow); + emit_dst(as, MIPSI_ADDU, dest, dest, tmp1); + lua_assert(sizeof(Node) == 24); + emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1); + emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3); + emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5); + emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash); + emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); + if (irref_isk(refkey)) { + /* Nothing to do. */ + } else if (irt_isstr(kt)) { + emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash)); + } else { /* Must match with hash*() in lj_tab.c. 
*/ + emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2); + emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31); + emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2); + emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31); + emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest); + if (irt_isnum(kt)) { + emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1); + if ((as->flags & JIT_F_MIPS32R2)) { + emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31); + } else { + emit_dst(as, MIPSI_OR, dest, dest, tmp1); + emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1); + emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31); + } + emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1); + emit_tg(as, MIPSI_MFC1, tmp2, key); + emit_tg(as, MIPSI_MFC1, tmp1, key+1); + } else { + emit_dst(as, MIPSI_XOR, tmp2, key, tmp1); + emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31); + emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow)); + } + } + } +} + +static void asm_hrefk(ASMState *as, IRIns *ir) +{ + IRIns *kslot = IR(ir->op2); + IRIns *irkey = IR(kslot->op1); + int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); + int32_t kofs = ofs + (int32_t)offsetof(Node, key); + Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; + Reg node = ra_alloc1(as, ir->op1, RSET_GPR); + Reg key = RID_NONE, type = RID_TMP, idx = node; + RegSet allow = rset_exclude(RSET_GPR, node); + int32_t lo, hi; + lua_assert(ofs % sizeof(Node) == 0); + if (ofs > 32736) { + idx = dest; + rset_clear(allow, dest); + kofs = (int32_t)offsetof(Node, key); + } else if (ra_hasreg(dest)) { + emit_tsi(as, MIPSI_ADDIU, dest, node, ofs); + } + if (!irt_ispri(irkey->t)) { + key = ra_scratch(as, allow); + rset_clear(allow, key); + } + if (irt_isnum(irkey->t)) { + lo = (int32_t)ir_knum(irkey)->u32.lo; + hi = (int32_t)ir_knum(irkey)->u32.hi; + } else { + lo = irkey->i; + hi = irt_toitype(irkey->t); + if (!ra_hasreg(key)) + goto nolo; + } + asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO); +nolo: + asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO); + if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0)); + emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4)); + if (ofs > 32736) + emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow)); +} + +static void asm_uref(ASMState *as, IRIns *ir) +{ + /* NYI: Check that UREFO is still open and not aliasing a slot. 
*/ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; + emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR); + } else { + Reg uv = ra_scratch(as, RSET_GPR); + Reg func = ra_alloc1(as, ir->op1, RSET_GPR); + if (ir->o == IR_UREFC) { + asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); + emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv)); + emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); + } else { + emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v)); + } + emit_tsi(as, MIPSI_LW, uv, func, + (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); + } +} + +static void asm_fref(ASMState *as, IRIns *ir) +{ + UNUSED(as); UNUSED(ir); + lua_assert(!ra_used(ir)); +} + +static void asm_strref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + IRRef ref = ir->op2, refk = ir->op1; + int32_t ofs = (int32_t)sizeof(GCstr); + Reg r; + if (irref_isk(ref)) { + IRRef tmp = refk; refk = ref; ref = tmp; + } else if (!irref_isk(refk)) { + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + IRIns *irr = IR(ir->op2); + if (ra_hasreg(irr->r)) { + ra_noweak(as, irr->r); + right = irr->r; + } else if (mayfuse(as, irr->op2) && + irr->o == IR_ADD && irref_isk(irr->op2) && + checki16(ofs + IR(irr->op2)->i)) { + ofs += IR(irr->op2)->i; + right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); + } else { + right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs); + emit_dst(as, MIPSI_ADDU, dest, left, right); + return; + } + r = ra_alloc1(as, ref, RSET_GPR); + ofs += IR(refk)->i; + if (checki16(ofs)) + emit_tsi(as, MIPSI_ADDIU, dest, r, ofs); + else + emit_dst(as, MIPSI_ADDU, dest, r, + ra_allock(as, ofs, rset_exclude(RSET_GPR, r))); +} + +/* -- Loads and stores ---------------------------------------------------- */ + +static MIPSIns asm_fxloadins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: return MIPSI_LB; + case IRT_U8: return MIPSI_LBU; + case IRT_I16: return MIPSI_LH; + case IRT_U16: return MIPSI_LHU; + case IRT_NUM: return MIPSI_LDC1; + case IRT_FLOAT: return MIPSI_LWC1; + default: return MIPSI_LW; + } +} + +static MIPSIns asm_fxstoreins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: case IRT_U8: return MIPSI_SB; + case IRT_I16: case IRT_U16: return MIPSI_SH; + case IRT_NUM: return MIPSI_SDC1; + case IRT_FLOAT: return MIPSI_SWC1; + default: return MIPSI_SW; + } +} + +static void asm_fload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); + MIPSIns mi = asm_fxloadins(ir); + int32_t ofs; + if (ir->op2 == IRFL_TAB_ARRAY) { + ofs = asm_fuseabase(as, ir->op1); + if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ + emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs); + return; + } + } + ofs = field_ofs[ir->op2]; + lua_assert(!irt_isfp(ir->t)); + emit_tsi(as, mi, dest, idx, ofs); +} + +static void asm_fstore(ASMState *as, IRIns *ir) +{ + if (ir->r != RID_SINK) { + Reg src = ra_alloc1z(as, ir->op2, RSET_GPR); + IRIns *irf = IR(ir->op1); + Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); + int32_t ofs = field_ofs[irf->op2]; + MIPSIns mi = asm_fxstoreins(ir); + lua_assert(!irt_isfp(ir->t)); + emit_tsi(as, mi, src, idx, ofs); + } +} + +static void asm_xload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? 
RSET_FPR : RSET_GPR); + lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); + asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); +} + +static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) +{ + if (ir->r != RID_SINK) { + Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); + asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, + rset_exclude(RSET_GPR, src), ofs); + } +} + +#define asm_xstore(as, ir) asm_xstore_(as, ir, 0) + +static void asm_ahuvload(ASMState *as, IRIns *ir) +{ + IRType1 t = ir->t; + Reg dest = RID_NONE, type = RID_TMP, idx; + RegSet allow = RSET_GPR; + int32_t ofs = 0; + if (ra_used(ir)) { + lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); + dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); + rset_clear(allow, dest); + } + idx = asm_fuseahuref(as, ir->op1, &ofs, allow); + rset_clear(allow, idx); + if (irt_isnum(t)) { + asm_guard(as, MIPSI_BEQ, type, RID_ZERO); + emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM); + if (ra_hasreg(dest)) + emit_hsi(as, MIPSI_LDC1, dest, idx, ofs); + } else { + asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow)); + if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0)); + } + emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4)); +} + +static void asm_ahustore(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_GPR; + Reg idx, src = RID_NONE, type = RID_NONE; + int32_t ofs = 0; + if (ir->r == RID_SINK) + return; + if (irt_isnum(ir->t)) { + src = ra_alloc1(as, ir->op2, RSET_FPR); + } else { + if (!irt_ispri(ir->t)) { + src = ra_alloc1(as, ir->op2, allow); + rset_clear(allow, src); + } + type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); + rset_clear(allow, type); + } + idx = asm_fuseahuref(as, ir->op1, &ofs, allow); + if (irt_isnum(ir->t)) { + emit_hsi(as, MIPSI_SDC1, src, idx, ofs); + } else { + if (ra_hasreg(src)) + emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0)); + emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4)); + } +} + +static void asm_sload(ASMState *as, IRIns *ir) +{ + int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); + IRType1 t = ir->t; + Reg dest = RID_NONE, type = RID_NONE, base; + RegSet allow = RSET_GPR; + lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ + lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); + lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); + if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { + dest = ra_scratch(as, RSET_FPR); + asm_tointg(as, ir, dest); + t.irt = IRT_NUM; /* Continue with a regular number type check. */ + } else if (ra_used(ir)) { + lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); + dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); + rset_clear(allow, dest); + base = ra_alloc1(as, REF_BASE, allow); + rset_clear(allow, base); + if ((ir->op2 & IRSLOAD_CONVERT)) { + if (irt_isint(t)) { + Reg tmp = ra_scratch(as, RSET_FPR); + emit_tg(as, MIPSI_MFC1, dest, tmp); + emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp); + dest = tmp; + t.irt = IRT_NUM; /* Check for original type. */ + } else { + Reg tmp = ra_scratch(as, RSET_GPR); + emit_fg(as, MIPSI_CVT_D_W, dest, dest); + emit_tg(as, MIPSI_MTC1, tmp, dest); + dest = tmp; + t.irt = IRT_INT; /* Check for original type. 
*/ + } + } + goto dotypecheck; + } + base = ra_alloc1(as, REF_BASE, allow); + rset_clear(allow, base); +dotypecheck: + if (irt_isnum(t)) { + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); + emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM); + type = RID_TMP; + } + if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs); + } else { + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + Reg ktype = ra_allock(as, irt_toitype(t), allow); + asm_guard(as, MIPSI_BNE, RID_TMP, ktype); + type = RID_TMP; + } + if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0)); + } + if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4)); +} + +/* -- Allocations --------------------------------------------------------- */ + +#if LJ_HASFFI +static void asm_cnew(ASMState *as, IRIns *ir) +{ + CTState *cts = ctype_ctsG(J2G(as->J)); + CTypeID id = (CTypeID)IR(ir->op1)->i; + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; + IRRef args[4]; + RegSet drop = RSET_SCRATCH; + lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); + + as->gcsteps++; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + if (ra_used(ir)) + ra_destreg(as, ir, RID_RET); /* GCcdata * */ + + /* Initialize immutable cdata object. */ + if (ir->o == IR_CNEWI) { + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); + int32_t ofs = sizeof(GCcdata); + lua_assert(sz == 4 || sz == 8); + if (sz == 8) { + ofs += 4; + lua_assert((ir+1)->o == IR_HIOP); + if (LJ_LE) ir++; + } + for (;;) { + Reg r = ra_alloc1z(as, ir->op2, allow); + emit_tsi(as, MIPSI_SW, r, RID_RET, ofs); + rset_clear(allow, r); + if (ofs == sizeof(GCcdata)) break; + ofs -= 4; if (LJ_BE) ir++; else ir--; + } + } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ + ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* CTypeID id */ + args[2] = ir->op2; /* CTSize sz */ + args[3] = ASMREF_TMP1; /* CTSize align */ + asm_gencall(as, ci, args); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); + return; + } + + /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ + emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); + emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); + emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA); + emit_ti(as, MIPSI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */ + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* MSize size */ + asm_gencall(as, ci, args); + ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), + ra_releasetmp(as, ASMREF_TMP1)); +} +#else +#define asm_cnew(as, ir) ((void)0) +#endif + +/* -- Write barriers ------------------------------------------------------ */ + +static void asm_tbar(ASMState *as, IRIns *ir) +{ + Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); + Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab)); + Reg link = RID_TMP; + MCLabel l_end = emit_label(as); + emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist)); + emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked)); + emit_setgl(as, tab, gc.grayagain); + emit_getgl(as, link, gc.grayagain); + emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. 
*/ + emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); + emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK); + emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked)); +} + +static void asm_obar(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; + IRRef args[2]; + MCLabel l_end; + Reg obj, val, tmp; + /* No need for other object barriers (yet). */ + lua_assert(IR(ir->op1)->o == IR_UREFC); + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ir->op1; /* TValue *tv */ + asm_gencall(as, ci, args); + emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); + obj = IR(ir->op1)->r; + tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); + emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); + emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK); + emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); + emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES); + val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); + emit_tsi(as, MIPSI_LBU, tmp, obj, + (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); + emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked)); +} + +/* -- Arithmetic and logic operations ------------------------------------- */ + +static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi) +{ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + emit_fgh(as, mi, dest, left, right); +} + +static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi) +{ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); + emit_fg(as, mi, dest, left); +} + +static void asm_fpmath(ASMState *as, IRIns *ir) +{ + if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) + return; + if (ir->op2 <= IRFPM_TRUNC) + asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2); + else if (ir->op2 == IRFPM_SQRT) + asm_fpunary(as, ir, MIPSI_SQRT_D); + else + asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); +} + +static void asm_add(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + asm_fparith(as, ir, MIPSI_ADD_D); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (checki16(k)) { + emit_tsi(as, MIPSI_ADDIU, dest, left, k); + return; + } + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_dst(as, MIPSI_ADDU, dest, left, right); + } +} + +static void asm_sub(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + asm_fparith(as, ir, MIPSI_SUB_D); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + emit_dst(as, MIPSI_SUBU, dest, left, right); + } +} + +static void asm_mul(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + asm_fparith(as, ir, MIPSI_MUL_D); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + emit_dst(as, MIPSI_MUL, dest, left, right); + } +} + +#define asm_div(as, ir) asm_fparith(as, ir, MIPSI_DIV_D) +#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi) +#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi) + +static void asm_neg(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + asm_fpunary(as, ir, MIPSI_NEG_D); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = 
ra_hintalloc(as, ir->op1, dest, RSET_GPR); + emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); + } +} + +#define asm_abs(as, ir) asm_fpunary(as, ir, MIPSI_ABS_D) +#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) +#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) + +static void asm_arithov(ASMState *as, IRIns *ir) +{ + Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op2)) { + int k = IR(ir->op2)->i; + if (ir->o == IR_SUBOV) k = -k; + if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */ + left = ra_alloc1(as, ir->op1, RSET_GPR); + asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); + emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left); + emit_tsi(as, MIPSI_ADDIU, dest, left, k); + if (dest == left) emit_move(as, RID_TMP, left); + return; + } + } + left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left), + right), dest)); + asm_guard(as, MIPSI_BLTZ, RID_TMP, 0); + emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp); + if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */ + emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right); + } else { /* ((dest^left) & (dest^~right)) < 0 */ + emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest); + emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO); + } + emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left); + emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right); + if (dest == left || dest == right) + emit_move(as, RID_TMP, dest == left ? left : right); +} + +#define asm_addov(as, ir) asm_arithov(as, ir) +#define asm_subov(as, ir) asm_arithov(as, ir) + +static void asm_mulov(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left), + right), dest)); + asm_guard(as, MIPSI_BNE, RID_TMP, tmp); + emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31); + emit_dst(as, MIPSI_MFHI, tmp, 0, 0); + emit_dst(as, MIPSI_MFLO, dest, 0, 0); + emit_dst(as, MIPSI_MULT, 0, left, right); +} + +#if LJ_HASFFI +static void asm_add64(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (k == 0) { + emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP); + goto loarith; + } else if (checki16(k)) { + emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP); + emit_tsi(as, MIPSI_ADDIU, dest, left, k); + goto loarith; + } + } + emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP); + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_dst(as, MIPSI_ADDU, dest, left, right); +loarith: + ir--; + dest = ra_dest(as, ir, RSET_GPR); + left = ra_alloc1(as, ir->op1, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (k == 0) { + if (dest != left) + emit_move(as, dest, left); + return; + } else if (checki16(k)) { + if (dest == left) { + Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left)); + emit_move(as, dest, tmp); + dest = tmp; + } + emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left); + emit_tsi(as, MIPSI_ADDIU, dest, left, k); + return; + } + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + if (dest == left && dest == right) { + Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); + 
emit_move(as, dest, tmp); + dest = tmp; + } + emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left); + emit_dst(as, MIPSI_ADDU, dest, left, right); +} + +static void asm_sub64(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP); + emit_dst(as, MIPSI_SUBU, dest, left, right); + ir--; + dest = ra_dest(as, ir, RSET_GPR); + left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + if (dest == left) { + Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); + emit_move(as, dest, tmp); + dest = tmp; + } + emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest); + emit_dst(as, MIPSI_SUBU, dest, left, right); +} + +static void asm_neg64(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP); + emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); + ir--; + dest = ra_dest(as, ir, RSET_GPR); + left = ra_alloc1(as, ir->op1, RSET_GPR); + emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest); + emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); +} +#endif + +static void asm_bnot(ASMState *as, IRIns *ir) +{ + Reg left, right, dest = ra_dest(as, ir, RSET_GPR); + IRIns *irl = IR(ir->op1); + if (mayfuse(as, ir->op1) && irl->o == IR_BOR) { + left = ra_alloc2(as, irl, RSET_GPR); + right = (left >> 8); left &= 255; + } else { + left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + right = RID_ZERO; + } + emit_dst(as, MIPSI_NOR, dest, left, right); +} + +static void asm_bswap(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + if ((as->flags & JIT_F_MIPS32R2)) { + emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16); + emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left); + } else { + Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest)); + emit_dst(as, MIPSI_OR, dest, dest, tmp); + emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); + emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00); + emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8); + emit_dta(as, MIPSI_SRL, dest, left, 8); + emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00); + emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP); + emit_dta(as, MIPSI_SRL, tmp, left, 24); + emit_dta(as, MIPSI_SLL, RID_TMP, left, 24); + } +} + +static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (checku16(k)) { + emit_tsi(as, mik, dest, left, k); + return; + } + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_dst(as, mi, dest, left, right); +} + +#define asm_band(as, ir) asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI) +#define asm_bor(as, ir) asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI) +#define asm_bxor(as, ir) asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI) + +static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op2)) { /* Constant shifts. */ + uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); + emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift); + } else { + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. 
*/ + } +} + +#define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL) +#define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL) +#define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA) +#define asm_brol(as, ir) lua_assert(0) + +static void asm_bror(ASMState *as, IRIns *ir) +{ + if ((as->flags & JIT_F_MIPS32R2)) { + asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op2)) { /* Constant shifts. */ + uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + emit_rotr(as, dest, left, RID_TMP, shift); + } else { + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); + emit_dst(as, MIPSI_SRLV, dest, right, left); + emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left); + emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right); + } + } +} + +static void asm_min_max(ASMState *as, IRIns *ir, int ismax) +{ + if (irt_isnum(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + if (dest == left) { + emit_fg(as, MIPSI_MOVT_D, dest, right); + } else { + emit_fg(as, MIPSI_MOVF_D, dest, left); + if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right); + } + emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + if (dest == left) { + emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP); + } else { + emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP); + if (dest != right) emit_move(as, dest, right); + } + emit_dst(as, MIPSI_SLT, RID_TMP, + ismax ? left : right, ismax ? right : left); + } +} + +#define asm_min(as, ir) asm_min_max(as, ir, 0) +#define asm_max(as, ir) asm_min_max(as, ir, 1) + +/* -- Comparisons --------------------------------------------------------- */ + +static void asm_comp(ASMState *as, IRIns *ir) +{ + /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */ + IROp op = ir->o; + if (irt_isnum(ir->t)) { + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0); + emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right); + } else { + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + if (op == IR_ABC) op = IR_UGT; + if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) { + MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) : + ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ); + asm_guard(as, mi, left, 0); + } else { + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if ((op&2)) k++; + if (checki16(k)) { + asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); + emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI, + RID_TMP, left, k); + return; + } + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); + emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, + RID_TMP, (op&2) ? right : left, (op&2) ? left : right); + } + } +} + +static void asm_equal(ASMState *as, IRIns *ir) +{ + Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR); + right = (left >> 8); left &= 255; + if (irt_isnum(ir->t)) { + asm_guard(as, (ir->o & 1) ? 
MIPSI_BC1T : MIPSI_BC1F, 0, 0); + emit_fgh(as, MIPSI_C_EQ_D, 0, left, right); + } else { + asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right); + } +} + +#if LJ_HASFFI +/* 64 bit integer comparisons. */ +static void asm_comp64(ASMState *as, IRIns *ir) +{ + /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */ + IROp op = (ir-1)->o; + MCLabel l_end; + Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR); + righthi = (lefthi >> 8); lefthi &= 255; + leftlo = ra_alloc2(as, ir-1, + rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi)); + rightlo = (leftlo >> 8); leftlo &= 255; + asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); + l_end = emit_label(as); + if (lefthi != righthi) + emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP, + (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi); + emit_dst(as, MIPSI_SLTU, RID_TMP, + (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo); + if (lefthi != righthi) + emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end); +} + +static void asm_comp64eq(ASMState *as, IRIns *ir) +{ + Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO); + tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); + emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp); + emit_dst(as, MIPSI_XOR, tmp, left, right); + left = ra_alloc2(as, ir-1, RSET_GPR); + right = (left >> 8); left &= 255; + emit_dst(as, MIPSI_XOR, RID_TMP, left, right); +} +#endif + +/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ + +/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ +static void asm_hiop(ASMState *as, IRIns *ir) +{ +#if LJ_HASFFI + /* HIOP is marked as a store because it needs its own DCE logic. */ + int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ + if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; + if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ + as->curins--; /* Always skip the CONV. */ + if (usehi || uselo) + asm_conv64(as, ir); + return; + } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */ + as->curins--; /* Always skip the loword comparison. */ + asm_comp64(as, ir); + return; + } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ + as->curins--; /* Always skip the loword comparison. */ + asm_comp64eq(as, ir); + return; + } else if ((ir-1)->o == IR_XSTORE) { + as->curins--; /* Handle both stores here. */ + if ((ir-1)->r != RID_SINK) { + asm_xstore_(as, ir, LJ_LE ? 4 : 0); + asm_xstore_(as, ir-1, LJ_LE ? 0 : 4); + } + return; + } + if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ + switch ((ir-1)->o) { + case IR_ADD: as->curins--; asm_add64(as, ir); break; + case IR_SUB: as->curins--; asm_sub64(as, ir); break; + case IR_NEG: as->curins--; asm_neg64(as, ir); break; + case IR_CALLN: + case IR_CALLXS: + if (!uselo) + ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ + break; + case IR_CNEWI: + /* Nothing to do here. Handled by lo op itself. */ + break; + default: lua_assert(0); break; + } +#else + UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. 
*/ +#endif +} + +/* -- Profiling ----------------------------------------------------------- */ + +static void asm_prof(ASMState *as, IRIns *ir) +{ + UNUSED(ir); + asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); + emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE); + emit_lsglptr(as, MIPSI_LBU, RID_TMP, + (int32_t)offsetof(global_State, hookmask)); +} + +/* -- Stack handling ------------------------------------------------------ */ + +/* Check Lua stack size for overflow. Use exit handler as fallback. */ +static void asm_stack_check(ASMState *as, BCReg topslot, + IRIns *irp, RegSet allow, ExitNo exitno) +{ + /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */ + Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE; + ExitNo oldsnap = as->snapno; + rset_clear(allow, pbase); + tmp = allow ? rset_pickbot(allow) : + (pbase == RID_RETHI ? RID_RETLO : RID_RETHI); + as->snapno = exitno; + asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); + as->snapno = oldsnap; + if (allow == RSET_EMPTY) /* Restore temp. register. */ + emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0); + else + ra_modified(as, tmp); + emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot)); + emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase); + emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack)); + if (pbase == RID_TMP) + emit_getgl(as, RID_TMP, jit_base); + emit_getgl(as, tmp, cur_L); + if (allow == RSET_EMPTY) /* Spill temp. register. */ + emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0); +} + +/* Restore Lua stack from on-trace state. */ +static void asm_stack_restore(ASMState *as, SnapShot *snap) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; + MSize n, nent = snap->nent; + /* Store the value of all modified slots to the Lua stack. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + int32_t ofs = 8*((int32_t)s-1); + IRRef ref = snap_ref(sn); + IRIns *ir = IR(ref); + if ((sn & SNAP_NORESTORE)) + continue; + if (irt_isnum(ir->t)) { + Reg src = ra_alloc1(as, ref, RSET_FPR); + emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs); + } else { + Reg type; + RegSet allow = rset_exclude(RSET_GPR, RID_BASE); + lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, allow); + rset_clear(allow, src); + emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0)); + } + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + if (s == 0) continue; /* Do not overwrite link to previous frame. */ + type = ra_allock(as, (int32_t)(*flinks--), allow); + } else { + type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); + } + emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4)); + } + checkmclim(as); + } + lua_assert(map + nent == flinks); +} + +/* -- GC handling --------------------------------------------------------- */ + +/* Check GC threshold and do one or more GC steps. */ +static void asm_gc_check(ASMState *as) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; + IRRef args[2]; + MCLabel l_end; + Reg tmp; + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ + /* Assumes asm_snap_prep() already done. 
*/ + asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ASMREF_TMP2; /* MSize steps */ + asm_gencall(as, ci, args); + emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); + tmp = ra_releasetmp(as, ASMREF_TMP2); + emit_loadi(as, tmp, as->gcsteps); + /* Jump around GC step if GC total < GC threshold. */ + emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end); + emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp); + emit_getgl(as, tmp, gc.threshold); + emit_getgl(as, RID_TMP, gc.total); + as->gcsteps = 0; + checkmclim(as); +} + +/* -- Loop handling ------------------------------------------------------- */ + +/* Fixup the loop branch. */ +static void asm_loop_fixup(ASMState *as) +{ + MCode *p = as->mctop; + MCode *target = as->mcp; + p[-1] = MIPSI_NOP; + if (as->loopinv) { /* Inverted loop branch? */ + /* asm_guard already inverted the cond branch. Only patch the target. */ + p[-3] |= ((target-p+2) & 0x0000ffffu); + } else { + p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Coalesce BASE register for a root trace. */ +static void asm_head_root_base(ASMState *as) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (as->loopinv) as->mctop--; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r) || irt_ismarked(ir->t)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (r != RID_BASE) + emit_move(as, r, RID_BASE); + } +} + +/* Coalesce BASE register for a side trace. */ +static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (as->loopinv) as->mctop--; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r) || irt_ismarked(ir->t)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (irp->r == r) { + rset_clear(allow, r); /* Mark same BASE register as coalesced. */ + } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { + rset_clear(allow, irp->r); + emit_move(as, r, irp->r); /* Move from coalesced parent reg. */ + } else { + emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ + } + } + return allow; +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Fixup the tail code. */ +static void asm_tail_fixup(ASMState *as, TraceNo lnk) +{ + MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp; + int32_t spadj = as->T->spadjust; + MCode *p = as->mctop-1; + *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP; + p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); +} + +/* Prepare tail of code. */ +static void asm_tail_prep(ASMState *as) +{ + as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */ + as->invmcp = as->loopref ? as->mcp : NULL; +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Ensure there are enough stack slots for call arguments. */ +static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + IRRef args[CCI_NARGS_MAX*2]; + uint32_t i, nargs = CCI_XNARGS(ci); + int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; + asm_collectargs(as, ir, ci, args); + for (i = 0; i < nargs; i++) { + if (args[i] && irt_isfp(IR(args[i])->t) && + nfpr > 0 && !(ci->flags & CCI_VARARG)) { + nfpr--; + ngpr -= irt_isnum(IR(args[i])->t) ? 
2 : 1; + } else if (args[i] && irt_isnum(IR(args[i])->t)) { + nfpr = 0; + ngpr = ngpr & ~1; + if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1; + } else { + nfpr = 0; + if (ngpr > 0) ngpr--; else nslots++; + } + } + if (nslots > as->evenspill) /* Leave room for args in stack slots. */ + as->evenspill = nslots; + return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); +} + +static void asm_setup_target(ASMState *as) +{ + asm_sparejump_setup(as); + asm_exitstub_setup(as); +} + +/* -- Trace patching ------------------------------------------------------ */ + +/* Patch exit jumps of existing machine code to a new target. */ +void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) +{ + MCode *p = T->mcode; + MCode *pe = (MCode *)((char *)p + T->szmcode); + MCode *px = exitstub_trace_addr(T, exitno); + MCode *cstart = NULL, *cstop = NULL; + MCode *mcarea = lj_mcode_patch(J, p, 0); + MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno; + MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); + for (p++; p < pe; p++) { + if (*p == exitload) { /* Look for load of exit number. */ + if (((p[-1] ^ (px-p)) & 0xffffu) == 0) { /* Look for exitstub branch. */ + ptrdiff_t delta = target - p; + if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */ + patchbranch: + p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu); + *p = MIPSI_NOP; /* Replace the load of the exit number. */ + cstop = p; + if (!cstart) cstart = p-1; + } else { /* Branch out of range. Use spare jump slot in mcarea. */ + int i; + for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) { + if (mcarea[i] == tjump) { + delta = mcarea+i - p; + goto patchbranch; + } else if (mcarea[i] == MIPSI_NOP) { + mcarea[i] = tjump; + cstart = mcarea+i; + delta = mcarea+i - p; + goto patchbranch; + } + } + /* Ignore jump slot overflow. Child trace is simply not attached. */ + } + } else if (p+1 == pe) { + /* Patch NOP after code for inverted loop branch. Use of J is ok. */ + lua_assert(p[1] == MIPSI_NOP); + p[1] = tjump; + *p = MIPSI_NOP; /* Replace the load of the exit number. */ + cstop = p+2; + if (!cstart) cstart = p+1; + } + } + } + if (cstart) lj_mcode_sync(cstart, cstop); + lj_mcode_patch(J, mcarea, 1); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_ppc.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_ppc.h new file mode 100644 index 00000000000..7deeb66e51f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_ppc.h @@ -0,0 +1,2015 @@ +/* +** PPC IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Register allocator extensions --------------------------------------- */ + +/* Allocate a register with a hint. */ +static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) +{ + Reg r = IR(ref)->r; + if (ra_noreg(r)) { + if (!ra_hashint(r) && !iscrossref(as, ref)) + ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ + r = ra_allocref(as, ref, allow); + } + ra_noweak(as, r); + return r; +} + +/* Allocate two source registers for three-operand instructions. 
*/ +static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); + Reg left = irl->r, right = irr->r; + if (ra_hasreg(left)) { + ra_noweak(as, left); + if (ra_noreg(right)) + right = ra_allocref(as, ir->op2, rset_exclude(allow, left)); + else + ra_noweak(as, right); + } else if (ra_hasreg(right)) { + ra_noweak(as, right); + left = ra_allocref(as, ir->op1, rset_exclude(allow, right)); + } else if (ra_hashint(right)) { + right = ra_allocref(as, ir->op2, allow); + left = ra_alloc1(as, ir->op1, rset_exclude(allow, right)); + } else { + left = ra_allocref(as, ir->op1, allow); + right = ra_alloc1(as, ir->op2, rset_exclude(allow, left)); + } + return left | (right << 8); +} + +/* -- Guard handling ------------------------------------------------------ */ + +/* Setup exit stubs after the end of each trace. */ +static void asm_exitstub_setup(ASMState *as, ExitNo nexits) +{ + ExitNo i; + MCode *mxp = as->mctop; + if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim) + asm_mclimit(as); + /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */ + for (i = nexits-1; (int32_t)i >= 0; i--) + *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2); + *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */ + mxp--; + *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2); + *--mxp = PPCI_MFLR|PPCF_T(RID_TMP); + as->mctop = mxp; +} + +static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno) +{ + /* Keep this in-sync with exitstub_trace_addr(). */ + return as->mctop + exitno + 3; +} + +/* Emit conditional branch to exit for guard. */ +static void asm_guardcc(ASMState *as, PPCCC cc) +{ + MCode *target = asm_exitstub_addr(as, as->snapno); + MCode *p = as->mcp; + if (LJ_UNLIKELY(p == as->invmcp)) { + as->loopinv = 1; + *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2); + emit_condbranch(as, PPCI_BC, cc^4, p); + return; + } + emit_condbranch(as, PPCI_BC, cc, target); +} + +/* -- Operand fusion ------------------------------------------------------ */ + +/* Limit linear search to this distance. Avoids O(n^2) behavior. */ +#define CONFLICT_SEARCH_LIM 31 + +/* Check if there's no conflicting instruction between curins and ref. */ +static int noconflict(ASMState *as, IRRef ref, IROp conflict) +{ + IRIns *ir = as->ir; + IRRef i = as->curins; + if (i > ref + CONFLICT_SEARCH_LIM) + return 0; /* Give up, ref is too far away. */ + while (--i > ref) + if (ir[i].o == conflict) + return 0; /* Conflict found. */ + return 1; /* Ok, no conflict. */ +} + +/* Fuse the array base of colocated arrays. */ +static int32_t asm_fuseabase(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && + !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) + return (int32_t)sizeof(GCtab); + return 0; +} + +/* Indicates load/store indexed is ok. */ +#define AHUREF_LSX ((int32_t)0x80000000) + +/* Fuse array/hash/upvalue reference into register+offset operand. */ +static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r)) { + if (ir->o == IR_AREF) { + if (mayfuse(as, ref)) { + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? 
tab : ir->op1; + ofs += 8*IR(ir->op2)->i; + if (checki16(ofs)) { + *ofsp = ofs; + return ra_alloc1(as, refa, allow); + } + } + if (*ofsp == AHUREF_LSX) { + Reg base = ra_alloc1(as, ir->op1, allow); + Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); + return base | (idx << 8); + } + } + } else if (ir->o == IR_HREFK) { + if (mayfuse(as, ref)) { + int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); + if (checki16(ofs)) { + *ofsp = ofs; + return ra_alloc1(as, ir->op1, allow); + } + } + } else if (ir->o == IR_UREFC) { + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); + int32_t jgl = (intptr_t)J2G(as->J); + if ((uint32_t)(ofs-jgl) < 65536) { + *ofsp = ofs-jgl-32768; + return RID_JGL; + } else { + *ofsp = (int16_t)ofs; + return ra_allock(as, ofs-(int16_t)ofs, allow); + } + } + } + } + *ofsp = 0; + return ra_alloc1(as, ref, allow); +} + +/* Fuse XLOAD/XSTORE reference into load/store operand. */ +static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref, + RegSet allow, int32_t ofs) +{ + IRIns *ir = IR(ref); + Reg base; + if (ra_noreg(ir->r) && canfuse(as, ir)) { + if (ir->o == IR_ADD) { + int32_t ofs2; + if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) { + ofs = ofs2; + ref = ir->op1; + } else if (ofs == 0) { + Reg right, left = ra_alloc2(as, ir, allow); + right = (left >> 8); left &= 255; + emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right); + return; + } + } else if (ir->o == IR_STRREF) { + lua_assert(ofs == 0); + ofs = (int32_t)sizeof(GCstr); + if (irref_isk(ir->op2)) { + ofs += IR(ir->op2)->i; + ref = ir->op1; + } else if (irref_isk(ir->op1)) { + ofs += IR(ir->op1)->i; + ref = ir->op2; + } else { + /* NYI: Fuse ADD with constant. */ + Reg tmp, right, left = ra_alloc2(as, ir, allow); + right = (left >> 8); left &= 255; + tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right)); + emit_fai(as, pi, rt, tmp, ofs); + emit_tab(as, PPCI_ADD, tmp, left, right); + return; + } + if (!checki16(ofs)) { + Reg left = ra_alloc1(as, ref, allow); + Reg right = ra_allock(as, ofs, rset_exclude(allow, left)); + emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right); + return; + } + } + } + base = ra_alloc1(as, ref, allow); + emit_fai(as, pi, rt, base, ofs); +} + +/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */ +static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref, + RegSet allow) +{ + IRIns *ira = IR(ref); + Reg right, left; + if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) { + left = ra_alloc2(as, ira, allow); + right = (left >> 8); left &= 255; + } else { + right = ra_alloc1(as, ref, allow); + left = RID_R0; + } + emit_tab(as, pi, rt, left, right); +} + +/* Fuse to multiply-add/sub instruction. 
*/ +static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir) +{ + IRRef lref = ir->op1, rref = ir->op2; + IRIns *irm; + if (lref != rref && + ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) && + ra_noreg(irm->r)) || + (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) && + (rref = lref, pi = pir, ra_noreg(irm->r))))) { + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg add = ra_alloc1(as, rref, RSET_FPR); + Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add)); + right = (left >> 8); left &= 255; + emit_facb(as, pi, dest, left, right, add); + return 1; + } + return 0; +} + +/* -- Calls --------------------------------------------------------------- */ + +/* Generate a call to a C function. */ +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t n, nargs = CCI_XNARGS(ci); + int32_t ofs = 8; + Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR; + if ((void *)ci->func) + emit_call(as, (void *)ci->func); + for (n = 0; n < nargs; n++) { /* Setup args. */ + IRRef ref = args[n]; + if (ref) { + IRIns *ir = IR(ref); + if (irt_isfp(ir->t)) { + if (fpr <= REGARG_LASTFPR) { + lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ + ra_leftov(as, fpr, ref); + fpr++; + } else { + Reg r = ra_alloc1(as, ref, RSET_FPR); + if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; + emit_spstore(as, ir, r, ofs); + ofs += irt_isnum(ir->t) ? 8 : 4; + } + } else { + if (gpr <= REGARG_LASTGPR) { + lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ + ra_leftov(as, gpr, ref); + gpr++; + } else { + Reg r = ra_alloc1(as, ref, RSET_GPR); + emit_spstore(as, ir, r, ofs); + ofs += 4; + } + } + } else { + if (gpr <= REGARG_LASTGPR) + gpr++; + else + ofs += 4; + } + checkmclim(as); + } + if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */ + emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6); +} + +/* Setup result reg/sp for call. Evict scratch regs. */ +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + RegSet drop = RSET_SCRATCH; + int hiop = ((ir+1)->o == IR_HIOP); + if ((ci->flags & CCI_NOFPRCLOBBER)) + drop &= ~RSET_FPR; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + if (hiop && ra_hasreg((ir+1)->r)) + rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ + ra_evictset(as, drop); /* Evictions must be performed first. */ + if (ra_used(ir)) { + lua_assert(!irt_ispri(ir->t)); + if (irt_isfp(ir->t)) { + if ((ci->flags & CCI_CASTU64)) { + /* Use spill slot or temp slots. */ + int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP; + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_fai(as, PPCI_LFD, dest, RID_SP, ofs); + } + emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs); + emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4); + } else { + ra_destreg(as, ir, RID_FPRET); + } +#if LJ_32 + } else if (hiop) { + ra_destpair(as, ir); +#endif + } else { + ra_destreg(as, ir, RID_RET); + } + } +} + +static void asm_callx(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX*2]; + CCallInfo ci; + IRRef func; + IRIns *irf; + ci.flags = asm_callx_flags(as, ir); + asm_collectargs(as, ir, &ci, args); + asm_setupresult(as, ir, &ci); + func = ir->op2; irf = IR(func); + if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } + if (irref_isk(func)) { /* Call to constant address. */ + ci.func = (ASMFunction)(void *)(intptr_t)(irf->i); + } else { /* Need a non-argument register for indirect calls. 
*/ + RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1); + Reg freg = ra_alloc1(as, func, allow); + *--as->mcp = PPCI_BCTRL; + *--as->mcp = PPCI_MTCTR | PPCF_T(freg); + ci.func = (ASMFunction)(void *)0; + } + asm_gencall(as, &ci, args); +} + +/* -- Returns ------------------------------------------------------------- */ + +/* Return to lower frame. Guard that it goes to the right spot. */ +static void asm_retf(ASMState *as, IRIns *ir) +{ + Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); + void *pc = ir_kptr(IR(ir->op2)); + int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); + as->topslot -= (BCReg)delta; + if ((int32_t)as->topslot < 0) as->topslot = 0; + irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ + emit_setgl(as, base, jit_base); + emit_addptr(as, base, -8*delta); + asm_guardcc(as, CC_NE); + emit_ab(as, PPCI_CMPW, RID_TMP, + ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); + emit_tai(as, PPCI_LWZ, RID_TMP, base, -8); +} + +/* -- Type conversions ---------------------------------------------------- */ + +static void asm_tointg(ASMState *as, IRIns *ir, Reg left) +{ + RegSet allow = RSET_FPR; + Reg tmp = ra_scratch(as, rset_clear(allow, left)); + Reg fbias = ra_scratch(as, rset_clear(allow, tmp)); + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest)); + asm_guardcc(as, CC_NE); + emit_fab(as, PPCI_FCMPU, 0, tmp, left); + emit_fab(as, PPCI_FSUB, tmp, tmp, fbias); + emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP); + emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO); + emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); + emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000); + emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); + emit_lsptr(as, PPCI_LFS, (fbias & 31), + (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)), + RSET_GPR); + emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); + emit_fb(as, PPCI_FCTIWZ, tmp, left); +} + +static void asm_tobit(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_FPR; + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, allow); + Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); + Reg tmp = ra_scratch(as, rset_clear(allow, right)); + emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); + emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); + emit_fab(as, PPCI_FADD, tmp, left, right); +} + +static void asm_conv(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); + int stfp = (st == IRT_NUM || st == IRT_FLOAT); + IRRef lref = ir->op1; + lua_assert(irt_type(ir->t) != st); + lua_assert(!(irt_isint64(ir->t) || + (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ + if (irt_isfp(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + if (stfp) { /* FP to FP conversion. */ + if (st == IRT_NUM) /* double -> float conversion. */ + emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR)); + else /* float -> double conversion is a no-op on PPC. */ + ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ + } else { /* Integer to FP conversion. */ + /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */ + /* IRT_U32: Bias with 2^52, subtract 2^52. 
*/ + RegSet allow = RSET_GPR; + Reg left = ra_alloc1(as, lref, allow); + Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left)); + Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); + const float *kbias; + if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest); + emit_fab(as, PPCI_FSUB, dest, dest, fbias); + emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP); + kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000)); + if (st == IRT_U32) kbias++; + emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias, + rset_clear(allow, hibias)); + emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP, + RID_SP, SPOFS_TMPLO); + emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); + if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000); + } + } else if (stfp) { /* FP to integer conversion. */ + if (irt_isguard(ir->t)) { + /* Checked conversions are only supported from number to int. */ + lua_assert(irt_isint(ir->t) && st == IRT_NUM); + asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, lref, RSET_FPR); + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + if (irt_isu32(ir->t)) { + /* Convert both x and x-2^31 to int and merge results. */ + Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest)); + emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */ + emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP); + emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP); + emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */ + emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */ + emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */ + emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); + emit_tai(as, PPCI_LWZ, dest, + RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */ + emit_fb(as, PPCI_FCTIWZ, tmp, left); + emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); + emit_fb(as, PPCI_FCTIWZ, tmp, tmp); + emit_fab(as, PPCI_FSUB, tmp, left, tmp); + emit_lsptr(as, PPCI_LFS, (tmp & 31), + (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)), + RSET_GPR); + } else { + emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); + emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); + emit_fb(as, PPCI_FCTIWZ, tmp, left); + } + } + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); + if ((ir->op2 & IRCONV_SEXT)) + emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left); + else + emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31); + } else { /* 32/64 bit integer conversions. */ + /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */ + ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ + } + } +} + +static void asm_strto(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; + IRRef args[2]; + int32_t ofs; + RegSet drop = RSET_SCRATCH; + if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ + ra_evictset(as, drop); + asm_guardcc(as, CC_EQ); + emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */ + args[0] = ir->op1; /* GCstr *str */ + args[1] = ASMREF_TMP1; /* TValue *n */ + asm_gencall(as, ci, args); + /* Store the result to the spill slot or temp slots. */ + ofs = ir->s ? 
sps_scale(ir->s) : SPOFS_TMP; + emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs); +} + +/* -- Memory references --------------------------------------------------- */ + +/* Get pointer to TValue. */ +static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) +{ + IRIns *ir = IR(ref); + if (irt_isnum(ir->t)) { + if (irref_isk(ref)) /* Use the number constant itself as a TValue. */ + ra_allockreg(as, i32ptr(ir_knum(ir)), dest); + else /* Otherwise force a spill and use the spill slot. */ + emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir)); + } else { + /* Otherwise use g->tmptv to hold the TValue. */ + RegSet allow = rset_exclude(RSET_GPR, dest); + Reg type; + emit_tai(as, PPCI_ADDI, dest, RID_JGL, (int32_t)offsetof(global_State, tmptv)-32768); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, allow); + emit_setgl(as, src, tmptv.gcr); + } + type = ra_allock(as, irt_toitype(ir->t), allow); + emit_setgl(as, type, tmptv.it); + } +} + +static void asm_aref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx, base; + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + ofs += 8*IR(ir->op2)->i; + if (checki16(ofs)) { + base = ra_alloc1(as, refa, RSET_GPR); + emit_tai(as, PPCI_ADDI, dest, base, ofs); + return; + } + } + base = ra_alloc1(as, ir->op1, RSET_GPR); + idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); + emit_tab(as, PPCI_ADD, dest, RID_TMP, base); + emit_slwi(as, RID_TMP, idx, 3); +} + +/* Inlined hash lookup. Specialized for key type and for const keys. +** The equivalent C code is: +** Node *n = hashkey(t, key); +** do { +** if (lj_obj_equal(&n->key, key)) return &n->val; +** } while ((n = nextnode(n))); +** return niltv(L); +*/ +static void asm_href(ASMState *as, IRIns *ir, IROp merge) +{ + RegSet allow = RSET_GPR; + int destused = ra_used(ir); + Reg dest = ra_dest(as, ir, allow); + Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); + Reg key = RID_NONE, tmp1 = RID_TMP, tmp2; + Reg tisnum = RID_NONE, tmpnum = RID_NONE; + IRRef refkey = ir->op2; + IRIns *irkey = IR(refkey); + IRType1 kt = irkey->t; + uint32_t khash; + MCLabel l_end, l_loop, l_next; + + rset_clear(allow, tab); + if (irt_isnum(kt)) { + key = ra_alloc1(as, refkey, RSET_FPR); + tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); + tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow); + rset_clear(allow, tisnum); + } else if (!irt_ispri(kt)) { + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + } + tmp2 = ra_scratch(as, allow); + rset_clear(allow, tmp2); + + /* Key not found in chain: jump to exit (if merged) or load niltv. */ + l_end = emit_label(as); + as->invmcp = NULL; + if (merge == IR_NE) + asm_guardcc(as, CC_EQ); + else if (destused) + emit_loada(as, dest, niltvg(J2G(as->J))); + + /* Follow hash chain until the end. */ + l_loop = --as->mcp; + emit_ai(as, PPCI_CMPWI, dest, 0); + emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next)); + l_next = emit_label(as); + + /* Type and value comparison. 
*/ + if (merge == IR_EQ) + asm_guardcc(as, CC_EQ); + else + emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); + if (irt_isnum(kt)) { + emit_fab(as, PPCI_FCMPU, 0, tmpnum, key); + emit_condbranch(as, PPCI_BC, CC_GE, l_next); + emit_ab(as, PPCI_CMPLW, tmp1, tisnum); + emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n)); + } else { + if (!irt_ispri(kt)) { + emit_ab(as, PPCI_CMPW, tmp2, key); + emit_condbranch(as, PPCI_BC, CC_NE, l_next); + } + emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t)); + if (!irt_ispri(kt)) + emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr)); + } + emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it)); + *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) | + (((char *)as->mcp-(char *)l_loop) & 0xffffu); + + /* Load main position relative to tab->node into dest. */ + khash = irref_isk(refkey) ? ir_khash(irkey) : 1; + if (khash == 0) { + emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); + } else { + Reg tmphash = tmp1; + if (irref_isk(refkey)) + tmphash = ra_allock(as, khash, allow); + emit_tab(as, PPCI_ADD, dest, dest, tmp1); + emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node)); + emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash); + emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); + if (irref_isk(refkey)) { + /* Nothing to do. */ + } else if (irt_isstr(kt)) { + emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash)); + } else { /* Must match with hash*() in lj_tab.c. */ + emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1); + emit_rotlwi(as, tmp2, tmp2, HASH_ROT3); + emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2); + emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31); + emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2); + if (irt_isnum(kt)) { + int32_t ofs = ra_spill(as, irkey); + emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1); + emit_rotlwi(as, dest, tmp1, HASH_ROT1); + emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1); + emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4); + emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs); + } else { + emit_asb(as, PPCI_XOR, tmp2, key, tmp1); + emit_rotlwi(as, dest, tmp1, HASH_ROT1); + emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS); + emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16); + } + } + } +} + +static void asm_hrefk(ASMState *as, IRIns *ir) +{ + IRIns *kslot = IR(ir->op2); + IRIns *irkey = IR(kslot->op1); + int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); + int32_t kofs = ofs + (int32_t)offsetof(Node, key); + Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; + Reg node = ra_alloc1(as, ir->op1, RSET_GPR); + Reg key = RID_NONE, type = RID_TMP, idx = node; + RegSet allow = rset_exclude(RSET_GPR, node); + lua_assert(ofs % sizeof(Node) == 0); + if (ofs > 32736) { + idx = dest; + rset_clear(allow, dest); + kofs = (int32_t)offsetof(Node, key); + } else if (ra_hasreg(dest)) { + emit_tai(as, PPCI_ADDI, dest, node, ofs); + } + asm_guardcc(as, CC_NE); + if (!irt_ispri(irkey->t)) { + key = ra_scratch(as, allow); + rset_clear(allow, key); + } + rset_clear(allow, type); + if (irt_isnum(irkey->t)) { + emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo); + asm_guardcc(as, CC_NE); + emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi); + } else { + if (ra_hasreg(key)) { + emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. 
*/ + asm_guardcc(as, CC_NE); + } + emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t)); + } + if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4); + emit_tai(as, PPCI_LWZ, type, idx, kofs); + if (ofs > 32736) { + emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16); + emit_tai(as, PPCI_ADDI, dest, node, ofs); + } +} + +static void asm_uref(ASMState *as, IRIns *ir) +{ + /* NYI: Check that UREFO is still open and not aliasing a slot. */ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; + emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR); + } else { + Reg uv = ra_scratch(as, RSET_GPR); + Reg func = ra_alloc1(as, ir->op1, RSET_GPR); + if (ir->o == IR_UREFC) { + asm_guardcc(as, CC_NE); + emit_ai(as, PPCI_CMPWI, RID_TMP, 1); + emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv)); + emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); + } else { + emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v)); + } + emit_tai(as, PPCI_LWZ, uv, func, + (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); + } +} + +static void asm_fref(ASMState *as, IRIns *ir) +{ + UNUSED(as); UNUSED(ir); + lua_assert(!ra_used(ir)); +} + +static void asm_strref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + IRRef ref = ir->op2, refk = ir->op1; + int32_t ofs = (int32_t)sizeof(GCstr); + Reg r; + if (irref_isk(ref)) { + IRRef tmp = refk; refk = ref; ref = tmp; + } else if (!irref_isk(refk)) { + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + IRIns *irr = IR(ir->op2); + if (ra_hasreg(irr->r)) { + ra_noweak(as, irr->r); + right = irr->r; + } else if (mayfuse(as, irr->op2) && + irr->o == IR_ADD && irref_isk(irr->op2) && + checki16(ofs + IR(irr->op2)->i)) { + ofs += IR(irr->op2)->i; + right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); + } else { + right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + emit_tai(as, PPCI_ADDI, dest, dest, ofs); + emit_tab(as, PPCI_ADD, dest, left, right); + return; + } + r = ra_alloc1(as, ref, RSET_GPR); + ofs += IR(refk)->i; + if (checki16(ofs)) + emit_tai(as, PPCI_ADDI, dest, r, ofs); + else + emit_tab(as, PPCI_ADD, dest, r, + ra_allock(as, ofs, rset_exclude(RSET_GPR, r))); +} + +/* -- Loads and stores ---------------------------------------------------- */ + +static PPCIns asm_fxloadins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */ + case IRT_U8: return PPCI_LBZ; + case IRT_I16: return PPCI_LHA; + case IRT_U16: return PPCI_LHZ; + case IRT_NUM: return PPCI_LFD; + case IRT_FLOAT: return PPCI_LFS; + default: return PPCI_LWZ; + } +} + +static PPCIns asm_fxstoreins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: case IRT_U8: return PPCI_STB; + case IRT_I16: case IRT_U16: return PPCI_STH; + case IRT_NUM: return PPCI_STFD; + case IRT_FLOAT: return PPCI_STFS; + default: return PPCI_STW; + } +} + +static void asm_fload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); + PPCIns pi = asm_fxloadins(ir); + int32_t ofs; + if (ir->op2 == IRFL_TAB_ARRAY) { + ofs = asm_fuseabase(as, ir->op1); + if (ofs) { /* Turn the t->array load into an add for colocated arrays. 
*/ + emit_tai(as, PPCI_ADDI, dest, idx, ofs); + return; + } + } + ofs = field_ofs[ir->op2]; + lua_assert(!irt_isi8(ir->t)); + emit_tai(as, pi, dest, idx, ofs); +} + +static void asm_fstore(ASMState *as, IRIns *ir) +{ + if (ir->r != RID_SINK) { + Reg src = ra_alloc1(as, ir->op2, RSET_GPR); + IRIns *irf = IR(ir->op1); + Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); + int32_t ofs = field_ofs[irf->op2]; + PPCIns pi = asm_fxstoreins(ir); + emit_tai(as, pi, src, idx, ofs); + } +} + +static void asm_xload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); + lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); + if (irt_isi8(ir->t)) + emit_as(as, PPCI_EXTSB, dest, dest); + asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); +} + +static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) +{ + IRIns *irb; + if (ir->r == RID_SINK) + return; + if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP && + ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) { + /* Fuse BSWAP with XSTORE to stwbrx. */ + Reg src = ra_alloc1(as, irb->op1, RSET_GPR); + asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src)); + } else { + Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); + asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, + rset_exclude(RSET_GPR, src), ofs); + } +} + +#define asm_xstore(as, ir) asm_xstore_(as, ir, 0) + +static void asm_ahuvload(ASMState *as, IRIns *ir) +{ + IRType1 t = ir->t; + Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx; + RegSet allow = RSET_GPR; + int32_t ofs = AHUREF_LSX; + if (ra_used(ir)) { + lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); + if (!irt_isnum(t)) ofs = 0; + dest = ra_dest(as, ir, irt_isnum(t) ? 
RSET_FPR : RSET_GPR); + rset_clear(allow, dest); + } + idx = asm_fuseahuref(as, ir->op1, &ofs, allow); + if (irt_isnum(t)) { + Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx)); + asm_guardcc(as, CC_GE); + emit_ab(as, PPCI_CMPLW, type, tisnum); + if (ra_hasreg(dest)) { + if (ofs == AHUREF_LSX) { + tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, + (idx&255)), (idx>>8))); + emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp); + } else { + emit_fai(as, PPCI_LFD, dest, idx, ofs); + } + } + } else { + asm_guardcc(as, CC_NE); + emit_ai(as, PPCI_CMPWI, type, irt_toitype(t)); + if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4); + } + if (ofs == AHUREF_LSX) { + emit_tab(as, PPCI_LWZX, type, (idx&255), tmp); + emit_slwi(as, tmp, (idx>>8), 3); + } else { + emit_tai(as, PPCI_LWZ, type, idx, ofs); + } +} + +static void asm_ahustore(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_GPR; + Reg idx, src = RID_NONE, type = RID_NONE; + int32_t ofs = AHUREF_LSX; + if (ir->r == RID_SINK) + return; + if (irt_isnum(ir->t)) { + src = ra_alloc1(as, ir->op2, RSET_FPR); + } else { + if (!irt_ispri(ir->t)) { + src = ra_alloc1(as, ir->op2, allow); + rset_clear(allow, src); + ofs = 0; + } + type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); + rset_clear(allow, type); + } + idx = asm_fuseahuref(as, ir->op1, &ofs, allow); + if (irt_isnum(ir->t)) { + if (ofs == AHUREF_LSX) { + emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP); + emit_slwi(as, RID_TMP, (idx>>8), 3); + } else { + emit_fai(as, PPCI_STFD, src, idx, ofs); + } + } else { + if (ra_hasreg(src)) + emit_tai(as, PPCI_STW, src, idx, ofs+4); + if (ofs == AHUREF_LSX) { + emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP); + emit_slwi(as, RID_TMP, (idx>>8), 3); + } else { + emit_tai(as, PPCI_STW, type, idx, ofs); + } + } +} + +static void asm_sload(ASMState *as, IRIns *ir) +{ + int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4); + IRType1 t = ir->t; + Reg dest = RID_NONE, type = RID_NONE, base; + RegSet allow = RSET_GPR; + lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ + lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); + lua_assert(LJ_DUALNUM || + !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); + if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { + dest = ra_scratch(as, RSET_FPR); + asm_tointg(as, ir, dest); + t.irt = IRT_NUM; /* Continue with a regular number type check. */ + } else if (ra_used(ir)) { + lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); + dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); + rset_clear(allow, dest); + base = ra_alloc1(as, REF_BASE, allow); + rset_clear(allow, base); + if ((ir->op2 & IRSLOAD_CONVERT)) { + if (irt_isint(t)) { + emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); + dest = ra_scratch(as, RSET_FPR); + emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP); + emit_fb(as, PPCI_FCTIWZ, dest, dest); + t.irt = IRT_NUM; /* Check for original type. 
*/ + } else { + Reg tmp = ra_scratch(as, allow); + Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp)); + Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); + emit_fab(as, PPCI_FSUB, dest, dest, fbias); + emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP); + emit_lsptr(as, PPCI_LFS, (fbias & 31), + (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)), + rset_clear(allow, hibias)); + emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO); + emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); + emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000); + dest = tmp; + t.irt = IRT_INT; /* Check for original type. */ + } + } + goto dotypecheck; + } + base = ra_alloc1(as, REF_BASE, allow); + rset_clear(allow, base); +dotypecheck: + if (irt_isnum(t)) { + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow); + asm_guardcc(as, CC_GE); + emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum); + type = RID_TMP; + } + if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4); + } else { + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + asm_guardcc(as, CC_NE); + emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t)); + type = RID_TMP; + } + if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs); + } + if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4); +} + +/* -- Allocations --------------------------------------------------------- */ + +#if LJ_HASFFI +static void asm_cnew(ASMState *as, IRIns *ir) +{ + CTState *cts = ctype_ctsG(J2G(as->J)); + CTypeID id = (CTypeID)IR(ir->op1)->i; + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; + IRRef args[4]; + RegSet drop = RSET_SCRATCH; + lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); + + as->gcsteps++; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + if (ra_used(ir)) + ra_destreg(as, ir, RID_RET); /* GCcdata * */ + + /* Initialize immutable cdata object. */ + if (ir->o == IR_CNEWI) { + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); + int32_t ofs = sizeof(GCcdata); + lua_assert(sz == 4 || sz == 8); + if (sz == 8) { + ofs += 4; + lua_assert((ir+1)->o == IR_HIOP); + } + for (;;) { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_tai(as, PPCI_STW, r, RID_RET, ofs); + rset_clear(allow, r); + if (ofs == sizeof(GCcdata)) break; + ofs -= 4; ir++; + } + } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ + ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* CTypeID id */ + args[2] = ir->op2; /* CTSize sz */ + args[3] = ASMREF_TMP1; /* CTSize align */ + asm_gencall(as, ci, args); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); + return; + } + + /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ + emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); + emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); + emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA); + emit_ti(as, PPCI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. 
*/ + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* MSize size */ + asm_gencall(as, ci, args); + ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), + ra_releasetmp(as, ASMREF_TMP1)); +} +#else +#define asm_cnew(as, ir) ((void)0) +#endif + +/* -- Write barriers ------------------------------------------------------ */ + +static void asm_tbar(ASMState *as, IRIns *ir) +{ + Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); + Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab)); + Reg link = RID_TMP; + MCLabel l_end = emit_label(as); + emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist)); + emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked)); + emit_setgl(as, tab, gc.grayagain); + lua_assert(LJ_GC_BLACK == 0x04); + emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */ + emit_getgl(as, link, gc.grayagain); + emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); + emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK); + emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked)); +} + +static void asm_obar(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; + IRRef args[2]; + MCLabel l_end; + Reg obj, val, tmp; + /* No need for other object barriers (yet). */ + lua_assert(IR(ir->op1)->o == IR_UREFC); + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ir->op1; /* TValue *tv */ + asm_gencall(as, ci, args); + emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); + obj = IR(ir->op1)->r; + tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); + emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); + emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK); + emit_condbranch(as, PPCI_BC, CC_EQ, l_end); + emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES); + val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); + emit_tai(as, PPCI_LBZ, tmp, obj, + (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); + emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked)); +} + +/* -- Arithmetic and logic operations ------------------------------------- */ + +static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi) +{ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + if (pi == PPCI_FMUL) + emit_fac(as, pi, dest, left, right); + else + emit_fab(as, pi, dest, left, right); +} + +static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi) +{ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); + emit_fb(as, pi, dest, left); +} + +static void asm_fpmath(ASMState *as, IRIns *ir) +{ + if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) + return; + if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT)) + asm_fpunary(as, ir, PPCI_FSQRT); + else + asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); +} + +static void asm_add(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD)) + asm_fparith(as, ir, PPCI_FADD); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + PPCIns pi; + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (checki16(k)) { + pi = PPCI_ADDI; + /* May fail due to spills/restores above, but simplifies the logic. 
*/ + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + pi = PPCI_ADDICDOT; + } + emit_tai(as, pi, dest, left, k); + return; + } else if ((k & 0xffff) == 0) { + emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16)); + return; + } else if (!as->sectref) { + emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16); + emit_tai(as, PPCI_ADDI, dest, left, k); + return; + } + } + pi = PPCI_ADD; + /* May fail due to spills/restores above, but simplifies the logic. */ + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + pi |= PPCF_DOT; + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_tab(as, pi, dest, left, right); + } +} + +static void asm_sub(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB)) + asm_fparith(as, ir, PPCI_FSUB); + } else { + PPCIns pi = PPCI_SUBF; + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left, right; + if (irref_isk(ir->op1)) { + int32_t k = IR(ir->op1)->i; + if (checki16(k)) { + right = ra_alloc1(as, ir->op2, RSET_GPR); + emit_tai(as, PPCI_SUBFIC, dest, right, k); + return; + } + } + /* May fail due to spills/restores above, but simplifies the logic. */ + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + pi |= PPCF_DOT; + } + left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */ + } +} + +static void asm_mul(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + asm_fparith(as, ir, PPCI_FMUL); + } else { + PPCIns pi = PPCI_MULLW; + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (checki16(k)) { + emit_tai(as, PPCI_MULLI, dest, left, k); + return; + } + } + /* May fail due to spills/restores above, but simplifies the logic. 
*/ + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + pi |= PPCF_DOT; + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_tab(as, pi, dest, left, right); + } +} + +#define asm_div(as, ir) asm_fparith(as, ir, PPCI_FDIV) +#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi) +#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi) + +static void asm_neg(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + asm_fpunary(as, ir, PPCI_FNEG); + } else { + Reg dest, left; + PPCIns pi = PPCI_NEG; + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + pi |= PPCF_DOT; + } + dest = ra_dest(as, ir, RSET_GPR); + left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + emit_tab(as, pi, dest, left, 0); + } +} + +#define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS) +#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) +#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) + +static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi) +{ + Reg dest, left, right; + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + } + asm_guardcc(as, CC_SO); + dest = ra_dest(as, ir, RSET_GPR); + left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; } + emit_tab(as, pi|PPCF_DOT, dest, left, right); +} + +#define asm_addov(as, ir) asm_arithov(as, ir, PPCI_ADDO) +#define asm_subov(as, ir) asm_arithov(as, ir, PPCI_SUBFO) +#define asm_mulov(as, ir) asm_arithov(as, ir, PPCI_MULLWO) + +#if LJ_HASFFI +static void asm_add64(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + PPCIns pi = PPCI_ADDE; + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (k == 0) + pi = PPCI_ADDZE; + else if (k == -1) + pi = PPCI_ADDME; + else + goto needright; + right = 0; + } else { + needright: + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + emit_tab(as, pi, dest, left, right); + ir--; + dest = ra_dest(as, ir, RSET_GPR); + left = ra_alloc1(as, ir->op1, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (checki16(k)) { + emit_tai(as, PPCI_ADDIC, dest, left, k); + return; + } + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_tab(as, PPCI_ADDC, dest, left, right); +} + +static void asm_sub64(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR); + PPCIns pi = PPCI_SUBFE; + if (irref_isk(ir->op1)) { + int32_t k = IR(ir->op1)->i; + if (k == 0) + pi = PPCI_SUBFZE; + else if (k == -1) + pi = PPCI_SUBFME; + else + goto needleft; + left = 0; + } else { + needleft: + left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right)); + } + emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. 
*/
+  ir--;
+  dest = ra_dest(as, ir, RSET_GPR);
+  right = ra_alloc1(as, ir->op2, RSET_GPR);
+  if (irref_isk(ir->op1)) {
+    int32_t k = IR(ir->op1)->i;
+    if (checki16(k)) {
+      emit_tai(as, PPCI_SUBFIC, dest, right, k);
+      return;
+    }
+  }
+  left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+  emit_tab(as, PPCI_SUBFC, dest, right, left);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+  Reg dest = ra_dest(as, ir, RSET_GPR);
+  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+  emit_tab(as, PPCI_SUBFZE, dest, left, 0);
+  ir--;
+  dest = ra_dest(as, ir, RSET_GPR);
+  left = ra_alloc1(as, ir->op1, RSET_GPR);
+  emit_tai(as, PPCI_SUBFIC, dest, left, 0);
+}
+#endif
+
+static void asm_bnot(ASMState *as, IRIns *ir)
+{
+  Reg dest, left, right;
+  PPCIns pi = PPCI_NOR;
+  if (as->flagmcp == as->mcp) {
+    as->flagmcp = NULL;
+    as->mcp++;
+    pi |= PPCF_DOT;
+  }
+  dest = ra_dest(as, ir, RSET_GPR);
+  if (mayfuse(as, ir->op1)) {
+    IRIns *irl = IR(ir->op1);
+    if (irl->o == IR_BAND)
+      pi ^= (PPCI_NOR ^ PPCI_NAND);
+    else if (irl->o == IR_BXOR)
+      pi ^= (PPCI_NOR ^ PPCI_EQV);
+    else if (irl->o != IR_BOR)
+      goto nofuse;
+    left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
+    right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
+  } else {
+nofuse:
+    left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+  }
+  emit_asb(as, pi, dest, left, right);
+}
+
+static void asm_bswap(ASMState *as, IRIns *ir)
+{
+  Reg dest = ra_dest(as, ir, RSET_GPR);
+  IRIns *irx;
+  if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
+      ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
+    /* Fuse BSWAP with XLOAD to lwbrx. */
+    asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
+  } else {
+    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+    Reg tmp = dest;
+    if (tmp == left) {
+      tmp = RID_TMP;
+      emit_mr(as, dest, RID_TMP);
+    }
+    emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
+    emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
+    emit_rotlwi(as, tmp, left, 8);
+  }
+}
+
+/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
+static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
+{
+  IRIns *ir;
+  Reg left;
+  if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
+      irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+    int32_t sh = (IR(ir->op2)->i & 31);
+    switch (ir->o) {
+    case IR_BSHL:
+      if ((mask & ((1u<<sh)-1))) goto nofuse;
+      break;
+    case IR_BSHR:
+      if ((mask & ~((~0u)>>sh))) goto nofuse;
+      sh = ((32-sh)&31);
+      break;
+    case IR_BROL:
+      break;
+    default:
+      goto nofuse;
+    }
+    left = ra_alloc1(as, ir->op1, RSET_GPR);
+    *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
+    return;
+  }
+nofuse:
+  left = ra_alloc1(as, ref, RSET_GPR);
+  *--as->mcp = pi | PPCF_T(left);
+}
+
+static void asm_band(ASMState *as, IRIns *ir)
+{
+  Reg dest, left, right;
+  IRRef lref = ir->op1;
+  PPCIns dot = 0;
+  IRRef op2;
+  if (as->flagmcp == as->mcp) {
+    as->flagmcp = NULL;
+    as->mcp++;
+    dot = PPCF_DOT;
+  }
+  dest = ra_dest(as, ir, RSET_GPR);
+  if (irref_isk(ir->op2)) {
+    int32_t k = IR(ir->op2)->i;
+    if (k) {
+      /* First check for a contiguous bitmask as used by rlwinm.
*/ + uint32_t s1 = lj_ffs((uint32_t)k); + uint32_t k1 = ((uint32_t)k >> s1); + if ((k1 & (k1+1)) == 0) { + asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) | + PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1), + k, lref); + return; + } + if (~(uint32_t)k) { + uint32_t s2 = lj_ffs(~(uint32_t)k); + uint32_t k2 = (~(uint32_t)k >> s2); + if ((k2 & (k2+1)) == 0) { + asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) | + PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)), + k, lref); + return; + } + } + } + if (checku16(k)) { + left = ra_alloc1(as, lref, RSET_GPR); + emit_asi(as, PPCI_ANDIDOT, dest, left, k); + return; + } else if ((k & 0xffff) == 0) { + left = ra_alloc1(as, lref, RSET_GPR); + emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16)); + return; + } + } + op2 = ir->op2; + if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) { + dot ^= (PPCI_AND ^ PPCI_ANDC); + op2 = IR(op2)->op1; + } + left = ra_hintalloc(as, lref, dest, RSET_GPR); + right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left)); + emit_asb(as, PPCI_AND ^ dot, dest, left, right); +} + +static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + if (irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + Reg tmp = left; + if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) { + if (!checku16(k)) { + emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16)); + if ((k & 0xffff) == 0) return; + } + emit_asi(as, pik, dest, left, k); + return; + } + } + /* May fail due to spills/restores above, but simplifies the logic. */ + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + pi |= PPCF_DOT; + } + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_asb(as, pi, dest, left, right); +} + +#define asm_bor(as, ir) asm_bitop(as, ir, PPCI_OR, PPCI_ORI) +#define asm_bxor(as, ir) asm_bitop(as, ir, PPCI_XOR, PPCI_XORI) + +static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik) +{ + Reg dest, left; + Reg dot = 0; + if (as->flagmcp == as->mcp) { + as->flagmcp = NULL; + as->mcp++; + dot = PPCF_DOT; + } + dest = ra_dest(as, ir, RSET_GPR); + left = ra_alloc1(as, ir->op1, RSET_GPR); + if (irref_isk(ir->op2)) { /* Constant shifts. */ + int32_t shift = (IR(ir->op2)->i & 31); + if (pik == 0) /* SLWI */ + emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift); + else if (pik == 1) /* SRWI */ + emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31); + else + emit_asb(as, pik|dot, dest, left, shift); + } else { + Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_asb(as, pi|dot, dest, left, right); + } +} + +#define asm_bshl(as, ir) asm_bitshift(as, ir, PPCI_SLW, 0) +#define asm_bshr(as, ir) asm_bitshift(as, ir, PPCI_SRW, 1) +#define asm_bsar(as, ir) asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI) +#define asm_brol(as, ir) \ + asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \ + PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)) +#define asm_bror(as, ir) lua_assert(0) + +static void asm_min_max(ASMState *as, IRIns *ir, int ismax) +{ + if (irt_isnum(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg tmp = dest; + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + if (tmp == left || tmp == right) + tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR, + dest), left), right)); + emit_facb(as, PPCI_FSEL, dest, tmp, + ismax ? left : right, ismax ? 
right : left); + emit_fab(as, PPCI_FSUB, tmp, left, right); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg tmp1 = RID_TMP, tmp2 = dest; + Reg right, left = ra_alloc2(as, ir, RSET_GPR); + right = (left >> 8); left &= 255; + if (tmp2 == left || tmp2 == right) + tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, + dest), left), right)); + emit_tab(as, PPCI_ADD, dest, tmp2, right); + emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1); + emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1); + emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1); + emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000); + emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000); + } +} + +#define asm_min(as, ir) asm_min_max(as, ir, 0) +#define asm_max(as, ir) asm_min_max(as, ir, 1) + +/* -- Comparisons --------------------------------------------------------- */ + +#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */ +#define CC_TWO 0x80 /* Check two flags for FP comparison. */ + +/* Map of comparisons to flags. ORDER IR. */ +static const uint8_t asm_compmap[IR_ABC+1] = { + /* op int cc FP cc */ + /* LT */ CC_GE + (CC_GE<<4), + /* GE */ CC_LT + (CC_LE<<4) + CC_TWO, + /* LE */ CC_GT + (CC_GE<<4) + CC_TWO, + /* GT */ CC_LE + (CC_LE<<4), + /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO, + /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4), + /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4), + /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO, + /* EQ */ CC_NE + (CC_NE<<4), + /* NE */ CC_EQ + (CC_EQ<<4), + /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */ +}; + +static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc) +{ + Reg right, left = ra_alloc1(as, lref, RSET_GPR); + if (irref_isk(rref)) { + int32_t k = IR(rref)->i; + if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */ + if (checki16(k)) { + emit_tai(as, PPCI_CMPWI, cr, left, k); + /* Signed comparison with zero and referencing previous ins? */ + if (k == 0 && lref == as->curins-1) + as->flagmcp = as->mcp; /* Allow elimination of the compare. */ + return; + } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */ + if (checku16(k)) { + emit_tai(as, PPCI_CMPLWI, cr, left, k); + return; + } else if (!as->sectref && ra_noreg(IR(rref)->r)) { + emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k); + emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16)); + return; + } + } + } else { /* Unsigned comparison with constant. */ + if (checku16(k)) { + emit_tai(as, PPCI_CMPLWI, cr, left, k); + return; + } + } + } + right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left)); + emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right); +} + +static void asm_comp(ASMState *as, IRIns *ir) +{ + PPCCC cc = asm_compmap[ir->o]; + if (irt_isnum(ir->t)) { + Reg right, left = ra_alloc2(as, ir, RSET_FPR); + right = (left >> 8); left &= 255; + asm_guardcc(as, (cc >> 4)); + if ((cc & CC_TWO)) + emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3)); + emit_fab(as, PPCI_FCMPU, 0, left, right); + } else { + IRRef lref = ir->op1, rref = ir->op2; + if (irref_isk(lref) && !irref_isk(rref)) { + /* Swap constants to the right (only for ABC). */ + IRRef tmp = lref; lref = rref; rref = tmp; + if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */ + } + asm_guardcc(as, cc); + asm_intcomp_(as, lref, rref, 0, cc); + } +} + +#define asm_equal(as, ir) asm_comp(as, ir) + +#if LJ_HASFFI +/* 64 bit integer comparisons. 
*/ +static void asm_comp64(ASMState *as, IRIns *ir) +{ + PPCCC cc = asm_compmap[(ir-1)->o]; + if ((cc&3) == (CC_EQ&3)) { + asm_guardcc(as, cc); + emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR, + (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3)); + } else { + asm_guardcc(as, CC_EQ); + emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1)); + emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC, + (CC_EQ&3), (CC_EQ&3), 4+(cc&3)); + } + /* Loword comparison sets cr1 and is unsigned, except for equality. */ + asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4, + cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED)); + /* Hiword comparison sets cr0. */ + asm_intcomp_(as, ir->op1, ir->op2, 0, cc); + as->flagmcp = NULL; /* Doesn't work here. */ +} +#endif + +/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ + +/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ +static void asm_hiop(ASMState *as, IRIns *ir) +{ +#if LJ_HASFFI + /* HIOP is marked as a store because it needs its own DCE logic. */ + int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ + if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; + if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ + as->curins--; /* Always skip the CONV. */ + if (usehi || uselo) + asm_conv64(as, ir); + return; + } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ + as->curins--; /* Always skip the loword comparison. */ + asm_comp64(as, ir); + return; + } else if ((ir-1)->o == IR_XSTORE) { + as->curins--; /* Handle both stores here. */ + if ((ir-1)->r != RID_SINK) { + asm_xstore_(as, ir, 0); + asm_xstore_(as, ir-1, 4); + } + return; + } + if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ + switch ((ir-1)->o) { + case IR_ADD: as->curins--; asm_add64(as, ir); break; + case IR_SUB: as->curins--; asm_sub64(as, ir); break; + case IR_NEG: as->curins--; asm_neg64(as, ir); break; + case IR_CALLN: + case IR_CALLXS: + if (!uselo) + ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ + break; + case IR_CNEWI: + /* Nothing to do here. Handled by lo op itself. */ + break; + default: lua_assert(0); break; + } +#else + UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */ +#endif +} + +/* -- Profiling ----------------------------------------------------------- */ + +static void asm_prof(ASMState *as, IRIns *ir) +{ + UNUSED(ir); + asm_guardcc(as, CC_NE); + emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, HOOK_PROFILE); + emit_lsglptr(as, PPCI_LBZ, RID_TMP, + (int32_t)offsetof(global_State, hookmask)); +} + +/* -- Stack handling ------------------------------------------------------ */ + +/* Check Lua stack size for overflow. Use exit handler as fallback. */ +static void asm_stack_check(ASMState *as, BCReg topslot, + IRIns *irp, RegSet allow, ExitNo exitno) +{ + /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */ + Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE; + rset_clear(allow, pbase); + tmp = allow ? rset_pickbot(allow) : + (pbase == RID_RETHI ? RID_RETLO : RID_RETHI); + emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno)); + if (allow == RSET_EMPTY) /* Restore temp. register. 
*/ + emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW); + else + ra_modified(as, tmp); + emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot)); + emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp); + emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack)); + if (pbase == RID_TMP) + emit_getgl(as, RID_TMP, jit_base); + emit_getgl(as, tmp, cur_L); + if (allow == RSET_EMPTY) /* Spill temp. register. */ + emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW); +} + +/* Restore Lua stack from on-trace state. */ +static void asm_stack_restore(ASMState *as, SnapShot *snap) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; + MSize n, nent = snap->nent; + /* Store the value of all modified slots to the Lua stack. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + int32_t ofs = 8*((int32_t)s-1); + IRRef ref = snap_ref(sn); + IRIns *ir = IR(ref); + if ((sn & SNAP_NORESTORE)) + continue; + if (irt_isnum(ir->t)) { + Reg src = ra_alloc1(as, ref, RSET_FPR); + emit_fai(as, PPCI_STFD, src, RID_BASE, ofs); + } else { + Reg type; + RegSet allow = rset_exclude(RSET_GPR, RID_BASE); + lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, allow); + rset_clear(allow, src); + emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4); + } + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + if (s == 0) continue; /* Do not overwrite link to previous frame. */ + type = ra_allock(as, (int32_t)(*flinks--), allow); + } else { + type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); + } + emit_tai(as, PPCI_STW, type, RID_BASE, ofs); + } + checkmclim(as); + } + lua_assert(map + nent == flinks); +} + +/* -- GC handling --------------------------------------------------------- */ + +/* Check GC threshold and do one or more GC steps. */ +static void asm_gc_check(ASMState *as) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; + IRRef args[2]; + MCLabel l_end; + Reg tmp; + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ + asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ + emit_ai(as, PPCI_CMPWI, RID_RET, 0); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ASMREF_TMP2; /* MSize steps */ + asm_gencall(as, ci, args); + emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); + tmp = ra_releasetmp(as, ASMREF_TMP2); + emit_loadi(as, tmp, as->gcsteps); + /* Jump around GC step if GC total < GC threshold. */ + emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end); + emit_ab(as, PPCI_CMPLW, RID_TMP, tmp); + emit_getgl(as, tmp, gc.threshold); + emit_getgl(as, RID_TMP, gc.total); + as->gcsteps = 0; + checkmclim(as); +} + +/* -- Loop handling ------------------------------------------------------- */ + +/* Fixup the loop branch. */ +static void asm_loop_fixup(ASMState *as) +{ + MCode *p = as->mctop; + MCode *target = as->mcp; + if (as->loopinv) { /* Inverted loop branch? */ + /* asm_guardcc already inverted the cond branch and patched the final b. */ + p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2); + } else { + p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2); + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Coalesce BASE register for a root trace. 
*/ +static void asm_head_root_base(ASMState *as) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r) || irt_ismarked(ir->t)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (r != RID_BASE) + emit_mr(as, r, RID_BASE); + } +} + +/* Coalesce BASE register for a side trace. */ +static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r) || irt_ismarked(ir->t)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (irp->r == r) { + rset_clear(allow, r); /* Mark same BASE register as coalesced. */ + } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { + rset_clear(allow, irp->r); + emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */ + } else { + emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ + } + } + return allow; +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Fixup the tail code. */ +static void asm_tail_fixup(ASMState *as, TraceNo lnk) +{ + MCode *p = as->mctop; + MCode *target; + int32_t spadj = as->T->spadjust; + if (spadj == 0) { + *--p = PPCI_NOP; + *--p = PPCI_NOP; + as->mctop = p; + } else { + /* Patch stack adjustment. */ + lua_assert(checki16(CFRAME_SIZE+spadj)); + p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj); + p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj; + } + /* Patch exit branch. */ + target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; + p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2); +} + +/* Prepare tail of code. */ +static void asm_tail_prep(ASMState *as) +{ + MCode *p = as->mctop - 1; /* Leave room for exit branch. */ + if (as->loopref) { + as->invmcp = as->mcp = p; + } else { + as->mcp = p-2; /* Leave room for stack pointer adjustment. */ + as->invmcp = NULL; + } +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Ensure there are enough stack slots for call arguments. */ +static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + IRRef args[CCI_NARGS_MAX*2]; + uint32_t i, nargs = CCI_XNARGS(ci); + int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; + asm_collectargs(as, ir, ci, args); + for (i = 0; i < nargs; i++) + if (args[i] && irt_isfp(IR(args[i])->t)) { + if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1; + } else { + if (ngpr > 0) ngpr--; else nslots++; + } + if (nslots > as->evenspill) /* Leave room for args in stack slots. */ + as->evenspill = nslots; + return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); +} + +static void asm_setup_target(ASMState *as) +{ + asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0)); +} + +/* -- Trace patching ------------------------------------------------------ */ + +/* Patch exit jumps of existing machine code to a new target. */ +void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) +{ + MCode *p = T->mcode; + MCode *pe = (MCode *)((char *)p + T->szmcode); + MCode *px = exitstub_trace_addr(T, exitno); + MCode *cstart = NULL; + MCode *mcarea = lj_mcode_patch(J, p, 0); + int clearso = 0; + for (; p < pe; p++) { + /* Look for exitstub branch, try to replace with branch to target. 
*/ + uint32_t ins = *p; + if ((ins & 0xfc000000u) == 0x40000000u && + ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) { + ptrdiff_t delta = (char *)target - (char *)p; + if (((ins >> 16) & 3) == (CC_SO&3)) { + clearso = sizeof(MCode); + delta -= sizeof(MCode); + } + /* Many, but not all short-range branches can be patched directly. */ + if (((delta + 0x8000) >> 16) == 0) { + *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) | + ((delta & 0x8000) * (PPCF_Y/0x8000)); + if (!cstart) cstart = p; + } + } else if ((ins & 0xfc000000u) == PPCI_B && + ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) { + ptrdiff_t delta = (char *)target - (char *)p; + lua_assert(((delta + 0x02000000) >> 26) == 0); + *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu); + if (!cstart) cstart = p; + } + } + { /* Always patch long-range branch in exit stub itself. */ + ptrdiff_t delta = (char *)target - (char *)px - clearso; + lua_assert(((delta + 0x02000000) >> 26) == 0); + *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu); + } + if (!cstart) cstart = px; + lj_mcode_sync(cstart, px+1); + if (clearso) { /* Extend the current trace. Ugly workaround. */ + MCode *pp = J->cur.mcode; + J->cur.szmcode += sizeof(MCode); + *--pp = PPCI_MCRXR; /* Clear SO flag. */ + J->cur.mcode = pp; + lj_mcode_sync(pp, pp+1); + } + lj_mcode_patch(J, mcarea, 1); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_x86.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_x86.h new file mode 100644 index 00000000000..941d09192c2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_asm_x86.h @@ -0,0 +1,2634 @@ +/* +** x86/x64 IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Guard handling ------------------------------------------------------ */ + +/* Generate an exit stub group at the bottom of the reserved MCode memory. */ +static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) +{ + ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff; + MCode *mxp = as->mcbot; + MCode *mxpstart = mxp; + if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop) + asm_mclimit(as); + /* Push low byte of exitno for each exit stub. */ + *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs; + for (i = 1; i < EXITSTUBS_PER_GROUP; i++) { + *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2); + *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i); + } + /* Push the high byte of the exitno for each exit stub group. */ + *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8); + /* Store DISPATCH at original stack slot 0. Account for the two push ops. */ + *mxp++ = XI_MOVmi; + *mxp++ = MODRM(XM_OFS8, 0, RID_ESP); + *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + *mxp++ = 2*sizeof(void *); + *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4; + /* Jump to exit handler which fills in the ExitState. */ + *mxp++ = XI_JMP; mxp += 4; + *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler); + /* Commit the code for this group (even if assembly fails later on). */ + lj_mcode_commitbot(as->J, mxp); + as->mcbot = mxp; + as->mclim = as->mcbot + MCLIM_REDZONE; + return mxpstart; +} + +/* Setup all needed exit stubs. 
*/ +static void asm_exitstub_setup(ASMState *as, ExitNo nexits) +{ + ExitNo i; + if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) + lj_trace_err(as->J, LJ_TRERR_SNAPOV); + for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) + if (as->J->exitstubgroup[i] == NULL) + as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); +} + +/* Emit conditional branch to exit for guard. +** It's important to emit this *after* all registers have been allocated, +** because rematerializations may invalidate the flags. +*/ +static void asm_guardcc(ASMState *as, int cc) +{ + MCode *target = exitstub_addr(as->J, as->snapno); + MCode *p = as->mcp; + if (LJ_UNLIKELY(p == as->invmcp)) { + as->loopinv = 1; + *(int32_t *)(p+1) = jmprel(p+5, target); + target = p; + cc ^= 1; + if (as->realign) { + emit_sjcc(as, cc, target); + return; + } + } + emit_jcc(as, cc, target); +} + +/* -- Memory operand fusion ----------------------------------------------- */ + +/* Limit linear search to this distance. Avoids O(n^2) behavior. */ +#define CONFLICT_SEARCH_LIM 31 + +/* Check if a reference is a signed 32 bit constant. */ +static int asm_isk32(ASMState *as, IRRef ref, int32_t *k) +{ + if (irref_isk(ref)) { + IRIns *ir = IR(ref); + if (ir->o != IR_KINT64) { + *k = ir->i; + return 1; + } else if (checki32((int64_t)ir_kint64(ir)->u64)) { + *k = (int32_t)ir_kint64(ir)->u64; + return 1; + } + } + return 0; +} + +/* Check if there's no conflicting instruction between curins and ref. +** Also avoid fusing loads if there are multiple references. +*/ +static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload) +{ + IRIns *ir = as->ir; + IRRef i = as->curins; + if (i > ref + CONFLICT_SEARCH_LIM) + return 0; /* Give up, ref is too far away. */ + while (--i > ref) { + if (ir[i].o == conflict) + return 0; /* Conflict found. */ + else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref)) + return 0; + } + return 1; /* Ok, no conflict. */ +} + +/* Fuse array base into memory operand. */ +static IRRef asm_fuseabase(ASMState *as, IRRef ref) +{ + IRIns *irb = IR(ref); + as->mrm.ofs = 0; + if (irb->o == IR_FLOAD) { + IRIns *ira = IR(irb->op1); + lua_assert(irb->op2 == IRFL_TAB_ARRAY); + /* We can avoid the FLOAD of t->array for colocated arrays. */ + if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && + !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) { + as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */ + return irb->op1; /* Table obj. */ + } + } else if (irb->o == IR_ADD && irref_isk(irb->op2)) { + /* Fuse base offset (vararg load). */ + as->mrm.ofs = IR(irb->op2)->i; + return irb->op1; + } + return ref; /* Otherwise use the given array base. */ +} + +/* Fuse array reference into memory operand. */ +static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irx; + lua_assert(ir->o == IR_AREF); + as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); + irx = IR(ir->op2); + if (irref_isk(ir->op2)) { + as->mrm.ofs += 8*irx->i; + as->mrm.idx = RID_NONE; + } else { + rset_clear(allow, as->mrm.base); + as->mrm.scale = XM_SCALE8; + /* Fuse a constant ADD (e.g. t[i+1]) into the offset. + ** Doesn't help much without ABCelim, but reduces register pressure. + */ + if (!LJ_64 && /* Has bad effects with negative index on x64. 
*/ + mayfuse(as, ir->op2) && ra_noreg(irx->r) && + irx->o == IR_ADD && irref_isk(irx->op2)) { + as->mrm.ofs += 8*IR(irx->op2)->i; + as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow); + } else { + as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow); + } + } +} + +/* Fuse array/hash/upvalue reference into memory operand. +** Caveat: this may allocate GPRs for the base/idx registers. Be sure to +** pass the final allow mask, excluding any GPRs used for other inputs. +** In particular: 2-operand GPR instructions need to call ra_dest() first! +*/ +static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r)) { + switch ((IROp)ir->o) { + case IR_AREF: + if (mayfuse(as, ref)) { + asm_fusearef(as, ir, allow); + return; + } + break; + case IR_HREFK: + if (mayfuse(as, ref)) { + as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); + as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); + as->mrm.idx = RID_NONE; + return; + } + break; + case IR_UREFC: + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv; + as->mrm.ofs = ptr2addr(&uv->tv); + as->mrm.base = as->mrm.idx = RID_NONE; + return; + } + break; + default: + lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO || + ir->o == IR_KKPTR); + break; + } + } + as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); + as->mrm.ofs = 0; + as->mrm.idx = RID_NONE; +} + +/* Fuse FLOAD/FREF reference into memory operand. */ +static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) +{ + lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF); + as->mrm.ofs = field_ofs[ir->op2]; + as->mrm.idx = RID_NONE; + if (irref_isk(ir->op1)) { + as->mrm.ofs += IR(ir->op1)->i; + as->mrm.base = RID_NONE; + } else { + as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); + } +} + +/* Fuse string reference into memory operand. */ +static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irr; + lua_assert(ir->o == IR_STRREF); + as->mrm.base = as->mrm.idx = RID_NONE; + as->mrm.scale = XM_SCALE1; + as->mrm.ofs = sizeof(GCstr); + if (irref_isk(ir->op1)) { + as->mrm.ofs += IR(ir->op1)->i; + } else { + Reg r = ra_alloc1(as, ir->op1, allow); + rset_clear(allow, r); + as->mrm.base = (uint8_t)r; + } + irr = IR(ir->op2); + if (irref_isk(ir->op2)) { + as->mrm.ofs += irr->i; + } else { + Reg r; + /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */ + if (!LJ_64 && /* Has bad effects with negative index on x64. */ + mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) { + as->mrm.ofs += IR(irr->op2)->i; + r = ra_alloc1(as, irr->op1, allow); + } else { + r = ra_alloc1(as, ir->op2, allow); + } + if (as->mrm.base == RID_NONE) + as->mrm.base = (uint8_t)r; + else + as->mrm.idx = (uint8_t)r; + } +} + +static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + as->mrm.idx = RID_NONE; + if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { + as->mrm.ofs = ir->i; + as->mrm.base = RID_NONE; + } else if (ir->o == IR_STRREF) { + asm_fusestrref(as, ir, allow); + } else { + as->mrm.ofs = 0; + if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) { + /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */ + IRIns *irx; + IRRef idx; + Reg r; + if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. 
*/
+        ref = ir->op1;
+        ir = IR(ref);
+        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
+          goto noadd;
+      }
+      as->mrm.scale = XM_SCALE1;
+      idx = ir->op1;
+      ref = ir->op2;
+      irx = IR(idx);
+      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
+        idx = ir->op2;
+        ref = ir->op1;
+        irx = IR(idx);
+      }
+      if (canfuse(as, irx) && ra_noreg(irx->r)) {
+        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
+          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
+          idx = irx->op1;
+          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
+        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
+          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
+          idx = irx->op1;
+          as->mrm.scale = XM_SCALE2;
+        }
+      }
+      r = ra_alloc1(as, idx, allow);
+      rset_clear(allow, r);
+      as->mrm.idx = (uint8_t)r;
+    }
+  noadd:
+    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+  }
+}
+
+/* Fuse load into memory operand. */
+static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
+{
+  IRIns *ir = IR(ref);
+  if (ra_hasreg(ir->r)) {
+    if (allow != RSET_EMPTY) {  /* Fast path. */
+      ra_noweak(as, ir->r);
+      return ir->r;
+    }
+  fusespill:
+    /* Force a spill if only memory operands are allowed (asm_x87load). */
+    as->mrm.base = RID_ESP;
+    as->mrm.ofs = ra_spill(as, ir);
+    as->mrm.idx = RID_NONE;
+    return RID_MRM;
+  }
+  if (ir->o == IR_KNUM) {
+    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
+    lua_assert(allow != RSET_EMPTY);
+    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
+      as->mrm.ofs = ptr2addr(ir_knum(ir));
+      as->mrm.base = as->mrm.idx = RID_NONE;
+      return RID_MRM;
+    }
+  } else if (ir->o == IR_KINT64) {
+    RegSet avail = as->freeset & ~as->modset & RSET_GPR;
+    lua_assert(allow != RSET_EMPTY);
+    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
+      as->mrm.ofs = ptr2addr(ir_kint64(ir));
+      as->mrm.base = as->mrm.idx = RID_NONE;
+      return RID_MRM;
+    }
+  } else if (mayfuse(as, ref)) {
+    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
+    if (ir->o == IR_SLOAD) {
+      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
+          noconflict(as, ref, IR_RETF, 0)) {
+        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
+        as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
+        as->mrm.idx = RID_NONE;
+        return RID_MRM;
+      }
+    } else if (ir->o == IR_FLOAD) {
+      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
+      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
+          noconflict(as, ref, IR_FSTORE, 0)) {
+        asm_fusefref(as, ir, xallow);
+        return RID_MRM;
+      }
+    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
+      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
+        asm_fuseahuref(as, ir->op1, xallow);
+        return RID_MRM;
+      }
+    } else if (ir->o == IR_XLOAD) {
+      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
+      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
+      */
+      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
+          noconflict(as, ref, IR_XSTORE, 0)) {
+        asm_fusexref(as, ir->op1, xallow);
+        return RID_MRM;
+      }
+    } else if (ir->o == IR_VLOAD) {
+      asm_fuseahuref(as, ir->op1, xallow);
+      return RID_MRM;
+    }
+  }
+  if (!(as->freeset & allow) && !irref_isk(ref) &&
+      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
+    goto fusespill;
+  return ra_allocref(as, ref, allow);
+}
+
+#if LJ_64
+/* Don't fuse a 32 bit load into a 64 bit operation.
*/ +static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64) +{ + if (is64 && !irt_is64(IR(ref)->t)) + return ra_alloc1(as, ref, allow); + return asm_fuseload(as, ref, allow); +} +#else +#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow)) +#endif + +/* -- Calls --------------------------------------------------------------- */ + +/* Count the required number of stack slots for a call. */ +static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t i, nargs = CCI_XNARGS(ci); + int nslots = 0; +#if LJ_64 + if (LJ_ABI_WIN) { + nslots = (int)(nargs*2); /* Only matters for more than four args. */ + } else { + int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; + for (i = 0; i < nargs; i++) + if (args[i] && irt_isfp(IR(args[i])->t)) { + if (nfpr > 0) nfpr--; else nslots += 2; + } else { + if (ngpr > 0) ngpr--; else nslots += 2; + } + } +#else + int ngpr = 0; + if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL) + ngpr = 2; + else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL) + ngpr = 1; + for (i = 0; i < nargs; i++) + if (args[i] && irt_isfp(IR(args[i])->t)) { + nslots += irt_isnum(IR(args[i])->t) ? 2 : 1; + } else { + if (ngpr > 0) ngpr--; else nslots++; + } +#endif + return nslots; +} + +/* Generate a call to a C function. */ +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t n, nargs = CCI_XNARGS(ci); + int32_t ofs = STACKARG_OFS; +#if LJ_64 + uint32_t gprs = REGARG_GPRS; + Reg fpr = REGARG_FIRSTFPR; +#if !LJ_ABI_WIN + MCode *patchnfpr = NULL; +#endif +#else + uint32_t gprs = 0; + if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) { + if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL) + gprs = (REGARG_GPRS & 31); + else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL) + gprs = REGARG_GPRS; + } +#endif + if ((void *)ci->func) + emit_call(as, ci->func); +#if LJ_64 + if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */ +#if LJ_ABI_WIN + for (n = 0; n < 4 && n < nargs; n++) { + IRIns *ir = IR(args[n]); + if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */ + emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n), + ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */ + } +#else + patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */ + *--as->mcp = XI_MOVrib | RID_EAX; +#endif + } +#endif + for (n = 0; n < nargs; n++) { /* Setup args. */ + IRRef ref = args[n]; + IRIns *ir = IR(ref); + Reg r; +#if LJ_64 && LJ_ABI_WIN + /* Windows/x64 argument registers are strictly positional. */ + r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31); + fpr++; gprs >>= 5; +#elif LJ_64 + /* POSIX/x64 argument registers are used in order of appearance. */ + if (irt_isfp(ir->t)) { + r = fpr <= REGARG_LASTFPR ? fpr++ : 0; + } else { + r = gprs & 31; gprs >>= 5; + } +#else + if (ref && irt_isfp(ir->t)) { + r = 0; + } else { + r = gprs & 31; gprs >>= 5; + if (!ref) continue; + } +#endif + if (r) { /* Argument is in a register. */ + if (r < RID_MAX_GPR && ref < ASMREF_TMP1) { +#if LJ_64 + if (ir->o == IR_KINT64) + emit_loadu64(as, r, ir_kint64(ir)->u64); + else +#endif + emit_loadi(as, r, ir->i); + } else { + lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */ + if (ra_hasreg(ir->r)) { + ra_noweak(as, ir->r); + emit_movrr(as, ir, r, ir->r); + } else { + ra_allocref(as, ref, RID2RSET(r)); + } + } + } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ + lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. 
*/ + if (LJ_32 && (ofs & 4) && irref_isk(ref)) { + /* Split stores for unaligned FP consts. */ + emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); + emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi); + } else { + r = ra_alloc1(as, ref, RSET_FPR); + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, + r, RID_ESP, ofs); + } + ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8; + } else { /* Non-FP argument is on stack. */ + if (LJ_32 && ref < ASMREF_TMP1) { + emit_movmroi(as, RID_ESP, ofs, ir->i); + } else { + r = ra_alloc1(as, ref, RSET_GPR); + emit_movtomro(as, REX_64 + r, RID_ESP, ofs); + } + ofs += sizeof(intptr_t); + } + checkmclim(as); + } +#if LJ_64 && !LJ_ABI_WIN + if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR; +#endif +} + +/* Setup result reg/sp for call. Evict scratch regs. */ +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + RegSet drop = RSET_SCRATCH; + int hiop = (LJ_32 && (ir+1)->o == IR_HIOP); + if ((ci->flags & CCI_NOFPRCLOBBER)) + drop &= ~RSET_FPR; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + if (hiop && ra_hasreg((ir+1)->r)) + rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ + ra_evictset(as, drop); /* Evictions must be performed first. */ + if (ra_used(ir)) { + if (irt_isfp(ir->t)) { + int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ +#if LJ_64 + if ((ci->flags & CCI_CASTU64)) { + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */ + } + if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs); + } else { + ra_destreg(as, ir, RID_FPRET); + } +#else + /* Number result is in x87 st0 for x86 calling convention. */ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, + dest, RID_ESP, ofs); + } + if ((ci->flags & CCI_CASTU64)) { + emit_movtomro(as, RID_RETLO, RID_ESP, ofs); + emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4); + } else { + emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, + irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); + } +#endif +#if LJ_32 + } else if (hiop) { + ra_destpair(as, ir); +#endif + } else { + lua_assert(!irt_ispri(ir->t)); + ra_destreg(as, ir, RID_RET); + } + } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) { + emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */ + } +} + +/* Return a constant function pointer or NULL for indirect calls. */ +static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func) +{ +#if LJ_32 + UNUSED(as); + if (irref_isk(func)) + return (void *)irf->i; +#else + if (irref_isk(func)) { + MCode *p; + if (irf->o == IR_KINT64) + p = (MCode *)(void *)ir_k64(irf)->u64; + else + p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i; + if (p - as->mcp == (int32_t)(p - as->mcp)) + return p; /* Call target is still in +-2GB range. */ + /* Avoid the indirect case of emit_call(). Try to hoist func addr. */ + } +#endif + return NULL; +} + +static void asm_callx(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX*2]; + CCallInfo ci; + IRRef func; + IRIns *irf; + int32_t spadj = 0; + ci.flags = asm_callx_flags(as, ir); + asm_collectargs(as, ir, &ci, args); + asm_setupresult(as, ir, &ci); +#if LJ_32 + /* Have to readjust stack after non-cdecl calls due to callee cleanup. 
*/ + if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL) + spadj = 4 * asm_count_call_slots(as, &ci, args); +#endif + func = ir->op2; irf = IR(func); + if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } + ci.func = (ASMFunction)asm_callx_func(as, irf, func); + if (!(void *)ci.func) { + /* Use a (hoistable) non-scratch register for indirect calls. */ + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); + Reg r = ra_alloc1(as, func, allow); + if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */ + emit_rr(as, XO_GROUP5, XOg_CALL, r); + } else if (LJ_32) { + emit_spsub(as, spadj); + } + asm_gencall(as, &ci, args); +} + +/* -- Returns ------------------------------------------------------------- */ + +/* Return to lower frame. Guard that it goes to the right spot. */ +static void asm_retf(ASMState *as, IRIns *ir) +{ + Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); + void *pc = ir_kptr(IR(ir->op2)); + int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); + as->topslot -= (BCReg)delta; + if ((int32_t)as->topslot < 0) as->topslot = 0; + irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ + emit_setgl(as, base, jit_base); + emit_addptr(as, base, -8*delta); + asm_guardcc(as, CC_NE); + emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc)); +} + +/* -- Type conversions ---------------------------------------------------- */ + +static void asm_tointg(ASMState *as, IRIns *ir, Reg left) +{ + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_guardcc(as, CC_P); + asm_guardcc(as, CC_NE); + emit_rr(as, XO_UCOMISD, left, tmp); + emit_rr(as, XO_CVTSI2SD, tmp, dest); + emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */ + emit_rr(as, XO_CVTTSD2SI, dest, left); + /* Can't fuse since left is needed twice. */ +} + +static void asm_tobit(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg tmp = ra_noreg(IR(ir->op1)->r) ? + ra_alloc1(as, ir->op1, RSET_FPR) : + ra_scratch(as, RSET_FPR); + Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp)); + emit_rr(as, XO_MOVDto, tmp, dest); + emit_mrm(as, XO_ADDSD, tmp, right); + ra_left(as, tmp, ir->op1); +} + +static void asm_conv(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); + int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); + int stfp = (st == IRT_NUM || st == IRT_FLOAT); + IRRef lref = ir->op1; + lua_assert(irt_type(ir->t) != st); + lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ + if (irt_isfp(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + if (stfp) { /* FP to FP conversion. */ + Reg left = asm_fuseload(as, lref, RSET_FPR); + emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left); + if (left == dest) return; /* Avoid the XO_XORPS. */ + } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */ + /* number = (2^52+2^51 .. u32) - (2^52+2^51) */ + cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000)); + Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); + if (irt_isfloat(ir->t)) + emit_rr(as, XO_CVTSD2SS, dest, dest); + emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */ + emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */ + emit_loadn(as, bias, k); + emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR)); + return; + } else { /* Integer to FP conversion. */ + Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ? 
+ ra_alloc1(as, lref, RSET_GPR) : + asm_fuseloadm(as, lref, RSET_GPR, st64); + if (LJ_64 && st == IRT_U64) { + MCLabel l_end = emit_label(as); + const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000)); + emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */ + emit_sjcc(as, CC_NS, l_end); + emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */ + } + emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS, + dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left); + } + emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */ + } else if (stfp) { /* FP to integer conversion. */ + if (irt_isguard(ir->t)) { + /* Checked conversions are only supported from number to int. */ + lua_assert(irt_isint(ir->t) && st == IRT_NUM); + asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI; + if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) { + /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */ + /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */ + Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : + ra_scratch(as, RSET_FPR); + MCLabel l_end = emit_label(as); + if (LJ_32) + emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000); + emit_rr(as, op, dest|REX_64, tmp); + if (st == IRT_NUM) + emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J, + LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000))); + else + emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J, + LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000))); + emit_sjcc(as, CC_NS, l_end); + emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */ + emit_rr(as, op, dest|REX_64, tmp); + ra_left(as, tmp, lref); + } else { + Reg left = asm_fuseload(as, lref, RSET_FPR); + if (LJ_64 && irt_isu32(ir->t)) + emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */ + emit_mrm(as, op, + dest|((LJ_64 && + (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0), + left); + } + } + } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ + Reg left, dest = ra_dest(as, ir, RSET_GPR); + RegSet allow = RSET_GPR; + x86Op op; + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); + if (st == IRT_I8) { + op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; + } else if (st == IRT_U8) { + op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX; + } else if (st == IRT_I16) { + op = XO_MOVSXw; + } else { + op = XO_MOVZXw; + } + left = asm_fuseload(as, lref, allow); + /* Add extra MOV if source is already in wrong register. */ + if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) { + Reg tmp = ra_scratch(as, allow); + emit_rr(as, op, dest, tmp); + emit_rr(as, XO_MOV, tmp, left); + } else { + emit_mrm(as, op, dest, left); + } + } else { /* 32/64 bit integer conversions. */ + if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */ + Reg dest = ra_dest(as, ir, RSET_GPR); + ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ + } else if (irt_is64(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st64 || !(ir->op2 & IRCONV_SEXT)) { + /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */ + ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ + } else { /* 32 to 64 bit sign extension. 
*/ + Reg left = asm_fuseload(as, lref, RSET_GPR); + emit_mrm(as, XO_MOVSXd, dest|REX_64, left); + } + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st64) { + Reg left = asm_fuseload(as, lref, RSET_GPR); + /* This is either a 32 bit reg/reg mov which zeroes the hiword + ** or a load of the loword from a 64 bit address. + */ + emit_mrm(as, XO_MOV, dest, left); + } else { /* 32/32 bit no-op (cast). */ + ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ + } + } + } +} + +#if LJ_32 && LJ_HASFFI +/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */ + +/* 64 bit integer to FP conversion in 32 bit mode. */ +static void asm_conv_fp_int64(ASMState *as, IRIns *ir) +{ + Reg hi = ra_alloc1(as, ir->op1, RSET_GPR); + Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi)); + int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs); + } + emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, + irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); + if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) { + /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */ + MCLabel l_end = emit_label(as); + emit_rma(as, XO_FADDq, XOg_FADDq, + lj_ir_k64_find(as->J, U64x(43f00000,00000000))); + emit_sjcc(as, CC_NS, l_end); + emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ + } else { + lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); + } + emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); + /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ + emit_rmro(as, XO_MOVto, hi, RID_ESP, 4); + emit_rmro(as, XO_MOVto, lo, RID_ESP, 0); +} + +/* FP to 64 bit integer conversion in 32 bit mode. */ +static void asm_conv_int64_fp(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); + IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); + Reg lo, hi; + lua_assert(st == IRT_NUM || st == IRT_FLOAT); + lua_assert(dt == IRT_I64 || dt == IRT_U64); + hi = ra_dest(as, ir, RSET_GPR); + lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); + if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); + /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */ + if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */ + emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4); + emit_rmro(as, XO_MOVto, lo, RID_ESP, 4); + emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff); + } + if (dt == IRT_U64) { + /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ + MCLabel l_pop, l_end = emit_label(as); + emit_x87op(as, XI_FPOP); + l_pop = emit_label(as); + emit_sjmp(as, l_end); + emit_rmro(as, XO_MOV, hi, RID_ESP, 4); + if ((as->flags & JIT_F_SSE3)) + emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); + else + emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); + emit_rma(as, XO_FADDq, XOg_FADDq, + lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); + emit_sjcc(as, CC_NS, l_pop); + emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */ + } + emit_rmro(as, XO_MOV, hi, RID_ESP, 4); + if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */ + emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); + } else { /* Otherwise set FPU rounding mode to truncate before the store. 
*/ + emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); + emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0); + emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0); + emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0); + emit_loadi(as, lo, 0xc00); + emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0); + } + if (dt == IRT_U64) + emit_x87op(as, XI_FDUP); + emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd, + st == IRT_NUM ? XOg_FLDq: XOg_FLDd, + asm_fuseload(as, ir->op1, RSET_EMPTY)); +} + +static void asm_conv64(ASMState *as, IRIns *ir) +{ + if (irt_isfp(ir->t)) + asm_conv_fp_int64(as, ir); + else + asm_conv_int64_fp(as, ir); +} +#endif + +static void asm_strto(ASMState *as, IRIns *ir) +{ + /* Force a spill slot for the destination register (if any). */ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; + IRRef args[2]; + RegSet drop = RSET_SCRATCH; + if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r)) + rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */ + ra_evictset(as, drop); + asm_guardcc(as, CC_E); + emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */ + args[0] = ir->op1; /* GCstr *str */ + args[1] = ASMREF_TMP1; /* TValue *n */ + asm_gencall(as, ci, args); + /* Store the result to the spill slot or temp slots. */ + emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, + RID_ESP, sps_scale(ir->s)); +} + +/* -- Memory references --------------------------------------------------- */ + +/* Get pointer to TValue. */ +static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) +{ + IRIns *ir = IR(ref); + if (irt_isnum(ir->t)) { + /* For numbers use the constant itself or a spill slot as a TValue. */ + if (irref_isk(ref)) + emit_loada(as, dest, ir_knum(ir)); + else + emit_rmro(as, XO_LEA, dest|REX_64, RID_ESP, ra_spill(as, ir)); + } else { + /* Otherwise use g->tmptv to hold the TValue. */ + if (!irref_isk(ref)) { + Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest)); + emit_movtomro(as, REX_64IR(ir, src), dest, 0); + } else if (!irt_ispri(ir->t)) { + emit_movmroi(as, dest, 0, ir->i); + } + if (!(LJ_64 && irt_islightud(ir->t))) + emit_movmroi(as, dest, 4, irt_toitype(ir->t)); + emit_loada(as, dest, &J2G(as->J)->tmptv); + } +} + +static void asm_aref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_fusearef(as, ir, RSET_GPR); + if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0)) + emit_mrm(as, XO_LEA, dest, RID_MRM); + else if (as->mrm.base != dest) + emit_rr(as, XO_MOV, dest, as->mrm.base); +} + +/* Inlined hash lookup. Specialized for key type and for const keys. +** The equivalent C code is: +** Node *n = hashkey(t, key); +** do { +** if (lj_obj_equal(&n->key, key)) return &n->val; +** } while ((n = nextnode(n))); +** return niltv(L); +*/ +static void asm_href(ASMState *as, IRIns *ir, IROp merge) +{ + RegSet allow = RSET_GPR; + int destused = ra_used(ir); + Reg dest = ra_dest(as, ir, allow); + Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); + Reg key = RID_NONE, tmp = RID_NONE; + IRIns *irkey = IR(ir->op2); + int isk = irref_isk(ir->op2); + IRType1 kt = irkey->t; + uint32_t khash; + MCLabel l_end, l_loop, l_next; + + if (!isk) { + rset_clear(allow, tab); + key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow); + if (!irt_isstr(kt)) + tmp = ra_scratch(as, rset_exclude(allow, key)); + } + + /* Key not found in chain: jump to exit (if merged) or load niltv. */ + l_end = emit_label(as); + if (merge == IR_NE) + asm_guardcc(as, CC_E); /* XI_JMP is not found by lj_asm_patchexit. 
*/ + else if (destused) + emit_loada(as, dest, niltvg(J2G(as->J))); + + /* Follow hash chain until the end. */ + l_loop = emit_sjcc_label(as, CC_NZ); + emit_rr(as, XO_TEST, dest, dest); + emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next)); + l_next = emit_label(as); + + /* Type and value comparison. */ + if (merge == IR_EQ) + asm_guardcc(as, CC_E); + else + emit_sjcc(as, CC_E, l_end); + if (irt_isnum(kt)) { + if (isk) { + /* Assumes -0.0 is already canonicalized to +0.0. */ + emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo), + (int32_t)ir_knum(irkey)->u32.lo); + emit_sjcc(as, CC_NE, l_next); + emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi), + (int32_t)ir_knum(irkey)->u32.hi); + } else { + emit_sjcc(as, CC_P, l_next); + emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n)); + emit_sjcc(as, CC_AE, l_next); + /* The type check avoids NaN penalties and complaints from Valgrind. */ +#if LJ_64 + emit_u32(as, LJ_TISNUM); + emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); +#else + emit_i8(as, LJ_TISNUM); + emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); +#endif + } +#if LJ_64 + } else if (irt_islightud(kt)) { + emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64)); +#endif + } else { + if (!irt_ispri(kt)) { + lua_assert(irt_isaddr(kt)); + if (isk) + emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), + ptr2addr(ir_kgc(irkey))); + else + emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); + emit_sjcc(as, CC_NE, l_next); + } + lua_assert(!irt_isnil(kt)); + emit_i8(as, irt_toitype(kt)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); + } + emit_sfixup(as, l_loop); + checkmclim(as); + + /* Load main position relative to tab->node into dest. */ + khash = isk ? ir_khash(irkey) : 1; + if (khash == 0) { + emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node)); + } else { + emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node)); + if ((as->flags & JIT_F_PREFER_IMUL)) { + emit_i8(as, sizeof(Node)); + emit_rr(as, XO_IMULi8, dest, dest); + } else { + emit_shifti(as, XOg_SHL, dest, 3); + emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0); + } + if (isk) { + emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash); + emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); + } else if (irt_isstr(kt)) { + emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash)); + emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); + } else { /* Must match with hashrot() in lj_tab.c. 
*/ + emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask)); + emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp); + emit_shifti(as, XOg_ROL, tmp, HASH_ROT3); + emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp); + emit_shifti(as, XOg_ROL, dest, HASH_ROT2); + emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest); + emit_shifti(as, XOg_ROL, dest, HASH_ROT1); + emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest); + if (irt_isnum(kt)) { + emit_rr(as, XO_ARITH(XOg_ADD), dest, dest); +#if LJ_64 + emit_shifti(as, XOg_SHR|REX_64, dest, 32); + emit_rr(as, XO_MOV, tmp, dest); + emit_rr(as, XO_MOVDto, key|REX_64, dest); +#else + emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4); + emit_rr(as, XO_MOVDto, key, tmp); +#endif + } else { + emit_rr(as, XO_MOV, tmp, key); + emit_rmro(as, XO_LEA, dest, key, HASH_BIAS); + } + } + } +} + +static void asm_hrefk(ASMState *as, IRIns *ir) +{ + IRIns *kslot = IR(ir->op2); + IRIns *irkey = IR(kslot->op1); + int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); + Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; + Reg node = ra_alloc1(as, ir->op1, RSET_GPR); +#if !LJ_64 + MCLabel l_exit; +#endif + lua_assert(ofs % sizeof(Node) == 0); + if (ra_hasreg(dest)) { + if (ofs != 0) { + if (dest == node && !(as->flags & JIT_F_LEA_AGU)) + emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs); + else + emit_rmro(as, XO_LEA, dest, node, ofs); + } else if (dest != node) { + emit_rr(as, XO_MOV, dest, node); + } + } + asm_guardcc(as, CC_NE); +#if LJ_64 + if (!irt_ispri(irkey->t)) { + Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); + emit_rmro(as, XO_CMP, key|REX_64, node, + ofs + (int32_t)offsetof(Node, key.u64)); + lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); + /* Assumes -0.0 is already canonicalized to +0.0. */ + emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : + ((uint64_t)irt_toitype(irkey->t) << 32) | + (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); + } else { + lua_assert(!irt_isnil(irkey->t)); + emit_i8(as, irt_toitype(irkey->t)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, node, + ofs + (int32_t)offsetof(Node, key.it)); + } +#else + l_exit = emit_label(as); + if (irt_isnum(irkey->t)) { + /* Assumes -0.0 is already canonicalized to +0.0. */ + emit_gmroi(as, XG_ARITHi(XOg_CMP), node, + ofs + (int32_t)offsetof(Node, key.u32.lo), + (int32_t)ir_knum(irkey)->u32.lo); + emit_sjcc(as, CC_NE, l_exit); + emit_gmroi(as, XG_ARITHi(XOg_CMP), node, + ofs + (int32_t)offsetof(Node, key.u32.hi), + (int32_t)ir_knum(irkey)->u32.hi); + } else { + if (!irt_ispri(irkey->t)) { + lua_assert(irt_isgcv(irkey->t)); + emit_gmroi(as, XG_ARITHi(XOg_CMP), node, + ofs + (int32_t)offsetof(Node, key.gcr), + ptr2addr(ir_kgc(irkey))); + emit_sjcc(as, CC_NE, l_exit); + } + lua_assert(!irt_isnil(irkey->t)); + emit_i8(as, irt_toitype(irkey->t)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, node, + ofs + (int32_t)offsetof(Node, key.it)); + } +#endif +} + +static void asm_uref(ASMState *as, IRIns *ir) +{ + /* NYI: Check that UREFO is still open and not aliasing a slot. 
*/ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; + emit_rma(as, XO_MOV, dest, v); + } else { + Reg uv = ra_scratch(as, RSET_GPR); + Reg func = ra_alloc1(as, ir->op1, RSET_GPR); + if (ir->o == IR_UREFC) { + emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv)); + asm_guardcc(as, CC_NE); + emit_i8(as, 1); + emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed)); + } else { + emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v)); + } + emit_rmro(as, XO_MOV, uv, func, + (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); + } +} + +static void asm_fref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_fusefref(as, ir, RSET_GPR); + emit_mrm(as, XO_LEA, dest, RID_MRM); +} + +static void asm_strref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_fusestrref(as, ir, RSET_GPR); + if (as->mrm.base == RID_NONE) + emit_loadi(as, dest, as->mrm.ofs); + else if (as->mrm.base == dest && as->mrm.idx == RID_NONE) + emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs); + else + emit_mrm(as, XO_LEA, dest, RID_MRM); +} + +/* -- Loads and stores ---------------------------------------------------- */ + +static void asm_fxload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); + x86Op xo; + if (ir->o == IR_FLOAD) + asm_fusefref(as, ir, RSET_GPR); + else + asm_fusexref(as, ir->op1, RSET_GPR); + /* ir->op2 is ignored -- unaligned loads are ok on x86. */ + switch (irt_type(ir->t)) { + case IRT_I8: xo = XO_MOVSXb; break; + case IRT_U8: xo = XO_MOVZXb; break; + case IRT_I16: xo = XO_MOVSXw; break; + case IRT_U16: xo = XO_MOVZXw; break; + case IRT_NUM: xo = XO_MOVSD; break; + case IRT_FLOAT: xo = XO_MOVSS; break; + default: + if (LJ_64 && irt_is64(ir->t)) + dest |= REX_64; + else + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); + xo = XO_MOV; + break; + } + emit_mrm(as, xo, dest, RID_MRM); +} + +#define asm_fload(as, ir) asm_fxload(as, ir) +#define asm_xload(as, ir) asm_fxload(as, ir) + +static void asm_fxstore(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_GPR; + Reg src = RID_NONE, osrc = RID_NONE; + int32_t k = 0; + if (ir->r == RID_SINK) + return; + /* The IRT_I16/IRT_U16 stores should never be simplified for constant + ** values since mov word [mem], imm16 has a length-changing prefix. + */ + if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) || + !asm_isk32(as, ir->op2, &k)) { + RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR : + (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR; + src = osrc = ra_alloc1(as, ir->op2, allow8); + if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */ + rset_clear(allow, osrc); + src = ra_scratch(as, allow8); + } + rset_clear(allow, src); + } + if (ir->o == IR_FSTORE) { + asm_fusefref(as, IR(ir->op1), allow); + } else { + asm_fusexref(as, ir->op1, allow); + if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4; + } + if (ra_hasreg(src)) { + x86Op xo; + switch (irt_type(ir->t)) { + case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break; + case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; + case IRT_NUM: xo = XO_MOVSDto; break; + case IRT_FLOAT: xo = XO_MOVSSto; break; +#if LJ_64 + case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. 
*/ +#endif + default: + if (LJ_64 && irt_is64(ir->t)) + src |= REX_64; + else + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); + xo = XO_MOVto; + break; + } + emit_mrm(as, xo, src, RID_MRM); + if (!LJ_64 && src != osrc) { + ra_noweak(as, osrc); + emit_rr(as, XO_MOV, src, osrc); + } + } else { + if (irt_isi8(ir->t) || irt_isu8(ir->t)) { + emit_i8(as, k); + emit_mrm(as, XO_MOVmib, 0, RID_MRM); + } else { + lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || + irt_isaddr(ir->t)); + emit_i32(as, k); + emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); + } + } +} + +#define asm_fstore(as, ir) asm_fxstore(as, ir) +#define asm_xstore(as, ir) asm_fxstore(as, ir) + +#if LJ_64 +static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) +{ + if (ra_used(ir) || typecheck) { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (typecheck) { + Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); + asm_guardcc(as, CC_NE); + emit_i8(as, -2); + emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); + emit_shifti(as, XOg_SAR|REX_64, tmp, 47); + emit_rr(as, XO_MOV, tmp|REX_64, dest); + } + return dest; + } else { + return RID_NONE; + } +} +#endif + +static void asm_ahuvload(ASMState *as, IRIns *ir) +{ + lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || + (LJ_DUALNUM && irt_isint(ir->t))); +#if LJ_64 + if (irt_islightud(ir->t)) { + Reg dest = asm_load_lightud64(as, ir, 1); + if (ra_hasreg(dest)) { + asm_fuseahuref(as, ir->op1, RSET_GPR); + emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); + } + return; + } else +#endif + if (ra_used(ir)) { + RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; + Reg dest = ra_dest(as, ir, allow); + asm_fuseahuref(as, ir->op1, RSET_GPR); + emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM); + } else { + asm_fuseahuref(as, ir->op1, RSET_GPR); + } + /* Always do the type check, even if the load result is unused. */ + as->mrm.ofs += 4; + asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); + if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { + lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); + emit_u32(as, LJ_TISNUM); + emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); + } else { + emit_i8(as, irt_toitype(ir->t)); + emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); + } +} + +static void asm_ahustore(ASMState *as, IRIns *ir) +{ + if (ir->r == RID_SINK) + return; + if (irt_isnum(ir->t)) { + Reg src = ra_alloc1(as, ir->op2, RSET_FPR); + asm_fuseahuref(as, ir->op1, RSET_GPR); + emit_mrm(as, XO_MOVSDto, src, RID_MRM); +#if LJ_64 + } else if (irt_islightud(ir->t)) { + Reg src = ra_alloc1(as, ir->op2, RSET_GPR); + asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); + emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); +#endif + } else { + IRIns *irr = IR(ir->op2); + RegSet allow = RSET_GPR; + Reg src = RID_NONE; + if (!irref_isk(ir->op2)) { + src = ra_alloc1(as, ir->op2, allow); + rset_clear(allow, src); + } + asm_fuseahuref(as, ir->op1, allow); + if (ra_hasreg(src)) { + emit_mrm(as, XO_MOVto, src, RID_MRM); + } else if (!irt_ispri(irr->t)) { + lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); + emit_i32(as, irr->i); + emit_mrm(as, XO_MOVmi, 0, RID_MRM); + } + as->mrm.ofs += 4; + emit_i32(as, (int32_t)irt_toitype(ir->t)); + emit_mrm(as, XO_MOVmi, 0, RID_MRM); + } +} + +static void asm_sload(ASMState *as, IRIns *ir) +{ + int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); + IRType1 t = ir->t; + Reg base; + lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). 
*/ + lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); + lua_assert(LJ_DUALNUM || + !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); + if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { + Reg left = ra_scratch(as, RSET_FPR); + asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ + base = ra_alloc1(as, REF_BASE, RSET_GPR); + emit_rmro(as, XO_MOVSD, left, base, ofs); + t.irt = IRT_NUM; /* Continue with a regular number type check. */ +#if LJ_64 + } else if (irt_islightud(t)) { + Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); + if (ra_hasreg(dest)) { + base = ra_alloc1(as, REF_BASE, RSET_GPR); + emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); + } + return; +#endif + } else if (ra_used(ir)) { + RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; + Reg dest = ra_dest(as, ir, allow); + base = ra_alloc1(as, REF_BASE, RSET_GPR); + lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); + if ((ir->op2 & IRSLOAD_CONVERT)) { + t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ + emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs); + } else { + emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs); + } + } else { + if (!(ir->op2 & IRSLOAD_TYPECHECK)) + return; /* No type check: avoid base alloc. */ + base = ra_alloc1(as, REF_BASE, RSET_GPR); + } + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + /* Need type check, even if the load result is unused. */ + asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); + if (LJ_64 && irt_type(t) >= IRT_NUM) { + lua_assert(irt_isinteger(t) || irt_isnum(t)); + emit_u32(as, LJ_TISNUM); + emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); + } else { + emit_i8(as, irt_toitype(t)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); + } + } +} + +/* -- Allocations --------------------------------------------------------- */ + +#if LJ_HASFFI +static void asm_cnew(ASMState *as, IRIns *ir) +{ + CTState *cts = ctype_ctsG(J2G(as->J)); + CTypeID id = (CTypeID)IR(ir->op1)->i; + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; + IRRef args[4]; + lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); + + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCcdata * */ + + /* Initialize immutable cdata object. */ + if (ir->o == IR_CNEWI) { + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); +#if LJ_64 + Reg r64 = sz == 8 ? REX_64 : 0; + if (irref_isk(ir->op2)) { + IRIns *irk = IR(ir->op2); + uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : + (uint64_t)(uint32_t)irk->i; + if (sz == 4 || checki32((int64_t)k)) { + emit_i32(as, (int32_t)k); + emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); + } else { + emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); + emit_loadu64(as, RID_ECX, k); + } + } else { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); + } +#else + int32_t ofs = sizeof(GCcdata); + if (sz == 8) { + ofs += 4; ir++; + lua_assert(ir->o == IR_HIOP); + } + do { + if (irref_isk(ir->op2)) { + emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); + } else { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_movtomro(as, r, RID_RET, ofs); + rset_clear(allow, r); + } + if (ofs == sizeof(GCcdata)) break; + ofs -= 4; ir--; + } while (1); +#endif + lua_assert(sz == 4 || sz == 8); + } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. 
*/ + ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* CTypeID id */ + args[2] = ir->op2; /* CTSize sz */ + args[3] = ASMREF_TMP1; /* CTSize align */ + asm_gencall(as, ci, args); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); + return; + } + + /* Combine initialization of marked, gct and ctypeid. */ + emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); + emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, + (int32_t)((~LJ_TCDATA<<8)+(id<<16))); + emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); + emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); + + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* MSize size */ + asm_gencall(as, ci, args); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); +} +#else +#define asm_cnew(as, ir) ((void)0) +#endif + +/* -- Write barriers ------------------------------------------------------ */ + +static void asm_tbar(ASMState *as, IRIns *ir) +{ + Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); + Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); + MCLabel l_end = emit_label(as); + emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); + emit_setgl(as, tab, gc.grayagain); + emit_getgl(as, tmp, gc.grayagain); + emit_i8(as, ~LJ_GC_BLACK); + emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); + emit_sjcc(as, CC_Z, l_end); + emit_i8(as, LJ_GC_BLACK); + emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); +} + +static void asm_obar(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; + IRRef args[2]; + MCLabel l_end; + Reg obj; + /* No need for other object barriers (yet). */ + lua_assert(IR(ir->op1)->o == IR_UREFC); + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ir->op1; /* TValue *tv */ + asm_gencall(as, ci, args); + emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); + obj = IR(ir->op1)->r; + emit_sjcc(as, CC_Z, l_end); + emit_i8(as, LJ_GC_WHITES); + if (irref_isk(ir->op2)) { + GCobj *vp = ir_kgc(IR(ir->op2)); + emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); + } else { + Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); + emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); + } + emit_sjcc(as, CC_Z, l_end); + emit_i8(as, LJ_GC_BLACK); + emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, + (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); +} + +/* -- FP/int arithmetic and logic operations ------------------------------ */ + +/* Load reference onto x87 stack. Force a spill to memory if needed. */ +static void asm_x87load(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ir->o == IR_KNUM) { + cTValue *tv = ir_knum(ir); + if (tvispzero(tv)) /* Use fldz only for +0. 
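+** (FLDZ always loads +0.0; a -0.0 constant has a different bit pattern and
+** falls through to the generic fld qword load of the constant below.)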
*/ + emit_x87op(as, XI_FLDZ); + else if (tvispone(tv)) + emit_x87op(as, XI_FLD1); + else + emit_rma(as, XO_FLDq, XOg_FLDq, tv); + } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && + !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { + IRIns *iri = IR(ir->op1); + emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); + } else { + emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); + } +} + +static void asm_fpmath(ASMState *as, IRIns *ir) +{ + IRFPMathOp fpm = (IRFPMathOp)ir->op2; + if (fpm == IRFPM_SQRT) { + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = asm_fuseload(as, ir->op1, RSET_FPR); + emit_mrm(as, XO_SQRTSD, dest, left); + } else if (fpm <= IRFPM_TRUNC) { + if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = asm_fuseload(as, ir->op1, RSET_FPR); + /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. + ** Let's pretend it's a 3-byte opcode, and compensate afterwards. + ** This is atrocious, but the alternatives are much worse. + */ + /* Round down/up/trunc == 1001/1010/1011. */ + emit_i8(as, 0x09 + fpm); + emit_mrm(as, XO_ROUNDSD, dest, left); + if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { + as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ + } + *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ + } else { /* Call helper functions for SSE2 variant. */ + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + ra_destreg(as, ir, RID_XMM0); + emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : + fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); + ra_left(as, RID_XMM0, ir->op1); + } + } else if (fpm == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) { + /* Rejoined to pow(). */ + } else { + asm_callid(as, ir, IRCALL_lj_vm_floor + fpm); + } +} + +#define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) + +static void asm_ldexp(ASMState *as, IRIns *ir) +{ + int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs); + } + emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); + emit_x87op(as, XI_FPOP1); + emit_x87op(as, XI_FSCALE); + asm_x87load(as, ir->op1); + asm_x87load(as, ir->op2); +} + +static void asm_fppowi(ASMState *as, IRIns *ir) +{ + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + ra_destreg(as, ir, RID_XMM0); + emit_call(as, lj_vm_powi_sse); + ra_left(as, RID_XMM0, ir->op1); + ra_left(as, RID_EAX, ir->op2); +} + +static void asm_pow(ASMState *as, IRIns *ir) +{ +#if LJ_64 && LJ_HASFFI + if (!irt_isnum(ir->t)) + asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : + IRCALL_lj_carith_powu64); + else +#endif + asm_fppowi(as, ir); +} + +static int asm_swapops(ASMState *as, IRIns *ir) +{ + IRIns *irl = IR(ir->op1); + IRIns *irr = IR(ir->op2); + lua_assert(ra_noreg(irr->r)); + if (!irm_iscomm(lj_ir_mode[ir->o])) + return 0; /* Can't swap non-commutative operations. */ + if (irref_isk(ir->op2)) + return 0; /* Don't swap constants to the left. 
*/ + if (ra_hasreg(irl->r)) + return 1; /* Swap if left already has a register. */ + if (ra_samehint(ir->r, irr->r)) + return 1; /* Swap if dest and right have matching hints. */ + if (as->curins > as->loopref) { /* In variant part? */ + if (ir->op2 < as->loopref && !irt_isphi(irr->t)) + return 0; /* Keep invariants on the right. */ + if (ir->op1 < as->loopref && !irt_isphi(irl->t)) + return 1; /* Swap invariants to the right. */ + } + if (opisfusableload(irl->o)) + return 1; /* Swap fusable loads to the right. */ + return 0; /* Otherwise don't swap. */ +} + +static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) +{ + IRRef lref = ir->op1; + IRRef rref = ir->op2; + RegSet allow = RSET_FPR; + Reg dest; + Reg right = IR(rref)->r; + if (ra_hasreg(right)) { + rset_clear(allow, right); + ra_noweak(as, right); + } + dest = ra_dest(as, ir, allow); + if (lref == rref) { + right = dest; + } else if (ra_noreg(right)) { + if (asm_swapops(as, ir)) { + IRRef tmp = lref; lref = rref; rref = tmp; + } + right = asm_fuseload(as, rref, rset_clear(allow, dest)); + } + emit_mrm(as, xo, dest, right); + ra_left(as, dest, lref); +} + +static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) +{ + IRRef lref = ir->op1; + IRRef rref = ir->op2; + RegSet allow = RSET_GPR; + Reg dest, right; + int32_t k = 0; + if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ + MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2); + if ((p[1] & 15) < 14) { + if ((p[1] & 15) >= 12) p[1] -= 4; /* L <->S, NL <-> NS */ + as->flagmcp = NULL; + as->mcp = p; + } /* else: cannot transform LE/NLE to cc without use of OF. */ + } + right = IR(rref)->r; + if (ra_hasreg(right)) { + rset_clear(allow, right); + ra_noweak(as, right); + } + dest = ra_dest(as, ir, allow); + if (lref == rref) { + right = dest; + } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { + if (asm_swapops(as, ir)) { + IRRef tmp = lref; lref = rref; rref = tmp; + } + right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t)); + } + if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ + asm_guardcc(as, CC_O); + if (xa != XOg_X_IMUL) { + if (ra_hasreg(right)) + emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); + else + emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); + } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ + emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); + } else { /* IMUL r, r, k. */ + /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ + Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t)); + x86Op xo; + if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; + } else { emit_i32(as, k); xo = XO_IMULi; } + emit_mrm(as, xo, REX_64IR(ir, dest), left); + return; + } + ra_left(as, dest, lref); +} + +/* LEA is really a 4-operand ADD with an independent destination register, +** up to two source registers and an immediate. One register can be scaled +** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several +** instructions. +** +** Currently only a few common cases are supported: +** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated +** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b +** - Right ADD fusion: y = a+(b+k) +** The ommited variants have already been reduced by FOLD. +** +** There are more fusion opportunities, like gathering shifts or joining +** common references. But these are probably not worth the trouble, since +** array indexing is not decomposed and already makes use of all fields +** of the ModRM operand. 
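+**
+** Illustrative examples of the supported cases:
+**   y = a+b  with a and b in registers        ->  lea y, [a+b]
+**   y = a+k  with a in a register, k constant ->  lea y, [a+k]
+**   y = (a+k)+b  or  y = a+(b+k)              ->  lea y, [a+b+k]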
+*/ +static int asm_lea(ASMState *as, IRIns *ir) +{ + IRIns *irl = IR(ir->op1); + IRIns *irr = IR(ir->op2); + RegSet allow = RSET_GPR; + Reg dest; + as->mrm.base = as->mrm.idx = RID_NONE; + as->mrm.scale = XM_SCALE1; + as->mrm.ofs = 0; + if (ra_hasreg(irl->r)) { + rset_clear(allow, irl->r); + ra_noweak(as, irl->r); + as->mrm.base = irl->r; + if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { + /* The PHI renaming logic does a better job in some cases. */ + if (ra_hasreg(ir->r) && + ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || + (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) + return 0; + if (irref_isk(ir->op2)) { + as->mrm.ofs = irr->i; + } else { + rset_clear(allow, irr->r); + ra_noweak(as, irr->r); + as->mrm.idx = irr->r; + } + } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && + irref_isk(irr->op2)) { + Reg idx = ra_alloc1(as, irr->op1, allow); + rset_clear(allow, idx); + as->mrm.idx = (uint8_t)idx; + as->mrm.ofs = IR(irr->op2)->i; + } else { + return 0; + } + } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && + (irref_isk(ir->op2) || irref_isk(irl->op2))) { + Reg idx, base = ra_alloc1(as, irl->op1, allow); + rset_clear(allow, base); + as->mrm.base = (uint8_t)base; + if (irref_isk(ir->op2)) { + as->mrm.ofs = irr->i; + idx = ra_alloc1(as, irl->op2, allow); + } else { + as->mrm.ofs = IR(irl->op2)->i; + idx = ra_alloc1(as, ir->op2, allow); + } + rset_clear(allow, idx); + as->mrm.idx = (uint8_t)idx; + } else { + return 0; + } + dest = ra_dest(as, ir, allow); + emit_mrm(as, XO_LEA, dest, RID_MRM); + return 1; /* Success. */ +} + +static void asm_add(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_ADDSD); + else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || + irt_is64(ir->t) || !asm_lea(as, ir)) + asm_intarith(as, ir, XOg_ADD); +} + +static void asm_sub(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_SUBSD); + else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ + asm_intarith(as, ir, XOg_SUB); +} + +static void asm_mul(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_MULSD); + else + asm_intarith(as, ir, XOg_X_IMUL); +} + +static void asm_div(ASMState *as, IRIns *ir) +{ +#if LJ_64 && LJ_HASFFI + if (!irt_isnum(ir->t)) + asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : + IRCALL_lj_carith_divu64); + else +#endif + asm_fparith(as, ir, XO_DIVSD); +} + +static void asm_mod(ASMState *as, IRIns *ir) +{ +#if LJ_64 && LJ_HASFFI + if (!irt_isint(ir->t)) + asm_callid(as, ir, irt_isi64(ir->t) ? 
IRCALL_lj_carith_modi64 : + IRCALL_lj_carith_modu64); + else +#endif + asm_callid(as, ir, IRCALL_lj_vm_modi); +} + +static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); + ra_left(as, dest, ir->op1); +} + +static void asm_neg(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_XORPS); + else + asm_neg_not(as, ir, XOg_NEG); +} + +#define asm_abs(as, ir) asm_fparith(as, ir, XO_ANDPS) + +static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) +{ + Reg right, dest = ra_dest(as, ir, RSET_GPR); + IRRef lref = ir->op1, rref = ir->op2; + if (irref_isk(rref)) { lref = rref; rref = ir->op1; } + right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); + emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); + emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); + ra_left(as, dest, lref); +} + +static void asm_min(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_MINSD); + else + asm_intmin_max(as, ir, CC_G); +} + +static void asm_max(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_MAXSD); + else + asm_intmin_max(as, ir, CC_L); +} + +/* Note: don't use LEA for overflow-checking arithmetic! */ +#define asm_addov(as, ir) asm_intarith(as, ir, XOg_ADD) +#define asm_subov(as, ir) asm_intarith(as, ir, XOg_SUB) +#define asm_mulov(as, ir) asm_intarith(as, ir, XOg_X_IMUL) + +#define asm_bnot(as, ir) asm_neg_not(as, ir, XOg_NOT) + +static void asm_bswap(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), + REX_64IR(ir, 0), dest, 0, as->mcp, 1); + ra_left(as, dest, ir->op1); +} + +#define asm_band(as, ir) asm_intarith(as, ir, XOg_AND) +#define asm_bor(as, ir) asm_intarith(as, ir, XOg_OR) +#define asm_bxor(as, ir) asm_intarith(as, ir, XOg_XOR) + +static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) +{ + IRRef rref = ir->op2; + IRIns *irr = IR(rref); + Reg dest; + if (irref_isk(rref)) { /* Constant shifts. */ + int shift; + dest = ra_dest(as, ir, RSET_GPR); + shift = irr->i & (irt_is64(ir->t) ? 63 : 31); + switch (shift) { + case 0: break; + case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; + default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; + } + } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ + Reg right; + dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX)); + if (dest == RID_ECX) { + dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX)); + emit_rr(as, XO_MOV, RID_ECX, dest); + } + right = irr->r; + if (ra_noreg(right)) + right = ra_allocref(as, rref, RID2RSET(RID_ECX)); + else if (right != RID_ECX) + ra_scratch(as, RID2RSET(RID_ECX)); + emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); + ra_noweak(as, right); + if (right != RID_ECX) + emit_rr(as, XO_MOV, RID_ECX, right); + } + ra_left(as, dest, ir->op1); + /* + ** Note: avoid using the flags resulting from a shift or rotate! + ** All of them cause a partial flag stall, except for r,1 shifts + ** (but not rotates). And a shift count of 0 leaves the flags unmodified. 
+ */ +} + +#define asm_bshl(as, ir) asm_bitshift(as, ir, XOg_SHL) +#define asm_bshr(as, ir) asm_bitshift(as, ir, XOg_SHR) +#define asm_bsar(as, ir) asm_bitshift(as, ir, XOg_SAR) +#define asm_brol(as, ir) asm_bitshift(as, ir, XOg_ROL) +#define asm_bror(as, ir) asm_bitshift(as, ir, XOg_ROR) + +/* -- Comparisons --------------------------------------------------------- */ + +/* Virtual flags for unordered FP comparisons. */ +#define VCC_U 0x1000 /* Unordered. */ +#define VCC_P 0x2000 /* Needs extra CC_P branch. */ +#define VCC_S 0x4000 /* Swap avoids CC_P branch. */ +#define VCC_PS (VCC_P|VCC_S) + +/* Map of comparisons to flags. ORDER IR. */ +#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) +static const uint16_t asm_compmap[IR_ABC+1] = { + /* signed non-eq unsigned flags */ + /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), + /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), + /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), + /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), + /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), + /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), + /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), + /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), + /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), + /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), + /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ +}; + +/* FP and integer comparisons. */ +static void asm_comp(ASMState *as, IRIns *ir) +{ + uint32_t cc = asm_compmap[ir->o]; + if (irt_isnum(ir->t)) { + IRRef lref = ir->op1; + IRRef rref = ir->op2; + Reg left, right; + MCLabel l_around; + /* + ** An extra CC_P branch is required to preserve ordered/unordered + ** semantics for FP comparisons. This can be avoided by swapping + ** the operands and inverting the condition (except for EQ and UNE). + ** So always try to swap if possible. + ** + ** Another option would be to swap operands to achieve better memory + ** operand fusion. But it's unlikely that this outweighs the cost + ** of the extra branches. + */ + if (cc & VCC_S) { /* Swap? */ + IRRef tmp = lref; lref = rref; rref = tmp; + cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ + } + left = ra_alloc1(as, lref, RSET_FPR); + right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); + l_around = emit_label(as); + asm_guardcc(as, cc >> 4); + if (cc & VCC_P) { /* Extra CC_P branch required? */ + if (!(cc & VCC_U)) { + asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ + } else if (l_around != as->invmcp) { + emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ + } else { + /* Patched to mcloop by asm_loop_fixup. */ + as->loopinv = 2; + if (as->realign) + emit_sjcc(as, CC_P, as->mcp); + else + emit_jcc(as, CC_P, as->mcp); + } + } + emit_mrm(as, XO_UCOMISD, left, right); + } else { + IRRef lref = ir->op1, rref = ir->op2; + IROp leftop = (IROp)(IR(lref)->o); + Reg r64 = REX_64IR(ir, 0); + int32_t imm = 0; + lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || + irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t)); + /* Swap constants (only for ABC) and fusable loads to the right. */ + if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { + if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */ + else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */ + lref = ir->op2; rref = ir->op1; + } + if (asm_isk32(as, rref, &imm)) { + IRIns *irl = IR(lref); + /* Check wether we can use test ins. Not for unsigned, since CF=0. 
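+** E.g. an equality or signed comparison against 0 can use test r,r, which
+** sets ZF/SF but always clears CF, whereas the unsigned CC_A/CC_B family
+** still needs a real cmp.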
*/ + int usetest = (imm == 0 && (cc & 0xa) != 0x2); + if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { + /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */ + Reg right, left = RID_NONE; + RegSet allow = RSET_GPR; + if (!asm_isk32(as, irl->op2, &imm)) { + left = ra_alloc1(as, irl->op2, allow); + rset_clear(allow, left); + } else { /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */ + IRIns *irll = IR(irl->op1); + if (opisfusableload((IROp)irll->o) && + (irt_isi8(irll->t) || irt_isu8(irll->t))) { + IRType1 origt = irll->t; /* Temporarily flip types. */ + irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; + as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ + right = asm_fuseload(as, irl->op1, RSET_GPR); + as->curins++; + irll->t = origt; + if (right != RID_MRM) goto test_nofuse; + /* Fusion succeeded, emit test byte mrm, imm8. */ + asm_guardcc(as, cc); + emit_i8(as, (imm & 0xff)); + emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); + return; + } + } + as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ + right = asm_fuseloadm(as, irl->op1, allow, r64); + as->curins++; /* Undo the above. */ + test_nofuse: + asm_guardcc(as, cc); + if (ra_noreg(left)) { + emit_i32(as, imm); + emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); + } else { + emit_mrm(as, XO_TEST, r64 + left, right); + } + } else { + Reg left; + if (opisfusableload((IROp)irl->o) && + ((irt_isu8(irl->t) && checku8(imm)) || + ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || + (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { + /* Only the IRT_INT case is fused by asm_fuseload. + ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads + ** are handled here. + ** Note that cmp word [mem], imm16 should not be generated, + ** since it has a length-changing prefix. Compares of a word + ** against a sign-extended imm8 are ok, however. + */ + IRType1 origt = irl->t; /* Temporarily flip types. */ + irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; + left = asm_fuseload(as, lref, RSET_GPR); + irl->t = origt; + if (left == RID_MRM) { /* Fusion succeeded? */ + if (irt_isu8(irl->t) || irt_isu16(irl->t)) + cc >>= 4; /* Need unsigned compare. */ + asm_guardcc(as, cc); + emit_i8(as, imm); + emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? + XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); + return; + } /* Otherwise handle register case as usual. */ + } else { + left = asm_fuseloadm(as, lref, + irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64); + } + asm_guardcc(as, cc); + if (usetest && left != RID_MRM) { + /* Use test r,r instead of cmp r,0. */ + x86Op xo = XO_TEST; + if (irt_isu8(ir->t)) { + lua_assert(ir->o == IR_EQ || ir->o == IR_NE); + xo = XO_TESTb; + if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) { + if (LJ_64) { + left |= FORCE_REX; + } else { + emit_i32(as, 0xff); + emit_mrm(as, XO_GROUP3, XOg_TEST, left); + return; + } + } + } + emit_rr(as, xo, r64 + left, left); + if (irl+1 == ir) /* Referencing previous ins? */ + as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ + } else { + emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); + } + } + } else { + Reg left = ra_alloc1(as, lref, RSET_GPR); + Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64); + asm_guardcc(as, cc); + emit_mrm(as, XO_CMP, r64 + left, right); + } + } +} + +#define asm_equal(as, ir) asm_comp(as, ir) + +#if LJ_32 && LJ_HASFFI +/* 64 bit integer comparisons in 32 bit mode. 
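+** Sketch of the emitted code in execution order: compare the hiwords and
+** exit on a strictly-decided hiword result; if the hiwords differ, skip the
+** loword compare; otherwise fall through to an unsigned loword compare that
+** decides equality and the remaining cases.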
*/ +static void asm_comp_int64(ASMState *as, IRIns *ir) +{ + uint32_t cc = asm_compmap[(ir-1)->o]; + RegSet allow = RSET_GPR; + Reg lefthi = RID_NONE, leftlo = RID_NONE; + Reg righthi = RID_NONE, rightlo = RID_NONE; + MCLabel l_around; + x86ModRM mrm; + + as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ + + /* Allocate/fuse hiword operands. */ + if (irref_isk(ir->op2)) { + lefthi = asm_fuseload(as, ir->op1, allow); + } else { + lefthi = ra_alloc1(as, ir->op1, allow); + rset_clear(allow, lefthi); + righthi = asm_fuseload(as, ir->op2, allow); + if (righthi == RID_MRM) { + if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); + if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); + } else { + rset_clear(allow, righthi); + } + } + mrm = as->mrm; /* Save state for hiword instruction. */ + + /* Allocate/fuse loword operands. */ + if (irref_isk((ir-1)->op2)) { + leftlo = asm_fuseload(as, (ir-1)->op1, allow); + } else { + leftlo = ra_alloc1(as, (ir-1)->op1, allow); + rset_clear(allow, leftlo); + rightlo = asm_fuseload(as, (ir-1)->op2, allow); + } + + /* All register allocations must be performed _before_ this point. */ + l_around = emit_label(as); + as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ + + /* Loword comparison and branch. */ + asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ + if (ra_noreg(rightlo)) { + int32_t imm = IR((ir-1)->op2)->i; + if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) + emit_rr(as, XO_TEST, leftlo, leftlo); + else + emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); + } else { + emit_mrm(as, XO_CMP, leftlo, rightlo); + } + + /* Hiword comparison and branches. */ + if ((cc & 15) != CC_NE) + emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ + if ((cc & 15) != CC_E) + asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ + as->mrm = mrm; /* Restore state. */ + if (ra_noreg(righthi)) { + int32_t imm = IR(ir->op2)->i; + if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) + emit_rr(as, XO_TEST, lefthi, lefthi); + else + emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); + } else { + emit_mrm(as, XO_CMP, lefthi, righthi); + } +} +#endif + +/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ + +/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ +static void asm_hiop(ASMState *as, IRIns *ir) +{ +#if LJ_32 && LJ_HASFFI + /* HIOP is marked as a store because it needs its own DCE logic. */ + int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ + if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; + if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ + as->curins--; /* Always skip the CONV. */ + if (usehi || uselo) + asm_conv64(as, ir); + return; + } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ + asm_comp_int64(as, ir); + return; + } else if ((ir-1)->o == IR_XSTORE) { + if ((ir-1)->r != RID_SINK) + asm_fxstore(as, ir); + return; + } + if (!usehi) return; /* Skip unused hiword op for all remaining ops. 
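+** Since code is emitted backwards, the hiword ADC/SBB generated first ends
+** up after the loword ADD/SUB in the machine code and consumes its carry or
+** borrow at runtime.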
*/ + switch ((ir-1)->o) { + case IR_ADD: + as->flagmcp = NULL; + as->curins--; + asm_intarith(as, ir, XOg_ADC); + asm_intarith(as, ir-1, XOg_ADD); + break; + case IR_SUB: + as->flagmcp = NULL; + as->curins--; + asm_intarith(as, ir, XOg_SBB); + asm_intarith(as, ir-1, XOg_SUB); + break; + case IR_NEG: { + Reg dest = ra_dest(as, ir, RSET_GPR); + emit_rr(as, XO_GROUP3, XOg_NEG, dest); + emit_i8(as, 0); + emit_rr(as, XO_ARITHi8, XOg_ADC, dest); + ra_left(as, dest, ir->op1); + as->curins--; + asm_neg_not(as, ir-1, XOg_NEG); + break; + } + case IR_CALLN: + case IR_CALLXS: + if (!uselo) + ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ + break; + case IR_CNEWI: + /* Nothing to do here. Handled by CNEWI itself. */ + break; + default: lua_assert(0); break; + } +#else + UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ +#endif +} + +/* -- Profiling ----------------------------------------------------------- */ + +static void asm_prof(ASMState *as, IRIns *ir) +{ + UNUSED(ir); + asm_guardcc(as, CC_NE); + emit_i8(as, HOOK_PROFILE); + emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask); +} + +/* -- Stack handling ------------------------------------------------------ */ + +/* Check Lua stack size for overflow. Use exit handler as fallback. */ +static void asm_stack_check(ASMState *as, BCReg topslot, + IRIns *irp, RegSet allow, ExitNo exitno) +{ + /* Try to get an unused temp. register, otherwise spill/restore eax. */ + Reg pbase = irp ? irp->r : RID_BASE; + Reg r = allow ? rset_pickbot(allow) : RID_EAX; + emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); + if (allow == RSET_EMPTY) /* Restore temp. register. */ + emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); + else + ra_modified(as, r); + emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); + if (ra_hasreg(pbase) && pbase != r) + emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); + else + emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, + ptr2addr(&J2G(as->J)->jit_base)); + emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); + emit_getgl(as, r, cur_L); + if (allow == RSET_EMPTY) /* Spill temp. register. */ + emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); +} + +/* Restore Lua stack from on-trace state. */ +static void asm_stack_restore(ASMState *as, SnapShot *snap) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; + MSize n, nent = snap->nent; + /* Store the value of all modified slots to the Lua stack. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + int32_t ofs = 8*((int32_t)s-1); + IRRef ref = snap_ref(sn); + IRIns *ir = IR(ref); + if ((sn & SNAP_NORESTORE)) + continue; + if (irt_isnum(ir->t)) { + Reg src = ra_alloc1(as, ref, RSET_FPR); + emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); + } else { + lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || + (LJ_DUALNUM && irt_isinteger(ir->t))); + if (!irref_isk(ref)) { + Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); + emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); + } else if (!irt_ispri(ir->t)) { + emit_movmroi(as, RID_BASE, ofs, ir->i); + } + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + if (s != 0) /* Do not overwrite link to previous frame. 
*/ + emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); + } else { + if (!(LJ_64 && irt_islightud(ir->t))) + emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); + } + } + checkmclim(as); + } + lua_assert(map + nent == flinks); +} + +/* -- GC handling --------------------------------------------------------- */ + +/* Check GC threshold and do one or more GC steps. */ +static void asm_gc_check(ASMState *as) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; + IRRef args[2]; + MCLabel l_end; + Reg tmp; + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ + asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ + emit_rr(as, XO_TEST, RID_RET, RID_RET); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ASMREF_TMP2; /* MSize steps */ + asm_gencall(as, ci, args); + tmp = ra_releasetmp(as, ASMREF_TMP1); + emit_loada(as, tmp, J2G(as->J)); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps); + /* Jump around GC step if GC total < GC threshold. */ + emit_sjcc(as, CC_B, l_end); + emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); + emit_getgl(as, tmp, gc.total); + as->gcsteps = 0; + checkmclim(as); +} + +/* -- Loop handling ------------------------------------------------------- */ + +/* Fixup the loop branch. */ +static void asm_loop_fixup(ASMState *as) +{ + MCode *p = as->mctop; + MCode *target = as->mcp; + if (as->realign) { /* Realigned loops use short jumps. */ + as->realign = NULL; /* Stop another retry. */ + lua_assert(((intptr_t)target & 15) == 0); + if (as->loopinv) { /* Inverted loop branch? */ + p -= 5; + p[0] = XI_JMP; + lua_assert(target - p >= -128); + p[-1] = (MCode)(target - p); /* Patch sjcc. */ + if (as->loopinv == 2) + p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ + } else { + lua_assert(target - p >= -128); + p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ + p[-2] = XI_JMPs; + } + } else { + MCode *newloop; + p[-5] = XI_JMP; + if (as->loopinv) { /* Inverted loop branch? */ + /* asm_guardcc already inverted the jcc and patched the jmp. */ + p -= 5; + newloop = target+4; + *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */ + if (as->loopinv == 2) { + *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */ + newloop = target+8; + } + } else { /* Otherwise just patch jmp. */ + *(int32_t *)(p-4) = (int32_t)(target - p); + newloop = target+3; + } + /* Realign small loops and shorten the loop branch. */ + if (newloop >= p - 128) { + as->realign = newloop; /* Force a retry and remember alignment. */ + as->curins = as->stopins; /* Abort asm_trace now. */ + as->T->nins = as->orignins; /* Remove any added renames. */ + } + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Coalesce BASE register for a root trace. */ +static void asm_head_root_base(ASMState *as) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r) || irt_ismarked(ir->t)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (r != RID_BASE) + emit_rr(as, XO_MOV, r, RID_BASE); + } +} + +/* Coalesce or reload BASE register for a side trace. 
*/ +static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r) || irt_ismarked(ir->t)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (irp->r == r) { + rset_clear(allow, r); /* Mark same BASE register as coalesced. */ + } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { + rset_clear(allow, irp->r); + emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */ + } else { + emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ + } + } + return allow; +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Fixup the tail code. */ +static void asm_tail_fixup(ASMState *as, TraceNo lnk) +{ + /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */ + MCode *p = as->mctop; + MCode *target, *q; + int32_t spadj = as->T->spadjust; + if (spadj == 0) { + p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); + } else { + MCode *p1; + /* Patch stack adjustment. */ + if (checki8(spadj)) { + p -= 3; + p1 = p-6; + *p1 = (MCode)spadj; + } else { + p1 = p-9; + *(int32_t *)p1 = spadj; + } + if ((as->flags & JIT_F_LEA_AGU)) { +#if LJ_64 + p1[-4] = 0x48; +#endif + p1[-3] = (MCode)XI_LEA; + p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); + p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + } else { +#if LJ_64 + p1[-3] = 0x48; +#endif + p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi); + p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP); + } + } + /* Patch exit branch. */ + target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = XI_JMP; + /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ + for (q = as->mctop-1; q >= p; q--) + *q = XI_NOP; + as->mctop = p; +} + +/* Prepare tail of code. */ +static void asm_tail_prep(ASMState *as) +{ + MCode *p = as->mctop; + /* Realign and leave room for backwards loop branch or exit branch. */ + if (as->realign) { + int i = ((int)(intptr_t)as->realign) & 15; + /* Fill unused mcode tail with NOPs to make the prefetcher happy. */ + while (i-- > 0) + *--p = XI_NOP; + as->mctop = p; + p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */ + } else { + p -= 5; /* Space for exit branch (near jmp). */ + } + if (as->loopref) { + as->invmcp = as->mcp = p; + } else { + /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ + as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); + as->invmcp = NULL; + } +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Ensure there are enough stack slots for call arguments. */ +static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + IRRef args[CCI_NARGS_MAX*2]; + int nslots; + asm_collectargs(as, ir, ci, args); + nslots = asm_count_call_slots(as, ci, args); + if (nslots > as->evenspill) /* Leave room for args in stack slots. */ + as->evenspill = nslots; +#if LJ_64 + return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); +#else + return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET); +#endif +} + +/* Target-specific setup. */ +static void asm_setup_target(ASMState *as) +{ + asm_exitstub_setup(as, as->T->nsnap); +} + +/* -- Trace patching ------------------------------------------------------ */ + +/* Patch exit jumps of existing machine code to a new target. 
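+** Roughly: redirect the fallback jmp at the end of the trace (if it targets
+** this exit) and every long conditional jump (0F 8x, matched via the 0x800f
+** test) whose rel32 points at this exit's stub. The scan starts after the
+** vmstate update so a parent exit used for the stack check is not patched.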
*/ +void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) +{ + MCode *p = T->mcode; + MCode *mcarea = lj_mcode_patch(J, p, 0); + MSize len = T->szmcode; + MCode *px = exitstub_addr(J, exitno) - 6; + MCode *pe = p+len-6; + uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); + if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) + *(int32_t *)(p+len-4) = jmprel(p+len, target); + /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ + for (; p < pe; p++) + if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) { + p += LJ_64 ? 11 : 10; + break; + } + lua_assert(p < pe); + for (; p < pe; p++) { + if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) { + *(int32_t *)(p+2) = jmprel(p+6, target); + p += 5; + } + } + lj_mcode_sync(T->mcode, T->mcode + T->szmcode); + lj_mcode_patch(J, mcarea, 1); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_bc.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_bc.c new file mode 100644 index 00000000000..a8f444c26e1 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_bc.c @@ -0,0 +1,14 @@ +/* +** Bytecode instruction modes. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_bc_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_bc.h" + +/* Bytecode offsets and bytecode instruction modes. */ +#include "lj_bcdef.h" + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_bc.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_bc.h new file mode 100644 index 00000000000..64c1bcda35a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_bc.h @@ -0,0 +1,265 @@ +/* +** Bytecode instruction format. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_BC_H +#define _LJ_BC_H + +#include "lj_def.h" +#include "lj_arch.h" + +/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit: +** +** +----+----+----+----+ +** | B | C | A | OP | Format ABC +** +----+----+----+----+ +** | D | A | OP | Format AD +** +-------------------- +** MSB LSB +** +** In-memory instructions are always stored in host byte order. +*/ + +/* Operand ranges and related constants. */ +#define BCMAX_A 0xff +#define BCMAX_B 0xff +#define BCMAX_C 0xff +#define BCMAX_D 0xffff +#define BCBIAS_J 0x8000 +#define NO_REG BCMAX_A +#define NO_JMP (~(BCPos)0) + +/* Macros to get instruction fields. */ +#define bc_op(i) ((BCOp)((i)&0xff)) +#define bc_a(i) ((BCReg)(((i)>>8)&0xff)) +#define bc_b(i) ((BCReg)((i)>>24)) +#define bc_c(i) ((BCReg)(((i)>>16)&0xff)) +#define bc_d(i) ((BCReg)((i)>>16)) +#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J) + +/* Macros to set instruction fields. */ +#define setbc_byte(p, x, ofs) \ + ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x) +#define setbc_op(p, x) setbc_byte(p, (x), 0) +#define setbc_a(p, x) setbc_byte(p, (x), 1) +#define setbc_b(p, x) setbc_byte(p, (x), 3) +#define setbc_c(p, x) setbc_byte(p, (x), 2) +#define setbc_d(p, x) \ + ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x) +#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J)) + +/* Macros to compose instructions. */ +#define BCINS_ABC(o, a, b, c) \ + (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16)) +#define BCINS_AD(o, a, d) \ + (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16)) +#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J)) + +/* Bytecode instruction definition. Order matters, see below. 
+** +** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod) +** +** The opcode name suffixes specify the type for RB/RC or RD: +** V = variable slot +** S = string const +** N = number const +** P = primitive type (~itype) +** B = unsigned byte literal +** M = multiple args/results +*/ +#define BCDEF(_) \ + /* Comparison ops. ORDER OPR. */ \ + _(ISLT, var, ___, var, lt) \ + _(ISGE, var, ___, var, lt) \ + _(ISLE, var, ___, var, le) \ + _(ISGT, var, ___, var, le) \ + \ + _(ISEQV, var, ___, var, eq) \ + _(ISNEV, var, ___, var, eq) \ + _(ISEQS, var, ___, str, eq) \ + _(ISNES, var, ___, str, eq) \ + _(ISEQN, var, ___, num, eq) \ + _(ISNEN, var, ___, num, eq) \ + _(ISEQP, var, ___, pri, eq) \ + _(ISNEP, var, ___, pri, eq) \ + \ + /* Unary test and copy ops. */ \ + _(ISTC, dst, ___, var, ___) \ + _(ISFC, dst, ___, var, ___) \ + _(IST, ___, ___, var, ___) \ + _(ISF, ___, ___, var, ___) \ + _(ISTYPE, var, ___, lit, ___) \ + _(ISNUM, var, ___, lit, ___) \ + \ + /* Unary ops. */ \ + _(MOV, dst, ___, var, ___) \ + _(NOT, dst, ___, var, ___) \ + _(UNM, dst, ___, var, unm) \ + _(LEN, dst, ___, var, len) \ + \ + /* Binary ops. ORDER OPR. VV last, POW must be next. */ \ + _(ADDVN, dst, var, num, add) \ + _(SUBVN, dst, var, num, sub) \ + _(MULVN, dst, var, num, mul) \ + _(DIVVN, dst, var, num, div) \ + _(MODVN, dst, var, num, mod) \ + \ + _(ADDNV, dst, var, num, add) \ + _(SUBNV, dst, var, num, sub) \ + _(MULNV, dst, var, num, mul) \ + _(DIVNV, dst, var, num, div) \ + _(MODNV, dst, var, num, mod) \ + \ + _(ADDVV, dst, var, var, add) \ + _(SUBVV, dst, var, var, sub) \ + _(MULVV, dst, var, var, mul) \ + _(DIVVV, dst, var, var, div) \ + _(MODVV, dst, var, var, mod) \ + \ + _(POW, dst, var, var, pow) \ + _(CAT, dst, rbase, rbase, concat) \ + \ + /* Constant ops. */ \ + _(KSTR, dst, ___, str, ___) \ + _(KCDATA, dst, ___, cdata, ___) \ + _(KSHORT, dst, ___, lits, ___) \ + _(KNUM, dst, ___, num, ___) \ + _(KPRI, dst, ___, pri, ___) \ + _(KNIL, base, ___, base, ___) \ + \ + /* Upvalue and function ops. */ \ + _(UGET, dst, ___, uv, ___) \ + _(USETV, uv, ___, var, ___) \ + _(USETS, uv, ___, str, ___) \ + _(USETN, uv, ___, num, ___) \ + _(USETP, uv, ___, pri, ___) \ + _(UCLO, rbase, ___, jump, ___) \ + _(FNEW, dst, ___, func, gc) \ + \ + /* Table ops. */ \ + _(TNEW, dst, ___, lit, gc) \ + _(TDUP, dst, ___, tab, gc) \ + _(GGET, dst, ___, str, index) \ + _(GSET, var, ___, str, newindex) \ + _(TGETV, dst, var, var, index) \ + _(TGETS, dst, var, str, index) \ + _(TGETB, dst, var, lit, index) \ + _(TGETR, dst, var, var, index) \ + _(TSETV, var, var, var, newindex) \ + _(TSETS, var, var, str, newindex) \ + _(TSETB, var, var, lit, newindex) \ + _(TSETM, base, ___, num, newindex) \ + _(TSETR, var, var, var, newindex) \ + \ + /* Calls and vararg handling. T = tail call. */ \ + _(CALLM, base, lit, lit, call) \ + _(CALL, base, lit, lit, call) \ + _(CALLMT, base, ___, lit, call) \ + _(CALLT, base, ___, lit, call) \ + _(ITERC, base, lit, lit, call) \ + _(ITERN, base, lit, lit, call) \ + _(VARG, base, lit, lit, ___) \ + _(ISNEXT, base, ___, jump, ___) \ + \ + /* Returns. */ \ + _(RETM, base, ___, lit, ___) \ + _(RET, rbase, ___, lit, ___) \ + _(RET0, rbase, ___, lit, ___) \ + _(RET1, rbase, ___, lit, ___) \ + \ + /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. 
*/ \ + _(FORI, base, ___, jump, ___) \ + _(JFORI, base, ___, jump, ___) \ + \ + _(FORL, base, ___, jump, ___) \ + _(IFORL, base, ___, jump, ___) \ + _(JFORL, base, ___, lit, ___) \ + \ + _(ITERL, base, ___, jump, ___) \ + _(IITERL, base, ___, jump, ___) \ + _(JITERL, base, ___, lit, ___) \ + \ + _(LOOP, rbase, ___, jump, ___) \ + _(ILOOP, rbase, ___, jump, ___) \ + _(JLOOP, rbase, ___, lit, ___) \ + \ + _(JMP, rbase, ___, jump, ___) \ + \ + /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \ + _(FUNCF, rbase, ___, ___, ___) \ + _(IFUNCF, rbase, ___, ___, ___) \ + _(JFUNCF, rbase, ___, lit, ___) \ + _(FUNCV, rbase, ___, ___, ___) \ + _(IFUNCV, rbase, ___, ___, ___) \ + _(JFUNCV, rbase, ___, lit, ___) \ + _(FUNCC, rbase, ___, ___, ___) \ + _(FUNCCW, rbase, ___, ___, ___) + +/* Bytecode opcode numbers. */ +typedef enum { +#define BCENUM(name, ma, mb, mc, mt) BC_##name, +BCDEF(BCENUM) +#undef BCENUM + BC__MAX +} BCOp; + +LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV); +LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV); +LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES); +LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN); +LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP); +LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE); +LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT); +LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT); +LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC); +LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM); +LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT); +LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET); +LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL); +LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL); +LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL); +LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL); +LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP); +LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP); +LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF); +LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF); +LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV); +LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV); + +/* This solves a circular dependency problem, change as needed. */ +#define FF_next_N 4 + +/* Stack slots used by FORI/FORL, relative to operand A. */ +enum { + FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT +}; + +/* Bytecode operand modes. 
ORDER BCMode */ +typedef enum { + BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */ + BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata, + BCM_max +} BCMode; +#define BCM___ BCMnone + +#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7)) +#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15)) +#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15)) +#define bcmode_d(op) bcmode_c(op) +#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3)) +#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11)) + +#define BCMODE(name, ma, mb, mc, mm) \ + (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)), +#define BCMODE_FF 0 + +static LJ_AINLINE int bc_isret(BCOp op) +{ + return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1); +} + +LJ_DATA const uint16_t lj_bc_mode[]; +LJ_DATA const uint16_t lj_bc_ofs[]; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_bcdump.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_bcdump.h new file mode 100644 index 00000000000..c38983146bb --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_bcdump.h @@ -0,0 +1,68 @@ +/* +** Bytecode dump definitions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_BCDUMP_H +#define _LJ_BCDUMP_H + +#include "lj_obj.h" +#include "lj_lex.h" + +/* -- Bytecode dump format ------------------------------------------------ */ + +/* +** dump = header proto+ 0U +** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*] +** proto = lengthU pdata +** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*] +** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU +** [debuglenU [firstlineU numlineU]] +** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* } +** knum = intU0 | (loU1 hiU) +** ktab = narrayU nhashU karray* khash* +** karray = ktabk +** khash = ktabk ktabk +** ktabk = ktabtypeU { intU | (loU hiU) | strB* } +** +** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1 +*/ + +/* Bytecode dump header. */ +#define BCDUMP_HEAD1 0x1b +#define BCDUMP_HEAD2 0x4c +#define BCDUMP_HEAD3 0x4a + +/* If you perform *any* kind of private modifications to the bytecode itself +** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher. +*/ +#define BCDUMP_VERSION 2 + +/* Compatibility flags. */ +#define BCDUMP_F_BE 0x01 +#define BCDUMP_F_STRIP 0x02 +#define BCDUMP_F_FFI 0x04 +#define BCDUMP_F_FR2 0x08 + +#define BCDUMP_F_KNOWN (BCDUMP_F_FR2*2-1) + +/* Type codes for the GC constants of a prototype. Plus length for strings. */ +enum { + BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64, + BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR +}; + +/* Type codes for the keys/values of a constant table. */ +enum { + BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE, + BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR +}; + +/* -- Bytecode reader/writer ---------------------------------------------- */ + +LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, + void *data, int strip); +LJ_FUNC GCproto *lj_bcread_proto(LexState *ls); +LJ_FUNC GCproto *lj_bcread(LexState *ls); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_bcread.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_bcread.c new file mode 100644 index 00000000000..5e5021773d7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_bcread.c @@ -0,0 +1,457 @@ +/* +** Bytecode reader. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_bcread_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lualib.h" +#endif +#include "lj_lex.h" +#include "lj_bcdump.h" +#include "lj_state.h" +#include "lj_strfmt.h" + +/* Reuse some lexer fields for our own purposes. */ +#define bcread_flags(ls) ls->level +#define bcread_swap(ls) \ + ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE) +#define bcread_oldtop(L, ls) restorestack(L, ls->lastline) +#define bcread_savetop(L, ls, top) \ + ls->lastline = (BCLine)savestack(L, (top)) + +/* -- Input buffer handling ----------------------------------------------- */ + +/* Throw reader error. */ +static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em) +{ + lua_State *L = ls->L; + const char *name = ls->chunkarg; + if (*name == BCDUMP_HEAD1) name = "(binary)"; + else if (*name == '@' || *name == '=') name++; + lj_strfmt_pushf(L, "%s: %s", name, err2msg(em)); + lj_err_throw(L, LUA_ERRSYNTAX); +} + +/* Refill buffer. */ +static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) +{ + lua_assert(len != 0); + if (len > LJ_MAX_BUF || ls->c < 0) + bcread_error(ls, LJ_ERR_BCBAD); + do { + const char *buf; + size_t sz; + char *p = sbufB(&ls->sb); + MSize n = (MSize)(ls->pe - ls->p); + if (n) { /* Copy remainder to buffer. */ + if (sbuflen(&ls->sb)) { /* Move down in buffer. */ + lua_assert(ls->pe == sbufP(&ls->sb)); + if (ls->p != p) memmove(p, ls->p, n); + } else { /* Copy from buffer provided by reader. */ + p = lj_buf_need(&ls->sb, len); + memcpy(p, ls->p, n); + } + ls->p = p; + ls->pe = p + n; + } + setsbufP(&ls->sb, p + n); + buf = ls->rfunc(ls->L, ls->rdata, &sz); /* Get more data from reader. */ + if (buf == NULL || sz == 0) { /* EOF? */ + if (need) bcread_error(ls, LJ_ERR_BCBAD); + ls->c = -1; /* Only bad if we get called again. */ + break; + } + if (n) { /* Append to buffer. */ + n += (MSize)sz; + p = lj_buf_need(&ls->sb, n < len ? len : n); + memcpy(sbufP(&ls->sb), buf, sz); + setsbufP(&ls->sb, p + n); + ls->p = p; + ls->pe = p + n; + } else { /* Return buffer provided by reader. */ + ls->p = buf; + ls->pe = buf + sz; + } + } while (ls->p + len > ls->pe); +} + +/* Need a certain number of bytes. */ +static LJ_AINLINE void bcread_need(LexState *ls, MSize len) +{ + if (LJ_UNLIKELY(ls->p + len > ls->pe)) + bcread_fill(ls, len, 1); +} + +/* Want to read up to a certain number of bytes, but may need less. */ +static LJ_AINLINE void bcread_want(LexState *ls, MSize len) +{ + if (LJ_UNLIKELY(ls->p + len > ls->pe)) + bcread_fill(ls, len, 0); +} + +/* Return memory block from buffer. */ +static LJ_AINLINE uint8_t *bcread_mem(LexState *ls, MSize len) +{ + uint8_t *p = (uint8_t *)ls->p; + ls->p += len; + lua_assert(ls->p <= ls->pe); + return p; +} + +/* Copy memory block from buffer. */ +static void bcread_block(LexState *ls, void *q, MSize len) +{ + memcpy(q, bcread_mem(ls, len), len); +} + +/* Read byte from buffer. */ +static LJ_AINLINE uint32_t bcread_byte(LexState *ls) +{ + lua_assert(ls->p < ls->pe); + return (uint32_t)(uint8_t)*ls->p++; +} + +/* Read ULEB128 value from buffer. */ +static LJ_AINLINE uint32_t bcread_uleb128(LexState *ls) +{ + uint32_t v = lj_buf_ruleb128(&ls->p); + lua_assert(ls->p <= ls->pe); + return v; +} + +/* Read top 32 bits of 33 bit ULEB128 value from buffer. 
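+** (Illustrative aside on the ULEB128 fields used throughout the dump format
+** above: each byte carries 7 payload bits, least-significant group first,
+** with the top bit as a continuation flag. For example 300 = 0x12C is
+** written as the two bytes 0xAC 0x02, since 0xAC = 0x80|0x2C holds the low
+** 7 bits plus the continuation flag and 0x02<<7 | 0x2C == 0x12C. The 33 bit
+** variant decoded below additionally keeps a tag in bit 0 of the first
+** byte: 0 marks an integer constant, 1 marks the low word of a lua_Number,
+** as consumed by bcread_knum below.)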
*/ +static uint32_t bcread_uleb128_33(LexState *ls) +{ + const uint8_t *p = (const uint8_t *)ls->p; + uint32_t v = (*p++ >> 1); + if (LJ_UNLIKELY(v >= 0x40)) { + int sh = -1; + v &= 0x3f; + do { + v |= ((*p & 0x7f) << (sh += 7)); + } while (*p++ >= 0x80); + } + ls->p = (char *)p; + lua_assert(ls->p <= ls->pe); + return v; +} + +/* -- Bytecode reader ----------------------------------------------------- */ + +/* Read debug info of a prototype. */ +static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg) +{ + void *lineinfo = (void *)proto_lineinfo(pt); + bcread_block(ls, lineinfo, sizedbg); + /* Swap lineinfo if the endianess differs. */ + if (bcread_swap(ls) && pt->numline >= 256) { + MSize i, n = pt->sizebc-1; + if (pt->numline < 65536) { + uint16_t *p = (uint16_t *)lineinfo; + for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8)); + } else { + uint32_t *p = (uint32_t *)lineinfo; + for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]); + } + } +} + +/* Find pointer to varinfo. */ +static const void *bcread_varinfo(GCproto *pt) +{ + const uint8_t *p = proto_uvinfo(pt); + MSize n = pt->sizeuv; + if (n) while (*p++ || --n) ; + return p; +} + +/* Read a single constant key/value of a template table. */ +static void bcread_ktabk(LexState *ls, TValue *o) +{ + MSize tp = bcread_uleb128(ls); + if (tp >= BCDUMP_KTAB_STR) { + MSize len = tp - BCDUMP_KTAB_STR; + const char *p = (const char *)bcread_mem(ls, len); + setstrV(ls->L, o, lj_str_new(ls->L, p, len)); + } else if (tp == BCDUMP_KTAB_INT) { + setintV(o, (int32_t)bcread_uleb128(ls)); + } else if (tp == BCDUMP_KTAB_NUM) { + o->u32.lo = bcread_uleb128(ls); + o->u32.hi = bcread_uleb128(ls); + } else { + lua_assert(tp <= BCDUMP_KTAB_TRUE); + setpriV(o, ~tp); + } +} + +/* Read a template table. */ +static GCtab *bcread_ktab(LexState *ls) +{ + MSize narray = bcread_uleb128(ls); + MSize nhash = bcread_uleb128(ls); + GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash)); + if (narray) { /* Read array entries. */ + MSize i; + TValue *o = tvref(t->array); + for (i = 0; i < narray; i++, o++) + bcread_ktabk(ls, o); + } + if (nhash) { /* Read hash entries. */ + MSize i; + for (i = 0; i < nhash; i++) { + TValue key; + bcread_ktabk(ls, &key); + lua_assert(!tvisnil(&key)); + bcread_ktabk(ls, lj_tab_set(ls->L, t, &key)); + } + } + return t; +} + +/* Read GC constants of a prototype. */ +static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc) +{ + MSize i; + GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc; + for (i = 0; i < sizekgc; i++, kr++) { + MSize tp = bcread_uleb128(ls); + if (tp >= BCDUMP_KGC_STR) { + MSize len = tp - BCDUMP_KGC_STR; + const char *p = (const char *)bcread_mem(ls, len); + setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len))); + } else if (tp == BCDUMP_KGC_TAB) { + setgcref(*kr, obj2gco(bcread_ktab(ls))); +#if LJ_HASFFI + } else if (tp != BCDUMP_KGC_CHILD) { + CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE : + tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64; + CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8; + GCcdata *cd = lj_cdata_new_(ls->L, id, sz); + TValue *p = (TValue *)cdataptr(cd); + setgcref(*kr, obj2gco(cd)); + p[0].u32.lo = bcread_uleb128(ls); + p[0].u32.hi = bcread_uleb128(ls); + if (tp == BCDUMP_KGC_COMPLEX) { + p[1].u32.lo = bcread_uleb128(ls); + p[1].u32.hi = bcread_uleb128(ls); + } +#endif + } else { + lua_State *L = ls->L; + lua_assert(tp == BCDUMP_KGC_CHILD); + if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? 
*/ + bcread_error(ls, LJ_ERR_BCBAD); + L->top--; + setgcref(*kr, obj2gco(protoV(L->top))); + } + } +} + +/* Read number constants of a prototype. */ +static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn) +{ + MSize i; + TValue *o = mref(pt->k, TValue); + for (i = 0; i < sizekn; i++, o++) { + int isnum = (ls->p[0] & 1); + uint32_t lo = bcread_uleb128_33(ls); + if (isnum) { + o->u32.lo = lo; + o->u32.hi = bcread_uleb128(ls); + } else { + setintV(o, lo); + } + } +} + +/* Read bytecode instructions. */ +static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc) +{ + BCIns *bc = proto_bc(pt); + bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF, + pt->framesize, 0); + bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns)); + /* Swap bytecode instructions if the endianess differs. */ + if (bcread_swap(ls)) { + MSize i; + for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]); + } +} + +/* Read upvalue refs. */ +static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv) +{ + if (sizeuv) { + uint16_t *uv = proto_uv(pt); + bcread_block(ls, uv, sizeuv*2); + /* Swap upvalue refs if the endianess differs. */ + if (bcread_swap(ls)) { + MSize i; + for (i = 0; i < sizeuv; i++) + uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8)); + } + } +} + +/* Read a prototype. */ +GCproto *lj_bcread_proto(LexState *ls) +{ + GCproto *pt; + MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept; + MSize ofsk, ofsuv, ofsdbg; + MSize sizedbg = 0; + BCLine firstline = 0, numline = 0; + + /* Read prototype header. */ + flags = bcread_byte(ls); + numparams = bcread_byte(ls); + framesize = bcread_byte(ls); + sizeuv = bcread_byte(ls); + sizekgc = bcread_uleb128(ls); + sizekn = bcread_uleb128(ls); + sizebc = bcread_uleb128(ls) + 1; + if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) { + sizedbg = bcread_uleb128(ls); + if (sizedbg) { + firstline = bcread_uleb128(ls); + numline = bcread_uleb128(ls); + } + } + + /* Calculate total size of prototype including all colocated arrays. */ + sizept = (MSize)sizeof(GCproto) + + sizebc*(MSize)sizeof(BCIns) + + sizekgc*(MSize)sizeof(GCRef); + sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1); + ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue); + ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2; + ofsdbg = sizept; sizept += sizedbg; + + /* Allocate prototype object and initialize its fields. */ + pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept); + pt->gct = ~LJ_TPROTO; + pt->numparams = (uint8_t)numparams; + pt->framesize = (uint8_t)framesize; + pt->sizebc = sizebc; + setmref(pt->k, (char *)pt + ofsk); + setmref(pt->uv, (char *)pt + ofsuv); + pt->sizekgc = 0; /* Set to zero until fully initialized. */ + pt->sizekn = sizekn; + pt->sizept = sizept; + pt->sizeuv = (uint8_t)sizeuv; + pt->flags = (uint8_t)flags; + pt->trace = 0; + setgcref(pt->chunkname, obj2gco(ls->chunkname)); + + /* Close potentially uninitialized gap between bc and kgc. */ + *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0; + + /* Read bytecode instructions and upvalue refs. */ + bcread_bytecode(ls, pt, sizebc); + bcread_uv(ls, pt, sizeuv); + + /* Read constants. */ + bcread_kgc(ls, pt, sizekgc); + pt->sizekgc = sizekgc; + bcread_knum(ls, pt, sizekn); + + /* Read and initialize debug info. */ + pt->firstline = firstline; + pt->numline = numline; + if (sizedbg) { + MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 
1 : 2); + setmref(pt->lineinfo, (char *)pt + ofsdbg); + setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli); + bcread_dbg(ls, pt, sizedbg); + setmref(pt->varinfo, bcread_varinfo(pt)); + } else { + setmref(pt->lineinfo, NULL); + setmref(pt->uvinfo, NULL); + setmref(pt->varinfo, NULL); + } + return pt; +} + +/* Read and check header of bytecode dump. */ +static int bcread_header(LexState *ls) +{ + uint32_t flags; + bcread_want(ls, 3+5+5); + if (bcread_byte(ls) != BCDUMP_HEAD2 || + bcread_byte(ls) != BCDUMP_HEAD3 || + bcread_byte(ls) != BCDUMP_VERSION) return 0; + bcread_flags(ls) = flags = bcread_uleb128(ls); + if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0; + if ((flags & BCDUMP_F_FR2) != LJ_FR2*BCDUMP_F_FR2) return 0; + if ((flags & BCDUMP_F_FFI)) { +#if LJ_HASFFI + lua_State *L = ls->L; + if (!ctype_ctsG(G(L))) { + ptrdiff_t oldtop = savestack(L, L->top); + luaopen_ffi(L); /* Load FFI library on-demand. */ + L->top = restorestack(L, oldtop); + } +#else + return 0; +#endif + } + if ((flags & BCDUMP_F_STRIP)) { + ls->chunkname = lj_str_newz(ls->L, ls->chunkarg); + } else { + MSize len = bcread_uleb128(ls); + bcread_need(ls, len); + ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len); + } + return 1; /* Ok. */ +} + +/* Read a bytecode dump. */ +GCproto *lj_bcread(LexState *ls) +{ + lua_State *L = ls->L; + lua_assert(ls->c == BCDUMP_HEAD1); + bcread_savetop(L, ls, L->top); + lj_buf_reset(&ls->sb); + /* Check for a valid bytecode dump header. */ + if (!bcread_header(ls)) + bcread_error(ls, LJ_ERR_BCFMT); + for (;;) { /* Process all prototypes in the bytecode dump. */ + GCproto *pt; + MSize len; + const char *startp; + /* Read length. */ + if (ls->p < ls->pe && ls->p[0] == 0) { /* Shortcut EOF. */ + ls->p++; + break; + } + bcread_want(ls, 5); + len = bcread_uleb128(ls); + if (!len) break; /* EOF */ + bcread_need(ls, len); + startp = ls->p; + pt = lj_bcread_proto(ls); + if (ls->p != startp + len) + bcread_error(ls, LJ_ERR_BCBAD); + setprotoV(L, L->top, pt); + incr_top(L); + } + if ((int32_t)(2*(uint32_t)(ls->pe - ls->p)) > 0 || + L->top-1 != bcread_oldtop(L, ls)) + bcread_error(ls, LJ_ERR_BCBAD); + /* Pop off last prototype. */ + L->top--; + return protoV(L->top); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_bcwrite.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_bcwrite.c new file mode 100644 index 00000000000..b2c09738387 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_bcwrite.c @@ -0,0 +1,361 @@ +/* +** Bytecode writer. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_bcwrite_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_buf.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#if LJ_HASJIT +#include "lj_dispatch.h" +#include "lj_jit.h" +#endif +#include "lj_strfmt.h" +#include "lj_bcdump.h" +#include "lj_vm.h" + +/* Context for bytecode writer. */ +typedef struct BCWriteCtx { + SBuf sb; /* Output buffer. */ + GCproto *pt; /* Root prototype. */ + lua_Writer wfunc; /* Writer callback. */ + void *wdata; /* Writer callback data. */ + int strip; /* Strip debug info. */ + int status; /* Status from writer callback. */ +} BCWriteCtx; + +/* -- Bytecode writer ----------------------------------------------------- */ + +/* Write a single constant key/value of a template table. 
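+** (The wfunc/wdata pair in BCWriteCtx above is the standard lua_Writer
+** callback: it is invoked once for the dump header, once per prototype and
+** once for the trailing zero byte, and a nonzero return value stops further
+** writes and becomes the return value of lj_bcwrite. A minimal writer that
+** only measures the dump, as an illustrative sketch with a hypothetical
+** name:
+**
+**   static int size_writer(lua_State *L, const void *p, size_t sz, void *ud)
+**   {
+**     UNUSED(L); UNUSED(p);
+**     *(size_t *)ud += sz;
+**     return 0;
+**   }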
*/ +static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow) +{ + char *p = lj_buf_more(&ctx->sb, 1+10); + if (tvisstr(o)) { + const GCstr *str = strV(o); + MSize len = str->len; + p = lj_buf_more(&ctx->sb, 5+len); + p = lj_strfmt_wuleb128(p, BCDUMP_KTAB_STR+len); + p = lj_buf_wmem(p, strdata(str), len); + } else if (tvisint(o)) { + *p++ = BCDUMP_KTAB_INT; + p = lj_strfmt_wuleb128(p, intV(o)); + } else if (tvisnum(o)) { + if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */ + lua_Number num = numV(o); + int32_t k = lj_num2int(num); + if (num == (lua_Number)k) { /* -0 is never a constant. */ + *p++ = BCDUMP_KTAB_INT; + p = lj_strfmt_wuleb128(p, k); + setsbufP(&ctx->sb, p); + return; + } + } + *p++ = BCDUMP_KTAB_NUM; + p = lj_strfmt_wuleb128(p, o->u32.lo); + p = lj_strfmt_wuleb128(p, o->u32.hi); + } else { + lua_assert(tvispri(o)); + *p++ = BCDUMP_KTAB_NIL+~itype(o); + } + setsbufP(&ctx->sb, p); +} + +/* Write a template table. */ +static void bcwrite_ktab(BCWriteCtx *ctx, char *p, const GCtab *t) +{ + MSize narray = 0, nhash = 0; + if (t->asize > 0) { /* Determine max. length of array part. */ + ptrdiff_t i; + TValue *array = tvref(t->array); + for (i = (ptrdiff_t)t->asize-1; i >= 0; i--) + if (!tvisnil(&array[i])) + break; + narray = (MSize)(i+1); + } + if (t->hmask > 0) { /* Count number of used hash slots. */ + MSize i, hmask = t->hmask; + Node *node = noderef(t->node); + for (i = 0; i <= hmask; i++) + nhash += !tvisnil(&node[i].val); + } + /* Write number of array slots and hash slots. */ + p = lj_strfmt_wuleb128(p, narray); + p = lj_strfmt_wuleb128(p, nhash); + setsbufP(&ctx->sb, p); + if (narray) { /* Write array entries (may contain nil). */ + MSize i; + TValue *o = tvref(t->array); + for (i = 0; i < narray; i++, o++) + bcwrite_ktabk(ctx, o, 1); + } + if (nhash) { /* Write hash entries. */ + MSize i = nhash; + Node *node = noderef(t->node) + t->hmask; + for (;; node--) + if (!tvisnil(&node->val)) { + bcwrite_ktabk(ctx, &node->key, 0); + bcwrite_ktabk(ctx, &node->val, 1); + if (--i == 0) break; + } + } +} + +/* Write GC constants of a prototype. */ +static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt) +{ + MSize i, sizekgc = pt->sizekgc; + GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc; + for (i = 0; i < sizekgc; i++, kr++) { + GCobj *o = gcref(*kr); + MSize tp, need = 1; + char *p; + /* Determine constant type and needed size. */ + if (o->gch.gct == ~LJ_TSTR) { + tp = BCDUMP_KGC_STR + gco2str(o)->len; + need = 5+gco2str(o)->len; + } else if (o->gch.gct == ~LJ_TPROTO) { + lua_assert((pt->flags & PROTO_CHILD)); + tp = BCDUMP_KGC_CHILD; +#if LJ_HASFFI + } else if (o->gch.gct == ~LJ_TCDATA) { + CTypeID id = gco2cd(o)->ctypeid; + need = 1+4*5; + if (id == CTID_INT64) { + tp = BCDUMP_KGC_I64; + } else if (id == CTID_UINT64) { + tp = BCDUMP_KGC_U64; + } else { + lua_assert(id == CTID_COMPLEX_DOUBLE); + tp = BCDUMP_KGC_COMPLEX; + } +#endif + } else { + lua_assert(o->gch.gct == ~LJ_TTAB); + tp = BCDUMP_KGC_TAB; + need = 1+2*5; + } + /* Write constant type. */ + p = lj_buf_more(&ctx->sb, need); + p = lj_strfmt_wuleb128(p, tp); + /* Write constant data (if any). 
*/ + if (tp >= BCDUMP_KGC_STR) { + p = lj_buf_wmem(p, strdata(gco2str(o)), gco2str(o)->len); + } else if (tp == BCDUMP_KGC_TAB) { + bcwrite_ktab(ctx, p, gco2tab(o)); + continue; +#if LJ_HASFFI + } else if (tp != BCDUMP_KGC_CHILD) { + cTValue *q = (TValue *)cdataptr(gco2cd(o)); + p = lj_strfmt_wuleb128(p, q[0].u32.lo); + p = lj_strfmt_wuleb128(p, q[0].u32.hi); + if (tp == BCDUMP_KGC_COMPLEX) { + p = lj_strfmt_wuleb128(p, q[1].u32.lo); + p = lj_strfmt_wuleb128(p, q[1].u32.hi); + } +#endif + } + setsbufP(&ctx->sb, p); + } +} + +/* Write number constants of a prototype. */ +static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt) +{ + MSize i, sizekn = pt->sizekn; + cTValue *o = mref(pt->k, TValue); + char *p = lj_buf_more(&ctx->sb, 10*sizekn); + for (i = 0; i < sizekn; i++, o++) { + int32_t k; + if (tvisint(o)) { + k = intV(o); + goto save_int; + } else { + /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */ + if (!LJ_DUALNUM) { /* Narrow number constants to integers. */ + lua_Number num = numV(o); + k = lj_num2int(num); + if (num == (lua_Number)k) { /* -0 is never a constant. */ + save_int: + p = lj_strfmt_wuleb128(p, 2*(uint32_t)k | ((uint32_t)k&0x80000000u)); + if (k < 0) + p[-1] = (p[-1] & 7) | ((k>>27) & 0x18); + continue; + } + } + p = lj_strfmt_wuleb128(p, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u))); + if (o->u32.lo >= 0x80000000u) + p[-1] = (p[-1] & 7) | ((o->u32.lo>>27) & 0x18); + p = lj_strfmt_wuleb128(p, o->u32.hi); + } + } + setsbufP(&ctx->sb, p); +} + +/* Write bytecode instructions. */ +static char *bcwrite_bytecode(BCWriteCtx *ctx, char *p, GCproto *pt) +{ + MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */ +#if LJ_HASJIT + uint8_t *q = (uint8_t *)p; +#endif + p = lj_buf_wmem(p, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns)); + UNUSED(ctx); +#if LJ_HASJIT + /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */ + if ((pt->flags & PROTO_ILOOP) || pt->trace) { + jit_State *J = L2J(sbufL(&ctx->sb)); + MSize i; + for (i = 0; i < nbc; i++, q += sizeof(BCIns)) { + BCOp op = (BCOp)q[LJ_ENDIAN_SELECT(0, 3)]; + if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP || + op == BC_JFORI) { + q[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL); + } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { + BCReg rd = q[LJ_ENDIAN_SELECT(2, 1)] + (q[LJ_ENDIAN_SELECT(3, 0)] << 8); + BCIns ins = traceref(J, rd)->startins; + q[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_JFORL+BC_FORL); + q[LJ_ENDIAN_SELECT(2, 1)] = bc_c(ins); + q[LJ_ENDIAN_SELECT(3, 0)] = bc_b(ins); + } + } + } +#endif + return p; +} + +/* Write prototype. */ +static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt) +{ + MSize sizedbg = 0; + char *p; + + /* Recursively write children of prototype. */ + if ((pt->flags & PROTO_CHILD)) { + ptrdiff_t i, n = pt->sizekgc; + GCRef *kr = mref(pt->k, GCRef) - 1; + for (i = 0; i < n; i++, kr--) { + GCobj *o = gcref(*kr); + if (o->gch.gct == ~LJ_TPROTO) + bcwrite_proto(ctx, gco2pt(o)); + } + } + + /* Start writing the prototype info to a buffer. */ + p = lj_buf_need(&ctx->sb, + 5+4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2); + p += 5; /* Leave room for final size. */ + + /* Write prototype header. 
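+** (Note on the 5 byte gap reserved above: the fix-up at the end of this
+** function writes the ULEB128 encoded body length n right-aligned into the
+** gap, so the record handed to the writer starts at offset 5-nn. The byte
+** count nn = (lj_fls(n)+8)*9 >> 6 works out to fls(n)/7 + 1, exactly the
+** ULEB128 length of n; e.g. a 300 byte body needs nn = 2, placing the
+** length in bytes 3..4 and handing nn+n = 302 bytes to the writer.)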
*/ + *p++ = (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI)); + *p++ = pt->numparams; + *p++ = pt->framesize; + *p++ = pt->sizeuv; + p = lj_strfmt_wuleb128(p, pt->sizekgc); + p = lj_strfmt_wuleb128(p, pt->sizekn); + p = lj_strfmt_wuleb128(p, pt->sizebc-1); + if (!ctx->strip) { + if (proto_lineinfo(pt)) + sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt); + p = lj_strfmt_wuleb128(p, sizedbg); + if (sizedbg) { + p = lj_strfmt_wuleb128(p, pt->firstline); + p = lj_strfmt_wuleb128(p, pt->numline); + } + } + + /* Write bytecode instructions and upvalue refs. */ + p = bcwrite_bytecode(ctx, p, pt); + p = lj_buf_wmem(p, proto_uv(pt), pt->sizeuv*2); + setsbufP(&ctx->sb, p); + + /* Write constants. */ + bcwrite_kgc(ctx, pt); + bcwrite_knum(ctx, pt); + + /* Write debug info, if not stripped. */ + if (sizedbg) { + p = lj_buf_more(&ctx->sb, sizedbg); + p = lj_buf_wmem(p, proto_lineinfo(pt), sizedbg); + setsbufP(&ctx->sb, p); + } + + /* Pass buffer to writer function. */ + if (ctx->status == 0) { + MSize n = sbuflen(&ctx->sb) - 5; + MSize nn = (lj_fls(n)+8)*9 >> 6; + char *q = sbufB(&ctx->sb) + (5 - nn); + p = lj_strfmt_wuleb128(q, n); /* Fill in final size. */ + lua_assert(p == sbufB(&ctx->sb) + 5); + ctx->status = ctx->wfunc(sbufL(&ctx->sb), q, nn+n, ctx->wdata); + } +} + +/* Write header of bytecode dump. */ +static void bcwrite_header(BCWriteCtx *ctx) +{ + GCstr *chunkname = proto_chunkname(ctx->pt); + const char *name = strdata(chunkname); + MSize len = chunkname->len; + char *p = lj_buf_need(&ctx->sb, 5+5+len); + *p++ = BCDUMP_HEAD1; + *p++ = BCDUMP_HEAD2; + *p++ = BCDUMP_HEAD3; + *p++ = BCDUMP_VERSION; + *p++ = (ctx->strip ? BCDUMP_F_STRIP : 0) + + LJ_BE*BCDUMP_F_BE + + ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0) + + LJ_FR2*BCDUMP_F_FR2; + if (!ctx->strip) { + p = lj_strfmt_wuleb128(p, len); + p = lj_buf_wmem(p, name, len); + } + ctx->status = ctx->wfunc(sbufL(&ctx->sb), sbufB(&ctx->sb), + (MSize)(p - sbufB(&ctx->sb)), ctx->wdata); +} + +/* Write footer of bytecode dump. */ +static void bcwrite_footer(BCWriteCtx *ctx) +{ + if (ctx->status == 0) { + uint8_t zero = 0; + ctx->status = ctx->wfunc(sbufL(&ctx->sb), &zero, 1, ctx->wdata); + } +} + +/* Protected callback for bytecode writer. */ +static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud) +{ + BCWriteCtx *ctx = (BCWriteCtx *)ud; + UNUSED(L); UNUSED(dummy); + lj_buf_need(&ctx->sb, 1024); /* Avoids resize for most prototypes. */ + bcwrite_header(ctx); + bcwrite_proto(ctx, ctx->pt); + bcwrite_footer(ctx); + return NULL; +} + +/* Write bytecode for a prototype. */ +int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data, + int strip) +{ + BCWriteCtx ctx; + int status; + ctx.pt = pt; + ctx.wfunc = writer; + ctx.wdata = data; + ctx.strip = strip; + ctx.status = 0; + lj_buf_init(L, &ctx.sb); + status = lj_vm_cpcall(L, NULL, &ctx, cpwriter); + if (status == 0) status = ctx.status; + lj_buf_free(G(sbufL(&ctx.sb)), &ctx.sb); + return status; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_buf.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_buf.c new file mode 100644 index 00000000000..023bb9aac8f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_buf.c @@ -0,0 +1,234 @@ +/* +** Buffer handling. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_buf_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_strfmt.h" + +/* -- Buffer management --------------------------------------------------- */ + +static void buf_grow(SBuf *sb, MSize sz) +{ + MSize osz = sbufsz(sb), len = sbuflen(sb), nsz = osz; + char *b; + if (nsz < LJ_MIN_SBUF) nsz = LJ_MIN_SBUF; + while (nsz < sz) nsz += nsz; + b = (char *)lj_mem_realloc(sbufL(sb), sbufB(sb), osz, nsz); + setmref(sb->b, b); + setmref(sb->p, b + len); + setmref(sb->e, b + nsz); +} + +LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz) +{ + lua_assert(sz > sbufsz(sb)); + if (LJ_UNLIKELY(sz > LJ_MAX_BUF)) + lj_err_mem(sbufL(sb)); + buf_grow(sb, sz); + return sbufB(sb); +} + +LJ_NOINLINE char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz) +{ + MSize len = sbuflen(sb); + lua_assert(sz > sbufleft(sb)); + if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF)) + lj_err_mem(sbufL(sb)); + buf_grow(sb, len + sz); + return sbufP(sb); +} + +void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb) +{ + char *b = sbufB(sb); + MSize osz = (MSize)(sbufE(sb) - b); + if (osz > 2*LJ_MIN_SBUF) { + MSize n = (MSize)(sbufP(sb) - b); + b = lj_mem_realloc(L, b, osz, (osz >> 1)); + setmref(sb->b, b); + setmref(sb->p, b + n); + setmref(sb->e, b + (osz >> 1)); + } +} + +char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz) +{ + SBuf *sb = &G(L)->tmpbuf; + setsbufL(sb, L); + return lj_buf_need(sb, sz); +} + +/* -- Low-level buffer put operations ------------------------------------- */ + +SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len) +{ + char *p = lj_buf_more(sb, len); + p = lj_buf_wmem(p, q, len); + setsbufP(sb, p); + return sb; +} + +#if LJ_HASJIT +SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c) +{ + char *p = lj_buf_more(sb, 1); + *p++ = (char)c; + setsbufP(sb, p); + return sb; +} +#endif + +SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s) +{ + MSize len = s->len; + char *p = lj_buf_more(sb, len); + p = lj_buf_wmem(p, strdata(s), len); + setsbufP(sb, p); + return sb; +} + +/* -- High-level buffer put operations ------------------------------------ */ + +SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s) +{ + MSize len = s->len; + char *p = lj_buf_more(sb, len), *e = p+len; + const char *q = strdata(s)+len-1; + while (p < e) + *p++ = *q--; + setsbufP(sb, p); + return sb; +} + +SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s) +{ + MSize len = s->len; + char *p = lj_buf_more(sb, len), *e = p+len; + const char *q = strdata(s); + for (; p < e; p++, q++) { + uint32_t c = *(unsigned char *)q; +#if LJ_TARGET_PPC + *p = c + ((c >= 'A' && c <= 'Z') << 5); +#else + if (c >= 'A' && c <= 'Z') c += 0x20; + *p = c; +#endif + } + setsbufP(sb, p); + return sb; +} + +SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s) +{ + MSize len = s->len; + char *p = lj_buf_more(sb, len), *e = p+len; + const char *q = strdata(s); + for (; p < e; p++, q++) { + uint32_t c = *(unsigned char *)q; +#if LJ_TARGET_PPC + *p = c - ((c >= 'a' && c <= 'z') << 5); +#else + if (c >= 'a' && c <= 'z') c -= 0x20; + *p = c; +#endif + } + setsbufP(sb, p); + return sb; +} + +SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep) +{ + MSize len = s->len; + if (rep > 0 && len) { + uint64_t tlen = (uint64_t)rep * len; + char *p; + if (LJ_UNLIKELY(tlen > LJ_MAX_STR)) + lj_err_mem(sbufL(sb)); + p = lj_buf_more(sb, (MSize)tlen); + if (len == 1) { /* Optimize a 
common case. */ + uint32_t c = strdata(s)[0]; + do { *p++ = c; } while (--rep > 0); + } else { + const char *e = strdata(s) + len; + do { + const char *q = strdata(s); + do { *p++ = *q++; } while (q < e); + } while (--rep > 0); + } + setsbufP(sb, p); + } + return sb; +} + +SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, int32_t i, int32_t e) +{ + MSize seplen = sep ? sep->len : 0; + if (i <= e) { + for (;;) { + cTValue *o = lj_tab_getint(t, i); + char *p; + if (!o) { + badtype: /* Error: bad element type. */ + setsbufP(sb, (void *)(intptr_t)i); /* Store failing index. */ + return NULL; + } else if (tvisstr(o)) { + MSize len = strV(o)->len; + p = lj_buf_wmem(lj_buf_more(sb, len + seplen), strVdata(o), len); + } else if (tvisint(o)) { + p = lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT+seplen), intV(o)); + } else if (tvisnum(o)) { + p = lj_strfmt_wnum(lj_buf_more(sb, STRFMT_MAXBUF_NUM+seplen), o); + } else { + goto badtype; + } + if (i++ == e) { + setsbufP(sb, p); + break; + } + if (seplen) p = lj_buf_wmem(p, strdata(sep), seplen); + setsbufP(sb, p); + } + } + return sb; +} + +/* -- Miscellaneous buffer operations ------------------------------------- */ + +GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb) +{ + return lj_str_new(sbufL(sb), sbufB(sb), sbuflen(sb)); +} + +/* Concatenate two strings. */ +GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2) +{ + MSize len1 = s1->len, len2 = s2->len; + char *buf = lj_buf_tmp(L, len1 + len2); + memcpy(buf, strdata(s1), len1); + memcpy(buf+len1, strdata(s2), len2); + return lj_str_new(L, buf, len1 + len2); +} + +/* Read ULEB128 from buffer. */ +uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp) +{ + const uint8_t *p = (const uint8_t *)*pp; + uint32_t v = *p++; + if (LJ_UNLIKELY(v >= 0x80)) { + int sh = 0; + v &= 0x7f; + do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80); + } + *pp = (const char *)p; + return v; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_buf.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_buf.h new file mode 100644 index 00000000000..1cf1780b020 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_buf.h @@ -0,0 +1,105 @@ +/* +** Buffer handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_BUF_H +#define _LJ_BUF_H + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_str.h" + +/* Resizable string buffers. Struct definition in lj_obj.h. 
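+** The three pointers satisfy b <= p <= e: b marks the start of the
+** allocation, p the current write position and e its end, so sbuflen(sb)
+** below is p-b and sbufleft(sb) is e-p, with buf_grow() in lj_buf.c above
+** doubling the allocation on demand. A minimal usage sketch of this
+** internal API (illustration only):
+**
+**   SBuf sb;
+**   lj_buf_init(L, &sb);
+**   lj_buf_putmem(&sb, "hello", 5);
+**   GCstr *s = lj_buf_str(L, &sb);
+**   lj_buf_free(G(L), &sb);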
*/ +#define sbufB(sb) (mref((sb)->b, char)) +#define sbufP(sb) (mref((sb)->p, char)) +#define sbufE(sb) (mref((sb)->e, char)) +#define sbufL(sb) (mref((sb)->L, lua_State)) +#define sbufsz(sb) ((MSize)(sbufE((sb)) - sbufB((sb)))) +#define sbuflen(sb) ((MSize)(sbufP((sb)) - sbufB((sb)))) +#define sbufleft(sb) ((MSize)(sbufE((sb)) - sbufP((sb)))) +#define setsbufP(sb, q) (setmref((sb)->p, (q))) +#define setsbufL(sb, l) (setmref((sb)->L, (l))) + +/* Buffer management */ +LJ_FUNC char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz); +LJ_FUNC char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz); +LJ_FUNC void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb); +LJ_FUNC char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz); + +static LJ_AINLINE void lj_buf_init(lua_State *L, SBuf *sb) +{ + setsbufL(sb, L); + setmref(sb->p, NULL); setmref(sb->e, NULL); setmref(sb->b, NULL); +} + +static LJ_AINLINE void lj_buf_reset(SBuf *sb) +{ + setmrefr(sb->p, sb->b); +} + +static LJ_AINLINE SBuf *lj_buf_tmp_(lua_State *L) +{ + SBuf *sb = &G(L)->tmpbuf; + setsbufL(sb, L); + lj_buf_reset(sb); + return sb; +} + +static LJ_AINLINE void lj_buf_free(global_State *g, SBuf *sb) +{ + lj_mem_free(g, sbufB(sb), sbufsz(sb)); +} + +static LJ_AINLINE char *lj_buf_need(SBuf *sb, MSize sz) +{ + if (LJ_UNLIKELY(sz > sbufsz(sb))) + return lj_buf_need2(sb, sz); + return sbufB(sb); +} + +static LJ_AINLINE char *lj_buf_more(SBuf *sb, MSize sz) +{ + if (LJ_UNLIKELY(sz > sbufleft(sb))) + return lj_buf_more2(sb, sz); + return sbufP(sb); +} + +/* Low-level buffer put operations */ +LJ_FUNC SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len); +#if LJ_HASJIT +LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c); +#endif +LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s); + +static LJ_AINLINE char *lj_buf_wmem(char *p, const void *q, MSize len) +{ + return (char *)memcpy(p, q, len) + len; +} + +static LJ_AINLINE void lj_buf_putb(SBuf *sb, int c) +{ + char *p = lj_buf_more(sb, 1); + *p++ = (char)c; + setsbufP(sb, p); +} + +/* High-level buffer put operations */ +LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s); +LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s); +LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s); +LJ_FUNC SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep); +LJ_FUNC SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, + int32_t i, int32_t e); + +/* Miscellaneous buffer operations */ +LJ_FUNCA GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb); +LJ_FUNC GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2); +LJ_FUNC uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp); + +static LJ_AINLINE GCstr *lj_buf_str(lua_State *L, SBuf *sb) +{ + return lj_str_new(L, sbufB(sb), sbuflen(sb)); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_carith.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_carith.c new file mode 100644 index 00000000000..9032ea32410 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_carith.c @@ -0,0 +1,429 @@ +/* +** C data arithmetic. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_ir.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" +#include "lj_carith.h" +#include "lj_strscan.h" + +/* -- C data arithmetic --------------------------------------------------- */ + +/* Binary operands of an operator converted to ctypes. 
*/ +typedef struct CDArith { + uint8_t *p[2]; + CType *ct[2]; +} CDArith; + +/* Check arguments for arithmetic metamethods. */ +static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca) +{ + TValue *o = L->base; + int ok = 1; + MSize i; + if (o+1 >= L->top) + lj_err_argt(L, 1, LUA_TCDATA); + for (i = 0; i < 2; i++, o++) { + if (tviscdata(o)) { + GCcdata *cd = cdataV(o); + CTypeID id = (CTypeID)cd->ctypeid; + CType *ct = ctype_raw(cts, id); + uint8_t *p = (uint8_t *)cdataptr(cd); + if (ctype_isptr(ct->info)) { + p = (uint8_t *)cdata_getptr(p, ct->size); + if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); + } else if (ctype_isfunc(ct->info)) { + p = (uint8_t *)*(void **)p; + ct = ctype_get(cts, + lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR)); + } + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + ca->ct[i] = ct; + ca->p[i] = p; + } else if (tvisint(o)) { + ca->ct[i] = ctype_get(cts, CTID_INT32); + ca->p[i] = (uint8_t *)&o->i; + } else if (tvisnum(o)) { + ca->ct[i] = ctype_get(cts, CTID_DOUBLE); + ca->p[i] = (uint8_t *)&o->n; + } else if (tvisnil(o)) { + ca->ct[i] = ctype_get(cts, CTID_P_VOID); + ca->p[i] = (uint8_t *)0; + } else if (tvisstr(o)) { + TValue *o2 = i == 0 ? o+1 : o-1; + CType *ct = ctype_raw(cts, cdataV(o2)->ctypeid); + ca->ct[i] = NULL; + ca->p[i] = (uint8_t *)strVdata(o); + ok = 0; + if (ctype_isenum(ct->info)) { + CTSize ofs; + CType *cct = lj_ctype_getfield(cts, ct, strV(o), &ofs); + if (cct && ctype_isconstval(cct->info)) { + ca->ct[i] = ctype_child(cts, cct); + ca->p[i] = (uint8_t *)&cct->size; /* Assumes ct does not grow. */ + ok = 1; + } else { + ca->ct[1-i] = ct; /* Use enum to improve error message. */ + ca->p[1-i] = NULL; + break; + } + } + } else { + ca->ct[i] = NULL; + ca->p[i] = (void *)(intptr_t)1; /* To make it unequal. */ + ok = 0; + } + } + return ok; +} + +/* Pointer arithmetic. */ +static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm) +{ + CType *ctp = ca->ct[0]; + uint8_t *pp = ca->p[0]; + ptrdiff_t idx; + CTSize sz; + CTypeID id; + GCcdata *cd; + if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) { + if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) && + (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) { + uint8_t *pp2 = ca->p[1]; + if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */ + setboolV(L->top-1, (pp == pp2)); + return 1; + } + if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL)) + return 0; + if (mm == MM_sub) { /* Pointer difference. */ + intptr_t diff; + sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */ + if (sz == 0 || sz == CTSIZE_INVALID) + return 0; + diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz; + /* All valid pointer differences on x64 are in (-2^47, +2^47), + ** which fits into a double without loss of precision. + */ + setintptrV(L->top-1, (int32_t)diff); + return 1; + } else if (mm == MM_lt) { /* Pointer comparison (unsigned). */ + setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2)); + return 1; + } else { + lua_assert(mm == MM_le); + setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2)); + return 1; + } + } + if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info))) + return 0; + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1], + (uint8_t *)&idx, ca->p[1], 0); + if (mm == MM_sub) idx = -idx; + } else if (mm == MM_add && ctype_isnum(ctp->info) && + (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) { + /* Swap pointer and index. 
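+** (i.e. the commuted form index + pointer). The scaling below mirrors C
+** pointer arithmetic: with a 4 byte element type, p + 3 advances the
+** address by 3*4 = 12 bytes, while the difference branch above divides the
+** byte distance by the element size, so p2 - p1 yields an element count.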
*/ + ctp = ca->ct[1]; pp = ca->p[1]; + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0], + (uint8_t *)&idx, ca->p[0], 0); + } else { + return 0; + } + sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */ + if (sz == CTSIZE_INVALID) + return 0; + pp += idx*(int32_t)sz; /* Compute pointer + index. */ + id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)), + CTSIZE_PTR); + cd = lj_cdata_new(cts, id, CTSIZE_PTR); + *(uint8_t **)cdataptr(cd) = pp; + setcdataV(L, L->top-1, cd); + lj_gc_check(L); + return 1; +} + +/* 64 bit integer arithmetic. */ +static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm) +{ + if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 && + ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) { + CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) || + ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ? + CTID_UINT64 : CTID_INT64; + CType *ct = ctype_get(cts, id); + GCcdata *cd; + uint64_t u0, u1, *up; + lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0); + if (mm != MM_unm) + lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0); + switch (mm) { + case MM_eq: + setboolV(L->top-1, (u0 == u1)); + return 1; + case MM_lt: + setboolV(L->top-1, + id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1)); + return 1; + case MM_le: + setboolV(L->top-1, + id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1)); + return 1; + default: break; + } + cd = lj_cdata_new(cts, id, 8); + up = (uint64_t *)cdataptr(cd); + setcdataV(L, L->top-1, cd); + switch (mm) { + case MM_add: *up = u0 + u1; break; + case MM_sub: *up = u0 - u1; break; + case MM_mul: *up = u0 * u1; break; + case MM_div: + if (id == CTID_INT64) + *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1); + else + *up = lj_carith_divu64(u0, u1); + break; + case MM_mod: + if (id == CTID_INT64) + *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1); + else + *up = lj_carith_modu64(u0, u1); + break; + case MM_pow: + if (id == CTID_INT64) + *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1); + else + *up = lj_carith_powu64(u0, u1); + break; + case MM_unm: *up = (uint64_t)-(int64_t)u0; break; + default: lua_assert(0); break; + } + lj_gc_check(L); + return 1; + } + return 0; +} + +/* Handle ctype arithmetic metamethods. */ +static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm) +{ + cTValue *tv = NULL; + if (tviscdata(L->base)) { + CTypeID id = cdataV(L->base)->ctypeid; + CType *ct = ctype_raw(cts, id); + if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); + tv = lj_ctype_meta(cts, id, mm); + } + if (!tv && L->base+1 < L->top && tviscdata(L->base+1)) { + CTypeID id = cdataV(L->base+1)->ctypeid; + CType *ct = ctype_raw(cts, id); + if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); + tv = lj_ctype_meta(cts, id, mm); + } + if (!tv) { + const char *repr[2]; + int i, isenum = -1, isstr = -1; + if (mm == MM_eq) { /* Equality checks never raise an error. */ + int eq = ca->p[0] == ca->p[1]; + setboolV(L->top-1, eq); + setboolV(&G(L)->tmptv2, eq); /* Remember for trace recorder. 
*/ + return 1; + } + for (i = 0; i < 2; i++) { + if (ca->ct[i] && tviscdata(L->base+i)) { + if (ctype_isenum(ca->ct[i]->info)) isenum = i; + repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL)); + } else { + if (tvisstr(&L->base[i])) isstr = i; + repr[i] = lj_typename(&L->base[i]); + } + } + if ((isenum ^ isstr) == 1) + lj_err_callerv(L, LJ_ERR_FFI_BADCONV, repr[isstr], repr[isenum]); + lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN : + mm == MM_concat ? LJ_ERR_FFI_BADCONCAT : + mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH, + repr[0], repr[1]); + } + return lj_meta_tailcall(L, tv); +} + +/* Arithmetic operators for cdata. */ +int lj_carith_op(lua_State *L, MMS mm) +{ + CTState *cts = ctype_cts(L); + CDArith ca; + if (carith_checkarg(L, cts, &ca)) { + if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) { + copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */ + return 1; + } + } + return lj_carith_meta(L, cts, &ca, mm); +} + +/* -- 64 bit bit operations helpers --------------------------------------- */ + +#if LJ_64 +#define B64DEF(name) \ + static LJ_AINLINE uint64_t lj_carith_##name(uint64_t x, int32_t sh) +#else +/* Not inlined on 32 bit archs, since some of these are quite lengthy. */ +#define B64DEF(name) \ + uint64_t LJ_NOINLINE lj_carith_##name(uint64_t x, int32_t sh) +#endif + +B64DEF(shl64) { return x << (sh&63); } +B64DEF(shr64) { return x >> (sh&63); } +B64DEF(sar64) { return (uint64_t)((int64_t)x >> (sh&63)); } +B64DEF(rol64) { return lj_rol(x, (sh&63)); } +B64DEF(ror64) { return lj_ror(x, (sh&63)); } + +#undef B64DEF + +uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op) +{ + switch (op) { + case IR_BSHL-IR_BSHL: x = lj_carith_shl64(x, sh); break; + case IR_BSHR-IR_BSHL: x = lj_carith_shr64(x, sh); break; + case IR_BSAR-IR_BSHL: x = lj_carith_sar64(x, sh); break; + case IR_BROL-IR_BSHL: x = lj_carith_rol64(x, sh); break; + case IR_BROR-IR_BSHL: x = lj_carith_ror64(x, sh); break; + default: lua_assert(0); break; + } + return x; +} + +/* Equivalent to lj_lib_checkbit(), but handles cdata. */ +uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id) +{ + TValue *o = L->base + narg-1; + if (o >= L->top) { + err: + lj_err_argt(L, narg, LUA_TNUMBER); + } else if (LJ_LIKELY(tvisnumber(o))) { + /* Handled below. */ + } else if (tviscdata(o)) { + CTState *cts = ctype_cts(L); + uint8_t *sp = (uint8_t *)cdataptr(cdataV(o)); + CTypeID sid = cdataV(o)->ctypeid; + CType *s = ctype_get(cts, sid); + uint64_t x; + if (ctype_isref(s->info)) { + sp = *(void **)sp; + sid = ctype_cid(s->info); + } + s = ctype_raw(cts, sid); + if (ctype_isenum(s->info)) s = ctype_child(cts, s); + if ((s->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) == + CTINFO(CT_NUM, CTF_UNSIGNED) && s->size == 8) + *id = CTID_UINT64; /* Use uint64_t, since it has the highest rank. */ + else if (!*id) + *id = CTID_INT64; /* Use int64_t, unless already set. */ + lj_cconv_ct_ct(cts, ctype_get(cts, *id), s, + (uint8_t *)&x, sp, CCF_ARG(narg)); + return x; + } else if (!(tvisstr(o) && lj_strscan_number(strV(o), o))) { + goto err; + } + if (LJ_LIKELY(tvisint(o))) { + return (uint32_t)intV(o); + } else { + int32_t i = lj_num2bit(numV(o)); + if (LJ_DUALNUM) setintV(o, i); + return (uint32_t)i; + } +} + + +/* -- 64 bit integer arithmetic helpers ----------------------------------- */ + +#if LJ_32 && LJ_HASJIT +/* Signed/unsigned 64 bit multiplication. 
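+** (Two conventions of the helpers in this section, for illustration: the
+** shift helpers above reduce the count modulo 64 via sh & 63, so an
+** out-of-range count wraps instead of hitting undefined behaviour, and the
+** division/modulo helpers below return 0x8000000000000000 on division by
+** zero rather than trapping. The pow helpers below use binary
+** exponentiation: for k = 13 three squarings produce x^2, x^4, x^8 and the
+** result accumulates as y = x * x^4 * x^8 = x^13.)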
*/ +int64_t lj_carith_mul64(int64_t a, int64_t b) +{ + return a * b; +} +#endif + +/* Unsigned 64 bit division. */ +uint64_t lj_carith_divu64(uint64_t a, uint64_t b) +{ + if (b == 0) return U64x(80000000,00000000); + return a / b; +} + +/* Signed 64 bit division. */ +int64_t lj_carith_divi64(int64_t a, int64_t b) +{ + if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1)) + return U64x(80000000,00000000); + return a / b; +} + +/* Unsigned 64 bit modulo. */ +uint64_t lj_carith_modu64(uint64_t a, uint64_t b) +{ + if (b == 0) return U64x(80000000,00000000); + return a % b; +} + +/* Signed 64 bit modulo. */ +int64_t lj_carith_modi64(int64_t a, int64_t b) +{ + if (b == 0) return U64x(80000000,00000000); + if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0; + return a % b; +} + +/* Unsigned 64 bit x^k. */ +uint64_t lj_carith_powu64(uint64_t x, uint64_t k) +{ + uint64_t y; + if (k == 0) + return 1; + for (; (k & 1) == 0; k >>= 1) x *= x; + y = x; + if ((k >>= 1) != 0) { + for (;;) { + x *= x; + if (k == 1) break; + if (k & 1) y *= x; + k >>= 1; + } + y *= x; + } + return y; +} + +/* Signed 64 bit x^k. */ +int64_t lj_carith_powi64(int64_t x, int64_t k) +{ + if (k == 0) + return 1; + if (k < 0) { + if (x == 0) + return U64x(7fffffff,ffffffff); + else if (x == 1) + return 1; + else if (x == -1) + return (k & 1) ? -1 : 1; + else + return 0; + } + return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_carith.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_carith.h new file mode 100644 index 00000000000..da8320f3066 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_carith.h @@ -0,0 +1,37 @@ +/* +** C data arithmetic. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CARITH_H +#define _LJ_CARITH_H + +#include "lj_obj.h" + +#if LJ_HASFFI + +LJ_FUNC int lj_carith_op(lua_State *L, MMS mm); + +#if LJ_32 +LJ_FUNC uint64_t lj_carith_shl64(uint64_t x, int32_t sh); +LJ_FUNC uint64_t lj_carith_shr64(uint64_t x, int32_t sh); +LJ_FUNC uint64_t lj_carith_sar64(uint64_t x, int32_t sh); +LJ_FUNC uint64_t lj_carith_rol64(uint64_t x, int32_t sh); +LJ_FUNC uint64_t lj_carith_ror64(uint64_t x, int32_t sh); +#endif +LJ_FUNC uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op); +LJ_FUNC uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id); + +#if LJ_32 && LJ_HASJIT +LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k); +#endif +LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b); +LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b); +LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b); +LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b); +LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k); +LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.c new file mode 100644 index 00000000000..5ab5b60daab --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.c @@ -0,0 +1,984 @@ +/* +** FFI C call handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" +#include "lj_ccall.h" +#include "lj_trace.h" + +/* Target-specific handling of register arguments. 
*/ +#if LJ_TARGET_X86 +/* -- x86 calling conventions --------------------------------------------- */ + +#if LJ_ABI_WIN + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs bigger than 8 by reference (on stack only). */ \ + cc->retref = (sz > 8); \ + if (cc->retref) cc->stack[nsp++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET + +#else + +#if LJ_TARGET_OSX + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs of size 1, 2, 4 or 8 in registers. */ \ + cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \ + if (cc->retref) { \ + if (ngpr < maxgpr) \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + else \ + cc->stack[nsp++] = (GPRArg)dp; \ + } else { /* Struct with single FP field ends up in FPR. */ \ + cc->resx87 = ccall_classify_struct(cts, ctr); \ + } + +#define CCALL_HANDLE_STRUCTRET2 \ + if (cc->resx87) sp = (uint8_t *)&cc->fpr[0]; \ + memcpy(dp, sp, ctr->size); + +#else + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \ + if (ngpr < maxgpr) \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + else \ + cc->stack[nsp++] = (GPRArg)dp; + +#endif + +#define CCALL_HANDLE_COMPLEXRET \ + /* Return complex float in GPRs and complex double by reference. */ \ + cc->retref = (sz > 8); \ + if (cc->retref) { \ + if (ngpr < maxgpr) \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + else \ + cc->stack[nsp++] = (GPRArg)dp; \ + } + +#endif + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (!cc->retref) \ + *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */ + +#define CCALL_HANDLE_STRUCTARG \ + ngpr = maxgpr; /* Pass all structs by value on the stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + isfp = 1; /* Pass complex by value on stack. */ + +#define CCALL_HANDLE_REGARG \ + if (!isfp) { /* Only non-FP values may be passed in registers. */ \ + if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \ + if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \ + } else if (ngpr + 1 <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#elif LJ_TARGET_X64 && LJ_ABI_WIN +/* -- Windows/x64 calling conventions ------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \ + cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \ + if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (!cc->retref) \ + *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */ + +#define CCALL_HANDLE_STRUCTARG \ + /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \ + if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \ + } + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex float in a GPR and complex double by reference. */ \ + if (sz != 2*sizeof(float)) { \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; \ + } + +/* Windows/x64 argument registers are strictly positional (use ngpr). 
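+** For example, under the rules above a struct { int32_t a, b; } (8 bytes)
+** is passed by value in a GPR, a 12 byte struct is first copied to a
+** temporary cdata and passed by reference, and an FP argument consumes the
+** same positional slot as an integer one (the macro below advances ngpr in
+** both cases).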
*/ +#define CCALL_HANDLE_REGARG \ + if (isfp) { \ + if (ngpr < maxgpr) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \ + } else { \ + if (ngpr < maxgpr) { dp = &cc->gpr[ngpr++]; goto done; } \ + } + +#elif LJ_TARGET_X64 +/* -- POSIX/x64 calling conventions --------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + int rcl[2]; rcl[0] = rcl[1] = 0; \ + if (ccall_classify_struct(cts, ctr, rcl, 0)) { \ + cc->retref = 1; /* Return struct by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + } else { \ + cc->retref = 0; /* Return small structs in registers. */ \ + } + +#define CCALL_HANDLE_STRUCTRET2 \ + int rcl[2]; rcl[0] = rcl[1] = 0; \ + ccall_classify_struct(cts, ctr, rcl, 0); \ + ccall_struct_ret(cc, rcl, dp, ctr->size); + +#define CCALL_HANDLE_COMPLEXRET \ + /* Complex values are returned in one or two FPRs. */ \ + cc->retref = 0; + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \ + *(int64_t *)dp = cc->fpr[0].l[0]; \ + } else { /* Copy non-contiguous complex double from FPRs. */ \ + ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \ + ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \ + } + +#define CCALL_HANDLE_STRUCTARG \ + int rcl[2]; rcl[0] = rcl[1] = 0; \ + if (!ccall_classify_struct(cts, d, rcl, 0)) { \ + cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \ + if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \ + nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \ + continue; \ + } /* Pass all other structs by value on stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */ + +#define CCALL_HANDLE_REGARG \ + if (isfp) { /* Try to pass argument in FPRs. */ \ + int n2 = ctype_isvector(d->info) ? 1 : n; \ + if (nfpr + n2 <= CCALL_NARG_FPR) { \ + dp = &cc->fpr[nfpr]; \ + nfpr += n2; \ + goto done; \ + } \ + } else { /* Try to pass argument in GPRs. */ \ + /* Note that reordering is explicitly allowed in the x64 ABI. */ \ + if (n <= 2 && ngpr + n <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#elif LJ_TARGET_ARM +/* -- ARM calling conventions --------------------------------------------- */ + +#if LJ_ABI_SOFTFP + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs of size <= 4 in a GPR. */ \ + cc->retref = !(sz <= 4); \ + if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET \ + cc->retref = 1; /* Return all complex values by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET2 \ + UNUSED(dp); /* Nothing to do. */ + +#define CCALL_HANDLE_STRUCTARG \ + /* Pass all structs by value in registers and/or on the stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex by value in 2 or 4 GPRs. */ + +#define CCALL_HANDLE_REGARG_FP1 +#define CCALL_HANDLE_REGARG_FP2 + +#else + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = !ccall_classify_struct(cts, ctr, ct); \ + if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_STRUCTRET2 \ + if (ccall_classify_struct(cts, ctr, ct) > 1) sp = (uint8_t *)&cc->fpr[0]; \ + memcpy(dp, sp, ctr->size); + +#define CCALL_HANDLE_COMPLEXRET \ + if (!(ct->info & CTF_VARARG)) cc->retref = 0; /* Return complex in FPRs. */ + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (!(ct->info & CTF_VARARG)) memcpy(dp, &cc->fpr[0], ctr->size); + +#define CCALL_HANDLE_STRUCTARG \ + isfp = (ccall_classify_struct(cts, d, ct) > 1); + /* Pass all structs by value in registers and/or on the stack. 
*/ + +#define CCALL_HANDLE_COMPLEXARG \ + isfp = 1; /* Pass complex by value in FPRs or on stack. */ + +#define CCALL_HANDLE_REGARG_FP1 \ + if (isfp && !(ct->info & CTF_VARARG)) { \ + if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \ + if (nfpr + (n >> 1) <= CCALL_NARG_FPR) { \ + dp = &cc->fpr[nfpr]; \ + nfpr += (n >> 1); \ + goto done; \ + } \ + } else { \ + if (sz > 1 && fprodd != nfpr) fprodd = 0; \ + if (fprodd) { \ + if (2*nfpr+n <= 2*CCALL_NARG_FPR+1) { \ + dp = (void *)&cc->fpr[fprodd-1].f[1]; \ + nfpr += (n >> 1); \ + if ((n & 1)) fprodd = 0; else fprodd = nfpr-1; \ + goto done; \ + } \ + } else { \ + if (2*nfpr+n <= 2*CCALL_NARG_FPR) { \ + dp = (void *)&cc->fpr[nfpr]; \ + nfpr += (n >> 1); \ + if ((n & 1)) fprodd = ++nfpr; else fprodd = 0; \ + goto done; \ + } \ + } \ + } \ + fprodd = 0; /* No reordering after the first FP value is on stack. */ \ + } else { + +#define CCALL_HANDLE_REGARG_FP2 } + +#endif + +#define CCALL_HANDLE_REGARG \ + CCALL_HANDLE_REGARG_FP1 \ + if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \ + if (ngpr < maxgpr) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + } \ + if (ngpr < maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + if (ngpr + n > maxgpr) { \ + nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \ + if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \ + ngpr = maxgpr; \ + } else { \ + ngpr += n; \ + } \ + goto done; \ + } CCALL_HANDLE_REGARG_FP2 + +#define CCALL_HANDLE_RET \ + if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0]; + +#elif LJ_TARGET_ARM64 +/* -- ARM64 calling conventions ------------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = !ccall_classify_struct(cts, ctr); \ + if (cc->retref) cc->retp = dp; + +#define CCALL_HANDLE_STRUCTRET2 \ + unsigned int cl = ccall_classify_struct(cts, ctr); \ + if ((cl & 4)) { /* Combine float HFA from separate registers. */ \ + CTSize i = (cl >> 8) - 1; \ + do { ((uint32_t *)dp)[i] = cc->fpr[i].u32; } while (i--); \ + } else { \ + if (cl > 1) sp = (uint8_t *)&cc->fpr[0]; \ + memcpy(dp, sp, ctr->size); \ + } + +#define CCALL_HANDLE_COMPLEXRET \ + /* Complex values are returned in one or two FPRs. */ \ + cc->retref = 0; + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \ + ((float *)dp)[0] = cc->fpr[0].f; \ + ((float *)dp)[1] = cc->fpr[1].f; \ + } else { /* Copy complex double from FPRs. */ \ + ((double *)dp)[0] = cc->fpr[0].d; \ + ((double *)dp)[1] = cc->fpr[1].d; \ + } + +#define CCALL_HANDLE_STRUCTARG \ + unsigned int cl = ccall_classify_struct(cts, d); \ + if (cl == 0) { /* Pass struct by reference. */ \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; \ + } else if (cl > 1) { /* Pass struct in FPRs or on stack. */ \ + isfp = (cl & 4) ? 2 : 1; \ + } /* else: Pass struct in GPRs or on stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex by value in separate (!) FPRs or on stack. */ \ + isfp = ctr->size == 2*sizeof(float) ? 2 : 1; + +#define CCALL_HANDLE_REGARG \ + if (LJ_TARGET_IOS && isva) { \ + /* IOS: All variadic arguments are on the stack. */ \ + } else if (isfp) { /* Try to pass argument in FPRs. */ \ + int n2 = ctype_isvector(d->info) ? 1 : n*isfp; \ + if (nfpr + n2 <= CCALL_NARG_FPR) { \ + dp = &cc->fpr[nfpr]; \ + nfpr += n2; \ + goto done; \ + } else { \ + nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \ + if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \ + } \ + } else { /* Try to pass argument in GPRs. 
*/ \ + if (!LJ_TARGET_IOS && (d->info & CTF_ALIGN) > CTALIGN_PTR) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + if (ngpr + n <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } else { \ + ngpr = maxgpr; /* Prevent reordering. */ \ + if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \ + } \ + } + +#elif LJ_TARGET_PPC +/* -- PPC calling conventions --------------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = 1; /* Return all structs by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET \ + /* Complex values are returned in 2 or 4 GPRs. */ \ + cc->retref = 0; + +#define CCALL_HANDLE_COMPLEXRET2 \ + memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */ + +#define CCALL_HANDLE_STRUCTARG \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; /* Pass all structs by reference. */ + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex by value in 2 or 4 GPRs. */ + +#define CCALL_HANDLE_REGARG \ + if (isfp) { /* Try to pass argument in FPRs. */ \ + if (nfpr + 1 <= CCALL_NARG_FPR) { \ + dp = &cc->fpr[nfpr]; \ + nfpr += 1; \ + d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \ + goto done; \ + } \ + } else { /* Try to pass argument in GPRs. */ \ + if (n > 1) { \ + lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \ + if (ctype_isinteger(d->info)) \ + ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ + else if (ngpr + n > maxgpr) \ + ngpr = maxgpr; /* Prevent reordering. */ \ + } \ + if (ngpr + n <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#define CCALL_HANDLE_RET \ + if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ + ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ + +#elif LJ_TARGET_MIPS +/* -- MIPS calling conventions -------------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = 1; /* Return all structs by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET \ + /* Complex values are returned in 1 or 2 FPRs. */ \ + cc->retref = 0; + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \ + ((float *)dp)[0] = cc->fpr[0].f; \ + ((float *)dp)[1] = cc->fpr[1].f; \ + } else { /* Copy complex double from FPRs. */ \ + ((double *)dp)[0] = cc->fpr[0].d; \ + ((double *)dp)[1] = cc->fpr[1].d; \ + } + +#define CCALL_HANDLE_STRUCTARG \ + /* Pass all structs by value in registers and/or on the stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex by value in 2 or 4 GPRs. */ + +#define CCALL_HANDLE_REGARG \ + if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \ + /* Try to pass argument in FPRs. */ \ + dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \ + nfpr++; ngpr += n; \ + goto done; \ + } else { /* Try to pass argument in GPRs. */ \ + nfpr = CCALL_NARG_FPR; \ + if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + if (ngpr < maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + if (ngpr + n > maxgpr) { \ + nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \ + if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. 
*/ \ + ngpr = maxgpr; \ + } else { \ + ngpr += n; \ + } \ + goto done; \ + } \ + } + +#define CCALL_HANDLE_RET \ + if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ + sp = (uint8_t *)&cc->fpr[0].f; + +#else +#error "Missing calling convention definitions for this architecture" +#endif + +#ifndef CCALL_HANDLE_STRUCTRET2 +#define CCALL_HANDLE_STRUCTRET2 \ + memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */ +#endif + +/* -- x86 OSX ABI struct classification ----------------------------------- */ + +#if LJ_TARGET_X86 && LJ_TARGET_OSX + +/* Check for struct with single FP field. */ +static int ccall_classify_struct(CTState *cts, CType *ct) +{ + CTSize sz = ct->size; + if (!(sz == sizeof(float) || sz == sizeof(double))) return 0; + if ((ct->info & CTF_UNION)) return 0; + while (ct->sib) { + ct = ctype_get(cts, ct->sib); + if (ctype_isfield(ct->info)) { + CType *sct = ctype_rawchild(cts, ct); + if (ctype_isfp(sct->info)) { + if (sct->size == sz) + return (sz >> 2); /* Return 1 for float or 2 for double. */ + } else if (ctype_isstruct(sct->info)) { + if (sct->size) + return ccall_classify_struct(cts, sct); + } else { + break; + } + } else if (ctype_isbitfield(ct->info)) { + break; + } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { + CType *sct = ctype_rawchild(cts, ct); + if (sct->size) + return ccall_classify_struct(cts, sct); + } + } + return 0; +} + +#endif + +/* -- x64 struct classification ------------------------------------------- */ + +#if LJ_TARGET_X64 && !LJ_ABI_WIN + +/* Register classes for x64 struct classification. */ +#define CCALL_RCL_INT 1 +#define CCALL_RCL_SSE 2 +#define CCALL_RCL_MEM 4 +/* NYI: classify vectors. */ + +static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs); + +/* Classify a C type. */ +static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs) +{ + if (ctype_isarray(ct->info)) { + CType *cct = ctype_rawchild(cts, ct); + CTSize eofs, esz = cct->size, asz = ct->size; + for (eofs = 0; eofs < asz; eofs += esz) + ccall_classify_ct(cts, cct, rcl, ofs+eofs); + } else if (ctype_isstruct(ct->info)) { + ccall_classify_struct(cts, ct, rcl, ofs); + } else { + int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT; + lua_assert(ctype_hassize(ct->info)); + if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */ + rcl[(ofs >= 8)] |= cl; + } +} + +/* Recursively classify a struct based on its fields. */ +static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs) +{ + if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */ + while (ct->sib) { + CTSize fofs; + ct = ctype_get(cts, ct->sib); + fofs = ofs+ct->size; + if (ctype_isfield(ct->info)) + ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs); + else if (ctype_isbitfield(ct->info)) + rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */ + else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) + ccall_classify_struct(cts, ctype_rawchild(cts, ct), rcl, fofs); + } + return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */ +} + +/* Try to split up a small struct into registers. */ +static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl) +{ + MSize ngpr = cc->ngpr, nfpr = cc->nfpr; + uint32_t i; + for (i = 0; i < 2; i++) { + lua_assert(!(rcl[i] & CCALL_RCL_MEM)); + if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ + if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. 
*/ + cc->gpr[ngpr++] = dp[i]; + } else if ((rcl[i] & CCALL_RCL_SSE)) { + if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */ + cc->fpr[nfpr++].l[0] = dp[i]; + } + } + cc->ngpr = ngpr; cc->nfpr = nfpr; + return 0; /* Ok. */ +} + +/* Pass a small struct argument. */ +static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl, + TValue *o, int narg) +{ + GPRArg dp[2]; + dp[0] = dp[1] = 0; + /* Convert to temp. struct. */ + lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); + if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */ + MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1; + if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */ + cc->nsp = nsp + n; + memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR); + } + return 0; /* Ok. */ +} + +/* Combine returned small struct. */ +static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz) +{ + GPRArg sp[2]; + MSize ngpr = 0, nfpr = 0; + uint32_t i; + for (i = 0; i < 2; i++) { + if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ + sp[i] = cc->gpr[ngpr++]; + } else if ((rcl[i] & CCALL_RCL_SSE)) { + sp[i] = cc->fpr[nfpr++].l[0]; + } + } + memcpy(dp, sp, sz); +} +#endif + +/* -- ARM hard-float ABI struct classification ---------------------------- */ + +#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP + +/* Classify a struct based on its fields. */ +static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf) +{ + CTSize sz = ct->size; + unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION); + if ((ctf->info & CTF_VARARG)) goto noth; + while (ct->sib) { + CType *sct; + ct = ctype_get(cts, ct->sib); + if (ctype_isfield(ct->info)) { + sct = ctype_rawchild(cts, ct); + if (ctype_isfp(sct->info)) { + r |= sct->size; + if (!isu) n++; else if (n == 0) n = 1; + } else if (ctype_iscomplex(sct->info)) { + r |= (sct->size >> 1); + if (!isu) n += 2; else if (n < 2) n = 2; + } else if (ctype_isstruct(sct->info)) { + goto substruct; + } else { + goto noth; + } + } else if (ctype_isbitfield(ct->info)) { + goto noth; + } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { + sct = ctype_rawchild(cts, ct); + substruct: + if (sct->size > 0) { + unsigned int s = ccall_classify_struct(cts, sct, ctf); + if (s <= 1) goto noth; + r |= (s & 255); + if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8); + } + } + } + if ((r == 4 || r == 8) && n <= 4) + return r + (n << 8); +noth: /* Not a homogeneous float/double aggregate. */ + return (sz <= 4); /* Return structs of size <= 4 in a GPR. */ +} + +#endif + +/* -- ARM64 ABI struct classification ------------------------------------- */ + +#if LJ_TARGET_ARM64 + +/* Classify a struct based on its fields. 
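+** Returns 0 if the struct must be passed or returned by reference, 1 if it
+** fits into GPRs, or elemsize + (count << 8) for a homogeneous float/double
+** aggregate (HFA) of up to 4 elements. E.g. a hypothetical struct with two
+** float fields classifies as 4 + (2 << 8) and travels in two separate FPRs.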
*/ +static unsigned int ccall_classify_struct(CTState *cts, CType *ct) +{ + CTSize sz = ct->size; + unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION); + while (ct->sib) { + CType *sct; + ct = ctype_get(cts, ct->sib); + if (ctype_isfield(ct->info)) { + sct = ctype_rawchild(cts, ct); + if (ctype_isfp(sct->info)) { + r |= sct->size; + if (!isu) n++; else if (n == 0) n = 1; + } else if (ctype_iscomplex(sct->info)) { + r |= (sct->size >> 1); + if (!isu) n += 2; else if (n < 2) n = 2; + } else if (ctype_isstruct(sct->info)) { + goto substruct; + } else { + goto noth; + } + } else if (ctype_isbitfield(ct->info)) { + goto noth; + } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { + sct = ctype_rawchild(cts, ct); + substruct: + if (sct->size > 0) { + unsigned int s = ccall_classify_struct(cts, sct); + if (s <= 1) goto noth; + r |= (s & 255); + if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8); + } + } + } + if ((r == 4 || r == 8) && n <= 4) + return r + (n << 8); +noth: /* Not a homogeneous float/double aggregate. */ + return (sz <= 16); /* Return structs of size <= 16 in GPRs. */ +} + +#endif + +/* -- Common C call handling ---------------------------------------------- */ + +/* Infer the destination CTypeID for a vararg argument. */ +CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o) +{ + if (tvisnumber(o)) { + return CTID_DOUBLE; + } else if (tviscdata(o)) { + CTypeID id = cdataV(o)->ctypeid; + CType *s = ctype_get(cts, id); + if (ctype_isrefarray(s->info)) { + return lj_ctype_intern(cts, + CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR); + } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) { + /* NYI: how to pass a struct by value in a vararg argument? */ + return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR); + } else if (ctype_isfp(s->info) && s->size == sizeof(float)) { + return CTID_DOUBLE; + } else { + return id; + } + } else if (tvisstr(o)) { + return CTID_P_CCHAR; + } else if (tvisbool(o)) { + return CTID_BOOL; + } else { + return CTID_P_VOID; + } +} + +/* Setup arguments for C call. */ +static int ccall_set_args(lua_State *L, CTState *cts, CType *ct, + CCallState *cc) +{ + int gcsteps = 0; + TValue *o, *top = L->top; + CTypeID fid; + CType *ctr; + MSize maxgpr, ngpr = 0, nsp = 0, narg; +#if CCALL_NARG_FPR + MSize nfpr = 0; +#if LJ_TARGET_ARM + MSize fprodd = 0; +#endif +#endif + + /* Clear unused regs to get some determinism in case of misdeclaration. */ + memset(cc->gpr, 0, sizeof(cc->gpr)); +#if CCALL_NUM_FPR + memset(cc->fpr, 0, sizeof(cc->fpr)); +#endif + +#if LJ_TARGET_X86 + /* x86 has several different calling conventions. */ + cc->resx87 = 0; + switch (ctype_cconv(ct->info)) { + case CTCC_FASTCALL: maxgpr = 2; break; + case CTCC_THISCALL: maxgpr = 1; break; + default: maxgpr = 0; break; + } +#else + maxgpr = CCALL_NARG_GPR; +#endif + + /* Perform required setup for some result types. */ + ctr = ctype_rawchild(cts, ct); + if (ctype_isvector(ctr->info)) { + if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16))) + goto err_nyi; + } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) { + /* Preallocate cdata object and anchor it after arguments. */ + CTSize sz = ctr->size; + GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz); + void *dp = cdataptr(cd); + setcdataV(L, L->top++, cd); + if (ctype_isstruct(ctr->info)) { + CCALL_HANDLE_STRUCTRET + } else { + CCALL_HANDLE_COMPLEXRET + } +#if LJ_TARGET_X86 + } else if (ctype_isfp(ctr->info)) { + cc->resx87 = ctr->size == sizeof(float) ? 
1 : 2; +#endif + } + + /* Skip initial attributes. */ + fid = ct->sib; + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (!ctype_isattrib(ctf->info)) break; + fid = ctf->sib; + } + + /* Walk through all passed arguments. */ + for (o = L->base+1, narg = 1; o < top; o++, narg++) { + CTypeID did; + CType *d; + CTSize sz; + MSize n, isfp = 0, isva = 0; + void *dp, *rp = NULL; + + if (fid) { /* Get argument type from field. */ + CType *ctf = ctype_get(cts, fid); + fid = ctf->sib; + lua_assert(ctype_isfield(ctf->info)); + did = ctype_cid(ctf->info); + } else { + if (!(ct->info & CTF_VARARG)) + lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */ + did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */ + isva = 1; + } + d = ctype_raw(cts, did); + sz = d->size; + + /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */ + if (ctype_isnum(d->info)) { + if (sz > 8) goto err_nyi; + if ((d->info & CTF_FP)) + isfp = 1; + } else if (ctype_isvector(d->info)) { + if (CCALL_VECTOR_REG && (sz == 8 || sz == 16)) + isfp = 1; + else + goto err_nyi; + } else if (ctype_isstruct(d->info)) { + CCALL_HANDLE_STRUCTARG + } else if (ctype_iscomplex(d->info)) { + CCALL_HANDLE_COMPLEXARG + } else { + sz = CTSIZE_PTR; + } + sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); + n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */ + + CCALL_HANDLE_REGARG /* Handle register arguments. */ + + /* Otherwise pass argument on stack. */ + if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) { + MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1; + nsp = (nsp + align) & ~align; /* Align argument on stack. */ + } + if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */ + err_nyi: + lj_err_caller(L, LJ_ERR_FFI_NYICALL); + } + dp = &cc->stack[nsp]; + nsp += n; + isva = 0; + + done: + if (rp) { /* Pass by reference. */ + gcsteps++; + *(void **)dp = rp; + dp = rp; + } + lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); + /* Extend passed integers to 32 bits at least. */ + if (ctype_isinteger_or_bool(d->info) && d->size < 4) { + if (d->info & CTF_UNSIGNED) + *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp : + (uint32_t)*(uint16_t *)dp; + else + *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp : + (int32_t)*(int16_t *)dp; + } +#if LJ_TARGET_X64 && LJ_ABI_WIN + if (isva) { /* Windows/x64 mirrors varargs in both register sets. */ + if (nfpr == ngpr) + cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0]; + else + cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1]; + } +#else + UNUSED(isva); +#endif +#if LJ_TARGET_X64 && !LJ_ABI_WIN + if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) { + cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */ + cc->fpr[nfpr-2].d[1] = 0; + } +#elif LJ_TARGET_ARM64 + if (isfp == 2 && (uint8_t *)dp < (uint8_t *)cc->stack) { + /* Split float HFA or complex float into separate registers. */ + CTSize i = (sz >> 2) - 1; + do { ((uint64_t *)dp)[i] = ((uint32_t *)dp)[i]; } while (i--); + } +#else + UNUSED(isfp); +#endif + } + if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */ + +#if LJ_TARGET_X64 || LJ_TARGET_PPC + cc->nfpr = nfpr; /* Required for vararg functions. */ +#endif + cc->nsp = nsp; + cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR; + if (nsp > CCALL_SPS_FREE) + cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u); + return gcsteps; +} + +/* Get results from C call. 
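+** Converts the C return value in the register save area (cc->gpr/cc->fpr)
+** into a Lua TValue, or copies it into the preallocated cdata object for
+** struct and complex results, applying the target-specific
+** CCALL_HANDLE_*RET2 fixups; returns the number of extra GC steps.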
*/ +static int ccall_get_results(lua_State *L, CTState *cts, CType *ct, + CCallState *cc, int *ret) +{ + CType *ctr = ctype_rawchild(cts, ct); + uint8_t *sp = (uint8_t *)&cc->gpr[0]; + if (ctype_isvoid(ctr->info)) { + *ret = 0; /* Zero results. */ + return 0; /* No additional GC step. */ + } + *ret = 1; /* One result. */ + if (ctype_isstruct(ctr->info)) { + /* Return cdata object which is already on top of stack. */ + if (!cc->retref) { + void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */ + CCALL_HANDLE_STRUCTRET2 + } + return 1; /* One GC step. */ + } + if (ctype_iscomplex(ctr->info)) { + /* Return cdata object which is already on top of stack. */ + void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */ + CCALL_HANDLE_COMPLEXRET2 + return 1; /* One GC step. */ + } + if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR) + sp += (CTSIZE_PTR - ctr->size); +#if CCALL_NUM_FPR + if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info)) + sp = (uint8_t *)&cc->fpr[0]; +#endif +#ifdef CCALL_HANDLE_RET + CCALL_HANDLE_RET +#endif + /* No reference types end up here, so there's no need for the CTypeID. */ + lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info))); + return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp); +} + +/* Call C function. */ +int lj_ccall_func(lua_State *L, GCcdata *cd) +{ + CTState *cts = ctype_cts(L); + CType *ct = ctype_raw(cts, cd->ctypeid); + CTSize sz = CTSIZE_PTR; + if (ctype_isptr(ct->info)) { + sz = ct->size; + ct = ctype_rawchild(cts, ct); + } + if (ctype_isfunc(ct->info)) { + CCallState cc; + int gcsteps, ret; + cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz); + gcsteps = ccall_set_args(L, cts, ct, &cc); + ct = (CType *)((intptr_t)ct-(intptr_t)cts->tab); + cts->cb.slot = ~0u; + lj_vm_ffi_call(&cc); + if (cts->cb.slot != ~0u) { /* Blacklist function that called a callback. */ + TValue tv; + setlightudV(&tv, (void *)cc.func); + setboolV(lj_tab_set(L, cts->miscmap, &tv), 1); + } + ct = (CType *)((intptr_t)ct+(intptr_t)cts->tab); /* May be reallocated. */ + gcsteps += ccall_get_results(L, cts, ct, &cc, &ret); +#if LJ_TARGET_X86 && LJ_ABI_WIN + /* Automatically detect __stdcall and fix up C function declaration. */ + if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) { + CTF_INSERT(ct->info, CCONV, CTCC_STDCALL); + lj_trace_abort(G(L)); + } +#endif + while (gcsteps-- > 0) + lj_gc_check(L); + return ret; + } + return -1; /* Not a function. */ +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.h new file mode 100644 index 00000000000..91983feebd7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccall.h @@ -0,0 +1,178 @@ +/* +** FFI C call handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CCALL_H +#define _LJ_CCALL_H + +#include "lj_obj.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* -- C calling conventions ----------------------------------------------- */ + +#if LJ_TARGET_X86ORX64 + +#if LJ_TARGET_X86 +#define CCALL_NARG_GPR 2 /* For fastcall arguments. */ +#define CCALL_NARG_FPR 0 +#define CCALL_NRET_GPR 2 +#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */ +#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. 
*/ +#elif LJ_ABI_WIN +#define CCALL_NARG_GPR 4 +#define CCALL_NARG_FPR 4 +#define CCALL_NRET_GPR 1 +#define CCALL_NRET_FPR 1 +#define CCALL_SPS_EXTRA 4 +#else +#define CCALL_NARG_GPR 6 +#define CCALL_NARG_FPR 8 +#define CCALL_NRET_GPR 2 +#define CCALL_NRET_FPR 2 +#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */ +#endif + +#define CCALL_SPS_FREE 1 +#define CCALL_ALIGN_CALLSTATE 16 + +typedef LJ_ALIGN(16) union FPRArg { + double d[2]; + float f[4]; + uint8_t b[16]; + uint16_t s[8]; + int i[4]; + int64_t l[2]; +} FPRArg; + +typedef intptr_t GPRArg; + +#elif LJ_TARGET_ARM + +#define CCALL_NARG_GPR 4 +#define CCALL_NRET_GPR 2 /* For softfp double. */ +#if LJ_ABI_SOFTFP +#define CCALL_NARG_FPR 0 +#define CCALL_NRET_FPR 0 +#else +#define CCALL_NARG_FPR 8 +#define CCALL_NRET_FPR 4 +#endif +#define CCALL_SPS_FREE 0 + +typedef intptr_t GPRArg; +typedef union FPRArg { + double d; + float f[2]; +} FPRArg; + +#elif LJ_TARGET_ARM64 + +#define CCALL_NARG_GPR 8 +#define CCALL_NRET_GPR 2 +#define CCALL_NARG_FPR 8 +#define CCALL_NRET_FPR 4 +#define CCALL_SPS_FREE 0 + +typedef intptr_t GPRArg; +typedef union FPRArg { + double d; + float f; + uint32_t u32; +} FPRArg; + +#elif LJ_TARGET_PPC + +#define CCALL_NARG_GPR 8 +#define CCALL_NARG_FPR 8 +#define CCALL_NRET_GPR 4 /* For complex double. */ +#define CCALL_NRET_FPR 1 +#define CCALL_SPS_EXTRA 4 +#define CCALL_SPS_FREE 0 + +typedef intptr_t GPRArg; +typedef double FPRArg; + +#elif LJ_TARGET_MIPS + +#define CCALL_NARG_GPR 4 +#define CCALL_NARG_FPR 2 +#define CCALL_NRET_GPR 2 +#define CCALL_NRET_FPR 2 +#define CCALL_SPS_EXTRA 7 +#define CCALL_SPS_FREE 1 + +typedef intptr_t GPRArg; +typedef union FPRArg { + double d; + struct { LJ_ENDIAN_LOHI(float f; , float g;) }; +} FPRArg; + +#else +#error "Missing calling convention definitions for this architecture" +#endif + +#ifndef CCALL_SPS_EXTRA +#define CCALL_SPS_EXTRA 0 +#endif +#ifndef CCALL_VECTOR_REG +#define CCALL_VECTOR_REG 0 +#endif +#ifndef CCALL_ALIGN_STACKARG +#define CCALL_ALIGN_STACKARG 1 +#endif +#ifndef CCALL_ALIGN_CALLSTATE +#define CCALL_ALIGN_CALLSTATE 8 +#endif + +#define CCALL_NUM_GPR \ + (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR) +#define CCALL_NUM_FPR \ + (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR) + +/* Check against constants in lj_ctype.h. */ +LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR); +LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR); + +#define CCALL_MAXSTACK 32 + +/* -- C call state -------------------------------------------------------- */ + +typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState { + void (*func)(void); /* Pointer to called function. */ + uint32_t spadj; /* Stack pointer adjustment. */ + uint8_t nsp; /* Number of stack slots. */ + uint8_t retref; /* Return value by reference. */ +#if LJ_TARGET_X64 + uint8_t ngpr; /* Number of arguments in GPRs. */ + uint8_t nfpr; /* Number of arguments in FPRs. */ +#elif LJ_TARGET_X86 + uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */ +#elif LJ_TARGET_ARM64 + void *retp; /* Aggregate return pointer in x8. */ +#elif LJ_TARGET_PPC + uint8_t nfpr; /* Number of arguments in FPRs. */ +#endif +#if LJ_32 + int32_t align1; +#endif +#if CCALL_NUM_FPR + FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */ +#endif + GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */ + GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */ +} CCallState; + +/* -- C call handling ----------------------------------------------------- */ + +/* Really belongs to lj_vm.h. 
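+** lj_vm_ffi_call() is implemented in the assembler part of the VM: it loads
+** the argument GPRs/FPRs and stack slots from the CCallState, calls cc->func
+** and stores the result registers back into the same structure.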
*/ +LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc); + +LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o); +LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.c new file mode 100644 index 00000000000..065c329fa7e --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.c @@ -0,0 +1,712 @@ +/* +** FFI C callback handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_ccall.h" +#include "lj_ccallback.h" +#include "lj_target.h" +#include "lj_mcode.h" +#include "lj_trace.h" +#include "lj_vm.h" + +/* -- Target-specific handling of callback slots -------------------------- */ + +#define CALLBACK_MCODE_SIZE (LJ_PAGESIZE * LJ_NUM_CBPAGE) + +#if LJ_OS_NOJIT + +/* Callbacks disabled. */ +#define CALLBACK_SLOT2OFS(slot) (0*(slot)) +#define CALLBACK_OFS2SLOT(ofs) (0*(ofs)) +#define CALLBACK_MAX_SLOT 0 + +#elif LJ_TARGET_X86ORX64 + +#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0) +#define CALLBACK_MCODE_GROUP (-2+1+2+(LJ_GC64 ? 10 : 5)+(LJ_64 ? 6 : 5)) + +#define CALLBACK_SLOT2OFS(slot) \ + (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot)) + +static MSize CALLBACK_OFS2SLOT(MSize ofs) +{ + MSize group; + ofs -= CALLBACK_MCODE_HEAD; + group = ofs / (32*4 + CALLBACK_MCODE_GROUP); + return (ofs % (32*4 + CALLBACK_MCODE_GROUP))/4 + group*32; +} + +#define CALLBACK_MAX_SLOT \ + (((CALLBACK_MCODE_SIZE-CALLBACK_MCODE_HEAD)/(CALLBACK_MCODE_GROUP+4*32))*32) + +#elif LJ_TARGET_ARM + +#define CALLBACK_MCODE_HEAD 32 + +#elif LJ_TARGET_ARM64 + +#define CALLBACK_MCODE_HEAD 32 + +#elif LJ_TARGET_PPC + +#define CALLBACK_MCODE_HEAD 24 + +#elif LJ_TARGET_MIPS + +#define CALLBACK_MCODE_HEAD 24 + +#else + +/* Missing support for this architecture. */ +#define CALLBACK_SLOT2OFS(slot) (0*(slot)) +#define CALLBACK_OFS2SLOT(ofs) (0*(ofs)) +#define CALLBACK_MAX_SLOT 0 + +#endif + +#ifndef CALLBACK_SLOT2OFS +#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot)) +#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8) +#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE)) +#endif + +/* Convert callback slot number to callback function pointer. */ +static void *callback_slot2ptr(CTState *cts, MSize slot) +{ + return (uint8_t *)cts->cb.mcode + CALLBACK_SLOT2OFS(slot); +} + +/* Convert callback function pointer to slot number. */ +MSize lj_ccallback_ptr2slot(CTState *cts, void *p) +{ + uintptr_t ofs = (uintptr_t)((uint8_t *)p -(uint8_t *)cts->cb.mcode); + if (ofs < CALLBACK_MCODE_SIZE) { + MSize slot = CALLBACK_OFS2SLOT((MSize)ofs); + if (CALLBACK_SLOT2OFS(slot) == (MSize)ofs) + return slot; + } + return ~0u; /* Not a known callback function pointer. */ +} + +/* Initialize machine code for callback function pointers. */ +#if LJ_OS_NOJIT +/* Disabled callback support. 
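+** With LJ_OS_NOJIT the mcode area cannot be made executable, so
+** CALLBACK_MAX_SLOT is 0 and callback creation always fails (LJ_ERR_FFI_CBACKOV).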
*/ +#define callback_mcode_init(g, p) UNUSED(p) +#elif LJ_TARGET_X86ORX64 +static void callback_mcode_init(global_State *g, uint8_t *page) +{ + uint8_t *p = page; + uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback; + MSize slot; +#if LJ_64 + *(void **)p = target; p += 8; +#endif + for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { + /* mov al, slot; jmp group */ + *p++ = XI_MOVrib | RID_EAX; *p++ = (uint8_t)slot; + if ((slot & 31) == 31 || slot == CALLBACK_MAX_SLOT-1) { + /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */ + *p++ = XI_PUSH + RID_EBP; + *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8); +#if LJ_GC64 + *p++ = 0x48; *p++ = XI_MOVri | RID_EBP; + *(uint64_t *)p = (uint64_t)(g); p += 8; +#else + *p++ = XI_MOVri | RID_EBP; + *(int32_t *)p = i32ptr(g); p += 4; +#endif +#if LJ_64 + /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */ + *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP; + *(int32_t *)p = (int32_t)(page-(p+4)); p += 4; +#else + /* jmp lj_vm_ffi_callback. */ + *p++ = XI_JMP; *(int32_t *)p = target-(p+4); p += 4; +#endif + } else { + *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2); + } + } + lua_assert(p - page <= CALLBACK_MCODE_SIZE); +} +#elif LJ_TARGET_ARM +static void callback_mcode_init(global_State *g, uint32_t *page) +{ + uint32_t *p = page; + void *target = (void *)lj_vm_ffi_callback; + MSize slot; + /* This must match with the saveregs macro in buildvm_arm.dasc. */ + *p++ = ARMI_SUB|ARMF_D(RID_R12)|ARMF_N(RID_R12)|ARMF_M(RID_PC); + *p++ = ARMI_PUSH|ARMF_N(RID_SP)|RSET_RANGE(RID_R4,RID_R11+1)|RID2RSET(RID_LR); + *p++ = ARMI_SUB|ARMI_K12|ARMF_D(RID_R12)|ARMF_N(RID_R12)|CALLBACK_MCODE_HEAD; + *p++ = ARMI_STR|ARMI_LS_P|ARMI_LS_W|ARMF_D(RID_R12)|ARMF_N(RID_SP)|(CFRAME_SIZE-4*9); + *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_R12)|ARMF_N(RID_PC); + *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_PC)|ARMF_N(RID_PC); + *p++ = u32ptr(g); + *p++ = u32ptr(target); + for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { + *p++ = ARMI_MOV|ARMF_D(RID_R12)|ARMF_M(RID_PC); + *p = ARMI_B | ((page-p-2) & 0x00ffffffu); + p++; + } + lua_assert(p - page <= CALLBACK_MCODE_SIZE); +} +#elif LJ_TARGET_ARM64 +static void callback_mcode_init(global_State *g, uint32_t *page) +{ + uint32_t *p = page; + void *target = (void *)lj_vm_ffi_callback; + MSize slot; + *p++ = A64I_LDRLx | A64F_D(RID_X11) | A64F_S19(4); + *p++ = A64I_LDRLx | A64F_D(RID_X10) | A64F_S19(5); + *p++ = A64I_BR | A64F_N(RID_X11); + *p++ = A64I_NOP; + ((void **)p)[0] = target; + ((void **)p)[1] = g; + p += 4; + for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { + *p++ = A64I_MOVZw | A64F_D(RID_X9) | A64F_U16(slot); + *p = A64I_B | A64F_S26((page-p) & 0x03ffffffu); + p++; + } + lua_assert(p - page <= CALLBACK_MCODE_SIZE); +} +#elif LJ_TARGET_PPC +static void callback_mcode_init(global_State *g, uint32_t *page) +{ + uint32_t *p = page; + void *target = (void *)lj_vm_ffi_callback; + MSize slot; + *p++ = PPCI_LIS | PPCF_T(RID_TMP) | (u32ptr(target) >> 16); + *p++ = PPCI_LIS | PPCF_T(RID_R12) | (u32ptr(g) >> 16); + *p++ = PPCI_ORI | PPCF_A(RID_TMP)|PPCF_T(RID_TMP) | (u32ptr(target) & 0xffff); + *p++ = PPCI_ORI | PPCF_A(RID_R12)|PPCF_T(RID_R12) | (u32ptr(g) & 0xffff); + *p++ = PPCI_MTCTR | PPCF_T(RID_TMP); + *p++ = PPCI_BCTR; + for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { + *p++ = PPCI_LI | PPCF_T(RID_R11) | slot; + *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2); + p++; + } + lua_assert(p - page <= CALLBACK_MCODE_SIZE); +} +#elif LJ_TARGET_MIPS +static void 
callback_mcode_init(global_State *g, uint32_t *page) +{ + uint32_t *p = page; + void *target = (void *)lj_vm_ffi_callback; + MSize slot; + *p++ = MIPSI_SW | MIPSF_T(RID_R1)|MIPSF_S(RID_SP) | 0; + *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (u32ptr(target) >> 16); + *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (u32ptr(g) >> 16); + *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) |(u32ptr(target)&0xffff); + *p++ = MIPSI_JR | MIPSF_S(RID_R3); + *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (u32ptr(g)&0xffff); + for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { + *p = MIPSI_B | ((page-p-1) & 0x0000ffffu); + p++; + *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot; + } + lua_assert(p - page <= CALLBACK_MCODE_SIZE); +} +#else +/* Missing support for this architecture. */ +#define callback_mcode_init(g, p) UNUSED(p) +#endif + +/* -- Machine code management --------------------------------------------- */ + +#if LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include + +#elif LJ_TARGET_POSIX + +#include +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +#endif + +/* Allocate and initialize area for callback function pointers. */ +static void callback_mcode_new(CTState *cts) +{ + size_t sz = (size_t)CALLBACK_MCODE_SIZE; + void *p; + if (CALLBACK_MAX_SLOT == 0) + lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); +#if LJ_TARGET_WINDOWS + p = VirtualAlloc(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + if (!p) + lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); +#elif LJ_TARGET_POSIX + p = mmap(NULL, sz, (PROT_READ|PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS, + -1, 0); + if (p == MAP_FAILED) + lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); +#else + /* Fallback allocator. Fails if memory is not executable by default. */ + p = lj_mem_new(cts->L, sz); +#endif + cts->cb.mcode = p; + callback_mcode_init(cts->g, p); + lj_mcode_sync(p, (char *)p + sz); +#if LJ_TARGET_WINDOWS + { + DWORD oprot; + VirtualProtect(p, sz, PAGE_EXECUTE_READ, &oprot); + } +#elif LJ_TARGET_POSIX + mprotect(p, sz, (PROT_READ|PROT_EXEC)); +#endif +} + +/* Free area for callback function pointers. */ +void lj_ccallback_mcode_free(CTState *cts) +{ + size_t sz = (size_t)CALLBACK_MCODE_SIZE; + void *p = cts->cb.mcode; + if (p == NULL) return; +#if LJ_TARGET_WINDOWS + VirtualFree(p, 0, MEM_RELEASE); + UNUSED(sz); +#elif LJ_TARGET_POSIX + munmap(p, sz); +#else + lj_mem_free(cts->g, p, sz); +#endif +} + +/* -- C callback entry ---------------------------------------------------- */ + +/* Target-specific handling of register arguments. Similar to lj_ccall.c. */ +#if LJ_TARGET_X86 + +#define CALLBACK_HANDLE_REGARG \ + if (!isfp) { /* Only non-FP values may be passed in registers. */ \ + if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \ + if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \ + } else if (ngpr + 1 <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#elif LJ_TARGET_X64 && LJ_ABI_WIN + +/* Windows/x64 argument registers are strictly positional (use ngpr). 
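+** The first four arguments go into RCX/RDX/R8/R9 or XMM0-XMM3; an FP argument
+** still consumes its positional slot, hence cb.fpr[] is indexed with ngpr.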
*/ +#define CALLBACK_HANDLE_REGARG \ + if (isfp) { \ + if (ngpr < maxgpr) { sp = &cts->cb.fpr[ngpr++]; UNUSED(nfpr); goto done; } \ + } else { \ + if (ngpr < maxgpr) { sp = &cts->cb.gpr[ngpr++]; goto done; } \ + } + +#elif LJ_TARGET_X64 + +#define CALLBACK_HANDLE_REGARG \ + if (isfp) { \ + if (nfpr + n <= CCALL_NARG_FPR) { \ + sp = &cts->cb.fpr[nfpr]; \ + nfpr += n; \ + goto done; \ + } \ + } else { \ + if (ngpr + n <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#elif LJ_TARGET_ARM + +#if LJ_ABI_SOFTFP + +#define CALLBACK_HANDLE_REGARG_FP1 UNUSED(isfp); +#define CALLBACK_HANDLE_REGARG_FP2 + +#else + +#define CALLBACK_HANDLE_REGARG_FP1 \ + if (isfp) { \ + if (n == 1) { \ + if (fprodd) { \ + sp = &cts->cb.fpr[fprodd-1]; \ + fprodd = 0; \ + goto done; \ + } else if (nfpr + 1 <= CCALL_NARG_FPR) { \ + sp = &cts->cb.fpr[nfpr++]; \ + fprodd = nfpr; \ + goto done; \ + } \ + } else { \ + if (nfpr + 1 <= CCALL_NARG_FPR) { \ + sp = &cts->cb.fpr[nfpr++]; \ + goto done; \ + } \ + } \ + fprodd = 0; /* No reordering after the first FP value is on stack. */ \ + } else { + +#define CALLBACK_HANDLE_REGARG_FP2 } + +#endif + +#define CALLBACK_HANDLE_REGARG \ + CALLBACK_HANDLE_REGARG_FP1 \ + if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + if (ngpr + n <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } CALLBACK_HANDLE_REGARG_FP2 + +#elif LJ_TARGET_ARM64 + +#define CALLBACK_HANDLE_REGARG \ + if (isfp) { \ + if (nfpr + n <= CCALL_NARG_FPR) { \ + sp = &cts->cb.fpr[nfpr]; \ + nfpr += n; \ + goto done; \ + } else { \ + nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \ + } \ + } else { \ + if (!LJ_TARGET_IOS && n > 1) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + if (ngpr + n <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } else { \ + ngpr = CCALL_NARG_GPR; /* Prevent reordering. */ \ + } \ + } + +#elif LJ_TARGET_PPC + +#define CALLBACK_HANDLE_REGARG \ + if (isfp) { \ + if (nfpr + 1 <= CCALL_NARG_FPR) { \ + sp = &cts->cb.fpr[nfpr++]; \ + cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \ + goto done; \ + } \ + } else { /* Try to pass argument in GPRs. */ \ + if (n > 1) { \ + lua_assert(ctype_isinteger(cta->info) && n == 2); /* int64_t. */ \ + ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ + } \ + if (ngpr + n <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#define CALLBACK_HANDLE_RET \ + if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ + *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */ + +#elif LJ_TARGET_MIPS + +#define CALLBACK_HANDLE_REGARG \ + if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \ + sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \ + nfpr++; ngpr += n; \ + goto done; \ + } else { /* Try to pass argument in GPRs. */ \ + nfpr = CCALL_NARG_FPR; \ + if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + if (ngpr + n <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#define CALLBACK_HANDLE_RET \ + if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ + ((float *)dp)[1] = *(float *)dp; + +#else +#error "Missing calling convention definitions for this architecture" +#endif + +/* Convert and push callback arguments to Lua stack. 
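+** Sets up a continuation frame for LJ_CONT_FFI_CALLBACK, then decodes the
+** incoming C arguments from cts->cb.gpr/fpr/stack according to the callback's
+** CType and pushes them as Lua values before the Lua function is run.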
*/ +static void callback_conv_args(CTState *cts, lua_State *L) +{ + TValue *o = L->top; + intptr_t *stack = cts->cb.stack; + MSize slot = cts->cb.slot; + CTypeID id = 0, rid, fid; + int gcsteps = 0; + CType *ct; + GCfunc *fn; + int fntp; + MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR; +#if CCALL_NARG_FPR + MSize nfpr = 0; +#if LJ_TARGET_ARM + MSize fprodd = 0; +#endif +#endif + + if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) { + ct = ctype_get(cts, id); + rid = ctype_cid(ct->info); /* Return type. x86: +(spadj<<16). */ + fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot)); + fntp = LJ_TFUNC; + } else { /* Must set up frame first, before throwing the error. */ + ct = NULL; + rid = 0; + fn = (GCfunc *)L; + fntp = LJ_TTHREAD; + } + /* Continuation returns from callback. */ + if (LJ_FR2) { + (o++)->u64 = LJ_CONT_FFI_CALLBACK; + (o++)->u64 = rid; + o++; + } else { + o->u32.lo = LJ_CONT_FFI_CALLBACK; + o->u32.hi = rid; + o++; + } + setframe_gc(o, obj2gco(fn), fntp); + setframe_ftsz(o, ((char *)(o+1) - (char *)L->base) + FRAME_CONT); + L->top = L->base = ++o; + if (!ct) + lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK); + if (isluafunc(fn)) + setcframe_pc(L->cframe, proto_bc(funcproto(fn))+1); + lj_state_checkstack(L, LUA_MINSTACK); /* May throw. */ + o = L->base; /* Might have been reallocated. */ + +#if LJ_TARGET_X86 + /* x86 has several different calling conventions. */ + switch (ctype_cconv(ct->info)) { + case CTCC_FASTCALL: maxgpr = 2; break; + case CTCC_THISCALL: maxgpr = 1; break; + default: maxgpr = 0; break; + } +#endif + + fid = ct->sib; + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (!ctype_isattrib(ctf->info)) { + CType *cta; + void *sp; + CTSize sz; + int isfp; + MSize n; + lua_assert(ctype_isfield(ctf->info)); + cta = ctype_rawchild(cts, ctf); + isfp = ctype_isfp(cta->info); + sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); + n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */ + + CALLBACK_HANDLE_REGARG /* Handle register arguments. */ + + /* Otherwise pass argument on stack. */ + if (CCALL_ALIGN_STACKARG && LJ_32 && sz == 8) + nsp = (nsp + 1) & ~1u; /* Align 64 bit argument on stack. */ + sp = &stack[nsp]; + nsp += n; + + done: + if (LJ_BE && cta->size < CTSIZE_PTR) + sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size); + gcsteps += lj_cconv_tv_ct(cts, cta, 0, o++, sp); + } + fid = ctf->sib; + } + L->top = o; +#if LJ_TARGET_X86 + /* Store stack adjustment for returns from non-cdecl callbacks. */ + if (ctype_cconv(ct->info) != CTCC_CDECL) { +#if LJ_FR2 + (L->base-3)->u64 |= (nsp << (16+2)); +#else + (L->base-2)->u32.hi |= (nsp << (16+2)); +#endif + } +#endif + while (gcsteps-- > 0) + lj_gc_check(L); +} + +/* Convert Lua object to callback result. */ +static void callback_conv_result(CTState *cts, lua_State *L, TValue *o) +{ +#if LJ_FR2 + CType *ctr = ctype_raw(cts, (uint16_t)(L->base-3)->u64); +#else + CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi); +#endif +#if LJ_TARGET_X86 + cts->cb.gpr[2] = 0; +#endif + if (!ctype_isvoid(ctr->info)) { + uint8_t *dp = (uint8_t *)&cts->cb.gpr[0]; +#if CCALL_NUM_FPR + if (ctype_isfp(ctr->info)) + dp = (uint8_t *)&cts->cb.fpr[0]; +#endif + lj_cconv_ct_tv(cts, ctr, dp, o, 0); +#ifdef CALLBACK_HANDLE_RET + CALLBACK_HANDLE_RET +#endif + /* Extend returned integers to (at least) 32 bits. */ + if (ctype_isinteger_or_bool(ctr->info) && ctr->size < 4) { + if (ctr->info & CTF_UNSIGNED) + *(uint32_t *)dp = ctr->size == 1 ? 
(uint32_t)*(uint8_t *)dp : + (uint32_t)*(uint16_t *)dp; + else + *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp : + (int32_t)*(int16_t *)dp; + } +#if LJ_TARGET_X86 + if (ctype_isfp(ctr->info)) + cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2; +#endif + } +} + +/* Enter callback. */ +lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf) +{ + lua_State *L = cts->L; + global_State *g = cts->g; + lua_assert(L != NULL); + if (tvref(g->jit_base)) { + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK)); + if (g->panic) g->panic(L); + exit(EXIT_FAILURE); + } + lj_trace_abort(g); /* Never record across callback. */ + /* Setup C frame. */ + cframe_prev(cf) = L->cframe; + setcframe_L(cf, L); + cframe_errfunc(cf) = -1; + cframe_nres(cf) = 0; + L->cframe = cf; + callback_conv_args(cts, L); + return L; /* Now call the function on this stack. */ +} + +/* Leave callback. */ +void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o) +{ + lua_State *L = cts->L; + GCfunc *fn; + TValue *obase = L->base; + L->base = L->top; /* Keep continuation frame for throwing errors. */ + if (o >= L->base) { + /* PC of RET* is lost. Point to last line for result conv. errors. */ + fn = curr_func(L); + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + setcframe_pc(L->cframe, proto_bc(pt)+pt->sizebc+1); + } + } + callback_conv_result(cts, L, o); + /* Finally drop C frame and continuation frame. */ + L->top -= 2+2*LJ_FR2; + L->base = obase; + L->cframe = cframe_prev(L->cframe); + cts->cb.slot = 0; /* Blacklist C function that called the callback. */ +} + +/* -- C callback management ----------------------------------------------- */ + +/* Get an unused slot in the callback slot table. */ +static MSize callback_slot_new(CTState *cts, CType *ct) +{ + CTypeID id = ctype_typeid(cts, ct); + CTypeID1 *cbid = cts->cb.cbid; + MSize top; + for (top = cts->cb.topid; top < cts->cb.sizeid; top++) + if (LJ_LIKELY(cbid[top] == 0)) + goto found; +#if CALLBACK_MAX_SLOT + if (top >= CALLBACK_MAX_SLOT) +#endif + lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); + if (!cts->cb.mcode) + callback_mcode_new(cts); + lj_mem_growvec(cts->L, cbid, cts->cb.sizeid, CALLBACK_MAX_SLOT, CTypeID1); + cts->cb.cbid = cbid; + memset(cbid+top, 0, (cts->cb.sizeid-top)*sizeof(CTypeID1)); +found: + cbid[top] = id; + cts->cb.topid = top+1; + return top; +} + +/* Check for function pointer and supported argument/result types. */ +static CType *callback_checkfunc(CTState *cts, CType *ct) +{ + int narg = 0; + if (!ctype_isptr(ct->info) || (LJ_64 && ct->size != CTSIZE_PTR)) + return NULL; + ct = ctype_rawchild(cts, ct); + if (ctype_isfunc(ct->info)) { + CType *ctr = ctype_rawchild(cts, ct); + CTypeID fid = ct->sib; + if (!(ctype_isvoid(ctr->info) || ctype_isenum(ctr->info) || + ctype_isptr(ctr->info) || (ctype_isnum(ctr->info) && ctr->size <= 8))) + return NULL; + if ((ct->info & CTF_VARARG)) + return NULL; + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (!ctype_isattrib(ctf->info)) { + CType *cta; + lua_assert(ctype_isfield(ctf->info)); + cta = ctype_rawchild(cts, ctf); + if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) || + (ctype_isnum(cta->info) && cta->size <= 8)) || + ++narg >= LUA_MINSTACK-3) + return NULL; + } + fid = ctf->sib; + } + return ct; + } + return NULL; +} + +/* Create a new callback and return the callback function pointer. 
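+** Verifies that ct is a pointer to a function with supported argument and
+** result types, allocates a callback slot, anchors the Lua function in
+** cts->miscmap and returns the mcode entry point for the slot (NULL on failure).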
*/ +void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn) +{ + ct = callback_checkfunc(cts, ct); + if (ct) { + MSize slot = callback_slot_new(cts, ct); + GCtab *t = cts->miscmap; + setfuncV(cts->L, lj_tab_setint(cts->L, t, (int32_t)slot), fn); + lj_gc_anybarriert(cts->L, t); + return callback_slot2ptr(cts, slot); + } + return NULL; /* Bad conversion. */ +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.h new file mode 100644 index 00000000000..83dbe048b5b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ccallback.h @@ -0,0 +1,25 @@ +/* +** FFI C callback handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CCALLBACK_H +#define _LJ_CCALLBACK_H + +#include "lj_obj.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* Really belongs to lj_vm.h. */ +LJ_ASMF void lj_vm_ffi_callback(void); + +LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p); +LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf); +LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o); +LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn); +LJ_FUNC void lj_ccallback_mcode_free(CTState *cts); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.c new file mode 100644 index 00000000000..8a270766fb9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.c @@ -0,0 +1,752 @@ +/* +** C type conversions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lj_cconv.h" +#include "lj_ccallback.h" + +/* -- Conversion errors --------------------------------------------------- */ + +/* Bad conversion. */ +LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s, + CTInfo flags) +{ + const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); + const char *src; + if ((flags & CCF_FROMTV)) + src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER : + ctype_isarray(s->info) ? LUA_TSTRING : LUA_TNIL)]; + else + src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL)); + if (CCF_GETARG(flags)) + lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst); + else + lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst); +} + +/* Bad conversion from TValue. */ +LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o, + CTInfo flags) +{ + const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); + const char *src = lj_typename(o); + if (CCF_GETARG(flags)) + lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst); + else + lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst); +} + +/* Initializer overflow. */ +LJ_NORET static void cconv_err_initov(CTState *cts, CType *d) +{ + const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); + lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst); +} + +/* -- C type compatibility checks ----------------------------------------- */ + +/* Get raw type and qualifiers for a child type. Resolves enums, too. 
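+** CTA_QUAL attributes contribute their qualifier bits to *qual; the loop
+** stops at the first child that is neither an attribute nor an enum and
+** merges that type's own CTF_QUAL flags, too.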
*/ +static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual) +{ + ct = ctype_child(cts, ct); + for (;;) { + if (ctype_isattrib(ct->info)) { + if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; + } else if (!ctype_isenum(ct->info)) { + break; + } + ct = ctype_child(cts, ct); + } + *qual |= (ct->info & CTF_QUAL); + return ct; +} + +/* Check for compatible types when converting to a pointer. +** Note: these checks are more relaxed than what C99 mandates. +*/ +int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags) +{ + if (!((flags & CCF_CAST) || d == s)) { + CTInfo dqual = 0, squal = 0; + d = cconv_childqual(cts, d, &dqual); + if (!ctype_isstruct(s->info)) + s = cconv_childqual(cts, s, &squal); + if ((flags & CCF_SAME)) { + if (dqual != squal) + return 0; /* Different qualifiers. */ + } else if (!(flags & CCF_IGNQUAL)) { + if ((dqual & squal) != squal) + return 0; /* Discarded qualifiers. */ + if (ctype_isvoid(d->info) || ctype_isvoid(s->info)) + return 1; /* Converting to/from void * is always ok. */ + } + if (ctype_type(d->info) != ctype_type(s->info) || + d->size != s->size) + return 0; /* Different type or different size. */ + if (ctype_isnum(d->info)) { + if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP))) + return 0; /* Different numeric types. */ + } else if (ctype_ispointer(d->info)) { + /* Check child types for compatibility. */ + return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME); + } else if (ctype_isstruct(d->info)) { + if (d != s) + return 0; /* Must be exact same type for struct/union. */ + } else if (ctype_isfunc(d->info)) { + /* NYI: structural equality of functions. */ + } + } + return 1; /* Types are compatible. */ +} + +/* -- C type to C type conversion ----------------------------------------- */ + +/* Convert C type to C type. Caveat: expects to get the raw CType! +** +** Note: This is only used by the interpreter and not optimized at all. +** The JIT compiler will do a much better job specializing for each case. +*/ +void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, + uint8_t *dp, uint8_t *sp, CTInfo flags) +{ + CTSize dsize = d->size, ssize = s->size; + CTInfo dinfo = d->info, sinfo = s->info; + void *tmpptr; + + lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo)); + lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo)); + + if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) + goto err_conv; + + /* Some basic sanity checks. */ + lua_assert(!ctype_isnum(dinfo) || dsize > 0); + lua_assert(!ctype_isnum(sinfo) || ssize > 0); + lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4); + lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4); + lua_assert(!ctype_isinteger(dinfo) || (1u< ssize) { /* Zero-extend or sign-extend LSB. */ +#if LJ_LE + uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0; + memcpy(dp, sp, ssize); + memset(dp + ssize, fill, dsize-ssize); +#else + uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0; + memset(dp, fill, dsize-ssize); + memcpy(dp + (dsize-ssize), sp, ssize); +#endif + } else { /* Copy LSB. */ +#if LJ_LE + memcpy(dp, sp, dsize); +#else + memcpy(dp, sp + (ssize-dsize), dsize); +#endif + } + break; + case CCX(I, F): { + double n; /* Always convert via double. */ + conv_I_F: + /* Convert source to double. */ + if (ssize == sizeof(double)) n = *(double *)sp; + else if (ssize == sizeof(float)) n = (double)*(float *)sp; + else goto err_conv; /* NYI: long double. */ + /* Then convert double to integer. 
*/ + /* The conversion must exactly match the semantics of JIT-compiled code! */ + if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) { + int32_t i = (int32_t)n; + if (dsize == 4) *(int32_t *)dp = i; + else if (dsize == 2) *(int16_t *)dp = (int16_t)i; + else *(int8_t *)dp = (int8_t)i; + } else if (dsize == 4) { + *(uint32_t *)dp = (uint32_t)n; + } else if (dsize == 8) { + if (!(dinfo & CTF_UNSIGNED)) + *(int64_t *)dp = (int64_t)n; + else + *(uint64_t *)dp = lj_num2u64(n); + } else { + goto err_conv; /* NYI: conversion to >64 bit integers. */ + } + break; + } + case CCX(I, C): + s = ctype_child(cts, s); + sinfo = s->info; + ssize = s->size; + goto conv_I_F; /* Just convert re. */ + case CCX(I, P): + if (!(flags & CCF_CAST)) goto err_conv; + sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + goto conv_I_I; + case CCX(I, A): + if (!(flags & CCF_CAST)) goto err_conv; + sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + ssize = CTSIZE_PTR; + tmpptr = sp; + sp = (uint8_t *)&tmpptr; + goto conv_I_I; + + /* Destination is a floating-point number. */ + case CCX(F, B): + case CCX(F, I): { + double n; /* Always convert via double. */ + conv_F_I: + /* First convert source to double. */ + /* The conversion must exactly match the semantics of JIT-compiled code! */ + if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) { + int32_t i; + if (ssize == 4) { + i = *(int32_t *)sp; + } else if (!(sinfo & CTF_UNSIGNED)) { + if (ssize == 2) i = *(int16_t *)sp; + else i = *(int8_t *)sp; + } else { + if (ssize == 2) i = *(uint16_t *)sp; + else i = *(uint8_t *)sp; + } + n = (double)i; + } else if (ssize == 4) { + n = (double)*(uint32_t *)sp; + } else if (ssize == 8) { + if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp; + else n = (double)*(uint64_t *)sp; + } else { + goto err_conv; /* NYI: conversion from >64 bit integers. */ + } + /* Convert double to destination. */ + if (dsize == sizeof(double)) *(double *)dp = n; + else if (dsize == sizeof(float)) *(float *)dp = (float)n; + else goto err_conv; /* NYI: long double. */ + break; + } + case CCX(F, F): { + double n; /* Always convert via double. */ + conv_F_F: + if (ssize == dsize) goto copyval; + /* Convert source to double. */ + if (ssize == sizeof(double)) n = *(double *)sp; + else if (ssize == sizeof(float)) n = (double)*(float *)sp; + else goto err_conv; /* NYI: long double. */ + /* Convert double to destination. */ + if (dsize == sizeof(double)) *(double *)dp = n; + else if (dsize == sizeof(float)) *(float *)dp = (float)n; + else goto err_conv; /* NYI: long double. */ + break; + } + case CCX(F, C): + s = ctype_child(cts, s); + sinfo = s->info; + ssize = s->size; + goto conv_F_F; /* Ignore im, and convert from re. */ + + /* Destination is a complex number. */ + case CCX(C, I): + d = ctype_child(cts, d); + dinfo = d->info; + dsize = d->size; + memset(dp + dsize, 0, dsize); /* Clear im. */ + goto conv_F_I; /* Convert to re. */ + case CCX(C, F): + d = ctype_child(cts, d); + dinfo = d->info; + dsize = d->size; + memset(dp + dsize, 0, dsize); /* Clear im. */ + goto conv_F_F; /* Convert to re. */ + + case CCX(C, C): + if (dsize != ssize) { /* Different types: convert re/im separately. */ + CType *dc = ctype_child(cts, d); + CType *sc = ctype_child(cts, s); + lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags); + lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags); + return; + } + goto copyval; /* Otherwise this is easy. */ + + /* Destination is a vector. 
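+  ** A scalar initializer is converted to the first element and then splatted
+  ** across the remaining elements; same-sized vectors are copied bitwise.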
*/ + case CCX(V, I): + case CCX(V, F): + case CCX(V, C): { + CType *dc = ctype_child(cts, d); + CTSize esize; + /* First convert the scalar to the first element. */ + lj_cconv_ct_ct(cts, dc, s, dp, sp, flags); + /* Then replicate it to the other elements (splat). */ + for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) { + dp += esize; + memcpy(dp, sp, esize); + } + break; + } + + case CCX(V, V): + /* Copy same-sized vectors, even for different lengths/element-types. */ + if (dsize != ssize) goto err_conv; + goto copyval; + + /* Destination is a pointer. */ + case CCX(P, I): + if (!(flags & CCF_CAST)) goto err_conv; + dinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + goto conv_I_I; + + case CCX(P, F): + if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv; + /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ + dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 0 : CTF_UNSIGNED); + goto conv_I_F; + + case CCX(P, P): + if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv; + cdata_setptr(dp, dsize, cdata_getptr(sp, ssize)); + break; + + case CCX(P, A): + case CCX(P, S): + if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv; + cdata_setptr(dp, dsize, sp); + break; + + /* Destination is an array. */ + case CCX(A, A): + if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize || + d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags)) + goto err_conv; + goto copyval; + + /* Destination is a struct/union. */ + case CCX(S, S): + if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s) + goto err_conv; /* Must be exact same type. */ +copyval: /* Copy value. */ + lua_assert(dsize == ssize); + memcpy(dp, sp, dsize); + break; + + default: + err_conv: + cconv_err_conv(cts, d, s, flags); + } +} + +/* -- C type to TValue conversion ----------------------------------------- */ + +/* Convert C type to TValue. Caveat: expects to get the raw CType! */ +int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, + TValue *o, uint8_t *sp) +{ + CTInfo sinfo = s->info; + if (ctype_isnum(sinfo)) { + if (!ctype_isbool(sinfo)) { + if (ctype_isinteger(sinfo) && s->size > 4) goto copyval; + if (LJ_DUALNUM && ctype_isinteger(sinfo)) { + int32_t i; + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s, + (uint8_t *)&i, sp, 0); + if ((sinfo & CTF_UNSIGNED) && i < 0) + setnumV(o, (lua_Number)(uint32_t)i); + else + setintV(o, i); + } else { + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s, + (uint8_t *)&o->n, sp, 0); + /* Numbers are NOT canonicalized here! Beware of uninitialized data. */ + lua_assert(tvisnum(o)); + } + } else { + uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0); + setboolV(o, b); + setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */ + } + return 0; + } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) { + /* Create reference. */ + setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid)); + return 1; /* Need GC step. */ + } else { + GCcdata *cd; + CTSize sz; + copyval: /* Copy value. */ + sz = s->size; + lua_assert(sz != CTSIZE_INVALID); + /* Attributes are stripped, qualifiers are kept (but mostly ignored). */ + cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz); + setcdataV(cts->L, o, cd); + memcpy(cdataptr(cd), sp, sz); + return 1; /* Need GC step. */ + } +} + +/* Convert bitfield to TValue. */ +int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp) +{ + CTInfo info = s->info; + CTSize pos, bsz; + uint32_t val; + lua_assert(ctype_isbitfield(info)); + /* NYI: packed bitfields may cause misaligned reads. 
*/ + switch (ctype_bitcsz(info)) { + case 4: val = *(uint32_t *)sp; break; + case 2: val = *(uint16_t *)sp; break; + case 1: val = *(uint8_t *)sp; break; + default: lua_assert(0); val = 0; break; + } + /* Check if a packed bitfield crosses a container boundary. */ + pos = ctype_bitpos(info); + bsz = ctype_bitbsz(info); + lua_assert(pos < 8*ctype_bitcsz(info)); + lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); + if (pos + bsz > 8*ctype_bitcsz(info)) + lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); + if (!(info & CTF_BOOL)) { + CTSize shift = 32 - bsz; + if (!(info & CTF_UNSIGNED)) { + setintV(o, (int32_t)(val << (shift-pos)) >> shift); + } else { + val = (val << (shift-pos)) >> shift; + if (!LJ_DUALNUM || (int32_t)val < 0) + setnumV(o, (lua_Number)(uint32_t)val); + else + setintV(o, (int32_t)val); + } + } else { + lua_assert(bsz == 1); + setboolV(o, (val >> pos) & 1); + } + return 0; /* No GC step needed. */ +} + +/* -- TValue to C type conversion ----------------------------------------- */ + +/* Convert table to array. */ +static void cconv_array_tab(CTState *cts, CType *d, + uint8_t *dp, GCtab *t, CTInfo flags) +{ + int32_t i; + CType *dc = ctype_rawchild(cts, d); /* Array element type. */ + CTSize size = d->size, esize = dc->size, ofs = 0; + for (i = 0; ; i++) { + TValue *tv = (TValue *)lj_tab_getint(t, i); + if (!tv || tvisnil(tv)) { + if (i == 0) continue; /* Try again for 1-based tables. */ + break; /* Stop at first nil. */ + } + if (ofs >= size) + cconv_err_initov(cts, d); + lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags); + ofs += esize; + } + if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */ + if (ofs == esize) { /* Replicate a single element. */ + for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize); + } else { /* Otherwise fill the remainder with zero. */ + memset(dp + ofs, 0, size - ofs); + } + } +} + +/* Convert table to sub-struct/union. */ +static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp, + GCtab *t, int32_t *ip, CTInfo flags) +{ + CTypeID id = d->sib; + while (id) { + CType *df = ctype_get(cts, id); + id = df->sib; + if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) { + TValue *tv; + int32_t i = *ip, iz = i; + if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ + if (i >= 0) { + retry: + tv = (TValue *)lj_tab_getint(t, i); + if (!tv || tvisnil(tv)) { + if (i == 0) { i = 1; goto retry; } /* 1-based tables. */ + if (iz == 0) { *ip = i = -1; goto tryname; } /* Init named fields. */ + break; /* Stop at first nil. */ + } + *ip = i + 1; + } else { + tryname: + tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name))); + if (!tv || tvisnil(tv)) continue; + } + if (ctype_isfield(df->info)) + lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags); + else + lj_cconv_bf_tv(cts, df, dp+df->size, tv); + if ((d->info & CTF_UNION)) break; + } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) { + cconv_substruct_tab(cts, ctype_rawchild(cts, df), + dp+df->size, t, ip, flags); + } /* Ignore all other entries in the chain. */ + } +} + +/* Convert table to struct/union. */ +static void cconv_struct_tab(CTState *cts, CType *d, + uint8_t *dp, GCtab *t, CTInfo flags) +{ + int32_t i = 0; + memset(dp, 0, d->size); /* Much simpler to clear the struct first. */ + cconv_substruct_tab(cts, d, dp, t, &i, flags); +} + +/* Convert TValue to C type. Caveat: expects to get the raw CType! 
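+** Maps the Lua type of the source TValue (number, string, table, boolean,
+** nil, userdata, lightuserdata, cdata or function) to a suitable source CType
+** and usually performs the actual conversion with lj_cconv_ct_ct(); tables
+** initialize aggregates directly and Lua functions become new C callbacks.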
*/ +void lj_cconv_ct_tv(CTState *cts, CType *d, + uint8_t *dp, TValue *o, CTInfo flags) +{ + CTypeID sid = CTID_P_VOID; + CType *s; + void *tmpptr; + uint8_t tmpbool, *sp = (uint8_t *)&tmpptr; + if (LJ_LIKELY(tvisint(o))) { + sp = (uint8_t *)&o->i; + sid = CTID_INT32; + flags |= CCF_FROMTV; + } else if (LJ_LIKELY(tvisnum(o))) { + sp = (uint8_t *)&o->n; + sid = CTID_DOUBLE; + flags |= CCF_FROMTV; + } else if (tviscdata(o)) { + sp = cdataptr(cdataV(o)); + sid = cdataV(o)->ctypeid; + s = ctype_get(cts, sid); + if (ctype_isref(s->info)) { /* Resolve reference for value. */ + lua_assert(s->size == CTSIZE_PTR); + sp = *(void **)sp; + sid = ctype_cid(s->info); + } + s = ctype_raw(cts, sid); + if (ctype_isfunc(s->info)) { + sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR); + } else { + if (ctype_isenum(s->info)) s = ctype_child(cts, s); + goto doconv; + } + } else if (tvisstr(o)) { + GCstr *str = strV(o); + if (ctype_isenum(d->info)) { /* Match string against enum constant. */ + CTSize ofs; + CType *cct = lj_ctype_getfield(cts, d, str, &ofs); + if (!cct || !ctype_isconstval(cct->info)) + goto err_conv; + lua_assert(d->size == 4); + sp = (uint8_t *)&cct->size; + sid = ctype_cid(cct->info); + } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ + CType *dc = ctype_rawchild(cts, d); + CTSize sz = str->len+1; + if (!ctype_isinteger(dc->info) || dc->size != 1) + goto err_conv; + if (d->size != 0 && d->size < sz) + sz = d->size; + memcpy(dp, strdata(str), sz); + return; + } else { /* Otherwise pass it as a const char[]. */ + sp = (uint8_t *)strdata(str); + sid = CTID_A_CCHAR; + flags |= CCF_FROMTV; + } + } else if (tvistab(o)) { + if (ctype_isarray(d->info)) { + cconv_array_tab(cts, d, dp, tabV(o), flags); + return; + } else if (ctype_isstruct(d->info)) { + cconv_struct_tab(cts, d, dp, tabV(o), flags); + return; + } else { + goto err_conv; + } + } else if (tvisbool(o)) { + tmpbool = boolV(o); + sp = &tmpbool; + sid = CTID_BOOL; + } else if (tvisnil(o)) { + tmpptr = (void *)0; + flags |= CCF_FROMTV; + } else if (tvisudata(o)) { + GCudata *ud = udataV(o); + tmpptr = uddata(ud); + if (ud->udtype == UDTYPE_IO_FILE) + tmpptr = *(void **)tmpptr; + } else if (tvislightud(o)) { + tmpptr = lightudV(o); + } else if (tvisfunc(o)) { + void *p = lj_ccallback_new(cts, d, funcV(o)); + if (p) { + *(void **)dp = p; + return; + } + goto err_conv; + } else { + err_conv: + cconv_err_convtv(cts, d, o, flags); + } + s = ctype_get(cts, sid); +doconv: + if (ctype_isenum(d->info)) d = ctype_child(cts, d); + lj_cconv_ct_ct(cts, d, s, dp, sp, flags); +} + +/* Convert TValue to bitfield. */ +void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o) +{ + CTInfo info = d->info; + CTSize pos, bsz; + uint32_t val, mask; + lua_assert(ctype_isbitfield(info)); + if ((info & CTF_BOOL)) { + uint8_t tmpbool; + lua_assert(ctype_bitbsz(info) == 1); + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0); + val = tmpbool; + } else { + CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32; + lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0); + } + pos = ctype_bitpos(info); + bsz = ctype_bitbsz(info); + lua_assert(pos < 8*ctype_bitcsz(info)); + lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); + /* Check if a packed bitfield crosses a container boundary. 
*/ + if (pos + bsz > 8*ctype_bitcsz(info)) + lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); + mask = ((1u << bsz) - 1u) << pos; + val = (val << pos) & mask; + /* NYI: packed bitfields may cause misaligned reads/writes. */ + switch (ctype_bitcsz(info)) { + case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break; + case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break; + case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break; + default: lua_assert(0); break; + } +} + +/* -- Initialize C type with TValues -------------------------------------- */ + +/* Initialize an array with TValues. */ +static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp, + TValue *o, MSize len) +{ + CType *dc = ctype_rawchild(cts, d); /* Array element type. */ + CTSize ofs, esize = dc->size; + MSize i; + if (len*esize > sz) + cconv_err_initov(cts, d); + for (i = 0, ofs = 0; i < len; i++, ofs += esize) + lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0); + if (ofs == esize) { /* Replicate a single element. */ + for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize); + } else { /* Otherwise fill the remainder with zero. */ + memset(dp + ofs, 0, sz - ofs); + } +} + +/* Initialize a sub-struct/union with TValues. */ +static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp, + TValue *o, MSize len, MSize *ip) +{ + CTypeID id = d->sib; + while (id) { + CType *df = ctype_get(cts, id); + id = df->sib; + if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) { + MSize i = *ip; + if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ + if (i >= len) break; + *ip = i + 1; + if (ctype_isfield(df->info)) + lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0); + else + lj_cconv_bf_tv(cts, df, dp+df->size, o + i); + if ((d->info & CTF_UNION)) break; + } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) { + cconv_substruct_init(cts, ctype_rawchild(cts, df), + dp+df->size, o, len, ip); + if ((d->info & CTF_UNION)) break; + } /* Ignore all other entries in the chain. */ + } +} + +/* Initialize a struct/union with TValues. */ +static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp, + TValue *o, MSize len) +{ + MSize i = 0; + memset(dp, 0, sz); /* Much simpler to clear the struct first. */ + cconv_substruct_init(cts, d, dp, o, len, &i); + if (i < len) + cconv_err_initov(cts, d); +} + +/* Check whether to use a multi-value initializer. +** This is true if an aggregate is to be initialized with a value. +** Valarrays are treated as values here so ct_tv handles (V|C, I|F). +*/ +int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o) +{ + if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info))) + return 0; /* Destination is not an aggregate. */ + if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info))) + return 0; /* Initializer is not a value. */ + if (tviscdata(o) && lj_ctype_rawref(cts, cdataV(o)->ctypeid) == d) + return 0; /* Source and destination are identical aggregates. */ + return 1; /* Otherwise the initializer is a value. */ +} + +/* Initialize C type with TValues. Caveat: expects to get the raw CType! */ +void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz, + uint8_t *dp, TValue *o, MSize len) +{ + if (len == 0) + memset(dp, 0, sz); + else if (len == 1 && !lj_cconv_multi_init(cts, d, o)) + lj_cconv_ct_tv(cts, d, dp, o, 0); + else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. 
*/ + cconv_array_init(cts, d, sz, dp, o, len); + else if (ctype_isstruct(d->info)) + cconv_struct_init(cts, d, sz, dp, o, len); + else + cconv_err_initov(cts, d); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.h new file mode 100644 index 00000000000..2bd50ff46e9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_cconv.h @@ -0,0 +1,70 @@ +/* +** C type conversions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CCONV_H +#define _LJ_CCONV_H + +#include "lj_obj.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* Compressed C type index. ORDER CCX. */ +enum { + CCX_B, /* Bool. */ + CCX_I, /* Integer. */ + CCX_F, /* Floating-point number. */ + CCX_C, /* Complex. */ + CCX_V, /* Vector. */ + CCX_P, /* Pointer. */ + CCX_A, /* Refarray. */ + CCX_S /* Struct/union. */ +}; + +/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */ +static LJ_AINLINE uint32_t cconv_idx(CTInfo info) +{ + uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */ + lua_assert(ctype_type(info) <= CT_MAYCONVERT); +#if LJ_64 + idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u); +#else + idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u); +#endif + lua_assert(idx < 8); + return idx; +} + +#define cconv_idx2(dinfo, sinfo) \ + ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo))) + +#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src) + +/* Conversion flags. */ +#define CCF_CAST 0x00000001u +#define CCF_FROMTV 0x00000002u +#define CCF_SAME 0x00000004u +#define CCF_IGNQUAL 0x00000008u + +#define CCF_ARG_SHIFT 8 +#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT) +#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT) + +LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags); +LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, + uint8_t *dp, uint8_t *sp, CTInfo flags); +LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, + TValue *o, uint8_t *sp); +LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp); +LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d, + uint8_t *dp, TValue *o, CTInfo flags); +LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o); +LJ_FUNC int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o); +LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz, + uint8_t *dp, TValue *o, MSize len); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.c new file mode 100644 index 00000000000..5cd2c1140ef --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.c @@ -0,0 +1,288 @@ +/* +** C data management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" + +/* -- C data allocation --------------------------------------------------- */ + +/* Allocate a new C data object holding a reference to another object. */ +GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id) +{ + CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR); + GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR); + *(const void **)cdataptr(cd) = p; + return cd; +} + +/* Allocate variable-sized or specially aligned C data object. 
*/
+GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, CTSize align)
+{
+  global_State *g;
+  MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) +
+                (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0);
+  char *p = lj_mem_newt(L, extra + sz, char);
+  uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata);
+  uintptr_t almask = (1u << align) - 1u;
+  GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata));
+  lua_assert((char *)cd - p < 65536);
+  cdatav(cd)->offset = (uint16_t)((char *)cd - p);
+  cdatav(cd)->extra = extra;
+  cdatav(cd)->len = sz;
+  g = G(L);
+  setgcrefr(cd->nextgc, g->gc.root);
+  setgcref(g->gc.root, obj2gco(cd));
+  newwhite(g, obj2gco(cd));
+  cd->marked |= 0x80;
+  cd->gct = ~LJ_TCDATA;
+  cd->ctypeid = id;
+  return cd;
+}
+
+/* Free a C data object. */
+void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
+{
+  if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) {
+    GCobj *root;
+    makewhite(g, obj2gco(cd));
+    markfinalized(obj2gco(cd));
+    if ((root = gcref(g->gc.mmudata)) != NULL) {
+      setgcrefr(cd->nextgc, root->gch.nextgc);
+      setgcref(root->gch.nextgc, obj2gco(cd));
+      setgcref(g->gc.mmudata, obj2gco(cd));
+    } else {
+      setgcref(cd->nextgc, obj2gco(cd));
+      setgcref(g->gc.mmudata, obj2gco(cd));
+    }
+  } else if (LJ_LIKELY(!cdataisv(cd))) {
+    CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid);
+    CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR;
+    lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) ||
+               ctype_isextern(ct->info));
+    lj_mem_free(g, cd, sizeof(GCcdata) + sz);
+  } else {
+    lj_mem_free(g, memcdatav(cd), sizecdatav(cd));
+  }
+}
+
+void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj, uint32_t it)
+{
+  GCtab *t = ctype_ctsG(G(L))->finalizer;
+  if (gcref(t->metatable)) {
+    /* Add cdata to finalizer table, if still enabled. */
+    TValue *tv, tmp;
+    setcdataV(L, &tmp, cd);
+    lj_gc_anybarriert(L, t);
+    tv = lj_tab_set(L, t, &tmp);
+    setgcV(L, tv, obj, it);
+    if (!tvisnil(tv))
+      cd->marked |= LJ_GC_CDATA_FIN;
+    else
+      cd->marked &= ~LJ_GC_CDATA_FIN;
+  }
+}
+
+/* -- C data indexing ----------------------------------------------------- */
+
+/* Index C data by a TValue. Return CType and pointer. */
+CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp,
+                      CTInfo *qual)
+{
+  uint8_t *p = (uint8_t *)cdataptr(cd);
+  CType *ct = ctype_get(cts, cd->ctypeid);
+  ptrdiff_t idx;
+
+  /* Resolve reference for cdata object. */
+  if (ctype_isref(ct->info)) {
+    lua_assert(ct->size == CTSIZE_PTR);
+    p = *(uint8_t **)p;
+    ct = ctype_child(cts, ct);
+  }
+
+collect_attrib:
+  /* Skip attributes and collect qualifiers. */
+  while (ctype_isattrib(ct->info)) {
+    if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+    ct = ctype_child(cts, ct);
+  }
+  lua_assert(!ctype_isref(ct->info));  /* Interning rejects refs to refs. */
+
+  if (tvisint(key)) {
+    idx = (ptrdiff_t)intV(key);
+    goto integer_key;
+  } else if (tvisnum(key)) {  /* Numeric key. */
+#ifdef _MSC_VER
+    /* Workaround for MSVC bug. */
+    volatile
+#endif
+    lua_Number n = numV(key);
+    idx = LJ_64 ? (ptrdiff_t)n : (ptrdiff_t)lj_num2int(n);
+  integer_key:
+    if (ctype_ispointer(ct->info)) {
+      CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info));  /* Element size. */
+      if (sz == CTSIZE_INVALID)
+        lj_err_caller(cts->L, LJ_ERR_FFI_INVSIZE);
+      if (ctype_isptr(ct->info)) {
+        p = (uint8_t *)cdata_getptr(p, ct->size);
+      } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) {
+        if ((ct->info & CTF_COMPLEX)) idx &= 1;
+        *qual |= CTF_CONST;  /* Valarray elements are constant. */
+      }
+      *pp = p + idx*(int32_t)sz;
+      return ct;
+    }
+  } else if (tviscdata(key)) {  /* Integer cdata key.
*/ + GCcdata *cdk = cdataV(key); + CType *ctk = ctype_raw(cts, cdk->ctypeid); + if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk); + if (ctype_isinteger(ctk->info)) { + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk, + (uint8_t *)&idx, cdataptr(cdk), 0); + goto integer_key; + } + } else if (tvisstr(key)) { /* String key. */ + GCstr *name = strV(key); + if (ctype_isstruct(ct->info)) { + CTSize ofs; + CType *fct = lj_ctype_getfieldq(cts, ct, name, &ofs, qual); + if (fct) { + *pp = p + ofs; + return fct; + } + } else if (ctype_iscomplex(ct->info)) { + if (name->len == 2) { + *qual |= CTF_CONST; /* Complex fields are constant. */ + if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') { + *pp = p; + return ct; + } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') { + *pp = p + (ct->size >> 1); + return ct; + } + } + } else if (cd->ctypeid == CTID_CTYPEID) { + /* Allow indexing a (pointer to) struct constructor to get constants. */ + CType *sct = ctype_raw(cts, *(CTypeID *)p); + if (ctype_isptr(sct->info)) + sct = ctype_rawchild(cts, sct); + if (ctype_isstruct(sct->info)) { + CTSize ofs; + CType *fct = lj_ctype_getfield(cts, sct, name, &ofs); + if (fct && ctype_isconstval(fct->info)) + return fct; + } + ct = sct; /* Allow resolving metamethods for constructors, too. */ + } + } + if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */ + if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) { + p = (uint8_t *)cdata_getptr(p, ct->size); + ct = ctype_child(cts, ct); + goto collect_attrib; + } + } + *qual |= 1; /* Lookup failed. */ + return ct; /* But return the resolved raw type. */ +} + +/* -- C data getters ------------------------------------------------------ */ + +/* Get constant value and convert to TValue. */ +static void cdata_getconst(CTState *cts, TValue *o, CType *ct) +{ + CType *ctt = ctype_child(cts, ct); + lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); + /* Constants are already zero-extended/sign-extended to 32 bits. */ + if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) + setnumV(o, (lua_Number)(uint32_t)ct->size); + else + setintV(o, (int32_t)ct->size); +} + +/* Get C data value and convert to TValue. */ +int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp) +{ + CTypeID sid; + + if (ctype_isconstval(s->info)) { + cdata_getconst(cts, o, s); + return 0; /* No GC step needed. */ + } else if (ctype_isbitfield(s->info)) { + return lj_cconv_tv_bf(cts, s, o, sp); + } + + /* Get child type of pointer/array/field. */ + lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info)); + sid = ctype_cid(s->info); + s = ctype_get(cts, sid); + + /* Resolve reference for field. */ + if (ctype_isref(s->info)) { + lua_assert(s->size == CTSIZE_PTR); + sp = *(uint8_t **)sp; + sid = ctype_cid(s->info); + s = ctype_get(cts, sid); + } + + /* Skip attributes. */ + while (ctype_isattrib(s->info)) + s = ctype_child(cts, s); + + return lj_cconv_tv_ct(cts, s, sid, o, sp); +} + +/* -- C data setters ------------------------------------------------------ */ + +/* Convert TValue and set C data value. */ +void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual) +{ + if (ctype_isconstval(d->info)) { + goto err_const; + } else if (ctype_isbitfield(d->info)) { + if (((d->info|qual) & CTF_CONST)) goto err_const; + lj_cconv_bf_tv(cts, d, dp, o); + return; + } + + /* Get child type of pointer/array/field. */ + lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info)); + d = ctype_child(cts, d); + + /* Resolve reference for field. 
*/ + if (ctype_isref(d->info)) { + lua_assert(d->size == CTSIZE_PTR); + dp = *(uint8_t **)dp; + d = ctype_child(cts, d); + } + + /* Skip attributes and collect qualifiers. */ + for (;;) { + if (ctype_isattrib(d->info)) { + if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size; + } else { + break; + } + d = ctype_child(cts, d); + } + + lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info)); + + if (((d->info|qual) & CTF_CONST)) { + err_const: + lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST); + } + + lj_cconv_ct_tv(cts, d, dp, o, 0); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.h new file mode 100644 index 00000000000..c8975be1c9f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_cdata.h @@ -0,0 +1,76 @@ +/* +** C data management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CDATA_H +#define _LJ_CDATA_H + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* Get C data pointer. */ +static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz) +{ + if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ + return ((void *)(uintptr_t)*(uint32_t *)p); + } else { + lua_assert(sz == CTSIZE_PTR); + return *(void **)p; + } +} + +/* Set C data pointer. */ +static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v) +{ + if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ + *(uint32_t *)p = (uint32_t)(uintptr_t)v; + } else { + lua_assert(sz == CTSIZE_PTR); + *(void **)p = (void *)v; + } +} + +/* Allocate fixed-size C data object. */ +static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz) +{ + GCcdata *cd; +#ifdef LUA_USE_ASSERT + CType *ct = ctype_raw(cts, id); + lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz); +#endif + cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz); + cd->gct = ~LJ_TCDATA; + cd->ctypeid = ctype_check(cts, id); + return cd; +} + +/* Variant which works without a valid CTState. */ +static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz) +{ + GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz); + cd->gct = ~LJ_TCDATA; + cd->ctypeid = id; + return cd; +} + +LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id); +LJ_FUNC GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, + CTSize align); + +LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd); +LJ_FUNC void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj, + uint32_t it); + +LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, + uint8_t **pp, CTInfo *qual); +LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp); +LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, + CTInfo qual); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_char.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_char.c new file mode 100644 index 00000000000..11f23efe44d --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_char.c @@ -0,0 +1,43 @@ +/* +** Character types. +** Donated to the public domain. +** +** This is intended to replace the problematic libc single-byte NLS functions. +** These just don't make sense anymore with UTF-8 locales becoming the norm +** on POSIX systems. It never worked too well on Windows systems since hardly +** anyone bothered to call setlocale(). +** +** This table is hardcoded for ASCII. 
Identifiers include the characters +** 128-255, too. This allows for the use of all non-ASCII chars as identifiers +** in the lexer. This is a broad definition, but works well in practice +** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*). +** +** If you really need proper character types for UTF-8 strings, please use +** an add-on library such as slnunicode: http://luaforge.net/projects/sln/ +*/ + +#define lj_char_c +#define LUA_CORE + +#include "lj_char.h" + +LJ_DATADEF const uint8_t lj_char_bits[257] = { + 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4, + 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160, + 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132, + 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192, + 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128 +}; + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_char.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_char.h new file mode 100644 index 00000000000..c3c86d34b1d --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_char.h @@ -0,0 +1,42 @@ +/* +** Character types. +** Donated to the public domain. +*/ + +#ifndef _LJ_CHAR_H +#define _LJ_CHAR_H + +#include "lj_def.h" + +#define LJ_CHAR_CNTRL 0x01 +#define LJ_CHAR_SPACE 0x02 +#define LJ_CHAR_PUNCT 0x04 +#define LJ_CHAR_DIGIT 0x08 +#define LJ_CHAR_XDIGIT 0x10 +#define LJ_CHAR_UPPER 0x20 +#define LJ_CHAR_LOWER 0x40 +#define LJ_CHAR_IDENT 0x80 +#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER) +#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT) +#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT) + +/* Only pass -1 or 0..255 to these macros. Never pass a signed char! 
*/
+#define lj_char_isa(c, t)	((lj_char_bits+1)[(c)] & t)
+#define lj_char_iscntrl(c)	lj_char_isa((c), LJ_CHAR_CNTRL)
+#define lj_char_isspace(c)	lj_char_isa((c), LJ_CHAR_SPACE)
+#define lj_char_ispunct(c)	lj_char_isa((c), LJ_CHAR_PUNCT)
+#define lj_char_isdigit(c)	lj_char_isa((c), LJ_CHAR_DIGIT)
+#define lj_char_isxdigit(c)	lj_char_isa((c), LJ_CHAR_XDIGIT)
+#define lj_char_isupper(c)	lj_char_isa((c), LJ_CHAR_UPPER)
+#define lj_char_islower(c)	lj_char_isa((c), LJ_CHAR_LOWER)
+#define lj_char_isident(c)	lj_char_isa((c), LJ_CHAR_IDENT)
+#define lj_char_isalpha(c)	lj_char_isa((c), LJ_CHAR_ALPHA)
+#define lj_char_isalnum(c)	lj_char_isa((c), LJ_CHAR_ALNUM)
+#define lj_char_isgraph(c)	lj_char_isa((c), LJ_CHAR_GRAPH)
+
+#define lj_char_toupper(c)	((c) - (lj_char_islower(c) >> 1))
+#define lj_char_tolower(c)	((c) + lj_char_isupper(c))
+
+LJ_DATA const uint8_t lj_char_bits[257];
+
+#endif
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_clib.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_clib.c
new file mode 100644
index 00000000000..1e927ebe99e
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_clib.c
@@ -0,0 +1,418 @@
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_str.h"
+#include "lj_udata.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_clib.h"
+#include "lj_strfmt.h"
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+#include <stdio.h>
+
+#if defined(RTLD_DEFAULT)
+#define CLIB_DEFHANDLE	RTLD_DEFAULT
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+#define CLIB_DEFHANDLE	((void *)(intptr_t)-2)
+#else
+#define CLIB_DEFHANDLE	NULL
+#endif
+
+LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L)
+{
+  lj_err_callermsg(L, dlerror());
+}
+
+#define clib_error(L, fmt, name)	clib_error_(L)
+
+#if defined(__CYGWIN__)
+#define CLIB_SOPREFIX	"cyg"
+#else
+#define CLIB_SOPREFIX	"lib"
+#endif
+
+#if LJ_TARGET_OSX
+#define CLIB_SOEXT	"%s.dylib"
+#elif defined(__CYGWIN__)
+#define CLIB_SOEXT	"%s.dll"
+#else
+#define CLIB_SOEXT	"%s.so"
+#endif
+
+static const char *clib_extname(lua_State *L, const char *name)
+{
+  if (!strchr(name, '/')
+#ifdef __CYGWIN__
+      && !strchr(name, '\\')
+#endif
+     ) {
+    if (!strchr(name, '.')) {
+      name = lj_strfmt_pushf(L, CLIB_SOEXT, name);
+      L->top--;
+#ifdef __CYGWIN__
+    } else {
+      return name;
+#endif
+    }
+    if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] &&
+          name[2] == CLIB_SOPREFIX[2])) {
+      name = lj_strfmt_pushf(L, CLIB_SOPREFIX "%s", name);
+      L->top--;
+    }
+  }
+  return name;
+}
+
+/* Check for a recognized ld script line. */
+static const char *clib_check_lds(lua_State *L, const char *buf)
+{
+  char *p, *e;
+  if ((!strncmp(buf, "GROUP", 5) || !strncmp(buf, "INPUT", 5)) &&
+      (p = strchr(buf, '('))) {
+    while (*++p == ' ') ;
+    for (e = p; *e && *e != ' ' && *e != ')'; e++) ;
+    return strdata(lj_str_new(L, p, e-p));
+  }
+  return NULL;
+}
+
+/* Quick and dirty solution to resolve shared library name from ld script. */
+static const char *clib_resolve_lds(lua_State *L, const char *name)
+{
+  FILE *fp = fopen(name, "r");
+  const char *p = NULL;
+  if (fp) {
+    char buf[256];
+    if (fgets(buf, sizeof(buf), fp)) {
+      if (!strncmp(buf, "/* GNU ld script", 16)) {  /* ld script magic? */
+        while (fgets(buf, sizeof(buf), fp)) {  /* Check all lines.
*/ + p = clib_check_lds(L, buf); + if (p) break; + } + } else { /* Otherwise check only the first line. */ + p = clib_check_lds(L, buf); + } + } + fclose(fp); + } + return p; +} + +static void *clib_loadlib(lua_State *L, const char *name, int global) +{ + void *h = dlopen(clib_extname(L, name), + RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL)); + if (!h) { + const char *e, *err = dlerror(); + if (*err == '/' && (e = strchr(err, ':')) && + (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) { + h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL)); + if (h) return h; + err = dlerror(); + } + lj_err_callermsg(L, err); + } + return h; +} + +static void clib_unloadlib(CLibrary *cl) +{ + if (cl->handle && cl->handle != CLIB_DEFHANDLE) + dlclose(cl->handle); +} + +static void *clib_getsym(CLibrary *cl, const char *name) +{ + void *p = dlsym(cl->handle, name); + return p; +} + +#elif LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include + +#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS +#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4 +#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2 +BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*); +#endif + +#define CLIB_DEFHANDLE ((void *)-1) + +/* Default libraries. */ +enum { + CLIB_HANDLE_EXE, + CLIB_HANDLE_DLL, + CLIB_HANDLE_CRT, + CLIB_HANDLE_KERNEL32, + CLIB_HANDLE_USER32, + CLIB_HANDLE_GDI32, + CLIB_HANDLE_MAX +}; + +static void *clib_def_handle[CLIB_HANDLE_MAX]; + +LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, + const char *name) +{ + DWORD err = GetLastError(); +#if LJ_TARGET_XBOXONE + wchar_t wbuf[128]; + char buf[128*2]; + if (!FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM, + NULL, err, 0, wbuf, sizeof(wbuf)/sizeof(wchar_t), NULL) || + !WideCharToMultiByte(CP_ACP, 0, wbuf, 128, buf, 128*2, NULL, NULL)) +#else + char buf[128]; + if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM, + NULL, err, 0, buf, sizeof(buf), NULL)) +#endif + buf[0] = '\0'; + lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, buf)); +} + +static int clib_needext(const char *s) +{ + while (*s) { + if (*s == '/' || *s == '\\' || *s == '.') return 0; + s++; + } + return 1; +} + +static const char *clib_extname(lua_State *L, const char *name) +{ + if (clib_needext(name)) { + name = lj_strfmt_pushf(L, "%s.dll", name); + L->top--; + } + return name; +} + +static void *clib_loadlib(lua_State *L, const char *name, int global) +{ + DWORD oldwerr = GetLastError(); + void *h = (void *)LoadLibraryExA(clib_extname(L, name), NULL, 0); + if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name); + SetLastError(oldwerr); + UNUSED(global); + return h; +} + +static void clib_unloadlib(CLibrary *cl) +{ + if (cl->handle == CLIB_DEFHANDLE) { + MSize i; + for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) { + void *h = clib_def_handle[i]; + if (h) { + clib_def_handle[i] = NULL; + FreeLibrary((HINSTANCE)h); + } + } + } else if (cl->handle) { + FreeLibrary((HINSTANCE)cl->handle); + } +} + +static void *clib_getsym(CLibrary *cl, const char *name) +{ + void *p = NULL; + if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */ + MSize i; + for (i = 0; i < CLIB_HANDLE_MAX; i++) { + HINSTANCE h = (HINSTANCE)clib_def_handle[i]; + if (!(void *)h) { /* Resolve default library handles (once). 
*/ + switch (i) { + case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break; + case CLIB_HANDLE_DLL: + GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + (const char *)clib_def_handle, &h); + break; + case CLIB_HANDLE_CRT: + GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + (const char *)&_fmode, &h); + break; + case CLIB_HANDLE_KERNEL32: h = LoadLibraryExA("kernel32.dll", NULL, 0); break; + case CLIB_HANDLE_USER32: h = LoadLibraryExA("user32.dll", NULL, 0); break; + case CLIB_HANDLE_GDI32: h = LoadLibraryExA("gdi32.dll", NULL, 0); break; + } + if (!h) continue; + clib_def_handle[i] = (void *)h; + } + p = (void *)GetProcAddress(h, name); + if (p) break; + } + } else { + p = (void *)GetProcAddress((HINSTANCE)cl->handle, name); + } + return p; +} + +#else + +#define CLIB_DEFHANDLE NULL + +LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, + const char *name) +{ + lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, "no support for this OS")); +} + +static void *clib_loadlib(lua_State *L, const char *name, int global) +{ + lj_err_callermsg(L, "no support for loading dynamic libraries for this OS"); + UNUSED(name); UNUSED(global); + return NULL; +} + +static void clib_unloadlib(CLibrary *cl) +{ + UNUSED(cl); +} + +static void *clib_getsym(CLibrary *cl, const char *name) +{ + UNUSED(cl); UNUSED(name); + return NULL; +} + +#endif + +/* -- C library indexing -------------------------------------------------- */ + +#if LJ_TARGET_X86 && LJ_ABI_WIN +/* Compute argument size for fastcall/stdcall functions. */ +static CTSize clib_func_argsize(CTState *cts, CType *ct) +{ + CTSize n = 0; + while (ct->sib) { + CType *d; + ct = ctype_get(cts, ct->sib); + if (ctype_isfield(ct->info)) { + d = ctype_rawchild(cts, ct); + n += ((d->size + 3) & ~3); + } + } + return n; +} +#endif + +/* Get redirected or mangled external symbol. */ +static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name) +{ + if (ct->sib) { + CType *ctf = ctype_get(cts, ct->sib); + if (ctype_isxattrib(ctf->info, CTA_REDIR)) + return strdata(gco2str(gcref(ctf->name))); + } + return strdata(name); +} + +/* Index a C library by name. */ +TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name) +{ + TValue *tv = lj_tab_setstr(L, cl->cache, name); + if (LJ_UNLIKELY(tvisnil(tv))) { + CTState *cts = ctype_cts(L); + CType *ct; + CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX); + if (!id) + lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name)); + if (ctype_isconstval(ct->info)) { + CType *ctt = ctype_child(cts, ct); + lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); + if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) + setnumV(tv, (lua_Number)(uint32_t)ct->size); + else + setintV(tv, (int32_t)ct->size); + } else { + const char *sym = clib_extsym(cts, ct, name); +#if LJ_TARGET_WINDOWS + DWORD oldwerr = GetLastError(); +#endif + void *p = clib_getsym(cl, sym); + GCcdata *cd; + lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info)); +#if LJ_TARGET_X86 && LJ_ABI_WIN + /* Retry with decorated name for fastcall/stdcall functions. */ + if (!p && ctype_isfunc(ct->info)) { + CTInfo cconv = ctype_cconv(ct->info); + if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) { + CTSize sz = clib_func_argsize(cts, ct); + const char *symd = lj_strfmt_pushf(L, + cconv == CTCC_FASTCALL ? 
"@%s@%d" : "_%s@%d", + sym, sz); + L->top--; + p = clib_getsym(cl, symd); + } + } +#endif + if (!p) + clib_error(L, "cannot resolve symbol " LUA_QS ": %s", sym); +#if LJ_TARGET_WINDOWS + SetLastError(oldwerr); +#endif + cd = lj_cdata_new(cts, id, CTSIZE_PTR); + *(void **)cdataptr(cd) = p; + setcdataV(L, tv, cd); + } + } + return tv; +} + +/* -- C library management ------------------------------------------------ */ + +/* Create a new CLibrary object and push it on the stack. */ +static CLibrary *clib_new(lua_State *L, GCtab *mt) +{ + GCtab *t = lj_tab_new(L, 0, 0); + GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t); + CLibrary *cl = (CLibrary *)uddata(ud); + cl->cache = t; + ud->udtype = UDTYPE_FFI_CLIB; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcref(ud->metatable, obj2gco(mt)); + setudataV(L, L->top++, ud); + return cl; +} + +/* Load a C library. */ +void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global) +{ + void *handle = clib_loadlib(L, strdata(name), global); + CLibrary *cl = clib_new(L, mt); + cl->handle = handle; +} + +/* Unload a C library. */ +void lj_clib_unload(CLibrary *cl) +{ + clib_unloadlib(cl); + cl->handle = NULL; +} + +/* Create the default C library object. */ +void lj_clib_default(lua_State *L, GCtab *mt) +{ + CLibrary *cl = clib_new(L, mt); + cl->handle = CLIB_DEFHANDLE; +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_clib.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_clib.h new file mode 100644 index 00000000000..e5dc98e1cd7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_clib.h @@ -0,0 +1,29 @@ +/* +** FFI C library loader. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CLIB_H +#define _LJ_CLIB_H + +#include "lj_obj.h" + +#if LJ_HASFFI + +/* Namespace for C library indexing. */ +#define CLNS_INDEX ((1u<env. */ +} CLibrary; + +LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name); +LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global); +LJ_FUNC void lj_clib_unload(CLibrary *cl); +LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.c new file mode 100644 index 00000000000..f212bd36820 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.c @@ -0,0 +1,1862 @@ +/* +** C declaration parser. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_ctype.h" +#include "lj_cparse.h" +#include "lj_frame.h" +#include "lj_vm.h" +#include "lj_char.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" + +/* +** Important note: this is NOT a validating C parser! This is a minimal +** C declaration parser, solely for use by the LuaJIT FFI. +** +** It ought to return correct results for properly formed C declarations, +** but it may accept some invalid declarations, too (and return nonsense). +** Also, it shows rather generic error messages to avoid unnecessary bloat. +** If in doubt, please check the input against your favorite C compiler. +*/ + +/* -- C lexer ------------------------------------------------------------- */ + +/* C lexer token names. */ +static const char *const ctoknames[] = { +#define CTOKSTR(name, str) str, +CTOKDEF(CTOKSTR) +#undef CTOKSTR + NULL +}; + +/* Forward declaration. 
*/ +LJ_NORET static void cp_err(CPState *cp, ErrMsg em); + +static const char *cp_tok2str(CPState *cp, CPToken tok) +{ + lua_assert(tok < CTOK_FIRSTDECL); + if (tok > CTOK_OFS) + return ctoknames[tok-CTOK_OFS-1]; + else if (!lj_char_iscntrl(tok)) + return lj_strfmt_pushf(cp->L, "%c", tok); + else + return lj_strfmt_pushf(cp->L, "char(%d)", tok); +} + +/* End-of-line? */ +static LJ_AINLINE int cp_iseol(CPChar c) +{ + return (c == '\n' || c == '\r'); +} + +/* Peek next raw character. */ +static LJ_AINLINE CPChar cp_rawpeek(CPState *cp) +{ + return (CPChar)(uint8_t)(*cp->p); +} + +static LJ_NOINLINE CPChar cp_get_bs(CPState *cp); + +/* Get next character. */ +static LJ_AINLINE CPChar cp_get(CPState *cp) +{ + cp->c = (CPChar)(uint8_t)(*cp->p++); + if (LJ_LIKELY(cp->c != '\\')) return cp->c; + return cp_get_bs(cp); +} + +/* Transparently skip backslash-escaped line breaks. */ +static LJ_NOINLINE CPChar cp_get_bs(CPState *cp) +{ + CPChar c2, c = cp_rawpeek(cp); + if (!cp_iseol(c)) return cp->c; + cp->p++; + c2 = cp_rawpeek(cp); + if (cp_iseol(c2) && c2 != c) cp->p++; + cp->linenumber++; + return cp_get(cp); +} + +/* Save character in buffer. */ +static LJ_AINLINE void cp_save(CPState *cp, CPChar c) +{ + lj_buf_putb(&cp->sb, c); +} + +/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */ +static void cp_newline(CPState *cp) +{ + CPChar c = cp_rawpeek(cp); + if (cp_iseol(c) && c != cp->c) cp->p++; + cp->linenumber++; +} + +LJ_NORET static void cp_errmsg(CPState *cp, CPToken tok, ErrMsg em, ...) +{ + const char *msg, *tokstr; + lua_State *L; + va_list argp; + if (tok == 0) { + tokstr = NULL; + } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING || + tok >= CTOK_FIRSTDECL) { + if (sbufP(&cp->sb) == sbufB(&cp->sb)) cp_save(cp, '$'); + cp_save(cp, '\0'); + tokstr = sbufB(&cp->sb); + } else { + tokstr = cp_tok2str(cp, tok); + } + L = cp->L; + va_start(argp, em); + msg = lj_strfmt_pushvf(L, err2msg(em), argp); + va_end(argp); + if (tokstr) + msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr); + if (cp->linenumber > 1) + msg = lj_strfmt_pushf(L, "%s at line %d", msg, cp->linenumber); + lj_err_callermsg(L, msg); +} + +LJ_NORET LJ_NOINLINE static void cp_err_token(CPState *cp, CPToken tok) +{ + cp_errmsg(cp, cp->tok, LJ_ERR_XTOKEN, cp_tok2str(cp, tok)); +} + +LJ_NORET LJ_NOINLINE static void cp_err_badidx(CPState *cp, CType *ct) +{ + GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL); + cp_errmsg(cp, 0, LJ_ERR_FFI_BADIDX, strdata(s)); +} + +LJ_NORET LJ_NOINLINE static void cp_err(CPState *cp, ErrMsg em) +{ + cp_errmsg(cp, 0, em); +} + +/* -- Main lexical scanner ------------------------------------------------ */ + +/* Parse number literal. Only handles int32_t/uint32_t right now. */ +static CPToken cp_number(CPState *cp) +{ + StrScanFmt fmt; + TValue o; + do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); + cp_save(cp, '\0'); + fmt = lj_strscan_scan((const uint8_t *)sbufB(&cp->sb), &o, STRSCAN_OPT_C); + if (fmt == STRSCAN_INT) cp->val.id = CTID_INT32; + else if (fmt == STRSCAN_U32) cp->val.id = CTID_UINT32; + else if (!(cp->mode & CPARSE_MODE_SKIP)) + cp_errmsg(cp, CTOK_INTEGER, LJ_ERR_XNUMBER); + cp->val.u32 = (uint32_t)o.i; + return CTOK_INTEGER; +} + +/* Parse identifier or keyword. 
*/ +static CPToken cp_ident(CPState *cp) +{ + do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); + cp->str = lj_buf_str(cp->L, &cp->sb); + cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask); + if (ctype_type(cp->ct->info) == CT_KW) + return ctype_cid(cp->ct->info); + return CTOK_IDENT; +} + +/* Parse parameter. */ +static CPToken cp_param(CPState *cp) +{ + CPChar c = cp_get(cp); + TValue *o = cp->param; + if (lj_char_isident(c) || c == '$') /* Reserve $xyz for future extensions. */ + cp_errmsg(cp, c, LJ_ERR_XSYNTAX); + if (!o || o >= cp->L->top) + cp_err(cp, LJ_ERR_FFI_NUMPARAM); + cp->param = o+1; + if (tvisstr(o)) { + cp->str = strV(o); + cp->val.id = 0; + cp->ct = &cp->cts->tab[0]; + return CTOK_IDENT; + } else if (tvisnumber(o)) { + cp->val.i32 = numberVint(o); + cp->val.id = CTID_INT32; + return CTOK_INTEGER; + } else { + GCcdata *cd; + if (!tviscdata(o)) + lj_err_argtype(cp->L, (int)(o-cp->L->base)+1, "type parameter"); + cd = cdataV(o); + if (cd->ctypeid == CTID_CTYPEID) + cp->val.id = *(CTypeID *)cdataptr(cd); + else + cp->val.id = cd->ctypeid; + return '$'; + } +} + +/* Parse string or character constant. */ +static CPToken cp_string(CPState *cp) +{ + CPChar delim = cp->c; + cp_get(cp); + while (cp->c != delim) { + CPChar c = cp->c; + if (c == '\0') cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); + if (c == '\\') { + c = cp_get(cp); + switch (c) { + case '\0': cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); break; + case 'a': c = '\a'; break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'v': c = '\v'; break; + case 'e': c = 27; break; + case 'x': + c = 0; + while (lj_char_isxdigit(cp_get(cp))) + c = (c<<4) + (lj_char_isdigit(cp->c) ? cp->c-'0' : (cp->c&15)+9); + cp_save(cp, (c & 0xff)); + continue; + default: + if (lj_char_isdigit(c)) { + c -= '0'; + if (lj_char_isdigit(cp_get(cp))) { + c = c*8 + (cp->c - '0'); + if (lj_char_isdigit(cp_get(cp))) { + c = c*8 + (cp->c - '0'); + cp_get(cp); + } + } + cp_save(cp, (c & 0xff)); + continue; + } + break; + } + } + cp_save(cp, c); + cp_get(cp); + } + cp_get(cp); + if (delim == '"') { + cp->str = lj_buf_str(cp->L, &cp->sb); + return CTOK_STRING; + } else { + if (sbuflen(&cp->sb) != 1) cp_err_token(cp, '\''); + cp->val.i32 = (int32_t)(char)*sbufB(&cp->sb); + cp->val.id = CTID_INT32; + return CTOK_INTEGER; + } +} + +/* Skip C comment. */ +static void cp_comment_c(CPState *cp) +{ + do { + if (cp_get(cp) == '*') { + do { + if (cp_get(cp) == '/') { cp_get(cp); return; } + } while (cp->c == '*'); + } + if (cp_iseol(cp->c)) cp_newline(cp); + } while (cp->c != '\0'); +} + +/* Skip C++ comment. */ +static void cp_comment_cpp(CPState *cp) +{ + while (!cp_iseol(cp_get(cp)) && cp->c != '\0') + ; +} + +/* Lexical scanner for C. Only a minimal subset is implemented. */ +static CPToken cp_next_(CPState *cp) +{ + lj_buf_reset(&cp->sb); + for (;;) { + if (lj_char_isident(cp->c)) + return lj_char_isdigit(cp->c) ? cp_number(cp) : cp_ident(cp); + switch (cp->c) { + case '\n': case '\r': cp_newline(cp); /* fallthrough. 
*/
+    case ' ': case '\t': case '\v': case '\f': cp_get(cp); break;
+    case '"': case '\'': return cp_string(cp);
+    case '/':
+      if (cp_get(cp) == '*') cp_comment_c(cp);
+      else if (cp->c == '/') cp_comment_cpp(cp);
+      else return '/';
+      break;
+    case '|':
+      if (cp_get(cp) != '|') return '|'; cp_get(cp); return CTOK_OROR;
+    case '&':
+      if (cp_get(cp) != '&') return '&'; cp_get(cp); return CTOK_ANDAND;
+    case '=':
+      if (cp_get(cp) != '=') return '='; cp_get(cp); return CTOK_EQ;
+    case '!':
+      if (cp_get(cp) != '=') return '!'; cp_get(cp); return CTOK_NE;
+    case '<':
+      if (cp_get(cp) == '=') { cp_get(cp); return CTOK_LE; }
+      else if (cp->c == '<') { cp_get(cp); return CTOK_SHL; }
+      return '<';
+    case '>':
+      if (cp_get(cp) == '=') { cp_get(cp); return CTOK_GE; }
+      else if (cp->c == '>') { cp_get(cp); return CTOK_SHR; }
+      return '>';
+    case '-':
+      if (cp_get(cp) != '>') return '-'; cp_get(cp); return CTOK_DEREF;
+    case '$':
+      return cp_param(cp);
+    case '\0': return CTOK_EOF;
+    default: { CPToken c = cp->c; cp_get(cp); return c; }
+    }
+  }
+}
+
+static LJ_NOINLINE CPToken cp_next(CPState *cp)
+{
+  return (cp->tok = cp_next_(cp));
+}
+
+/* -- C parser ------------------------------------------------------------ */
+
+/* Namespaces for resolving identifiers. */
+#define CPNS_DEFAULT \
+  ((1u<<CT_KW)|(1u<<CT_TYPEDEF)|(1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
+#define CPNS_STRUCT	((1u<<CT_KW)|(1u<<CT_STRUCT)|(1u<<CT_ENUM))
+
+typedef CTypeID1 CPDeclIdx;	/* Index into declaration stack. */
+typedef uint32_t CPscl;		/* Storage class flags. */
+
+/* Type declaration context. */
+typedef struct CPDecl {
+  CPDeclIdx top;	/* Top of declaration stack. */
+  CPDeclIdx pos;	/* Insertion position in declaration chain. */
+  CPDeclIdx specpos;	/* Saved position for declaration specifier. */
+  uint32_t mode;	/* Declarator mode. */
+  CPState *cp;		/* C parser state. */
+  GCstr *name;		/* Name of declared identifier (if direct). */
+  GCstr *redir;		/* Redirected symbol name. */
+  CTypeID nameid;	/* Existing typedef for declared identifier. */
+  CTInfo attr;		/* Attributes. */
+  CTInfo fattr;		/* Function attributes. */
+  CTInfo specattr;	/* Saved attributes. */
+  CTInfo specfattr;	/* Saved function attributes. */
+  CTSize bits;		/* Field size in bits (if any). */
+  CType stack[CPARSE_MAX_DECLSTACK];  /* Type declaration stack. */
+} CPDecl;
+
+/* Forward declarations. */
+static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl);
+static void cp_declarator(CPState *cp, CPDecl *decl);
+static CTypeID cp_decl_abstract(CPState *cp);
+
+/* Initialize C parser state. Caller must set up: L, p, srcname, mode. */
+static void cp_init(CPState *cp)
+{
+  cp->linenumber = 1;
+  cp->depth = 0;
+  cp->curpack = 0;
+  cp->packstack[0] = 255;
+  lj_buf_init(cp->L, &cp->sb);
+  lua_assert(cp->p != NULL);
+  cp_get(cp);  /* Read-ahead first char. */
+  cp->tok = 0;
+  cp->tmask = CPNS_DEFAULT;
+  cp_next(cp);  /* Read-ahead first token. */
+}
+
+/* Cleanup C parser state. */
+static void cp_cleanup(CPState *cp)
+{
+  global_State *g = G(cp->L);
+  lj_buf_free(g, &cp->sb);
+}
+
+/* Check and consume optional token. */
+static int cp_opt(CPState *cp, CPToken tok)
+{
+  if (cp->tok == tok) { cp_next(cp); return 1; }
+  return 0;
+}
+
+/* Check and consume token. */
+static void cp_check(CPState *cp, CPToken tok)
+{
+  if (cp->tok != tok) cp_err_token(cp, tok);
+  cp_next(cp);
+}
+
+/* Check if the next token may start a type declaration. */
+static int cp_istypedecl(CPState *cp)
+{
+  if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECL) return 1;
+  if (cp->tok == CTOK_IDENT && ctype_istypedef(cp->ct->info)) return 1;
+  if (cp->tok == '$') return 1;
+  return 0;
+}
+
+/* -- Constant expression evaluator --------------------------------------- */
+
+/* Forward declarations. */
+static void cp_expr_unary(CPState *cp, CPValue *k);
+static void cp_expr_sub(CPState *cp, CPValue *k, int pri);
+
+/* Please note that type handling is very weak here. Most ops simply
+** assume integer operands. Accessors are only needed to compute types and
+** return synthetic values. The only purpose of the expression evaluator
+** is to compute the values of constant expressions one would typically
+** find in C header files. And again: this is NOT a validating C parser!
+*/
+
+/* Parse comma separated expression and return last result. */
+static void cp_expr_comma(CPState *cp, CPValue *k)
+{
+  do { cp_expr_sub(cp, k, 0); } while (cp_opt(cp, ','));
+}
+
+/* Parse sizeof/alignof operator.
*/ +static void cp_expr_sizeof(CPState *cp, CPValue *k, int wantsz) +{ + CTSize sz; + CTInfo info; + if (cp_opt(cp, '(')) { + if (cp_istypedecl(cp)) + k->id = cp_decl_abstract(cp); + else + cp_expr_comma(cp, k); + cp_check(cp, ')'); + } else { + cp_expr_unary(cp, k); + } + info = lj_ctype_info(cp->cts, k->id, &sz); + if (wantsz) { + if (sz != CTSIZE_INVALID) + k->u32 = sz; + else if (k->id != CTID_A_CCHAR) /* Special case for sizeof("string"). */ + cp_err(cp, LJ_ERR_FFI_INVSIZE); + } else { + k->u32 = 1u << ctype_align(info); + } + k->id = CTID_UINT32; /* Really size_t. */ +} + +/* Parse prefix operators. */ +static void cp_expr_prefix(CPState *cp, CPValue *k) +{ + if (cp->tok == CTOK_INTEGER) { + *k = cp->val; cp_next(cp); + } else if (cp_opt(cp, '+')) { + cp_expr_unary(cp, k); /* Nothing to do (well, integer promotion). */ + } else if (cp_opt(cp, '-')) { + cp_expr_unary(cp, k); k->i32 = -k->i32; + } else if (cp_opt(cp, '~')) { + cp_expr_unary(cp, k); k->i32 = ~k->i32; + } else if (cp_opt(cp, '!')) { + cp_expr_unary(cp, k); k->i32 = !k->i32; k->id = CTID_INT32; + } else if (cp_opt(cp, '(')) { + if (cp_istypedecl(cp)) { /* Cast operator. */ + CTypeID id = cp_decl_abstract(cp); + cp_check(cp, ')'); + cp_expr_unary(cp, k); + k->id = id; /* No conversion performed. */ + } else { /* Sub-expression. */ + cp_expr_comma(cp, k); + cp_check(cp, ')'); + } + } else if (cp_opt(cp, '*')) { /* Indirection. */ + CType *ct; + cp_expr_unary(cp, k); + ct = lj_ctype_rawref(cp->cts, k->id); + if (!ctype_ispointer(ct->info)) + cp_err_badidx(cp, ct); + k->u32 = 0; k->id = ctype_cid(ct->info); + } else if (cp_opt(cp, '&')) { /* Address operator. */ + cp_expr_unary(cp, k); + k->id = lj_ctype_intern(cp->cts, CTINFO(CT_PTR, CTALIGN_PTR+k->id), + CTSIZE_PTR); + } else if (cp_opt(cp, CTOK_SIZEOF)) { + cp_expr_sizeof(cp, k, 1); + } else if (cp_opt(cp, CTOK_ALIGNOF)) { + cp_expr_sizeof(cp, k, 0); + } else if (cp->tok == CTOK_IDENT) { + if (ctype_type(cp->ct->info) == CT_CONSTVAL) { + k->u32 = cp->ct->size; k->id = ctype_cid(cp->ct->info); + } else if (ctype_type(cp->ct->info) == CT_EXTERN) { + k->u32 = cp->val.id; k->id = ctype_cid(cp->ct->info); + } else if (ctype_type(cp->ct->info) == CT_FUNC) { + k->u32 = cp->val.id; k->id = cp->val.id; + } else { + goto err_expr; + } + cp_next(cp); + } else if (cp->tok == CTOK_STRING) { + CTSize sz = cp->str->len; + while (cp_next(cp) == CTOK_STRING) + sz += cp->str->len; + k->u32 = sz + 1; + k->id = CTID_A_CCHAR; + } else { + err_expr: + cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); + } +} + +/* Parse postfix operators. */ +static void cp_expr_postfix(CPState *cp, CPValue *k) +{ + for (;;) { + CType *ct; + if (cp_opt(cp, '[')) { /* Array/pointer index. */ + CPValue k2; + cp_expr_comma(cp, &k2); + ct = lj_ctype_rawref(cp->cts, k->id); + if (!ctype_ispointer(ct->info)) { + ct = lj_ctype_rawref(cp->cts, k2.id); + if (!ctype_ispointer(ct->info)) + cp_err_badidx(cp, ct); + } + cp_check(cp, ']'); + k->u32 = 0; + } else if (cp->tok == '.' || cp->tok == CTOK_DEREF) { /* Struct deref. 
*/ + CTSize ofs; + CType *fct; + ct = lj_ctype_rawref(cp->cts, k->id); + if (cp->tok == CTOK_DEREF) { + if (!ctype_ispointer(ct->info)) + cp_err_badidx(cp, ct); + ct = lj_ctype_rawref(cp->cts, ctype_cid(ct->info)); + } + cp_next(cp); + if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); + if (!ctype_isstruct(ct->info) || ct->size == CTSIZE_INVALID || + !(fct = lj_ctype_getfield(cp->cts, ct, cp->str, &ofs)) || + ctype_isbitfield(fct->info)) { + GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL); + cp_errmsg(cp, 0, LJ_ERR_FFI_BADMEMBER, strdata(s), strdata(cp->str)); + } + ct = fct; + k->u32 = ctype_isconstval(ct->info) ? ct->size : 0; + cp_next(cp); + } else { + return; + } + k->id = ctype_cid(ct->info); + } +} + +/* Parse infix operators. */ +static void cp_expr_infix(CPState *cp, CPValue *k, int pri) +{ + CPValue k2; + k2.u32 = 0; k2.id = 0; /* Silence the compiler. */ + for (;;) { + switch (pri) { + case 0: + if (cp_opt(cp, '?')) { + CPValue k3; + cp_expr_comma(cp, &k2); /* Right-associative. */ + cp_check(cp, ':'); + cp_expr_sub(cp, &k3, 0); + k->u32 = k->u32 ? k2.u32 : k3.u32; + k->id = k2.id > k3.id ? k2.id : k3.id; + continue; + } + case 1: + if (cp_opt(cp, CTOK_OROR)) { + cp_expr_sub(cp, &k2, 2); k->i32 = k->u32 || k2.u32; k->id = CTID_INT32; + continue; + } + case 2: + if (cp_opt(cp, CTOK_ANDAND)) { + cp_expr_sub(cp, &k2, 3); k->i32 = k->u32 && k2.u32; k->id = CTID_INT32; + continue; + } + case 3: + if (cp_opt(cp, '|')) { + cp_expr_sub(cp, &k2, 4); k->u32 = k->u32 | k2.u32; goto arith_result; + } + case 4: + if (cp_opt(cp, '^')) { + cp_expr_sub(cp, &k2, 5); k->u32 = k->u32 ^ k2.u32; goto arith_result; + } + case 5: + if (cp_opt(cp, '&')) { + cp_expr_sub(cp, &k2, 6); k->u32 = k->u32 & k2.u32; goto arith_result; + } + case 6: + if (cp_opt(cp, CTOK_EQ)) { + cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 == k2.u32; k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, CTOK_NE)) { + cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 != k2.u32; k->id = CTID_INT32; + continue; + } + case 7: + if (cp_opt(cp, '<')) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 < k2.i32; + else + k->i32 = k->u32 < k2.u32; + k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, '>')) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 > k2.i32; + else + k->i32 = k->u32 > k2.u32; + k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, CTOK_LE)) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 <= k2.i32; + else + k->i32 = k->u32 <= k2.u32; + k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, CTOK_GE)) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 >= k2.i32; + else + k->i32 = k->u32 >= k2.u32; + k->id = CTID_INT32; + continue; + } + case 8: + if (cp_opt(cp, CTOK_SHL)) { + cp_expr_sub(cp, &k2, 9); k->u32 = k->u32 << k2.u32; + continue; + } else if (cp_opt(cp, CTOK_SHR)) { + cp_expr_sub(cp, &k2, 9); + if (k->id == CTID_INT32) + k->i32 = k->i32 >> k2.i32; + else + k->u32 = k->u32 >> k2.u32; + continue; + } + case 9: + if (cp_opt(cp, '+')) { + cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 + k2.u32; + arith_result: + if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. 
*/ + continue; + } else if (cp_opt(cp, '-')) { + cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 - k2.u32; goto arith_result; + } + case 10: + if (cp_opt(cp, '*')) { + cp_expr_unary(cp, &k2); k->u32 = k->u32 * k2.u32; goto arith_result; + } else if (cp_opt(cp, '/')) { + cp_expr_unary(cp, &k2); + if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ + if (k2.u32 == 0 || + (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1)) + cp_err(cp, LJ_ERR_BADVAL); + if (k->id == CTID_INT32) + k->i32 = k->i32 / k2.i32; + else + k->u32 = k->u32 / k2.u32; + continue; + } else if (cp_opt(cp, '%')) { + cp_expr_unary(cp, &k2); + if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ + if (k2.u32 == 0 || + (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1)) + cp_err(cp, LJ_ERR_BADVAL); + if (k->id == CTID_INT32) + k->i32 = k->i32 % k2.i32; + else + k->u32 = k->u32 % k2.u32; + continue; + } + default: + return; + } + } +} + +/* Parse and evaluate unary expression. */ +static void cp_expr_unary(CPState *cp, CPValue *k) +{ + if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS); + cp_expr_prefix(cp, k); + cp_expr_postfix(cp, k); + cp->depth--; +} + +/* Parse and evaluate sub-expression. */ +static void cp_expr_sub(CPState *cp, CPValue *k, int pri) +{ + cp_expr_unary(cp, k); + cp_expr_infix(cp, k, pri); +} + +/* Parse constant integer expression. */ +static void cp_expr_kint(CPState *cp, CPValue *k) +{ + CType *ct; + cp_expr_sub(cp, k, 0); + ct = ctype_raw(cp->cts, k->id); + if (!ctype_isinteger(ct->info)) cp_err(cp, LJ_ERR_BADVAL); +} + +/* Parse (non-negative) size expression. */ +static CTSize cp_expr_ksize(CPState *cp) +{ + CPValue k; + cp_expr_kint(cp, &k); + if (k.u32 >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE); + return k.u32; +} + +/* -- Type declaration stack management ----------------------------------- */ + +/* Add declaration element behind the insertion position. */ +static CPDeclIdx cp_add(CPDecl *decl, CTInfo info, CTSize size) +{ + CPDeclIdx top = decl->top; + if (top >= CPARSE_MAX_DECLSTACK) cp_err(decl->cp, LJ_ERR_XLEVELS); + decl->stack[top].info = info; + decl->stack[top].size = size; + decl->stack[top].sib = 0; + setgcrefnull(decl->stack[top].name); + decl->stack[top].next = decl->stack[decl->pos].next; + decl->stack[decl->pos].next = (CTypeID1)top; + decl->top = top+1; + return top; +} + +/* Push declaration element before the insertion position. */ +static CPDeclIdx cp_push(CPDecl *decl, CTInfo info, CTSize size) +{ + return (decl->pos = cp_add(decl, info, size)); +} + +/* Push or merge attributes. */ +static void cp_push_attributes(CPDecl *decl) +{ + CType *ct = &decl->stack[decl->pos]; + if (ctype_isfunc(ct->info)) { /* Ok to modify in-place. */ +#if LJ_TARGET_X86 + if ((decl->fattr & CTFP_CCONV)) + ct->info = (ct->info & (CTMASK_NUM|CTF_VARARG|CTMASK_CID)) + + (decl->fattr & ~CTMASK_CID); +#endif + } else { + if ((decl->attr & CTFP_ALIGNED) && !(decl->mode & CPARSE_MODE_FIELD)) + cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_ALIGN)), + ctype_align(decl->attr)); + } +} + +/* Push unrolled type to declaration stack and merge qualifiers. */ +static void cp_push_type(CPDecl *decl, CTypeID id) +{ + CType *ct = ctype_get(decl->cp->cts, id); + CTInfo info = ct->info; + CTSize size = ct->size; + switch (ctype_type(info)) { + case CT_STRUCT: case CT_ENUM: + cp_push(decl, CTINFO(CT_TYPEDEF, id), 0); /* Don't copy unique types. */ + if ((decl->attr & CTF_QUAL)) { /* Push unmerged qualifiers. 
*/ + cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_QUAL)), + (decl->attr & CTF_QUAL)); + decl->attr &= ~CTF_QUAL; + } + break; + case CT_ATTRIB: + if (ctype_isxattrib(info, CTA_QUAL)) + decl->attr &= ~size; /* Remove redundant qualifiers. */ + cp_push_type(decl, ctype_cid(info)); /* Unroll. */ + cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */ + break; + case CT_ARRAY: + if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) { + info |= (decl->attr & CTF_QUAL); + decl->attr &= ~CTF_QUAL; + } + cp_push_type(decl, ctype_cid(info)); /* Unroll. */ + cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */ + decl->stack[decl->pos].sib = 1; /* Mark as already checked and sized. */ + /* Note: this is not copied to the ct->sib in the C type table. */ + break; + case CT_FUNC: + /* Copy type, link parameters (shared). */ + decl->stack[cp_push(decl, info, size)].sib = ct->sib; + break; + default: + /* Copy type, merge common qualifiers. */ + cp_push(decl, info|(decl->attr & CTF_QUAL), size); + decl->attr &= ~CTF_QUAL; + break; + } +} + +/* Consume the declaration element chain and intern the C type. */ +static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl) +{ + CTypeID id = 0; + CPDeclIdx idx = 0; + CTSize csize = CTSIZE_INVALID; + CTSize cinfo = 0; + do { + CType *ct = &decl->stack[idx]; + CTInfo info = ct->info; + CTInfo size = ct->size; + /* The cid is already part of info for copies of pointers/functions. */ + idx = ct->next; + if (ctype_istypedef(info)) { + lua_assert(id == 0); + id = ctype_cid(info); + /* Always refetch info/size, since struct/enum may have been completed. */ + cinfo = ctype_get(cp->cts, id)->info; + csize = ctype_get(cp->cts, id)->size; + lua_assert(ctype_isstruct(cinfo) || ctype_isenum(cinfo)); + } else if (ctype_isfunc(info)) { /* Intern function. */ + CType *fct; + CTypeID fid; + CTypeID sib; + if (id) { + CType *refct = ctype_raw(cp->cts, id); + /* Reject function or refarray return types. */ + if (ctype_isfunc(refct->info) || ctype_isrefarray(refct->info)) + cp_err(cp, LJ_ERR_FFI_INVTYPE); + } + /* No intervening attributes allowed, skip forward. */ + while (idx) { + CType *ctn = &decl->stack[idx]; + if (!ctype_isattrib(ctn->info)) break; + idx = ctn->next; /* Skip attribute. */ + } + sib = ct->sib; /* Next line may reallocate the C type table. */ + fid = lj_ctype_new(cp->cts, &fct); + csize = CTSIZE_INVALID; + fct->info = cinfo = info + id; + fct->size = size; + fct->sib = sib; + id = fid; + } else if (ctype_isattrib(info)) { + if (ctype_isxattrib(info, CTA_QUAL)) + cinfo |= size; + else if (ctype_isxattrib(info, CTA_ALIGN)) + CTF_INSERT(cinfo, ALIGN, size); + id = lj_ctype_intern(cp->cts, info+id, size); + /* Inherit csize/cinfo from original type. */ + } else { + if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */ + lua_assert(id == 0); + if (!(info & CTF_BOOL)) { + CTSize msize = ctype_msizeP(decl->attr); + CTSize vsize = ctype_vsizeP(decl->attr); + if (msize && (!(info & CTF_FP) || (msize == 4 || msize == 8))) { + CTSize malign = lj_fls(msize); + if (malign > 4) malign = 4; /* Limit alignment. */ + CTF_INSERT(info, ALIGN, malign); + size = msize; /* Override size via mode. */ + } + if (vsize) { /* Vector size set? */ + CTSize esize = lj_fls(size); + if (vsize >= esize) { + /* Intern the element type first. */ + id = lj_ctype_intern(cp->cts, info, size); + /* Then create a vector (array) with vsize alignment. */ + size = (1u << vsize); + if (vsize > 4) vsize = 4; /* Limit alignment. 
*/ + if (ctype_align(info) > vsize) vsize = ctype_align(info); + info = CTINFO(CT_ARRAY, (info & CTF_QUAL) + CTF_VECTOR + + CTALIGN(vsize)); + } + } + } + } else if (ctype_isptr(info)) { + /* Reject pointer/ref to ref. */ + if (id && ctype_isref(ctype_raw(cp->cts, id)->info)) + cp_err(cp, LJ_ERR_FFI_INVTYPE); + if (ctype_isref(info)) { + info &= ~CTF_VOLATILE; /* Refs are always const, never volatile. */ + /* No intervening attributes allowed, skip forward. */ + while (idx) { + CType *ctn = &decl->stack[idx]; + if (!ctype_isattrib(ctn->info)) break; + idx = ctn->next; /* Skip attribute. */ + } + } + } else if (ctype_isarray(info)) { /* Check for valid array size etc. */ + if (ct->sib == 0) { /* Only check/size arrays not copied by unroll. */ + if (ctype_isref(cinfo)) /* Reject arrays of refs. */ + cp_err(cp, LJ_ERR_FFI_INVTYPE); + /* Reject VLS or unknown-sized types. */ + if (ctype_isvltype(cinfo) || csize == CTSIZE_INVALID) + cp_err(cp, LJ_ERR_FFI_INVSIZE); + /* a[] and a[?] keep their invalid size. */ + if (size != CTSIZE_INVALID) { + uint64_t xsz = (uint64_t)size * csize; + if (xsz >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE); + size = (CTSize)xsz; + } + } + if ((cinfo & CTF_ALIGN) > (info & CTF_ALIGN)) /* Find max. align. */ + info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN); + info |= (cinfo & CTF_QUAL); /* Inherit qual. */ + } else { + lua_assert(ctype_isvoid(info)); + } + csize = size; + cinfo = info+id; + id = lj_ctype_intern(cp->cts, info+id, size); + } + } while (idx); + return id; +} + +/* -- C declaration parser ------------------------------------------------ */ + +#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be) + +/* Reset declaration state to declaration specifier. */ +static void cp_decl_reset(CPDecl *decl) +{ + decl->pos = decl->specpos; + decl->top = decl->specpos+1; + decl->stack[decl->specpos].next = 0; + decl->attr = decl->specattr; + decl->fattr = decl->specfattr; + decl->name = NULL; + decl->redir = NULL; +} + +/* Parse constant initializer. */ +/* NYI: FP constants and strings as initializers. */ +static CTypeID cp_decl_constinit(CPState *cp, CType **ctp, CTypeID ctypeid) +{ + CType *ctt = ctype_get(cp->cts, ctypeid); + CTInfo info; + CTSize size; + CPValue k; + CTypeID constid; + while (ctype_isattrib(ctt->info)) { /* Skip attributes. */ + ctypeid = ctype_cid(ctt->info); /* Update ID, too. */ + ctt = ctype_get(cp->cts, ctypeid); + } + info = ctt->info; + size = ctt->size; + if (!ctype_isinteger(info) || !(info & CTF_CONST) || size > 4) + cp_err(cp, LJ_ERR_FFI_INVTYPE); + cp_check(cp, '='); + cp_expr_sub(cp, &k, 0); + constid = lj_ctype_new(cp->cts, ctp); + (*ctp)->info = CTINFO(CT_CONSTVAL, CTF_CONST|ctypeid); + k.u32 <<= 8*(4-size); + if ((info & CTF_UNSIGNED)) + k.u32 >>= 8*(4-size); + else + k.u32 = (uint32_t)((int32_t)k.u32 >> 8*(4-size)); + (*ctp)->size = k.u32; + return constid; +} + +/* Parse size in parentheses as part of attribute. */ +static CTSize cp_decl_sizeattr(CPState *cp) +{ + CTSize sz; + uint32_t oldtmask = cp->tmask; + cp->tmask = CPNS_DEFAULT; /* Required for expression evaluator. */ + cp_check(cp, '('); + sz = cp_expr_ksize(cp); + cp->tmask = oldtmask; + cp_check(cp, ')'); + return sz; +} + +/* Parse alignment attribute. */ +static void cp_decl_align(CPState *cp, CPDecl *decl) +{ + CTSize al = 4; /* Unspecified alignment is 16 bytes. */ + if (cp->tok == '(') { + al = cp_decl_sizeattr(cp); + al = al ? lj_fls(al) : 0; + } + CTF_INSERT(decl->attr, ALIGN, al); + decl->attr |= CTFP_ALIGNED; +} + +/* Parse GCC asm("name") redirect. 
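+**
+** Usage sketch (Lua; illustrative, assumes the redirected symbol is visible in
+** the default C namespace):
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ int my_puts(const char *s) asm("puts"); ]]
+**   ffi.C.my_puts("redirected")   -- resolves the C symbol "puts", not "my_puts"
+**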
*/ +static void cp_decl_asm(CPState *cp, CPDecl *decl) +{ + UNUSED(decl); + cp_next(cp); + cp_check(cp, '('); + if (cp->tok == CTOK_STRING) { + GCstr *str = cp->str; + while (cp_next(cp) == CTOK_STRING) { + lj_strfmt_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str)); + cp->L->top--; + str = strV(cp->L->top); + } + decl->redir = str; + } + cp_check(cp, ')'); +} + +/* Parse GCC __attribute__((mode(...))). */ +static void cp_decl_mode(CPState *cp, CPDecl *decl) +{ + cp_check(cp, '('); + if (cp->tok == CTOK_IDENT) { + const char *s = strdata(cp->str); + CTSize sz = 0, vlen = 0; + if (s[0] == '_' && s[1] == '_') s += 2; + if (*s == 'V') { + s++; + vlen = *s++ - '0'; + if (*s >= '0' && *s <= '9') + vlen = vlen*10 + (*s++ - '0'); + } + switch (*s++) { + case 'Q': sz = 1; break; + case 'H': sz = 2; break; + case 'S': sz = 4; break; + case 'D': sz = 8; break; + case 'T': sz = 16; break; + case 'O': sz = 32; break; + default: goto bad_size; + } + if (*s == 'I' || *s == 'F') { + CTF_INSERT(decl->attr, MSIZEP, sz); + if (vlen) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vlen*sz)); + } + bad_size: + cp_next(cp); + } + cp_check(cp, ')'); +} + +/* Parse GCC __attribute__((...)). */ +static void cp_decl_gccattribute(CPState *cp, CPDecl *decl) +{ + cp_next(cp); + cp_check(cp, '('); + cp_check(cp, '('); + while (cp->tok != ')') { + if (cp->tok == CTOK_IDENT) { + GCstr *attrstr = cp->str; + cp_next(cp); + switch (attrstr->hash) { + case H_(64a9208e,8ce14319): case H_(8e6331b2,95a282af): /* aligned */ + cp_decl_align(cp, decl); + break; + case H_(42eb47de,f0ede26c): case H_(29f48a09,cf383e0c): /* packed */ + decl->attr |= CTFP_PACKED; + break; + case H_(0a84eef6,8dfab04c): case H_(995cf92c,d5696591): /* mode */ + cp_decl_mode(cp, decl); + break; + case H_(0ab31997,2d5213fa): case H_(bf875611,200e9990): /* vector_size */ + { + CTSize vsize = cp_decl_sizeattr(cp); + if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize)); + } + break; +#if LJ_TARGET_X86 + case H_(5ad22db8,c689b848): case H_(439150fa,65ea78cb): /* regparm */ + CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp)); + decl->fattr |= CTFP_CCONV; + break; + case H_(18fc0b98,7ff4c074): case H_(4e62abed,0a747424): /* cdecl */ + CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL); + decl->fattr |= CTFP_CCONV; + break; + case H_(72b2e41b,494c5a44): case H_(f2356d59,f25fc9bd): /* thiscall */ + CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL); + decl->fattr |= CTFP_CCONV; + break; + case H_(0d0ffc42,ab746f88): case H_(21c54ba1,7f0ca7e3): /* fastcall */ + CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL); + decl->fattr |= CTFP_CCONV; + break; + case H_(ef76b040,9412e06a): case H_(de56697b,c750e6e1): /* stdcall */ + CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL); + decl->fattr |= CTFP_CCONV; + break; + case H_(ea78b622,f234bd8e): case H_(252ffb06,8d50f34b): /* sseregparm */ + decl->fattr |= CTF_SSEREGPARM; + decl->fattr |= CTFP_CCONV; + break; +#endif + default: /* Skip all other attributes. */ + goto skip_attr; + } + } else if (cp->tok >= CTOK_FIRSTDECL) { /* For __attribute((const)) etc. */ + cp_next(cp); + skip_attr: + if (cp_opt(cp, '(')) { + while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); + cp_check(cp, ')'); + } + } else { + break; + } + if (!cp_opt(cp, ',')) break; + } + cp_check(cp, ')'); + cp_check(cp, ')'); +} + +/* Parse MSVC __declspec(...). 
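+**
+** Usage sketch (Lua; illustrative only): both the GCC and the MSVC spelling of
+** an alignment attribute end up in cp_decl_align() above, e.g.
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ typedef struct { char c; } __attribute__((aligned(16))) abuf_t; ]]
+**   print(ffi.sizeof("abuf_t"), ffi.alignof("abuf_t"))   --> 16  16
+**
+** __declspec(align(16)) is the equivalent form handled by the function below.
+**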
*/ +static void cp_decl_msvcattribute(CPState *cp, CPDecl *decl) +{ + cp_next(cp); + cp_check(cp, '('); + while (cp->tok == CTOK_IDENT) { + GCstr *attrstr = cp->str; + cp_next(cp); + switch (attrstr->hash) { + case H_(bc2395fa,98f267f8): /* align */ + cp_decl_align(cp, decl); + break; + default: /* Ignore all other attributes. */ + if (cp_opt(cp, '(')) { + while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); + cp_check(cp, ')'); + } + break; + } + } + cp_check(cp, ')'); +} + +/* Parse declaration attributes (and common qualifiers). */ +static void cp_decl_attributes(CPState *cp, CPDecl *decl) +{ + for (;;) { + switch (cp->tok) { + case CTOK_CONST: decl->attr |= CTF_CONST; break; + case CTOK_VOLATILE: decl->attr |= CTF_VOLATILE; break; + case CTOK_RESTRICT: break; /* Ignore. */ + case CTOK_EXTENSION: break; /* Ignore. */ + case CTOK_ATTRIBUTE: cp_decl_gccattribute(cp, decl); continue; + case CTOK_ASM: cp_decl_asm(cp, decl); continue; + case CTOK_DECLSPEC: cp_decl_msvcattribute(cp, decl); continue; + case CTOK_CCDECL: +#if LJ_TARGET_X86 + CTF_INSERT(decl->fattr, CCONV, cp->ct->size); + decl->fattr |= CTFP_CCONV; +#endif + break; + case CTOK_PTRSZ: +#if LJ_64 + CTF_INSERT(decl->attr, MSIZEP, cp->ct->size); +#endif + break; + default: return; + } + cp_next(cp); + } +} + +/* Parse struct/union/enum name. */ +static CTypeID cp_struct_name(CPState *cp, CPDecl *sdecl, CTInfo info) +{ + CTypeID sid; + CType *ct; + cp->tmask = CPNS_STRUCT; + cp_next(cp); + cp_decl_attributes(cp, sdecl); + cp->tmask = CPNS_DEFAULT; + if (cp->tok != '{') { + if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); + if (cp->val.id) { /* Name of existing struct/union/enum. */ + sid = cp->val.id; + ct = cp->ct; + if ((ct->info ^ info) & (CTMASK_NUM|CTF_UNION)) /* Wrong type. */ + cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name)))); + } else { /* Create named, incomplete struct/union/enum. */ + if ((cp->mode & CPARSE_MODE_NOIMPLICIT)) + cp_errmsg(cp, 0, LJ_ERR_FFI_BADTAG, strdata(cp->str)); + sid = lj_ctype_new(cp->cts, &ct); + ct->info = info; + ct->size = CTSIZE_INVALID; + ctype_setname(ct, cp->str); + lj_ctype_addname(cp->cts, ct, sid); + } + cp_next(cp); + } else { /* Create anonymous, incomplete struct/union/enum. */ + sid = lj_ctype_new(cp->cts, &ct); + ct->info = info; + ct->size = CTSIZE_INVALID; + } + if (cp->tok == '{') { + if (ct->size != CTSIZE_INVALID || ct->sib) + cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name)))); + ct->sib = 1; /* Indicate the type is currently being defined. */ + } + return sid; +} + +/* Determine field alignment. */ +static CTSize cp_field_align(CPState *cp, CType *ct, CTInfo info) +{ + CTSize align = ctype_align(info); + UNUSED(cp); UNUSED(ct); +#if (LJ_TARGET_X86 && !LJ_ABI_WIN) || (LJ_TARGET_ARM && __APPLE__) + /* The SYSV i386 and iOS ABIs limit alignment of non-vector fields to 2^2. */ + if (align > 2 && !(info & CTFP_ALIGNED)) { + if (ctype_isarray(info) && !(info & CTF_VECTOR)) { + do { + ct = ctype_rawchild(cp->cts, ct); + info = ct->info; + } while (ctype_isarray(info) && !(info & CTF_VECTOR)); + } + if (ctype_isnum(info) || ctype_isenum(info)) + align = 2; + } +#endif + return align; +} + +/* Layout struct/union fields. */ +static void cp_struct_layout(CPState *cp, CTypeID sid, CTInfo sattr) +{ + CTSize bofs = 0, bmaxofs = 0; /* Bit offset and max. bit offset. 
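+**
+** Usage sketch (Lua; sizes assume a common 32/64-bit ABI): the layout computed
+** here is what ffi.sizeof()/ffi.offsetof() report, e.g.
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[
+**     typedef struct { char c; int i; } plain_t;
+**     typedef struct { char c; int i; } __attribute__((packed)) packed_t;
+**   ]]
+**   print(ffi.sizeof("plain_t"),  ffi.offsetof("plain_t",  "i"))   --> 8  4
+**   print(ffi.sizeof("packed_t"), ffi.offsetof("packed_t", "i"))   --> 5  1
+**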
*/ + CTSize maxalign = ctype_align(sattr); + CType *sct = ctype_get(cp->cts, sid); + CTInfo sinfo = sct->info; + CTypeID fieldid = sct->sib; + while (fieldid) { + CType *ct = ctype_get(cp->cts, fieldid); + CTInfo attr = ct->size; /* Field declaration attributes (temp.). */ + + if (ctype_isfield(ct->info) || + (ctype_isxattrib(ct->info, CTA_SUBTYPE) && attr)) { + CTSize align, amask; /* Alignment (pow2) and alignment mask (bits). */ + CTSize sz; + CTInfo info = lj_ctype_info(cp->cts, ctype_cid(ct->info), &sz); + CTSize bsz, csz = 8*sz; /* Field size and container size (in bits). */ + sinfo |= (info & (CTF_QUAL|CTF_VLA)); /* Merge pseudo-qualifiers. */ + + /* Check for size overflow and determine alignment. */ + if (sz >= 0x20000000u || bofs + csz < bofs || (info & CTF_VLA)) { + if (!(sz == CTSIZE_INVALID && ctype_isarray(info) && + !(sinfo & CTF_UNION))) + cp_err(cp, LJ_ERR_FFI_INVSIZE); + csz = sz = 0; /* Treat a[] and a[?] as zero-sized. */ + } + align = cp_field_align(cp, ct, info); + if (((attr|sattr) & CTFP_PACKED) || + ((attr & CTFP_ALIGNED) && ctype_align(attr) > align)) + align = ctype_align(attr); + if (cp->packstack[cp->curpack] < align) + align = cp->packstack[cp->curpack]; + if (align > maxalign) maxalign = align; + amask = (8u << align) - 1; + + bsz = ctype_bitcsz(ct->info); /* Bitfield size (temp.). */ + if (bsz == CTBSZ_FIELD || !ctype_isfield(ct->info)) { + bsz = csz; /* Regular fields or subtypes always fill the container. */ + bofs = (bofs + amask) & ~amask; /* Start new aligned field. */ + ct->size = (bofs >> 3); /* Store field offset. */ + } else { /* Bitfield. */ + if (bsz == 0 || (attr & CTFP_ALIGNED) || + (!((attr|sattr) & CTFP_PACKED) && (bofs & amask) + bsz > csz)) + bofs = (bofs + amask) & ~amask; /* Start new aligned field. */ + + /* Prefer regular field over bitfield. */ + if (bsz == csz && (bofs & amask) == 0) { + ct->info = CTINFO(CT_FIELD, ctype_cid(ct->info)); + ct->size = (bofs >> 3); /* Store field offset. */ + } else { + ct->info = CTINFO(CT_BITFIELD, + (info & (CTF_QUAL|CTF_UNSIGNED|CTF_BOOL)) + + (csz << (CTSHIFT_BITCSZ-3)) + (bsz << CTSHIFT_BITBSZ)); +#if LJ_BE + ct->info += ((csz - (bofs & (csz-1)) - bsz) << CTSHIFT_BITPOS); +#else + ct->info += ((bofs & (csz-1)) << CTSHIFT_BITPOS); +#endif + ct->size = ((bofs & ~(csz-1)) >> 3); /* Store container offset. */ + } + } + + /* Determine next offset or max. offset. */ + if ((sinfo & CTF_UNION)) { + if (bsz > bmaxofs) bmaxofs = bsz; + } else { + bofs += bsz; + } + } /* All other fields in the chain are already set up. */ + + fieldid = ct->sib; + } + + /* Complete struct/union. */ + sct->info = sinfo + CTALIGN(maxalign); + bofs = (sinfo & CTF_UNION) ? bmaxofs : bofs; + maxalign = (8u << maxalign) - 1; + sct->size = (((bofs + maxalign) & ~maxalign) >> 3); +} + +/* Parse struct/union declaration. */ +static CTypeID cp_decl_struct(CPState *cp, CPDecl *sdecl, CTInfo sinfo) +{ + CTypeID sid = cp_struct_name(cp, sdecl, sinfo); + if (cp_opt(cp, '{')) { /* Struct/union definition. */ + CTypeID lastid = sid; + int lastdecl = 0; + while (cp->tok != '}') { + CPDecl decl; + CPscl scl = cp_decl_spec(cp, &decl, CDF_STATIC); + decl.mode = scl ? CPARSE_MODE_DIRECT : + CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT|CPARSE_MODE_FIELD; + + for (;;) { + CTypeID ctypeid; + + if (lastdecl) cp_err_token(cp, '}'); + + /* Parse field declarator. */ + decl.bits = CTSIZE_INVALID; + cp_declarator(cp, &decl); + ctypeid = cp_decl_intern(cp, &decl); + + if ((scl & CDF_STATIC)) { /* Static constant in struct namespace. 
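+**
+** Usage sketch (Lua; illustrative only): a C++-style static const integer inside
+** a struct is stored as a constant in the struct's namespace and can be read by
+** indexing an instance, e.g.
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ typedef struct { static const int MAX_LEN = 16; int len; } hdr_t; ]]
+**   local h = ffi.new("hdr_t")
+**   print(h.MAX_LEN)   --> 16
+**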
*/ + CType *ct; + CTypeID fieldid = cp_decl_constinit(cp, &ct, ctypeid); + ctype_get(cp->cts, lastid)->sib = fieldid; + lastid = fieldid; + ctype_setname(ct, decl.name); + } else { + CTSize bsz = CTBSZ_FIELD; /* Temp. for layout phase. */ + CType *ct; + CTypeID fieldid = lj_ctype_new(cp->cts, &ct); /* Do this first. */ + CType *tct = ctype_raw(cp->cts, ctypeid); + + if (decl.bits == CTSIZE_INVALID) { /* Regular field. */ + if (ctype_isarray(tct->info) && tct->size == CTSIZE_INVALID) + lastdecl = 1; /* a[] or a[?] must be the last declared field. */ + + /* Accept transparent struct/union/enum. */ + if (!decl.name) { + if (!((ctype_isstruct(tct->info) && !(tct->info & CTF_VLA)) || + ctype_isenum(tct->info))) + cp_err_token(cp, CTOK_IDENT); + ct->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_SUBTYPE) + ctypeid); + ct->size = ctype_isstruct(tct->info) ? + (decl.attr|0x80000000u) : 0; /* For layout phase. */ + goto add_field; + } + } else { /* Bitfield. */ + bsz = decl.bits; + if (!ctype_isinteger_or_bool(tct->info) || + (bsz == 0 && decl.name) || 8*tct->size > CTBSZ_MAX || + bsz > ((tct->info & CTF_BOOL) ? 1 : 8*tct->size)) + cp_errmsg(cp, ':', LJ_ERR_BADVAL); + } + + /* Create temporary field for layout phase. */ + ct->info = CTINFO(CT_FIELD, ctypeid + (bsz << CTSHIFT_BITCSZ)); + ct->size = decl.attr; + if (decl.name) ctype_setname(ct, decl.name); + + add_field: + ctype_get(cp->cts, lastid)->sib = fieldid; + lastid = fieldid; + } + if (!cp_opt(cp, ',')) break; + cp_decl_reset(&decl); + } + cp_check(cp, ';'); + } + cp_check(cp, '}'); + ctype_get(cp->cts, lastid)->sib = 0; /* Drop sib = 1 for empty structs. */ + cp_decl_attributes(cp, sdecl); /* Layout phase needs postfix attributes. */ + cp_struct_layout(cp, sid, sdecl->attr); + } + return sid; +} + +/* Parse enum declaration. */ +static CTypeID cp_decl_enum(CPState *cp, CPDecl *sdecl) +{ + CTypeID eid = cp_struct_name(cp, sdecl, CTINFO(CT_ENUM, CTID_VOID)); + CTInfo einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_UINT32); + CTSize esize = 4; /* Only 32 bit enums are supported. */ + if (cp_opt(cp, '{')) { /* Enum definition. */ + CPValue k; + CTypeID lastid = eid; + k.u32 = 0; + k.id = CTID_INT32; + do { + GCstr *name = cp->str; + if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); + if (cp->val.id) cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(name)); + cp_next(cp); + if (cp_opt(cp, '=')) { + cp_expr_kint(cp, &k); + if (k.id == CTID_UINT32) { + /* C99 says that enum constants are always (signed) integers. + ** But since unsigned constants like 0x80000000 are quite common, + ** those are left as uint32_t. + */ + if (k.i32 >= 0) k.id = CTID_INT32; + } else { + /* OTOH it's common practice and even mandated by some ABIs + ** that the enum type itself is unsigned, unless there are any + ** negative constants. + */ + k.id = CTID_INT32; + if (k.i32 < 0) einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_INT32); + } + } + /* Add named enum constant. */ + { + CType *ct; + CTypeID constid = lj_ctype_new(cp->cts, &ct); + ctype_get(cp->cts, lastid)->sib = constid; + lastid = constid; + ctype_setname(ct, name); + ct->info = CTINFO(CT_CONSTVAL, CTF_CONST|k.id); + ct->size = k.u32++; + if (k.u32 == 0x80000000u) k.id = CTID_UINT32; + lj_ctype_addname(cp->cts, ct, constid); + } + if (!cp_opt(cp, ',')) break; + } while (cp->tok != '}'); /* Trailing ',' is ok. */ + cp_check(cp, '}'); + /* Complete enum. */ + ctype_get(cp->cts, eid)->info = einfo; + ctype_get(cp->cts, eid)->size = esize; + } + return eid; +} + +/* Parse declaration specifiers. 
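+**
+** Usage sketch (Lua; illustrative only) for the enum parser above:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ typedef enum { RED, GREEN = 5, BLUE } color_t; ]]
+**   print(ffi.C.BLUE)                       -- should print 6
+**   local c = ffi.new("color_t", "GREEN")   -- strings match constant names
+**   print(tonumber(c))                      -- should print 5
+**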
*/ +static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl) +{ + uint32_t cds = 0, sz = 0; + CTypeID tdef = 0; + + decl->cp = cp; + decl->mode = cp->mode; + decl->name = NULL; + decl->redir = NULL; + decl->attr = 0; + decl->fattr = 0; + decl->pos = decl->top = 0; + decl->stack[0].next = 0; + + for (;;) { /* Parse basic types. */ + cp_decl_attributes(cp, decl); + if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECLFLAG) { + uint32_t cbit; + if (cp->ct->size) { + if (sz) goto end_decl; + sz = cp->ct->size; + } + cbit = (1u << (cp->tok - CTOK_FIRSTDECL)); + cds = cds | cbit | ((cbit & cds & CDF_LONG) << 1); + if (cp->tok >= CTOK_FIRSTSCL) { + if (!(scl & cbit)) cp_errmsg(cp, cp->tok, LJ_ERR_FFI_BADSCL); + } else if (tdef) { + goto end_decl; + } + cp_next(cp); + continue; + } + if (sz || tdef || + (cds & (CDF_SHORT|CDF_LONG|CDF_SIGNED|CDF_UNSIGNED|CDF_COMPLEX))) + break; + switch (cp->tok) { + case CTOK_STRUCT: + tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, 0)); + continue; + case CTOK_UNION: + tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, CTF_UNION)); + continue; + case CTOK_ENUM: + tdef = cp_decl_enum(cp, decl); + continue; + case CTOK_IDENT: + if (ctype_istypedef(cp->ct->info)) { + tdef = ctype_cid(cp->ct->info); /* Get typedef. */ + cp_next(cp); + continue; + } + break; + case '$': + tdef = cp->val.id; + cp_next(cp); + continue; + default: + break; + } + break; + } +end_decl: + + if ((cds & CDF_COMPLEX)) /* Use predefined complex types. */ + tdef = sz == 4 ? CTID_COMPLEX_FLOAT : CTID_COMPLEX_DOUBLE; + + if (tdef) { + cp_push_type(decl, tdef); + } else if ((cds & CDF_VOID)) { + cp_push(decl, CTINFO(CT_VOID, (decl->attr & CTF_QUAL)), CTSIZE_INVALID); + decl->attr &= ~CTF_QUAL; + } else { + /* Determine type info and size. */ + CTInfo info = CTINFO(CT_NUM, (cds & CDF_UNSIGNED) ? CTF_UNSIGNED : 0); + if ((cds & CDF_BOOL)) { + if ((cds & ~(CDF_SCL|CDF_BOOL|CDF_INT|CDF_SIGNED|CDF_UNSIGNED))) + cp_errmsg(cp, 0, LJ_ERR_FFI_INVTYPE); + info |= CTF_BOOL; + if (!(cds & CDF_SIGNED)) info |= CTF_UNSIGNED; + if (!sz) { + sz = 1; + } + } else if ((cds & CDF_FP)) { + info = CTINFO(CT_NUM, CTF_FP); + if ((cds & CDF_LONG)) sz = sizeof(long double); + } else if ((cds & CDF_CHAR)) { + if ((cds & (CDF_CHAR|CDF_SIGNED|CDF_UNSIGNED)) == CDF_CHAR) + info |= CTF_UCHAR; /* Handle platforms where char is unsigned. */ + } else if ((cds & CDF_SHORT)) { + sz = sizeof(short); + } else if ((cds & CDF_LONGLONG)) { + sz = 8; + } else if ((cds & CDF_LONG)) { + info |= CTF_LONG; + sz = sizeof(long); + } else if (!sz) { + if (!(cds & (CDF_SIGNED|CDF_UNSIGNED))) + cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC); + sz = sizeof(int); + } + lua_assert(sz != 0); + info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */ + info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */ + cp_push(decl, info, sz); + decl->attr &= ~CTF_QUAL; + } + decl->specpos = decl->pos; + decl->specattr = decl->attr; + decl->specfattr = decl->fattr; + return (cds & CDF_SCL); /* Return storage class. */ +} + +/* Parse array declaration. */ +static void cp_decl_array(CPState *cp, CPDecl *decl) +{ + CTInfo info = CTINFO(CT_ARRAY, 0); + CTSize nelem = CTSIZE_INVALID; /* Default size for a[] or a[?]. */ + cp_decl_attributes(cp, decl); + if (cp_opt(cp, '?')) + info |= CTF_VLA; /* Create variable-length array a[?]. */ + else if (cp->tok != ']') + nelem = cp_expr_ksize(cp); + cp_check(cp, ']'); + cp_add(decl, info, nelem); +} + +/* Parse function declaration. 
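+**
+** Usage sketch (Lua; the classic FFI example, assuming libc is visible):
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ int printf(const char *fmt, ...); ]]
+**   ffi.C.printf("Hello %s!\n", "world")   -- the '...' sets CTF_VARARG below
+**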
*/
+static void cp_decl_func(CPState *cp, CPDecl *fdecl)
+{
+  CTSize nargs = 0;
+  CTInfo info = CTINFO(CT_FUNC, 0);
+  CTypeID lastid = 0, anchor = 0;
+  if (cp->tok != ')') {
+    do {
+      CPDecl decl;
+      CTypeID ctypeid, fieldid;
+      CType *ct;
+      if (cp_opt(cp, '.')) {  /* Vararg function. */
+        cp_check(cp, '.');  /* Workaround for the minimalistic lexer. */
+        cp_check(cp, '.');
+        info |= CTF_VARARG;
+        break;
+      }
+      cp_decl_spec(cp, &decl, CDF_REGISTER);
+      decl.mode = CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT;
+      cp_declarator(cp, &decl);
+      ctypeid = cp_decl_intern(cp, &decl);
+      ct = ctype_raw(cp->cts, ctypeid);
+      if (ctype_isvoid(ct->info))
+        break;
+      else if (ctype_isrefarray(ct->info))
+        ctypeid = lj_ctype_intern(cp->cts,
+          CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ct->info)), CTSIZE_PTR);
+      else if (ctype_isfunc(ct->info))
+        ctypeid = lj_ctype_intern(cp->cts,
+          CTINFO(CT_PTR, CTALIGN_PTR|ctypeid), CTSIZE_PTR);
+      /* Add new parameter. */
+      fieldid = lj_ctype_new(cp->cts, &ct);
+      if (anchor)
+        ctype_get(cp->cts, lastid)->sib = fieldid;
+      else
+        anchor = fieldid;
+      lastid = fieldid;
+      if (decl.name) ctype_setname(ct, decl.name);
+      ct->info = CTINFO(CT_FIELD, ctypeid);
+      ct->size = nargs++;
+    } while (cp_opt(cp, ','));
+  }
+  cp_check(cp, ')');
+  if (cp_opt(cp, '{')) {  /* Skip function definition. */
+    int level = 1;
+    cp->mode |= CPARSE_MODE_SKIP;
+    for (;;) {
+      if (cp->tok == '{') level++;
+      else if (cp->tok == '}' && --level == 0) break;
+      else if (cp->tok == CTOK_EOF) cp_err_token(cp, '}');
+      cp_next(cp);
+    }
+    cp->mode &= ~CPARSE_MODE_SKIP;
+    cp->tok = ';';  /* Ok for cp_decl_multi(), error in cp_decl_single(). */
+  }
+  info |= (fdecl->fattr & ~CTMASK_CID);
+  fdecl->fattr = 0;
+  fdecl->stack[cp_add(fdecl, info, nargs)].sib = anchor;
+}
+
+/* Parse declarator. */
+static void cp_declarator(CPState *cp, CPDecl *decl)
+{
+  if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS);
+
+  for (;;) {  /* Head of declarator. */
+    if (cp_opt(cp, '*')) {  /* Pointer. */
+      CTSize sz;
+      CTInfo info;
+      cp_decl_attributes(cp, decl);
+      sz = CTSIZE_PTR;
+      info = CTINFO(CT_PTR, CTALIGN_PTR);
+#if LJ_64
+      if (ctype_msizeP(decl->attr) == 4) {
+        sz = 4;
+        info = CTINFO(CT_PTR, CTALIGN(2));
+      }
+#endif
+      info += (decl->attr & (CTF_QUAL|CTF_REF));
+      decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP));
+      cp_push(decl, info, sz);
+    } else if (cp_opt(cp, '&') || cp_opt(cp, CTOK_ANDAND)) {  /* Reference. */
+      decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP));
+      cp_push(decl, CTINFO_REF(0), CTSIZE_PTR);
+    } else {
+      break;
+    }
+  }
+
+  if (cp_opt(cp, '(')) {  /* Inner declarator. */
+    CPDeclIdx pos;
+    cp_decl_attributes(cp, decl);
+    /* Resolve ambiguity between inner declarator or 1st function parameter. */
+    if ((decl->mode & CPARSE_MODE_ABSTRACT) &&
+        (cp->tok == ')' || cp_istypedecl(cp))) goto func_decl;
+    pos = decl->pos;
+    cp_declarator(cp, decl);
+    cp_check(cp, ')');
+    decl->pos = pos;
+  } else if (cp->tok == CTOK_IDENT) {  /* Direct declarator. */
+    if (!(decl->mode & CPARSE_MODE_DIRECT)) cp_err_token(cp, CTOK_EOF);
+    decl->name = cp->str;
+    decl->nameid = cp->val.id;
+    cp_next(cp);
+  } else {  /* Abstract declarator. */
+    if (!(decl->mode & CPARSE_MODE_ABSTRACT)) cp_err_token(cp, CTOK_IDENT);
+  }
+
+  for (;;) {  /* Tail of declarator. */
+    if (cp_opt(cp, '[')) {  /* Array. */
+      cp_decl_array(cp, decl);
+    } else if (cp_opt(cp, '(')) {  /* Function. */
+    func_decl:
+      cp_decl_func(cp, decl);
+    } else {
+      break;
+    }
+  }
+
+  if ((decl->mode & CPARSE_MODE_FIELD) && cp_opt(cp, ':'))  /* Field width. */
+    decl->bits = cp_expr_ksize(cp);
+
+  /* Process postfix attributes. */
+  cp_decl_attributes(cp, decl);
+  cp_push_attributes(decl);
+
+  cp->depth--;
+}
+
+/* Parse an abstract type declaration and return its C type ID.
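+**
+** Usage sketch (Lua; sizes assume a 64-bit target): abstract declarations like
+** these exercise cp_declarator() above, distinguishing "pointer to array" from
+** "array of pointers":
+**
+**   local ffi = require("ffi")
+**   print(ffi.sizeof("int (*)[4]"))   --> 8   (one pointer)
+**   print(ffi.sizeof("int *[4]"))     --> 32  (four pointers)
+**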
*/ +static CTypeID cp_decl_abstract(CPState *cp) +{ + CPDecl decl; + cp_decl_spec(cp, &decl, 0); + decl.mode = CPARSE_MODE_ABSTRACT; + cp_declarator(cp, &decl); + return cp_decl_intern(cp, &decl); +} + +/* Handle pragmas. */ +static void cp_pragma(CPState *cp, BCLine pragmaline) +{ + cp_next(cp); + if (cp->tok == CTOK_IDENT && + cp->str->hash == H_(e79b999f,42ca3e85)) { /* pack */ + cp_next(cp); + cp_check(cp, '('); + if (cp->tok == CTOK_IDENT) { + if (cp->str->hash == H_(738e923c,a1b65954)) { /* push */ + if (cp->curpack < CPARSE_MAX_PACKSTACK) { + cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack]; + cp->curpack++; + } + } else if (cp->str->hash == H_(6c71cf27,6c71cf27)) { /* pop */ + if (cp->curpack > 0) cp->curpack--; + } else { + cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); + } + cp_next(cp); + if (!cp_opt(cp, ',')) goto end_pack; + } + if (cp->tok == CTOK_INTEGER) { + cp->packstack[cp->curpack] = cp->val.u32 ? lj_fls(cp->val.u32) : 0; + cp_next(cp); + } else { + cp->packstack[cp->curpack] = 255; + } + end_pack: + cp_check(cp, ')'); + } else { /* Ignore all other pragmas. */ + while (cp->tok != CTOK_EOF && cp->linenumber == pragmaline) + cp_next(cp); + } +} + +/* Parse multiple C declarations of types or extern identifiers. */ +static void cp_decl_multi(CPState *cp) +{ + int first = 1; + while (cp->tok != CTOK_EOF) { + CPDecl decl; + CPscl scl; + if (cp_opt(cp, ';')) { /* Skip empty statements. */ + first = 0; + continue; + } + if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */ + BCLine pragmaline = cp->linenumber; + if (!(cp_next(cp) == CTOK_IDENT && + cp->str->hash == H_(f5e6b4f8,1d509107))) /* pragma */ + cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); + cp_pragma(cp, pragmaline); + continue; + } + scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC); + if ((cp->tok == ';' || cp->tok == CTOK_EOF) && + ctype_istypedef(decl.stack[0].info)) { + CTInfo info = ctype_rawchild(cp->cts, &decl.stack[0])->info; + if (ctype_isstruct(info) || ctype_isenum(info)) + goto decl_end; /* Accept empty declaration of struct/union/enum. */ + } + for (;;) { + CTypeID ctypeid; + cp_declarator(cp, &decl); + ctypeid = cp_decl_intern(cp, &decl); + if (decl.name && !decl.nameid) { /* NYI: redeclarations are ignored. */ + CType *ct; + CTypeID id; + if ((scl & CDF_TYPEDEF)) { /* Create new typedef. */ + id = lj_ctype_new(cp->cts, &ct); + ct->info = CTINFO(CT_TYPEDEF, ctypeid); + goto noredir; + } else if (ctype_isfunc(ctype_get(cp->cts, ctypeid)->info)) { + /* Treat both static and extern function declarations as extern. */ + ct = ctype_get(cp->cts, ctypeid); + /* We always get new anonymous functions (typedefs are copied). */ + lua_assert(gcref(ct->name) == NULL); + id = ctypeid; /* Just name it. */ + } else if ((scl & CDF_STATIC)) { /* Accept static constants. */ + id = cp_decl_constinit(cp, &ct, ctypeid); + goto noredir; + } else { /* External references have extern or no storage class. */ + id = lj_ctype_new(cp->cts, &ct); + ct->info = CTINFO(CT_EXTERN, ctypeid); + } + if (decl.redir) { /* Add attribute for redirected symbol name. */ + CType *cta; + CTypeID aid = lj_ctype_new(cp->cts, &cta); + ct = ctype_get(cp->cts, id); /* Table may have been reallocated. 
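+**
+** Usage sketch (Lua; illustrative only) for cp_pragma() above:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[
+**   #pragma pack(push, 1)
+**     typedef struct { char c; int i; } tight_t;
+**   #pragma pack(pop)
+**   ]]
+**   print(ffi.sizeof("tight_t"))   --> 5
+**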
*/ + cta->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_REDIR)); + cta->sib = ct->sib; + ct->sib = aid; + ctype_setname(cta, decl.redir); + } + noredir: + ctype_setname(ct, decl.name); + lj_ctype_addname(cp->cts, ct, id); + } + if (!cp_opt(cp, ',')) break; + cp_decl_reset(&decl); + } + decl_end: + if (cp->tok == CTOK_EOF && first) break; /* May omit ';' for 1 decl. */ + first = 0; + cp_check(cp, ';'); + } +} + +/* Parse a single C type declaration. */ +static void cp_decl_single(CPState *cp) +{ + CPDecl decl; + cp_decl_spec(cp, &decl, 0); + cp_declarator(cp, &decl); + cp->val.id = cp_decl_intern(cp, &decl); + if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF); +} + +#undef H_ + +/* ------------------------------------------------------------------------ */ + +/* Protected callback for C parser. */ +static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud) +{ + CPState *cp = (CPState *)ud; + UNUSED(dummy); + cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ + cp_init(cp); + if ((cp->mode & CPARSE_MODE_MULTI)) + cp_decl_multi(cp); + else + cp_decl_single(cp); + if (cp->param && cp->param != cp->L->top) + cp_err(cp, LJ_ERR_FFI_NUMPARAM); + lua_assert(cp->depth == 0); + return NULL; +} + +/* C parser. */ +int lj_cparse(CPState *cp) +{ + LJ_CTYPE_SAVE(cp->cts); + int errcode = lj_vm_cpcall(cp->L, NULL, cp, cpcparser); + if (errcode) + LJ_CTYPE_RESTORE(cp->cts); + cp_cleanup(cp); + return errcode; +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.h new file mode 100644 index 00000000000..441580de6fe --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_cparse.h @@ -0,0 +1,65 @@ +/* +** C declaration parser. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CPARSE_H +#define _LJ_CPARSE_H + +#include "lj_obj.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* C parser limits. */ +#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */ +#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */ +#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */ +#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */ + +/* Flags for C parser mode. */ +#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */ +#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */ +#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */ +#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */ +#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */ +#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */ + +typedef int CPChar; /* C parser character. Unsigned ext. from char. */ +typedef int CPToken; /* C parser token. */ + +/* C parser internal value representation. */ +typedef struct CPValue { + union { + int32_t i32; /* Value for CTID_INT32. */ + uint32_t u32; /* Value for CTID_UINT32. */ + }; + CTypeID id; /* C Type ID of the value. */ +} CPValue; + +/* C parser state. */ +typedef struct CPState { + CPChar c; /* Current character. */ + CPToken tok; /* Current token. */ + CPValue val; /* Token value. */ + GCstr *str; /* Interned string of identifier/keyword. */ + CType *ct; /* C type table entry. */ + const char *p; /* Current position in input buffer. */ + SBuf sb; /* String buffer for tokens. */ + lua_State *L; /* Lua state. */ + CTState *cts; /* C type state. */ + TValue *param; /* C type parameters. */ + const char *srcname; /* Current source name. 
*/ + BCLine linenumber; /* Input line counter. */ + int depth; /* Recursive declaration depth. */ + uint32_t tmask; /* Type mask for next identifier. */ + uint32_t mode; /* C parser mode. */ + uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */ + uint8_t curpack; /* Current position in pack pragma stack. */ +} CPState; + +LJ_FUNC int lj_cparse(CPState *cp); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.c new file mode 100644 index 00000000000..e200cc99423 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.c @@ -0,0 +1,1834 @@ +/* +** Trace recorder for C data operations. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_ffrecord_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT && LJ_HASFFI + +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_frame.h" +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lj_cparse.h" +#include "lj_cconv.h" +#include "lj_carith.h" +#include "lj_clib.h" +#include "lj_ccall.h" +#include "lj_ff.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_record.h" +#include "lj_ffrecord.h" +#include "lj_snap.h" +#include "lj_crecord.h" +#include "lj_dispatch.h" +#include "lj_strfmt.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +#define emitconv(a, dt, st, flags) \ + emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags)) + +/* -- C type checks ------------------------------------------------------- */ + +static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o) +{ + GCcdata *cd; + TRef trtypeid; + if (!tref_iscdata(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + cd = cdataV(o); + /* Specialize to the CTypeID. */ + trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_CTYPEID); + emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->ctypeid)); + return cd; +} + +/* Specialize to the CTypeID held by a cdata constructor. */ +static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr) +{ + CTypeID id; + lua_assert(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID); + id = *(CTypeID *)cdataptr(cd); + tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT); + emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id)); + return id; +} + +static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o) +{ + if (tref_isstr(tr)) { + GCstr *s = strV(o); + CPState cp; + CTypeID oldtop; + /* Specialize to the string containing the C type declaration. */ + emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s)); + cp.L = J->L; + cp.cts = ctype_ctsG(J2G(J)); + oldtop = cp.cts->top; + cp.srcname = strdata(s); + cp.p = strdata(s); + cp.param = NULL; + cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT; + if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); + return cp.val.id; + } else { + GCcdata *cd = argv2cdata(J, tr, o); + return cd->ctypeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) : + cd->ctypeid; + } +} + +/* Convert CType to IRType (if possible). 
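+**
+** Context sketch (Lua; illustrative only): this recorder kicks in when compiled
+** traces touch cdata, e.g. a numeric loop over an FFI array, where each element
+** access is specialized to its C type via crec_ct2irt() below:
+**
+**   local ffi = require("ffi")
+**   local a = ffi.new("double[?]", 1000)
+**   local s = 0
+**   for i = 0, 999 do s = s + a[i] end   -- double loads become IRT_NUM XLOADs
+**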
*/ +static IRType crec_ct2irt(CTState *cts, CType *ct) +{ + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (LJ_LIKELY(ctype_isnum(ct->info))) { + if ((ct->info & CTF_FP)) { + if (ct->size == sizeof(double)) + return IRT_NUM; + else if (ct->size == sizeof(float)) + return IRT_FLOAT; + } else { + uint32_t b = lj_fls(ct->size); + if (b <= 3) + return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0); + } + } else if (ctype_isptr(ct->info)) { + return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; + } else if (ctype_iscomplex(ct->info)) { + if (ct->size == 2*sizeof(double)) + return IRT_NUM; + else if (ct->size == 2*sizeof(float)) + return IRT_FLOAT; + } + return IRT_CDATA; +} + +/* -- Optimized memory fill and copy -------------------------------------- */ + +/* Maximum length and unroll of inlined copy/fill. */ +#define CREC_COPY_MAXUNROLL 16 +#define CREC_COPY_MAXLEN 128 + +#define CREC_FILL_MAXUNROLL 16 + +/* Number of windowed registers used for optimized memory copy. */ +#if LJ_TARGET_X86 +#define CREC_COPY_REGWIN 2 +#elif LJ_TARGET_PPC || LJ_TARGET_MIPS +#define CREC_COPY_REGWIN 8 +#else +#define CREC_COPY_REGWIN 4 +#endif + +/* List of memory offsets for copy/fill. */ +typedef struct CRecMemList { + CTSize ofs; /* Offset in bytes. */ + IRType tp; /* Type of load/store. */ + TRef trofs; /* TRef of interned offset. */ + TRef trval; /* TRef of load value. */ +} CRecMemList; + +/* Generate copy list for element-wise struct copy. */ +static MSize crec_copy_struct(CRecMemList *ml, CTState *cts, CType *ct) +{ + CTypeID fid = ct->sib; + MSize mlp = 0; + while (fid) { + CType *df = ctype_get(cts, fid); + fid = df->sib; + if (ctype_isfield(df->info)) { + CType *cct; + IRType tp; + if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ + cct = ctype_rawchild(cts, df); /* Field type. */ + tp = crec_ct2irt(cts, cct); + if (tp == IRT_CDATA) return 0; /* NYI: aggregates. */ + if (mlp >= CREC_COPY_MAXUNROLL) return 0; + ml[mlp].ofs = df->size; + ml[mlp].tp = tp; + mlp++; + if (ctype_iscomplex(cct->info)) { + if (mlp >= CREC_COPY_MAXUNROLL) return 0; + ml[mlp].ofs = df->size + (cct->size >> 1); + ml[mlp].tp = tp; + mlp++; + } + } else if (!ctype_isconstval(df->info)) { + /* NYI: bitfields and sub-structures. */ + return 0; + } + } + return mlp; +} + +/* Generate unrolled copy list, from highest to lowest step size/alignment. */ +static MSize crec_copy_unroll(CRecMemList *ml, CTSize len, CTSize step, + IRType tp) +{ + CTSize ofs = 0; + MSize mlp = 0; + if (tp == IRT_CDATA) tp = IRT_U8 + 2*lj_fls(step); + do { + while (ofs + step <= len) { + if (mlp >= CREC_COPY_MAXUNROLL) return 0; + ml[mlp].ofs = ofs; + ml[mlp].tp = tp; + mlp++; + ofs += step; + } + step >>= 1; + tp -= 2; + } while (ofs < len); + return mlp; +} + +/* +** Emit copy list with windowed loads/stores. +** LJ_TARGET_UNALIGNED: may emit unaligned loads/stores (not marked as such). +*/ +static void crec_copy_emit(jit_State *J, CRecMemList *ml, MSize mlp, + TRef trdst, TRef trsrc) +{ + MSize i, j, rwin = 0; + for (i = 0, j = 0; i < mlp; ) { + TRef trofs = lj_ir_kintp(J, ml[i].ofs); + TRef trsptr = emitir(IRT(IR_ADD, IRT_PTR), trsrc, trofs); + ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0); + ml[i].trofs = trofs; + i++; + rwin += (LJ_SOFTFP && ml[i].tp == IRT_NUM) ? 2 : 1; + if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. 
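+**
+** Usage sketch (Lua; illustrative only): ffi.copy()/ffi.fill() with a constant
+** length no larger than the limits above may be compiled to this unrolled form;
+** otherwise the code falls back to a memcpy/memset call:
+**
+**   local ffi = require("ffi")
+**   local src, dst = ffi.new("uint8_t[64]"), ffi.new("uint8_t[64]")
+**   ffi.copy(dst, src, 64)   -- constant length: candidate for unrolling
+**   ffi.fill(dst, 64)        -- constant-length fill, see crec_fill() below
+**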
*/ + rwin = 0; + for ( ; j < i; j++) { + TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, ml[j].trofs); + emitir(IRT(IR_XSTORE, ml[j].tp), trdptr, ml[j].trval); + } + } + } +} + +/* Optimized memory copy. */ +static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen, + CType *ct) +{ + if (tref_isk(trlen)) { /* Length must be constant. */ + CRecMemList ml[CREC_COPY_MAXUNROLL]; + MSize mlp = 0; + CTSize step = 1, len = (CTSize)IR(tref_ref(trlen))->i; + IRType tp = IRT_CDATA; + int needxbar = 0; + if (len == 0) return; /* Shortcut. */ + if (len > CREC_COPY_MAXLEN) goto fallback; + if (ct) { + CTState *cts = ctype_ctsG(J2G(J)); + lua_assert(ctype_isarray(ct->info) || ctype_isstruct(ct->info)); + if (ctype_isarray(ct->info)) { + CType *cct = ctype_rawchild(cts, ct); + tp = crec_ct2irt(cts, cct); + if (tp == IRT_CDATA) goto rawcopy; + step = lj_ir_type_size[tp]; + lua_assert((len & (step-1)) == 0); + } else if ((ct->info & CTF_UNION)) { + step = (1u << ctype_align(ct->info)); + goto rawcopy; + } else { + mlp = crec_copy_struct(ml, cts, ct); + goto emitcopy; + } + } else { + rawcopy: + needxbar = 1; + if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR) + step = CTSIZE_PTR; + } + mlp = crec_copy_unroll(ml, len, step, tp); + emitcopy: + if (mlp) { + crec_copy_emit(J, ml, mlp, trdst, trsrc); + if (needxbar) + emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); + return; + } + } +fallback: + /* Call memcpy. Always needs a barrier to disable alias analysis. */ + lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen); + emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); +} + +/* Generate unrolled fill list, from highest to lowest step size/alignment. */ +static MSize crec_fill_unroll(CRecMemList *ml, CTSize len, CTSize step) +{ + CTSize ofs = 0; + MSize mlp = 0; + IRType tp = IRT_U8 + 2*lj_fls(step); + do { + while (ofs + step <= len) { + if (mlp >= CREC_COPY_MAXUNROLL) return 0; + ml[mlp].ofs = ofs; + ml[mlp].tp = tp; + mlp++; + ofs += step; + } + step >>= 1; + tp -= 2; + } while (ofs < len); + return mlp; +} + +/* +** Emit stores for fill list. +** LJ_TARGET_UNALIGNED: may emit unaligned stores (not marked as such). +*/ +static void crec_fill_emit(jit_State *J, CRecMemList *ml, MSize mlp, + TRef trdst, TRef trfill) +{ + MSize i; + for (i = 0; i < mlp; i++) { + TRef trofs = lj_ir_kintp(J, ml[i].ofs); + TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, trofs); + emitir(IRT(IR_XSTORE, ml[i].tp), trdptr, trfill); + } +} + +/* Optimized memory fill. */ +static void crec_fill(jit_State *J, TRef trdst, TRef trlen, TRef trfill, + CTSize step) +{ + if (tref_isk(trlen)) { /* Length must be constant. */ + CRecMemList ml[CREC_FILL_MAXUNROLL]; + MSize mlp; + CTSize len = (CTSize)IR(tref_ref(trlen))->i; + if (len == 0) return; /* Shortcut. */ + if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR) + step = CTSIZE_PTR; + if (step * CREC_FILL_MAXUNROLL < len) goto fallback; + mlp = crec_fill_unroll(ml, len, step); + if (!mlp) goto fallback; + if (tref_isk(trfill) || ml[0].tp != IRT_U8) + trfill = emitconv(trfill, IRT_INT, IRT_U8, 0); + if (ml[0].tp != IRT_U8) { /* Scatter U8 to U16/U32/U64. */ + if (CTSIZE_PTR == 8 && ml[0].tp == IRT_U64) { + if (tref_isk(trfill)) /* Pointless on x64 with zero-extended regs. */ + trfill = emitconv(trfill, IRT_U64, IRT_U32, 0); + trfill = emitir(IRT(IR_MUL, IRT_U64), trfill, + lj_ir_kint64(J, U64x(01010101,01010101))); + } else { + trfill = emitir(IRTI(IR_MUL), trfill, + lj_ir_kint(J, ml[0].tp == IRT_U16 ? 
0x0101 : 0x01010101)); + } + } + crec_fill_emit(J, ml, mlp, trdst, trfill); + } else { +fallback: + /* Call memset. Always needs a barrier to disable alias analysis. */ + lj_ir_call(J, IRCALL_memset, trdst, trfill, trlen); /* Note: arg order! */ + } + emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); +} + +/* -- Convert C type to C type -------------------------------------------- */ + +/* +** This code mirrors the code in lj_cconv.c. It performs the same steps +** for the trace recorder that lj_cconv.c does for the interpreter. +** +** One major difference is that we can get away with much fewer checks +** here. E.g. checks for casts, constness or correct types can often be +** omitted, even if they might fail. The interpreter subsequently throws +** an error, which aborts the trace. +** +** All operations are specialized to their C types, so the on-trace +** outcome must be the same as the outcome in the interpreter. If the +** interpreter doesn't throw an error, then the trace is correct, too. +** Care must be taken not to generate invalid (temporary) IR or to +** trigger asserts. +*/ + +/* Determine whether a passed number or cdata number is non-zero. */ +static int crec_isnonzero(CType *s, void *p) +{ + if (p == (void *)0) + return 0; + if (p == (void *)1) + return 1; + if ((s->info & CTF_FP)) { + if (s->size == sizeof(float)) + return (*(float *)p != 0); + else + return (*(double *)p != 0); + } else { + if (s->size == 1) + return (*(uint8_t *)p != 0); + else if (s->size == 2) + return (*(uint16_t *)p != 0); + else if (s->size == 4) + return (*(uint32_t *)p != 0); + else + return (*(uint64_t *)p != 0); + } +} + +static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp, + void *svisnz) +{ + IRType dt = crec_ct2irt(ctype_ctsG(J2G(J)), d); + IRType st = crec_ct2irt(ctype_ctsG(J2G(J)), s); + CTSize dsize = d->size, ssize = s->size; + CTInfo dinfo = d->info, sinfo = s->info; + + if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) + goto err_conv; + + /* + ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and + ** numbers up to 8 bytes. Otherwise sp holds a pointer. + */ + + switch (cconv_idx2(dinfo, sinfo)) { + /* Destination is a bool. */ + case CCX(B, B): + goto xstore; /* Source operand is already normalized. */ + case CCX(B, I): + case CCX(B, F): + if (st != IRT_CDATA) { + /* Specialize to the result of a comparison against 0. */ + TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) : + (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) : + lj_ir_kint(J, 0); + int isnz = crec_isnonzero(s, svisnz); + emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero); + sp = lj_ir_kint(J, isnz); + goto xstore; + } + goto err_nyi; + + /* Destination is an integer. */ + case CCX(I, B): + case CCX(I, I): + conv_I_I: + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + /* Extend 32 to 64 bit integer. */ + if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED))) + sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, + (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT); + else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */ + sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0); + else if (st == IRT_INT) + sp = lj_opt_narrow_toint(J, sp); + xstore: + if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J); + if (dp == 0) return sp; + emitir(IRT(IR_XSTORE, dt), dp, sp); + break; + case CCX(I, C): + sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. 
*/ + /* fallthrough */ + case CCX(I, F): + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_ANY); + goto xstore; + case CCX(I, P): + case CCX(I, A): + sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + ssize = CTSIZE_PTR; + st = IRT_UINTP; + if (((dsize ^ ssize) & 8) == 0) { /* Must insert no-op type conversion. */ + sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, IRT_PTR, 0); + goto xstore; + } + goto conv_I_I; + + /* Destination is a floating-point number. */ + case CCX(F, B): + case CCX(F, I): + conv_F_I: + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, 0); + goto xstore; + case CCX(F, C): + sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */ + /* fallthrough */ + case CCX(F, F): + conv_F_F: + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + if (dt != st) sp = emitconv(sp, dt, st, 0); + goto xstore; + + /* Destination is a complex number. */ + case CCX(C, I): + case CCX(C, F): + { /* Clear im. */ + TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1))); + emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0)); + } + /* Convert to re. */ + if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I; + + case CCX(C, C): + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + { + TRef re, im, ptr; + re = emitir(IRT(IR_XLOAD, st), sp, 0); + ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1))); + im = emitir(IRT(IR_XLOAD, st), ptr, 0); + if (dt != st) { + re = emitconv(re, dt, st, 0); + im = emitconv(im, dt, st, 0); + } + emitir(IRT(IR_XSTORE, dt), dp, re); + ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1))); + emitir(IRT(IR_XSTORE, dt), ptr, im); + } + break; + + /* Destination is a vector. */ + case CCX(V, I): + case CCX(V, F): + case CCX(V, C): + case CCX(V, V): + goto err_nyi; + + /* Destination is a pointer. */ + case CCX(P, P): + case CCX(P, A): + case CCX(P, S): + /* There are only 32 bit pointers/addresses on 32 bit machines. + ** Also ok on x64, since all 32 bit ops clear the upper part of the reg. + */ + goto xstore; + case CCX(P, I): + if (st == IRT_CDATA) goto err_nyi; + if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */ + sp = emitconv(sp, IRT_U32, st, 0); + goto xstore; + case CCX(P, F): + if (st == IRT_CDATA) goto err_nyi; + /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ + sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32, + st, IRCONV_ANY); + goto xstore; + + /* Destination is an array. */ + case CCX(A, A): + /* Destination is a struct/union. */ + case CCX(S, S): + if (dp == 0) goto err_conv; + crec_copy(J, dp, sp, lj_ir_kint(J, dsize), d); + break; + + default: + err_conv: + err_nyi: + lj_trace_err(J, LJ_TRERR_NYICONV); + break; + } + return 0; +} + +/* -- Convert C type to TValue (load) ------------------------------------- */ + +static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp) +{ + CTState *cts = ctype_ctsG(J2G(J)); + IRType t = crec_ct2irt(cts, s); + CTInfo sinfo = s->info; + if (ctype_isnum(sinfo)) { + TRef tr; + if (t == IRT_CDATA) + goto err_nyi; /* NYI: copyval of >64 bit integers. */ + tr = emitir(IRT(IR_XLOAD, t), sp, 0); + if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */ + return emitconv(tr, IRT_NUM, t, 0); + } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */ + sp = tr; + lj_needsplit(J); + } else if ((sinfo & CTF_BOOL)) { + /* Assume not equal to zero. Fixup and emit pending guard later. 
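+**
+** Usage sketch (Lua; illustrative only): this is the path taken when a trace
+** reads a bool field, e.g.
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ typedef struct { bool flag; double x; } item_t; ]]
+**   local it = ffi.new("item_t", true, 3.5)
+**   if it.flag then --[[ ... ]] end   -- the load is guarded as described here
+**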
*/ + lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0)); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } else { + return tr; + } + } else if (ctype_isptr(sinfo) || ctype_isenum(sinfo)) { + sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Box pointers and enums. */ + } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) { + cts->L = J->L; + sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */ + } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */ + ptrdiff_t esz = (ptrdiff_t)(s->size >> 1); + TRef ptr, tr1, tr2, dp; + dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL); + tr1 = emitir(IRT(IR_XLOAD, t), sp, 0); + ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz)); + tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0); + ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata))); + emitir(IRT(IR_XSTORE, t), ptr, tr1); + ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz)); + emitir(IRT(IR_XSTORE, t), ptr, tr2); + return dp; + } else { + /* NYI: copyval of vectors. */ + err_nyi: + lj_trace_err(J, LJ_TRERR_NYICONV); + } + /* Box pointer, ref, enum or 64 bit integer. */ + return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp); +} + +/* -- Convert TValue to C type (store) ------------------------------------ */ + +static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTypeID sid = CTID_P_VOID; + void *svisnz = 0; + CType *s; + if (LJ_LIKELY(tref_isinteger(sp))) { + sid = CTID_INT32; + svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval)); + } else if (tref_isnum(sp)) { + sid = CTID_DOUBLE; + svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval)); + } else if (tref_isbool(sp)) { + sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0); + sid = CTID_BOOL; + } else if (tref_isnil(sp)) { + sp = lj_ir_kptr(J, NULL); + } else if (tref_isudata(sp)) { + GCudata *ud = udataV(sval); + if (ud->udtype == UDTYPE_IO_FILE) { + TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), sp, IRFL_UDATA_UDTYPE); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE)); + sp = emitir(IRT(IR_FLOAD, IRT_PTR), sp, IRFL_UDATA_FILE); + } else { + sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCudata))); + } + } else if (tref_isstr(sp)) { + if (ctype_isenum(d->info)) { /* Match string against enum constant. */ + GCstr *str = strV(sval); + CTSize ofs; + CType *cct = lj_ctype_getfield(cts, d, str, &ofs); + /* Specialize to the name of the enum constant. */ + emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str)); + if (cct && ctype_isconstval(cct->info)) { + lua_assert(ctype_child(cts, cct)->size == 4); + svisnz = (void *)(intptr_t)(ofs != 0); + sp = lj_ir_kint(J, (int32_t)ofs); + sid = ctype_cid(cct->info); + } /* else: interpreter will throw. */ + } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */ + } else { /* Otherwise pass the string data as a const char[]. */ + /* Don't use STRREF. It folds with SNEW, which loses the trailing NUL. */ + sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr))); + sid = CTID_A_CCHAR; + } + } else if (tref_islightud(sp)) { +#if LJ_64 + sp = emitir(IRT(IR_BAND, IRT_P64), sp, + lj_ir_kint64(J, U64x(00007fff,ffffffff))); +#endif + } else { /* NYI: tref_istab(sp). 
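+**
+** Usage sketch (Lua; illustrative, assumes the libc symbol is visible): the
+** string handling above is what lets a Lua string be passed directly as a
+** const char pointer:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ int puts(const char *s); ]]
+**   ffi.C.puts("hello")   -- string data passed as const char[], no copy
+**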
*/ + IRType t; + sid = argv2cdata(J, sp, sval)->ctypeid; + s = ctype_raw(cts, sid); + svisnz = cdataptr(cdataV(sval)); + if (ctype_isfunc(s->info)) { + sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR); + s = ctype_get(cts, sid); + t = IRT_PTR; + } else { + t = crec_ct2irt(cts, s); + } + if (ctype_isptr(s->info)) { + sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR); + if (ctype_isref(s->info)) { + svisnz = *(void **)svisnz; + s = ctype_rawchild(cts, s); + if (ctype_isenum(s->info)) s = ctype_child(cts, s); + t = crec_ct2irt(cts, s); + } else { + goto doconv; + } + } else if (t == IRT_I64 || t == IRT_U64) { + sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64); + lj_needsplit(J); + goto doconv; + } else if (t == IRT_INT || t == IRT_U32) { + if (ctype_isenum(s->info)) s = ctype_child(cts, s); + sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT); + goto doconv; + } else { + sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata))); + } + if (ctype_isnum(s->info) && t != IRT_CDATA) + sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Load number value. */ + goto doconv; + } + s = ctype_get(cts, sid); +doconv: + if (ctype_isenum(d->info)) d = ctype_child(cts, d); + return crec_ct_ct(J, d, s, dp, sp, svisnz); +} + +/* -- C data metamethods -------------------------------------------------- */ + +/* This would be rather difficult in FOLD, so do it here: +** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k) +** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz) +*/ +static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz) +{ + IRIns *ir = IR(tref_ref(tr)); + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) && + (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) { + IRIns *irk = IR(ir->op2); + ptrdiff_t k; + if (LJ_64 && irk->o == IR_KINT64) + k = (ptrdiff_t)ir_kint64(irk)->u64 * sz; + else + k = (ptrdiff_t)irk->i * sz; + if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k; + tr = ir->op1; /* Not a TRef, but the caller doesn't care. */ + } + return tr; +} + +/* Record ctype __index/__newindex metamethods. */ +static void crec_index_meta(jit_State *J, CTState *cts, CType *ct, + RecordFFData *rd) +{ + CTypeID id = ctype_typeid(cts, ct); + cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index); + if (!tv) + lj_trace_err(J, LJ_TRERR_BADTYPE); + if (tvisfunc(tv)) { + J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; + rd->nres = -1; /* Pending tailcall. */ + } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) { + /* Specialize to result of __index lookup. */ + cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]); + J->base[0] = lj_record_constify(J, o); + if (!J->base[0]) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* Always specialize to the key. */ + emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1]))); + } else { + /* NYI: resolving of non-function metamethods. */ + /* NYI: non-string keys for __index table. */ + /* NYI: stores to __newindex table. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); + } +} + +void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd) +{ + TRef idx, ptr = J->base[0]; + ptrdiff_t ofs = sizeof(GCcdata); + GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]); + CTState *cts = ctype_ctsG(J2G(J)); + CType *ct = ctype_raw(cts, cd->ctypeid); + CTypeID sid = 0; + + /* Resolve pointer or reference for cdata object. */ + if (ctype_isptr(ct->info)) { + IRType t = (LJ_64 && ct->size == 8) ? 
IRT_P64 : IRT_P32; + if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); + ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR); + ofs = 0; + ptr = crec_reassoc_ofs(J, ptr, &ofs, 1); + } + +again: + idx = J->base[1]; + if (tref_isnumber(idx)) { + idx = lj_opt_narrow_cindex(J, idx); + if (ctype_ispointer(ct->info)) { + CTSize sz; + integer_key: + if ((ct->info & CTF_COMPLEX)) + idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1)); + sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info))); + idx = crec_reassoc_ofs(J, idx, &ofs, sz); +#if LJ_TARGET_ARM || LJ_TARGET_PPC + /* Hoist base add to allow fusion of index/shift into operands. */ + if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs +#if LJ_TARGET_ARM + && (sz == 1 || sz == 4) +#endif + ) { + ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs)); + ofs = 0; + } +#endif + idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz)); + ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr); + } + } else if (tref_iscdata(idx)) { + GCcdata *cdk = cdataV(&rd->argv[1]); + CType *ctk = ctype_raw(cts, cdk->ctypeid); + IRType t = crec_ct2irt(cts, ctk); + if (ctype_ispointer(ct->info) && t >= IRT_I8 && t <= IRT_U64) { + if (ctk->size == 8) { + idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64); + } else if (ctk->size == 4) { + idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT); + } else { + idx = emitir(IRT(IR_ADD, IRT_PTR), idx, + lj_ir_kintp(J, sizeof(GCcdata))); + idx = emitir(IRT(IR_XLOAD, t), idx, 0); + } + if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED)) + idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT); + if (!LJ_64 && ctk->size > sizeof(intptr_t)) { + idx = emitconv(idx, IRT_INTP, t, 0); + lj_needsplit(J); + } + goto integer_key; + } + } else if (tref_isstr(idx)) { + GCstr *name = strV(&rd->argv[1]); + if (cd && cd->ctypeid == CTID_CTYPEID) + ct = ctype_raw(cts, crec_constructor(J, cd, ptr)); + if (ctype_isstruct(ct->info)) { + CTSize fofs; + CType *fct; + fct = lj_ctype_getfield(cts, ct, name, &fofs); + if (fct) { + /* Always specialize to the field name. */ + emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); + if (ctype_isconstval(fct->info)) { + if (fct->size >= 0x80000000u && + (ctype_child(cts, fct)->info & CTF_UNSIGNED)) { + J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size); + return; + } + J->base[0] = lj_ir_kint(J, (int32_t)fct->size); + return; /* Interpreter will throw for newindex. */ + } else if (ctype_isbitfield(fct->info)) { + lj_trace_err(J, LJ_TRERR_NYICONV); + } else { + lua_assert(ctype_isfield(fct->info)); + sid = ctype_cid(fct->info); + } + ofs += (ptrdiff_t)fofs; + } + } else if (ctype_iscomplex(ct->info)) { + if (name->len == 2 && + ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') || + (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) { + /* Always specialize to the field name. */ + emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); + if (strdata(name)[0] == 'i') ofs += (ct->size >> 1); + sid = ctype_cid(ct->info); + } + } + } + if (!sid) { + if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */ + CType *cct = ctype_rawchild(cts, ct); + if (ctype_isstruct(cct->info)) { + ct = cct; + cd = NULL; + if (tref_isstr(idx)) goto again; + } + } + crec_index_meta(J, cts, ct, rd); + return; + } + + if (ofs) + ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs)); + + /* Resolve reference for field. 
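+**
+** Usage sketch (Lua; illustrative only) for the indexing cases above:
+**
+**   local ffi = require("ffi")
+**   local z = ffi.new("complex", 1, 2)   -- complex double
+**   print(z.re, z.im)                    --> 1  2  ('re'/'im' case above)
+**   -- indexing a struct pointer performs '->' automatically ("again:" path)
+**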
*/ + ct = ctype_get(cts, sid); + if (ctype_isref(ct->info)) { + ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0); + sid = ctype_cid(ct->info); + ct = ctype_get(cts, sid); + } + + while (ctype_isattrib(ct->info)) + ct = ctype_child(cts, ct); /* Skip attributes. */ + + if (rd->data == 0) { /* __index metamethod. */ + J->base[0] = crec_tv_ct(J, ct, sid, ptr); + } else { /* __newindex metamethod. */ + rd->nres = 0; + J->needsnap = 1; + crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]); + } +} + +/* Record setting a finalizer. */ +static void crec_finalizer(jit_State *J, TRef trcd, TRef trfin, cTValue *fin) +{ + if (tvisgcv(fin)) { + if (!trfin) trfin = lj_ir_kptr(J, gcval(fin)); + } else if (tvisnil(fin)) { + trfin = lj_ir_kptr(J, NULL); + } else { + lj_trace_err(J, LJ_TRERR_BADTYPE); + } + lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd, + trfin, lj_ir_kint(J, (int32_t)itype(fin))); + J->needsnap = 1; +} + +/* Record cdata allocation. */ +static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + CType *d = ctype_raw(cts, id); + TRef trcd, trid = lj_ir_kint(J, id); + cTValue *fin; + /* Use special instruction to box pointer or 32/64 bit integer. */ + if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) { + TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) : + ctype_isptr(info) ? lj_ir_kptr(J, NULL) : + sz == 4 ? lj_ir_kint(J, 0) : + (lj_needsplit(J), lj_ir_kint64(J, 0)); + J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp); + return; + } else { + TRef trsz = TREF_NIL; + if ((info & CTF_VLA)) { /* Calculate VLA/VLS size at runtime. */ + CTSize sz0, sz1; + if (!J->base[1] || J->base[2]) + lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init VLA/VLS. */ + trsz = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, + J->base[1], &rd->argv[1]); + sz0 = lj_ctype_vlsize(cts, d, 0); + sz1 = lj_ctype_vlsize(cts, d, 1); + trsz = emitir(IRTGI(IR_MULOV), trsz, lj_ir_kint(J, (int32_t)(sz1-sz0))); + trsz = emitir(IRTGI(IR_ADDOV), trsz, lj_ir_kint(J, (int32_t)sz0)); + J->base[1] = 0; /* Simplify logic below. */ + } else if (ctype_align(info) > CT_MEMALIGN) { + trsz = lj_ir_kint(J, sz); + } + trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, trsz); + if (sz > 128 || (info & CTF_VLA)) { + TRef dp; + CTSize align; + special: /* Only handle bulk zero-fill for large/VLA/VLS types. */ + if (J->base[1]) + lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init large/VLA/VLS types. */ + dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata))); + if (trsz == TREF_NIL) trsz = lj_ir_kint(J, sz); + align = ctype_align(info); + if (align < CT_MEMALIGN) align = CT_MEMALIGN; + crec_fill(J, dp, trsz, lj_ir_kint(J, 0), (1u << align)); + } else if (J->base[1] && !J->base[2] && + !lj_cconv_multi_init(cts, d, &rd->argv[1])) { + goto single_init; + } else if (ctype_isarray(d->info)) { + CType *dc = ctype_rawchild(cts, d); /* Array element type. */ + CTSize ofs, esize = dc->size; + TRef sp = 0; + TValue tv; + TValue *sval = &tv; + MSize i; + tv.u64 = 0; + if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)) || + esize * CREC_FILL_MAXUNROLL < sz) + goto special; + for (i = 1, ofs = 0; ofs < sz; ofs += esize) { + TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, + lj_ir_kintp(J, ofs + sizeof(GCcdata))); + if (J->base[i]) { + sp = J->base[i]; + sval = &rd->argv[i]; + i++; + } else if (i != 2) { + sp = ctype_isnum(dc->info) ? 
lj_ir_kint(J, 0) : TREF_NIL; + } + crec_ct_tv(J, dc, dp, sp, sval); + } + } else if (ctype_isstruct(d->info)) { + CTypeID fid = d->sib; + MSize i = 1; + while (fid) { + CType *df = ctype_get(cts, fid); + fid = df->sib; + if (ctype_isfield(df->info)) { + CType *dc; + TRef sp, dp; + TValue tv; + TValue *sval = &tv; + setintV(&tv, 0); + if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ + dc = ctype_rawchild(cts, df); /* Field type. */ + if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info) || + ctype_isenum(dc->info))) + lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */ + if (J->base[i]) { + sp = J->base[i]; + sval = &rd->argv[i]; + i++; + } else { + sp = ctype_isptr(dc->info) ? TREF_NIL : lj_ir_kint(J, 0); + } + dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, + lj_ir_kintp(J, df->size + sizeof(GCcdata))); + crec_ct_tv(J, dc, dp, sp, sval); + } else if (!ctype_isconstval(df->info)) { + /* NYI: init bitfields and sub-structures. */ + lj_trace_err(J, LJ_TRERR_NYICONV); + } + } + } else { + TRef dp; + single_init: + dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata))); + if (J->base[1]) { + crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]); + } else { + TValue tv; + tv.u64 = 0; + crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv); + } + } + } + J->base[0] = trcd; + /* Handle __gc metamethod. */ + fin = lj_ctype_meta(cts, id, MM_gc); + if (fin) + crec_finalizer(J, trcd, 0, fin); +} + +/* Record argument conversions. */ +static TRef crec_call_args(jit_State *J, RecordFFData *rd, + CTState *cts, CType *ct) +{ + TRef args[CCI_NARGS_MAX]; + CTypeID fid; + MSize i, n; + TRef tr, *base; + cTValue *o; +#if LJ_TARGET_X86 +#if LJ_ABI_WIN + TRef *arg0 = NULL, *arg1 = NULL; +#endif + int ngpr = 0; + if (ctype_cconv(ct->info) == CTCC_THISCALL) + ngpr = 1; + else if (ctype_cconv(ct->info) == CTCC_FASTCALL) + ngpr = 2; +#endif + + /* Skip initial attributes. */ + fid = ct->sib; + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (!ctype_isattrib(ctf->info)) break; + fid = ctf->sib; + } + args[0] = TREF_NIL; + for (n = 0, base = J->base+1, o = rd->argv+1; *base; n++, base++, o++) { + CTypeID did; + CType *d; + + if (n >= CCI_NARGS_MAX) + lj_trace_err(J, LJ_TRERR_NYICALL); + + if (fid) { /* Get argument type from field. */ + CType *ctf = ctype_get(cts, fid); + fid = ctf->sib; + lua_assert(ctype_isfield(ctf->info)); + did = ctype_cid(ctf->info); + } else { + if (!(ct->info & CTF_VARARG)) + lj_trace_err(J, LJ_TRERR_NYICALL); /* Too many arguments. */ + did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */ + } + d = ctype_raw(cts, did); + if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || + ctype_isenum(d->info))) + lj_trace_err(J, LJ_TRERR_NYICALL); + tr = crec_ct_tv(J, d, 0, *base, o); + if (ctype_isinteger_or_bool(d->info)) { + if (d->size < 4) { + if ((d->info & CTF_UNSIGNED)) + tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_U8 : IRT_U16, 0); + else + tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT); + } + } else if (LJ_SOFTFP && ctype_isfp(d->info) && d->size > 4) { + lj_needsplit(J); + } +#if LJ_TARGET_X86 + /* 64 bit args must not end up in registers for fastcall/thiscall. */ +#if LJ_ABI_WIN + if (!ctype_isfp(d->info)) { + /* Sigh, the Windows/x86 ABI allows reordering across 64 bit args. 
*/ + if (tref_typerange(tr, IRT_I64, IRT_U64)) { + if (ngpr) { + arg0 = &args[n]; args[n++] = TREF_NIL; ngpr--; + if (ngpr) { + arg1 = &args[n]; args[n++] = TREF_NIL; ngpr--; + } + } + } else { + if (arg0) { *arg0 = tr; arg0 = NULL; n--; continue; } + if (arg1) { *arg1 = tr; arg1 = NULL; n--; continue; } + if (ngpr) ngpr--; + } + } +#else + if (!ctype_isfp(d->info) && ngpr) { + if (tref_typerange(tr, IRT_I64, IRT_U64)) { + /* No reordering for other x86 ABIs. Simply add alignment args. */ + do { args[n++] = TREF_NIL; } while (--ngpr); + } else { + ngpr--; + } + } +#endif +#endif + args[n] = tr; + } + tr = args[0]; + for (i = 1; i < n; i++) + tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]); + return tr; +} + +/* Create a snapshot for the caller, simulating a 'false' return value. */ +static void crec_snap_caller(jit_State *J) +{ + lua_State *L = J->L; + TValue *base = L->base, *top = L->top; + const BCIns *pc = J->pc; + TRef ftr = J->base[-1]; + ptrdiff_t delta; + if (!frame_islua(base-1) || J->framedepth <= 0) + lj_trace_err(J, LJ_TRERR_NYICALL); + J->pc = frame_pc(base-1); delta = 1+LJ_FR2+bc_a(J->pc[-1]); + L->top = base; L->base = base - delta; + J->base[-1] = TREF_FALSE; + J->base -= delta; J->baseslot -= (BCReg)delta; + J->maxslot = (BCReg)delta; J->framedepth--; + lj_snap_add(J); + L->base = base; L->top = top; + J->framedepth++; J->maxslot = 1; + J->base += delta; J->baseslot += (BCReg)delta; + J->base[-1] = ftr; J->pc = pc; +} + +/* Record function call. */ +static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CType *ct = ctype_raw(cts, cd->ctypeid); + IRType tp = IRT_PTR; + if (ctype_isptr(ct->info)) { + tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; + ct = ctype_rawchild(cts, ct); + } + if (ctype_isfunc(ct->info)) { + TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR); + CType *ctr = ctype_rawchild(cts, ct); + IRType t = crec_ct2irt(cts, ctr); + TRef tr; + TValue tv; + /* Check for blacklisted C functions that might call a callback. */ + setlightudV(&tv, + cdata_getptr(cdataptr(cd), (LJ_64 && tp == IRT_P64) ? 8 : 4)); + if (tvistrue(lj_tab_get(J->L, cts->miscmap, &tv))) + lj_trace_err(J, LJ_TRERR_BLACKL); + if (ctype_isvoid(ctr->info)) { + t = IRT_NIL; + rd->nres = 0; + } else if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) || + ctype_isenum(ctr->info)) || t == IRT_CDATA) { + lj_trace_err(J, LJ_TRERR_NYICALL); + } + if ((ct->info & CTF_VARARG) +#if LJ_TARGET_X86 + || ctype_cconv(ct->info) != CTCC_CDECL +#endif + ) + func = emitir(IRT(IR_CARG, IRT_NIL), func, + lj_ir_kint(J, ctype_typeid(cts, ct))); + tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func); + if (ctype_isbool(ctr->info)) { + if (frame_islua(J->L->base-1) && bc_b(frame_pc(J->L->base-1)[-1]) == 1) { + /* Don't check result if ignored. */ + tr = TREF_NIL; + } else { + crec_snap_caller(J); +#if LJ_TARGET_X86ORX64 + /* Note: only the x86/x64 backend supports U8 and only for EQ(tr, 0). 
*/ + lj_ir_set(J, IRTG(IR_NE, IRT_U8), tr, lj_ir_kint(J, 0)); +#else + lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0)); +#endif + J->postproc = LJ_POST_FIXGUARDSNAP; + tr = TREF_TRUE; + } + } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) || + t == IRT_I64 || t == IRT_U64 || ctype_isenum(ctr->info)) { + TRef trid = lj_ir_kint(J, ctype_cid(ct->info)); + tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr); + if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); + } else if (t == IRT_FLOAT || t == IRT_U32) { + tr = emitconv(tr, IRT_NUM, t, 0); + } else if (t == IRT_I8 || t == IRT_I16) { + tr = emitconv(tr, IRT_INT, t, IRCONV_SEXT); + } else if (t == IRT_U8 || t == IRT_U16) { + tr = emitconv(tr, IRT_INT, t, 0); + } + J->base[0] = tr; + J->needsnap = 1; + return 1; + } + return 0; +} + +void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]); + CTypeID id = cd->ctypeid; + CType *ct; + cTValue *tv; + MMS mm = MM_call; + if (id == CTID_CTYPEID) { + id = crec_constructor(J, cd, J->base[0]); + mm = MM_new; + } else if (crec_call(J, rd, cd)) { + return; + } + /* Record ctype __call/__new metamethod. */ + ct = ctype_raw(cts, id); + tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm); + if (tv) { + if (tvisfunc(tv)) { + J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; + rd->nres = -1; /* Pending tailcall. */ + return; + } + } else if (mm == MM_new) { + crec_alloc(J, rd, id); + return; + } + /* No metamethod or NYI: non-function metamethods. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); +} + +static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm) +{ + if (ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) { + IRType dt; + CTypeID id; + TRef tr; + MSize i; + IROp op; + lj_needsplit(J); + if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) || + ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) { + dt = IRT_U64; id = CTID_UINT64; + } else { + dt = IRT_I64; id = CTID_INT64; + if (mm < MM_add && + !((s[0]->info | s[1]->info) & CTF_FP) && + s[0]->size == 4 && s[1]->size == 4) { /* Try to narrow comparison. */ + if (!((s[0]->info ^ s[1]->info) & CTF_UNSIGNED) || + (tref_isk(sp[1]) && IR(tref_ref(sp[1]))->i >= 0)) { + dt = (s[0]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT; + goto comp; + } else if (tref_isk(sp[0]) && IR(tref_ref(sp[0]))->i >= 0) { + dt = (s[1]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT; + goto comp; + } + } + } + for (i = 0; i < 2; i++) { + IRType st = tref_type(sp[i]); + if (st == IRT_NUM || st == IRT_FLOAT) + sp[i] = emitconv(sp[i], dt, st, IRCONV_ANY); + else if (!(st == IRT_I64 || st == IRT_U64)) + sp[i] = emitconv(sp[i], dt, IRT_INT, + (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT); + } + if (mm < MM_add) { + comp: + /* Assume true comparison. Fixup and emit pending guard later. */ + if (mm == MM_eq) { + op = IR_EQ; + } else { + op = mm == MM_lt ? 
IR_LT : IR_LE; + if (dt == IRT_U32 || dt == IRT_U64) + op += (IR_ULT-IR_LT); + } + lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } else { + tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]); + } + return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + } + return 0; +} + +static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CType *ctp = s[0]; + if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) { + if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) && + (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) { + if (mm == MM_sub) { /* Pointer difference. */ + TRef tr; + CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info)); + if (sz == 0 || (sz & (sz-1)) != 0) + return 0; /* NYI: integer division. */ + tr = emitir(IRT(IR_SUB, IRT_INTP), sp[0], sp[1]); + tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz))); +#if LJ_64 + tr = emitconv(tr, IRT_NUM, IRT_INTP, 0); +#endif + return tr; + } else { /* Pointer comparison (unsigned). */ + /* Assume true comparison. Fixup and emit pending guard later. */ + IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE; + lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } + } + if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info))) + return 0; + } else if (mm == MM_add && ctype_isnum(ctp->info) && + (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) { + TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */ + ctp = s[1]; + } else { + return 0; + } + { + TRef tr = sp[1]; + IRType t = tref_type(tr); + CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info)); + CTypeID id; +#if LJ_64 + if (t == IRT_NUM || t == IRT_FLOAT) + tr = emitconv(tr, IRT_INTP, t, IRCONV_ANY); + else if (!(t == IRT_I64 || t == IRT_U64)) + tr = emitconv(tr, IRT_INTP, IRT_INT, + ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT); +#else + if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) { + tr = emitconv(tr, IRT_INTP, t, + (t == IRT_NUM || t == IRT_FLOAT) ? IRCONV_ANY : 0); + } +#endif + tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz)); + tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr); + id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)), + CTSIZE_PTR); + return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + } +} + +/* Record ctype arithmetic metamethods. */ +static TRef crec_arith_meta(jit_State *J, TRef *sp, CType **s, CTState *cts, + RecordFFData *rd) +{ + cTValue *tv = NULL; + if (J->base[0]) { + if (tviscdata(&rd->argv[0])) { + CTypeID id = argv2cdata(J, J->base[0], &rd->argv[0])->ctypeid; + CType *ct = ctype_raw(cts, id); + if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); + tv = lj_ctype_meta(cts, id, (MMS)rd->data); + } + if (!tv && J->base[1] && tviscdata(&rd->argv[1])) { + CTypeID id = argv2cdata(J, J->base[1], &rd->argv[1])->ctypeid; + CType *ct = ctype_raw(cts, id); + if (ctype_isptr(ct->info)) id = ctype_cid(ct->info); + tv = lj_ctype_meta(cts, id, (MMS)rd->data); + } + } + if (tv) { + if (tvisfunc(tv)) { + J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; + rd->nres = -1; /* Pending tailcall. */ + return 0; + } /* NYI: non-function metamethods. */ + } else if ((MMS)rd->data == MM_eq) { /* Fallback cdata pointer comparison. */ + if (sp[0] && sp[1] && ctype_isnum(s[0]->info) == ctype_isnum(s[1]->info)) { + /* Assume true comparison. 
Fixup and emit pending guard later. */ + lj_ir_set(J, IRTG(IR_EQ, IRT_PTR), sp[0], sp[1]); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } else { + return TREF_FALSE; + } + } + lj_trace_err(J, LJ_TRERR_BADTYPE); + return 0; +} + +void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef sp[2]; + CType *s[2]; + MSize i; + for (i = 0; i < 2; i++) { + TRef tr = J->base[i]; + CType *ct = ctype_get(cts, CTID_DOUBLE); + if (!tr) { + lj_trace_err(J, LJ_TRERR_BADTYPE); + } else if (tref_iscdata(tr)) { + CTypeID id = argv2cdata(J, tr, &rd->argv[i])->ctypeid; + IRType t; + ct = ctype_raw(cts, id); + t = crec_ct2irt(cts, ct); + if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */ + tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR); + if (ctype_isref(ct->info)) { + ct = ctype_rawchild(cts, ct); + t = crec_ct2irt(cts, ct); + } + } else if (t == IRT_I64 || t == IRT_U64) { + tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64); + lj_needsplit(J); + goto ok; + } else if (t == IRT_INT || t == IRT_U32) { + tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT); + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + goto ok; + } else if (ctype_isfunc(ct->info)) { + tr = emitir(IRT(IR_FLOAD, IRT_PTR), tr, IRFL_CDATA_PTR); + ct = ctype_get(cts, + lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR)); + goto ok; + } else { + tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata))); + } + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (ctype_isnum(ct->info)) { + if (t == IRT_CDATA) { + tr = 0; + } else { + if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); + tr = emitir(IRT(IR_XLOAD, t), tr, 0); + } + } + } else if (tref_isnil(tr)) { + tr = lj_ir_kptr(J, NULL); + ct = ctype_get(cts, CTID_P_VOID); + } else if (tref_isinteger(tr)) { + ct = ctype_get(cts, CTID_INT32); + } else if (tref_isstr(tr)) { + TRef tr2 = J->base[1-i]; + CTypeID id = argv2cdata(J, tr2, &rd->argv[1-i])->ctypeid; + ct = ctype_raw(cts, id); + if (ctype_isenum(ct->info)) { /* Match string against enum constant. */ + GCstr *str = strV(&rd->argv[i]); + CTSize ofs; + CType *cct = lj_ctype_getfield(cts, ct, str, &ofs); + if (cct && ctype_isconstval(cct->info)) { + /* Specialize to the name of the enum constant. */ + emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, str)); + ct = ctype_child(cts, cct); + tr = lj_ir_kint(J, (int32_t)ofs); + } else { /* Interpreter will throw or return false. */ + ct = ctype_get(cts, CTID_P_VOID); + } + } else if (ctype_isptr(ct->info)) { + tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCstr))); + } else { + ct = ctype_get(cts, CTID_P_VOID); + } + } else if (!tref_isnum(tr)) { + tr = 0; + ct = ctype_get(cts, CTID_P_VOID); + } + ok: + s[i] = ct; + sp[i] = tr; + } + { + TRef tr; + if (!(tr = crec_arith_int64(J, sp, s, (MMS)rd->data)) && + !(tr = crec_arith_ptr(J, sp, s, (MMS)rd->data)) && + !(tr = crec_arith_meta(J, sp, s, cts, rd))) + return; + J->base[0] = tr; + /* Fixup cdata comparisons, too. Avoids some cdata escapes. 
*/ + if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1) && + !irt_isguard(J->guardemit)) { + const BCIns *pc = frame_contpc(J->L->base-1) - 1; + if (bc_op(*pc) <= BC_ISNEP) { + J2G(J)->tmptv.u64 = (uint64_t)(uintptr_t)pc; + J->postproc = LJ_POST_FIXCOMP; + } + } + } +} + +/* -- C library namespace metamethods ------------------------------------- */ + +void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) && + udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) { + CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0])); + GCstr *name = strV(&rd->argv[1]); + CType *ct; + CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX); + cTValue *tv = lj_tab_getstr(cl->cache, name); + rd->nres = rd->data; + if (id && tv && !tvisnil(tv)) { + /* Specialize to the symbol name and make the result a constant. */ + emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name)); + if (ctype_isconstval(ct->info)) { + if (ct->size >= 0x80000000u && + (ctype_child(cts, ct)->info & CTF_UNSIGNED)) + J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size); + else + J->base[0] = lj_ir_kint(J, (int32_t)ct->size); + } else if (ctype_isextern(ct->info)) { + CTypeID sid = ctype_cid(ct->info); + void *sp = *(void **)cdataptr(cdataV(tv)); + TRef ptr; + ct = ctype_raw(cts, sid); + if (LJ_64 && !checkptr32(sp)) + ptr = lj_ir_kintp(J, (uintptr_t)sp); + else + ptr = lj_ir_kptr(J, sp); + if (rd->data) { + J->base[0] = crec_tv_ct(J, ct, sid, ptr); + } else { + J->needsnap = 1; + crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]); + } + } else { + J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA); + } + } else { + lj_trace_err(J, LJ_TRERR_NOCACHE); + } + } /* else: interpreter will throw. */ +} + +/* -- FFI library functions ----------------------------------------------- */ + +static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval) +{ + return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval); +} + +void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd) +{ + crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0])); +} + +void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd) +{ + UNUSED(rd); + if (J->base[0]) + lj_trace_err(J, LJ_TRERR_NYICALL); + J->base[0] = lj_ir_call(J, IRCALL_lj_vm_errno); +} + +void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef tr = J->base[0]; + if (tr) { + TRef trlen = J->base[1]; + if (!tref_isnil(trlen)) { + trlen = crec_toint(J, cts, trlen, &rd->argv[1]); + tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]); + } else { + tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]); + trlen = lj_ir_call(J, IRCALL_strlen, tr); + } + J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen); + } /* else: interpreter will throw. 
*/ +} + +void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2]; + if (trdst && trsrc && (trlen || tref_isstr(trsrc))) { + trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]); + trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]); + if (trlen) { + trlen = crec_toint(J, cts, trlen, &rd->argv[2]); + } else { + trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN); + trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); + } + rd->nres = 0; + crec_copy(J, trdst, trsrc, trlen, NULL); + } /* else: interpreter will throw. */ +} + +void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef trdst = J->base[0], trlen = J->base[1], trfill = J->base[2]; + if (trdst && trlen) { + CTSize step = 1; + if (tviscdata(&rd->argv[0])) { /* Get alignment of original destination. */ + CTSize sz; + CType *ct = ctype_raw(cts, cdataV(&rd->argv[0])->ctypeid); + if (ctype_isptr(ct->info)) + ct = ctype_rawchild(cts, ct); + step = (1u<<ctype_align(lj_ctype_info(cts, ctype_typeid(cts, ct), &sz))); + } + trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]); + trlen = crec_toint(J, cts, trlen, &rd->argv[1]); + if (trfill) + trfill = crec_toint(J, cts, trfill, &rd->argv[2]); + else + trfill = lj_ir_kint(J, 0); + rd->nres = 0; + crec_fill(J, trdst, trlen, trfill, step); + } /* else: interpreter will throw. */ +} + +void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd) +{ + if (tref_iscdata(J->base[0])) { + TRef trid = lj_ir_kint(J, argv2ctype(J, J->base[0], &rd->argv[0])); + J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), + lj_ir_kint(J, CTID_CTYPEID), trid); + } else { + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYIFFU); + } +} + +void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd) +{ + argv2ctype(J, J->base[0], &rd->argv[0]); + if (tref_iscdata(J->base[1])) { + argv2ctype(J, J->base[1], &rd->argv[1]); + J->postproc = LJ_POST_FIXBOOL; + J->base[0] = TREF_TRUE; + } else { + J->base[0] = TREF_FALSE; + } +} + +void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd) +{ + if (tref_isstr(J->base[0])) { + /* Specialize to the ABI string to make the boolean result a constant. */ + emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0]))); + J->postproc = LJ_POST_FIXBOOL; + J->base[0] = TREF_TRUE; + } else { + lj_trace_err(J, LJ_TRERR_BADTYPE); + } +} + +/* Record ffi.sizeof(), ffi.alignof(), ffi.offsetof(). */ +void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd) +{ + CTypeID id = argv2ctype(J, J->base[0], &rd->argv[0]); + if (rd->data == FF_ffi_sizeof) { + CType *ct = lj_ctype_rawref(ctype_ctsG(J2G(J)), id); + if (ctype_isvltype(ct->info)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + } else if (rd->data == FF_ffi_offsetof) { /* Specialize to the field name. */ + if (!tref_isstr(J->base[1])) + lj_trace_err(J, LJ_TRERR_BADTYPE); + emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1]))); + rd->nres = 3; /* Just in case. */ + } + J->postproc = LJ_POST_FIXCONST; + J->base[0] = J->base[1] = J->base[2] = TREF_NIL; +} + +void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd) +{ + argv2cdata(J, J->base[0], &rd->argv[0]); + if (!J->base[1]) + lj_trace_err(J, LJ_TRERR_BADTYPE); + crec_finalizer(J, J->base[0], J->base[1], &rd->argv[1]); +} + +/* -- 64 bit bit.* library functions -------------------------------------- */ + +/* Determine bit operation type from argument type. 
*/ +static CTypeID crec_bit64_type(CTState *cts, cTValue *tv) +{ + if (tviscdata(tv)) { + CType *ct = lj_ctype_rawref(cts, cdataV(tv)->ctypeid); + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if ((ct->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) == + CTINFO(CT_NUM, CTF_UNSIGNED) && ct->size == 8) + return CTID_UINT64; /* Use uint64_t, since it has the highest rank. */ + return CTID_INT64; /* Otherwise use int64_t. */ + } + return 0; /* Use regular 32 bit ops. */ +} + +void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef tr = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0, + J->base[0], &rd->argv[0]); + if (!tref_isinteger(tr)) + tr = emitconv(tr, IRT_INT, tref_type(tr), 0); + J->base[0] = tr; +} + +int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTypeID id = crec_bit64_type(cts, &rd->argv[0]); + if (id) { + TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]); + tr = emitir(IRT(rd->data, id-CTID_INT64+IRT_I64), tr, 0); + J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + return 1; + } + return 0; +} + +int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTypeID id = 0; + MSize i; + for (i = 0; J->base[i] != 0; i++) { + CTypeID aid = crec_bit64_type(cts, &rd->argv[i]); + if (id < aid) id = aid; /* Determine highest type rank of all arguments. */ + } + if (id) { + CType *ct = ctype_get(cts, id); + uint32_t ot = IRT(rd->data, id-CTID_INT64+IRT_I64); + TRef tr = crec_ct_tv(J, ct, 0, J->base[0], &rd->argv[0]); + for (i = 1; J->base[i] != 0; i++) { + TRef tr2 = crec_ct_tv(J, ct, 0, J->base[i], &rd->argv[i]); + tr = emitir(ot, tr, tr2); + } + J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + return 1; + } + return 0; +} + +int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTypeID id; + TRef tsh = 0; + if (J->base[0] && tref_iscdata(J->base[1])) { + tsh = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0, + J->base[1], &rd->argv[1]); + if (!tref_isinteger(tsh)) + tsh = emitconv(tsh, IRT_INT, tref_type(tsh), 0); + J->base[1] = tsh; + } + id = crec_bit64_type(cts, &rd->argv[0]); + if (id) { + TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]); + uint32_t op = rd->data; + if (!tsh) tsh = lj_opt_narrow_tobit(J, J->base[1]); + if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && + !tref_isk(tsh)) + tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 63)); +#ifdef LJ_TARGET_UNIFYROT + if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) { + op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR; + tsh = emitir(IRTI(IR_NEG), tsh, tsh); + } +#endif + tr = emitir(IRT(op, id-CTID_INT64+IRT_I64), tr, tsh); + J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + return 1; + } + return 0; +} + +TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTypeID id = crec_bit64_type(cts, &rd->argv[0]); + TRef tr, trsf = J->base[1]; + SFormat sf = (STRFMT_UINT|STRFMT_T_HEX); + int32_t n; + if (trsf) { + CTypeID id2 = 0; + n = (int32_t)lj_carith_check64(J->L, 2, &id2); + if (id2) + trsf = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, trsf, &rd->argv[1]); + else + trsf = lj_opt_narrow_tobit(J, trsf); + emitir(IRTGI(IR_EQ), trsf, lj_ir_kint(J, n)); /* Specialize to n. */ + } else { + n = id ? 
16 : 8; + } + if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; } + sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC); + if (id) { + tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]); + if (n < 16) + tr = emitir(IRT(IR_BAND, IRT_U64), tr, + lj_ir_kint64(J, ((uint64_t)1 << 4*n)-1)); + } else { + tr = lj_opt_narrow_tobit(J, J->base[0]); + if (n < 8) + tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << 4*n)-1))); + tr = emitconv(tr, IRT_U64, IRT_INT, 0); /* No sign-extension. */ + lj_needsplit(J); + } + return lj_ir_call(J, IRCALL_lj_strfmt_putfxint, hdr, lj_ir_kint(J, sf), tr); +} + +/* -- Miscellaneous library functions ------------------------------------- */ + +void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->ctypeid); + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { + if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 && + !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) + d = ctype_get(cts, CTID_INT32); + else + d = ctype_get(cts, CTID_DOUBLE); + J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]); + } else { + J->base[0] = TREF_NIL; + } +} + +#undef IR +#undef emitir +#undef emitconv + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.h new file mode 100644 index 00000000000..59f342a1d9a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_crecord.h @@ -0,0 +1,38 @@ +/* +** Trace recorder for C data operations. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CRECORD_H +#define _LJ_CRECORD_H + +#include "lj_obj.h" +#include "lj_jit.h" +#include "lj_ffrecord.h" + +#if LJ_HASJIT && LJ_HASFFI +LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd); + +LJ_FUNC void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd); +LJ_FUNC int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd); +LJ_FUNC int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd); +LJ_FUNC int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd); +LJ_FUNC TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr); + +LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.c new file mode 100644 index 
00000000000..2e23c994bb0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.c @@ -0,0 +1,635 @@ +/* +** C type management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_strfmt.h" +#include "lj_ctype.h" +#include "lj_ccallback.h" + +/* -- C type definitions -------------------------------------------------- */ + +/* Predefined typedefs. */ +#define CTTDDEF(_) \ + /* Vararg handling. */ \ + _("va_list", P_VOID) \ + _("__builtin_va_list", P_VOID) \ + _("__gnuc_va_list", P_VOID) \ + /* From stddef.h. */ \ + _("ptrdiff_t", INT_PSZ) \ + _("size_t", UINT_PSZ) \ + _("wchar_t", WCHAR) \ + /* Subset of stdint.h. */ \ + _("int8_t", INT8) \ + _("int16_t", INT16) \ + _("int32_t", INT32) \ + _("int64_t", INT64) \ + _("uint8_t", UINT8) \ + _("uint16_t", UINT16) \ + _("uint32_t", UINT32) \ + _("uint64_t", UINT64) \ + _("intptr_t", INT_PSZ) \ + _("uintptr_t", UINT_PSZ) \ + /* End of typedef list. */ + +/* Keywords (only the ones we actually care for). */ +#define CTKWDEF(_) \ + /* Type specifiers. */ \ + _("void", -1, CTOK_VOID) \ + _("_Bool", 0, CTOK_BOOL) \ + _("bool", 1, CTOK_BOOL) \ + _("char", 1, CTOK_CHAR) \ + _("int", 4, CTOK_INT) \ + _("__int8", 1, CTOK_INT) \ + _("__int16", 2, CTOK_INT) \ + _("__int32", 4, CTOK_INT) \ + _("__int64", 8, CTOK_INT) \ + _("float", 4, CTOK_FP) \ + _("double", 8, CTOK_FP) \ + _("long", 0, CTOK_LONG) \ + _("short", 0, CTOK_SHORT) \ + _("_Complex", 0, CTOK_COMPLEX) \ + _("complex", 0, CTOK_COMPLEX) \ + _("__complex", 0, CTOK_COMPLEX) \ + _("__complex__", 0, CTOK_COMPLEX) \ + _("signed", 0, CTOK_SIGNED) \ + _("__signed", 0, CTOK_SIGNED) \ + _("__signed__", 0, CTOK_SIGNED) \ + _("unsigned", 0, CTOK_UNSIGNED) \ + /* Type qualifiers. */ \ + _("const", 0, CTOK_CONST) \ + _("__const", 0, CTOK_CONST) \ + _("__const__", 0, CTOK_CONST) \ + _("volatile", 0, CTOK_VOLATILE) \ + _("__volatile", 0, CTOK_VOLATILE) \ + _("__volatile__", 0, CTOK_VOLATILE) \ + _("restrict", 0, CTOK_RESTRICT) \ + _("__restrict", 0, CTOK_RESTRICT) \ + _("__restrict__", 0, CTOK_RESTRICT) \ + _("inline", 0, CTOK_INLINE) \ + _("__inline", 0, CTOK_INLINE) \ + _("__inline__", 0, CTOK_INLINE) \ + /* Storage class specifiers. */ \ + _("typedef", 0, CTOK_TYPEDEF) \ + _("extern", 0, CTOK_EXTERN) \ + _("static", 0, CTOK_STATIC) \ + _("auto", 0, CTOK_AUTO) \ + _("register", 0, CTOK_REGISTER) \ + /* GCC Attributes. */ \ + _("__extension__", 0, CTOK_EXTENSION) \ + _("__attribute", 0, CTOK_ATTRIBUTE) \ + _("__attribute__", 0, CTOK_ATTRIBUTE) \ + _("asm", 0, CTOK_ASM) \ + _("__asm", 0, CTOK_ASM) \ + _("__asm__", 0, CTOK_ASM) \ + /* MSVC Attributes. */ \ + _("__declspec", 0, CTOK_DECLSPEC) \ + _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \ + _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \ + _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \ + _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \ + _("__ptr32", 4, CTOK_PTRSZ) \ + _("__ptr64", 8, CTOK_PTRSZ) \ + /* Other type specifiers. */ \ + _("struct", 0, CTOK_STRUCT) \ + _("union", 0, CTOK_UNION) \ + _("enum", 0, CTOK_ENUM) \ + /* Operators. */ \ + _("sizeof", 0, CTOK_SIZEOF) \ + _("__alignof", 0, CTOK_ALIGNOF) \ + _("__alignof__", 0, CTOK_ALIGNOF) \ + /* End of keyword list. */ + +/* Type info for predefined types. Size merged in. 
*/ +static CTInfo lj_ctype_typeinfo[] = { +#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)), +#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id), +#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)), +CTTYDEF(CTTYINFODEF) +CTTDDEF(CTTDINFODEF) +CTKWDEF(CTKWINFODEF) +#undef CTTYINFODEF +#undef CTTDINFODEF +#undef CTKWINFODEF + 0 +}; + +/* Predefined type names collected in a single string. */ +static const char * const lj_ctype_typenames = +#define CTTDNAMEDEF(name, id) name "\0" +#define CTKWNAMEDEF(name, sz, cds) name "\0" +CTTDDEF(CTTDNAMEDEF) +CTKWDEF(CTKWNAMEDEF) +#undef CTTDNAMEDEF +#undef CTKWNAMEDEF +; + +#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1) +#ifdef LUAJIT_CTYPE_CHECK_ANCHOR +#define CTTYPETAB_MIN CTTYPEINFO_NUM +#else +#define CTTYPETAB_MIN 128 +#endif + +/* -- C type interning ---------------------------------------------------- */ + +#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK) +#define ct_hashname(name) \ + (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK) + +/* Create new type element. */ +CTypeID lj_ctype_new(CTState *cts, CType **ctp) +{ + CTypeID id = cts->top; + CType *ct; + lua_assert(cts->L); + if (LJ_UNLIKELY(id >= cts->sizetab)) { + if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); +#ifdef LUAJIT_CTYPE_CHECK_ANCHOR + ct = lj_mem_newvec(cts->L, id+1, CType); + memcpy(ct, cts->tab, id*sizeof(CType)); + memset(cts->tab, 0, id*sizeof(CType)); + lj_mem_freevec(cts->g, cts->tab, cts->sizetab, CType); + cts->tab = ct; + cts->sizetab = id+1; +#else + lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType); +#endif + } + cts->top = id+1; + *ctp = ct = &cts->tab[id]; + ct->info = 0; + ct->size = 0; + ct->sib = 0; + ct->next = 0; + setgcrefnull(ct->name); + return id; +} + +/* Intern a type element. */ +CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size) +{ + uint32_t h = ct_hashtype(info, size); + CTypeID id = cts->hash[h]; + lua_assert(cts->L); + while (id) { + CType *ct = ctype_get(cts, id); + if (ct->info == info && ct->size == size) + return id; + id = ct->next; + } + id = cts->top; + if (LJ_UNLIKELY(id >= cts->sizetab)) { + if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); + lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType); + } + cts->top = id+1; + cts->tab[id].info = info; + cts->tab[id].size = size; + cts->tab[id].sib = 0; + cts->tab[id].next = cts->hash[h]; + setgcrefnull(cts->tab[id].name); + cts->hash[h] = (CTypeID1)id; + return id; +} + +/* Add type element to hash table. */ +static void ctype_addtype(CTState *cts, CType *ct, CTypeID id) +{ + uint32_t h = ct_hashtype(ct->info, ct->size); + ct->next = cts->hash[h]; + cts->hash[h] = (CTypeID1)id; +} + +/* Add named element to hash table. */ +void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id) +{ + uint32_t h = ct_hashname(gcref(ct->name)); + ct->next = cts->hash[h]; + cts->hash[h] = (CTypeID1)id; +} + +/* Get a C type by name, matching the type mask. */ +CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask) +{ + CTypeID id = cts->hash[ct_hashname(name)]; + while (id) { + CType *ct = ctype_get(cts, id); + if (gcref(ct->name) == obj2gco(name) && + ((tmask >> ctype_type(ct->info)) & 1)) { + *ctp = ct; + return id; + } + id = ct->next; + } + *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */ + return 0; +} + +/* Get a struct/union/enum/function field by name. 
*/ +CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, CTSize *ofs, + CTInfo *qual) +{ + while (ct->sib) { + ct = ctype_get(cts, ct->sib); + if (gcref(ct->name) == obj2gco(name)) { + *ofs = ct->size; + return ct; + } + if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { + CType *fct, *cct = ctype_child(cts, ct); + CTInfo q = 0; + while (ctype_isattrib(cct->info)) { + if (ctype_attrib(cct->info) == CTA_QUAL) q |= cct->size; + cct = ctype_child(cts, cct); + } + fct = lj_ctype_getfieldq(cts, cct, name, ofs, qual); + if (fct) { + if (qual) *qual |= q; + *ofs += ct->size; + return fct; + } + } + } + return NULL; /* Not found. */ +} + +/* -- C type information -------------------------------------------------- */ + +/* Follow references and get raw type for a C type ID. */ +CType *lj_ctype_rawref(CTState *cts, CTypeID id) +{ + CType *ct = ctype_get(cts, id); + while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) + ct = ctype_child(cts, ct); + return ct; +} + +/* Get size for a C type ID. Does NOT support VLA/VLS. */ +CTSize lj_ctype_size(CTState *cts, CTypeID id) +{ + CType *ct = ctype_raw(cts, id); + return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID; +} + +/* Get size for a variable-length C type. Does NOT support other C types. */ +CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem) +{ + uint64_t xsz = 0; + if (ctype_isstruct(ct->info)) { + CTypeID arrid = 0, fid = ct->sib; + xsz = ct->size; /* Add the struct size. */ + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (ctype_type(ctf->info) == CT_FIELD) + arrid = ctype_cid(ctf->info); /* Remember last field of VLS. */ + fid = ctf->sib; + } + ct = ctype_raw(cts, arrid); + } + lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */ + ct = ctype_rawchild(cts, ct); /* Get array element. */ + lua_assert(ctype_hassize(ct->info)); + /* Calculate actual size of VLA and check for overflow. */ + xsz += (uint64_t)ct->size * nelem; + return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID; +} + +/* Get type, qualifiers, size and alignment for a C type ID. */ +CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp) +{ + CTInfo qual = 0; + CType *ct = ctype_get(cts, id); + for (;;) { + CTInfo info = ct->info; + if (ctype_isenum(info)) { + /* Follow child. Need to look at its attributes, too. */ + } else if (ctype_isattrib(info)) { + if (ctype_isxattrib(info, CTA_QUAL)) + qual |= ct->size; + else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED)) + qual |= CTFP_ALIGNED + CTALIGN(ct->size); + } else { + if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN); + qual |= (info & ~(CTF_ALIGN|CTMASK_CID)); + lua_assert(ctype_hassize(info) || ctype_isfunc(info)); + *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size; + break; + } + ct = ctype_get(cts, ctype_cid(info)); + } + return qual; +} + +/* Get ctype metamethod. */ +cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm) +{ + CType *ct = ctype_get(cts, id); + cTValue *tv; + while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) { + id = ctype_cid(ct->info); + ct = ctype_get(cts, id); + } + if (ctype_isptr(ct->info) && + ctype_isfunc(ctype_get(cts, ctype_cid(ct->info))->info)) + tv = lj_tab_getstr(cts->miscmap, &cts->g->strempty); + else + tv = lj_tab_getinth(cts->miscmap, -(int32_t)id); + if (tv && tvistab(tv) && + (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv)) + return tv; + return NULL; +} + +/* -- C type representation ----------------------------------------------- */ + +/* Fixed max. 
length of a C type representation. */ +#define CTREPR_MAX 512 + +typedef struct CTRepr { + char *pb, *pe; + CTState *cts; + lua_State *L; + int needsp; + int ok; + char buf[CTREPR_MAX]; +} CTRepr; + +/* Prepend string. */ +static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len) +{ + char *p = ctr->pb; + if (ctr->buf + len+1 > p) { ctr->ok = 0; return; } + if (ctr->needsp) *--p = ' '; + ctr->needsp = 1; + p -= len; + while (len-- > 0) p[len] = str[len]; + ctr->pb = p; +} + +#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1) + +/* Prepend char. */ +static void ctype_prepc(CTRepr *ctr, int c) +{ + if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; } + *--ctr->pb = c; +} + +/* Prepend number. */ +static void ctype_prepnum(CTRepr *ctr, uint32_t n) +{ + char *p = ctr->pb; + if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; } + do { *--p = (char)('0' + n % 10); } while (n /= 10); + ctr->pb = p; + ctr->needsp = 0; +} + +/* Append char. */ +static void ctype_appc(CTRepr *ctr, int c) +{ + if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; } + *ctr->pe++ = c; +} + +/* Append number. */ +static void ctype_appnum(CTRepr *ctr, uint32_t n) +{ + char buf[10]; + char *p = buf+sizeof(buf); + char *q = ctr->pe; + if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; } + do { *--p = (char)('0' + n % 10); } while (n /= 10); + do { *q++ = *p++; } while (p < buf+sizeof(buf)); + ctr->pe = q; +} + +/* Prepend qualifiers. */ +static void ctype_prepqual(CTRepr *ctr, CTInfo info) +{ + if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile"); + if ((info & CTF_CONST)) ctype_preplit(ctr, "const"); +} + +/* Prepend named type. */ +static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t) +{ + if (gcref(ct->name)) { + GCstr *str = gco2str(gcref(ct->name)); + ctype_prepstr(ctr, strdata(str), str->len); + } else { + if (ctr->needsp) ctype_prepc(ctr, ' '); + ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct)); + ctr->needsp = 1; + } + ctype_prepstr(ctr, t, (MSize)strlen(t)); + ctype_prepqual(ctr, qual); +} + +static void ctype_repr(CTRepr *ctr, CTypeID id) +{ + CType *ct = ctype_get(ctr->cts, id); + CTInfo qual = 0; + int ptrto = 0; + for (;;) { + CTInfo info = ct->info; + CTSize size = ct->size; + switch (ctype_type(info)) { + case CT_NUM: + if ((info & CTF_BOOL)) { + ctype_preplit(ctr, "bool"); + } else if ((info & CTF_FP)) { + if (size == sizeof(double)) ctype_preplit(ctr, "double"); + else if (size == sizeof(float)) ctype_preplit(ctr, "float"); + else ctype_preplit(ctr, "long double"); + } else if (size == 1) { + if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char"); + else if (CTF_UCHAR) ctype_preplit(ctr, "signed char"); + else ctype_preplit(ctr, "unsigned char"); + } else if (size < 8) { + if (size == 4) ctype_preplit(ctr, "int"); + else ctype_preplit(ctr, "short"); + if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned"); + } else { + ctype_preplit(ctr, "_t"); + ctype_prepnum(ctr, size*8); + ctype_preplit(ctr, "int"); + if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u'); + } + ctype_prepqual(ctr, (qual|info)); + return; + case CT_VOID: + ctype_preplit(ctr, "void"); + ctype_prepqual(ctr, (qual|info)); + return; + case CT_STRUCT: + ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? 
"union" : "struct"); + return; + case CT_ENUM: + if (id == CTID_CTYPEID) { + ctype_preplit(ctr, "ctype"); + return; + } + ctype_preptype(ctr, ct, qual, "enum"); + return; + case CT_ATTRIB: + if (ctype_attrib(info) == CTA_QUAL) qual |= size; + break; + case CT_PTR: + if ((info & CTF_REF)) { + ctype_prepc(ctr, '&'); + } else { + ctype_prepqual(ctr, (qual|info)); + if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32"); + ctype_prepc(ctr, '*'); + } + qual = 0; + ptrto = 1; + ctr->needsp = 1; + break; + case CT_ARRAY: + if (ctype_isrefarray(info)) { + ctr->needsp = 1; + if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); } + ctype_appc(ctr, '['); + if (size != CTSIZE_INVALID) { + CTSize csize = ctype_child(ctr->cts, ct)->size; + ctype_appnum(ctr, csize ? size/csize : 0); + } else if ((info & CTF_VLA)) { + ctype_appc(ctr, '?'); + } + ctype_appc(ctr, ']'); + } else if ((info & CTF_COMPLEX)) { + if (size == 2*sizeof(float)) ctype_preplit(ctr, "float"); + ctype_preplit(ctr, "complex"); + return; + } else { + ctype_preplit(ctr, ")))"); + ctype_prepnum(ctr, size); + ctype_preplit(ctr, "__attribute__((vector_size("); + } + break; + case CT_FUNC: + ctr->needsp = 1; + if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); } + ctype_appc(ctr, '('); + ctype_appc(ctr, ')'); + break; + default: + lua_assert(0); + break; + } + ct = ctype_get(ctr->cts, ctype_cid(info)); + } +} + +/* Return a printable representation of a C type. */ +GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name) +{ + global_State *g = G(L); + CTRepr ctr; + ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2]; + ctr.cts = ctype_ctsG(g); + ctr.L = L; + ctr.ok = 1; + ctr.needsp = 0; + if (name) ctype_prepstr(&ctr, strdata(name), name->len); + ctype_repr(&ctr, id); + if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?"); + return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb); +} + +/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. */ +GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned) +{ + char buf[1+20+3]; + char *p = buf+sizeof(buf); + int sign = 0; + *--p = 'L'; *--p = 'L'; + if (isunsigned) { + *--p = 'U'; + } else if ((int64_t)n < 0) { + n = (uint64_t)-(int64_t)n; + sign = 1; + } + do { *--p = (char)('0' + n % 10); } while (n /= 10); + if (sign) *--p = '-'; + return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p)); +} + +/* Convert complex to string with 'i' or 'I' suffix. */ +GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size) +{ + char buf[2*STRFMT_MAXBUF_NUM+2+1], *p = buf; + TValue re, im; + if (size == 2*sizeof(double)) { + re.n = *(double *)sp; im.n = ((double *)sp)[1]; + } else { + re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1]; + } + p = lj_strfmt_wnum(p, &re); + if (!(im.u32.hi & 0x80000000u) || im.n != im.n) *p++ = '+'; + p = lj_strfmt_wnum(p, &im); + *p = *(p-1) >= 'a' ? 'I' : 'i'; + p++; + return lj_str_new(L, buf, p-buf); +} + +/* -- C type state -------------------------------------------------------- */ + +/* Initialize C type table and state. 
*/ +CTState *lj_ctype_init(lua_State *L) +{ + CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState); + CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType); + const char *name = lj_ctype_typenames; + CTypeID id; + memset(cts, 0, sizeof(CTState)); + cts->tab = ct; + cts->sizetab = CTTYPETAB_MIN; + cts->top = CTTYPEINFO_NUM; + cts->L = NULL; + cts->g = G(L); + for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) { + CTInfo info = lj_ctype_typeinfo[id]; + ct->size = (CTSize)((int32_t)(info << 16) >> 26); + ct->info = info & 0xffff03ffu; + ct->sib = 0; + if (ctype_type(info) == CT_KW || ctype_istypedef(info)) { + size_t len = strlen(name); + GCstr *str = lj_str_new(L, name, len); + ctype_setname(ct, str); + name += len+1; + lj_ctype_addname(cts, ct, id); + } else { + setgcrefnull(ct->name); + ct->next = 0; + if (!ctype_isenum(info)) ctype_addtype(cts, ct, id); + } + } + setmref(G(L)->ctype_state, cts); + return cts; +} + +/* Free C type table and state. */ +void lj_ctype_freestate(global_State *g) +{ + CTState *cts = ctype_ctsG(g); + if (cts) { + lj_ccallback_mcode_free(cts); + lj_mem_freevec(g, cts->tab, cts->sizetab, CType); + lj_mem_freevec(g, cts->cb.cbid, cts->cb.sizeid, CTypeID1); + lj_mem_freet(g, cts); + } +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.h new file mode 100644 index 00000000000..6639547a431 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ctype.h @@ -0,0 +1,461 @@ +/* +** C type management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CTYPE_H +#define _LJ_CTYPE_H + +#include "lj_obj.h" +#include "lj_gc.h" + +#if LJ_HASFFI + +/* -- C type definitions -------------------------------------------------- */ + +/* C type numbers. Highest 4 bits of C type info. ORDER CT. */ +enum { + /* Externally visible types. */ + CT_NUM, /* Integer or floating-point numbers. */ + CT_STRUCT, /* Struct or union. */ + CT_PTR, /* Pointer or reference. */ + CT_ARRAY, /* Array or complex type. */ + CT_MAYCONVERT = CT_ARRAY, + CT_VOID, /* Void type. */ + CT_ENUM, /* Enumeration. */ + CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */ + CT_FUNC, /* Function. */ + CT_TYPEDEF, /* Typedef. */ + CT_ATTRIB, /* Miscellaneous attributes. */ + /* Internal element types. */ + CT_FIELD, /* Struct/union field or function parameter. */ + CT_BITFIELD, /* Struct/union bitfield. */ + CT_CONSTVAL, /* Constant value. */ + CT_EXTERN, /* External reference. */ + CT_KW /* Keyword. */ +}; + +LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR); +LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT); + +/* +** ---------- info ------------ +** |type flags... A cid | size | sib | next | name | +** +----------------------------+--------+-------+-------+-------+-- +** |NUM BFvcUL.. A | size | | type | | +** |STRUCT ..vcU..V A | size | field | name? | name? | +** |PTR ..vcR... A cid | size | | type | | +** |ARRAY VCvc...V A cid | size | | type | | +** |VOID ..vc.... A | size | | type | | +** |ENUM A cid | size | const | name? | name? | +** |FUNC ....VS.. cc cid | nargs | field | name? | name? | +** |TYPEDEF cid | | | name | name | +** |ATTRIB attrnum cid | attr | sib? | type? | | +** |FIELD cid | offset | field | | name? | +** |BITFIELD B.vcU csz bsz pos | offset | field | | name? | +** |CONSTVAL c cid | value | const | name | name | +** |EXTERN cid | | sib? 
| name | name | +** |KW tok | size | | name | name | +** +----------------------------+--------+-------+-------+-------+-- +** ^^ ^^--- bits used for C type conversion dispatch +*/ + +/* C type info flags. TFFArrrr */ +#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */ +#define CTF_FP 0x04000000u /* Floating-point: NUM. */ +#define CTF_CONST 0x02000000u /* Const qualifier. */ +#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */ +#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */ +#define CTF_LONG 0x00400000u /* Long: NUM. */ +#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */ +#define CTF_REF 0x00800000u /* Reference: PTR. */ +#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */ +#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */ +#define CTF_UNION 0x00800000u /* Union: STRUCT. */ +#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */ +#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */ + +#define CTF_QUAL (CTF_CONST|CTF_VOLATILE) +#define CTF_ALIGN (CTMASK_ALIGN< 0 ? CTF_UNSIGNED : 0) + +/* Flags used in parser. .F.Ammvf cp->attr */ +#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */ +#define CTFP_PACKED 0x00000002u /* cp->attr */ +/* ...C...f cp->fattr */ +#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */ + +/* C type info bitfields. */ +#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */ +#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */ +#define CTSHIFT_NUM 28 +#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */ +#define CTSHIFT_ALIGN 16 +#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */ +#define CTSHIFT_ATTRIB 16 +#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */ +#define CTSHIFT_CCONV 16 +#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */ +#define CTSHIFT_REGPARM 18 +/* Bitfields only used in parser. */ +#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */ +#define CTSHIFT_VSIZEP 4 +#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */ +#define CTSHIFT_MSIZEP 8 + +/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */ +#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */ +#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */ +#define CTMASK_BITPOS 127 +#define CTMASK_BITBSZ 127 +#define CTMASK_BITCSZ 127 +#define CTSHIFT_BITPOS 0 +#define CTSHIFT_BITBSZ 8 +#define CTSHIFT_BITCSZ 16 + +#define CTF_INSERT(info, field, val) \ + info = (info & ~(CTMASK_##field<> CTSHIFT_NUM) +#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID)) +#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN) +#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB) +#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS) +#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ) +#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ) +#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP) +#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP) +#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV) + +/* Simple type checks. 
*/ +#define ctype_isnum(info) (ctype_type((info)) == CT_NUM) +#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID) +#define ctype_isptr(info) (ctype_type((info)) == CT_PTR) +#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY) +#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT) +#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC) +#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM) +#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF) +#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB) +#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD) +#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD) +#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL) +#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN) +#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE) + +/* Combined type and flag checks. */ +#define ctype_isinteger(info) \ + (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0)) +#define ctype_isinteger_or_bool(info) \ + (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0)) +#define ctype_isbool(info) \ + (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL)) +#define ctype_isfp(info) \ + (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP)) + +#define ctype_ispointer(info) \ + ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */ +#define ctype_isref(info) \ + (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF)) + +#define ctype_isrefarray(info) \ + (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0)) +#define ctype_isvector(info) \ + (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR)) +#define ctype_iscomplex(info) \ + (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX)) + +#define ctype_isvltype(info) \ + (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<") _(STRING, "") \ + _(INTEGER, "") _(EOF, "") \ + _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \ + _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->") + +/* Simple declaration specifiers. */ +#define CDSDEF(_) \ + _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \ + _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \ + _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \ + _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER) + +/* C keywords. */ +#define CKWDEF(_) \ + CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \ + _(DECLSPEC) _(CCDECL) _(PTRSZ) \ + _(STRUCT) _(UNION) _(ENUM) \ + _(SIZEOF) _(ALIGNOF) + +/* C token numbers. */ +enum { + CTOK_OFS = 255, +#define CTOKNUM(name, sym) CTOK_##name, +#define CKWNUM(name) CTOK_##name, +CTOKDEF(CTOKNUM) +CKWDEF(CKWNUM) +#undef CTOKNUM +#undef CKWNUM + CTOK_FIRSTDECL = CTOK_VOID, + CTOK_FIRSTSCL = CTOK_TYPEDEF, + CTOK_LASTDECLFLAG = CTOK_REGISTER, + CTOK_LASTDECL = CTOK_ENUM +}; + +/* Declaration specifier flags. */ +enum { +#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)), +CDSDEF(CDSFLAG) +#undef CDSFLAG + CDF__END +}; + +#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER) + +/* -- C type management --------------------------------------------------- */ + +#define ctype_ctsG(g) (mref((g)->ctype_state, CTState)) + +/* Get C type state. */ +static LJ_AINLINE CTState *ctype_cts(lua_State *L) +{ + CTState *cts = ctype_ctsG(G(L)); + cts->L = L; /* Save L for errors and allocations. */ + return cts; +} + +/* Save and restore state of C type table. 
*/ +#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts) +#define LJ_CTYPE_RESTORE(cts) \ + ((cts)->top = savects_.top, \ + memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash))) + +/* Check C type ID for validity when assertions are enabled. */ +static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id) +{ + lua_assert(id > 0 && id < cts->top); UNUSED(cts); + return id; +} + +/* Get C type for C type ID. */ +static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id) +{ + return &cts->tab[ctype_check(cts, id)]; +} + +/* Get C type ID for a C type. */ +#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab)) + +/* Get child C type. */ +static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct) +{ + lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) || + ctype_isbitfield(ct->info))); /* These don't have children. */ + return ctype_get(cts, ctype_cid(ct->info)); +} + +/* Get raw type for a C type ID. */ +static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id) +{ + CType *ct = ctype_get(cts, id); + while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct); + return ct; +} + +/* Get raw type of the child of a C type. */ +static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct) +{ + do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info)); + return ct; +} + +/* Set the name of a C type table element. */ +static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s) +{ + /* NOBARRIER: mark string as fixed -- the C type table is never collected. */ + fixstring(s); + setgcref(ct->name, obj2gco(s)); +} + +LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp); +LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size); +LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id); +LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, + uint32_t tmask); +LJ_FUNC CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, + CTSize *ofs, CTInfo *qual); +#define lj_ctype_getfield(cts, ct, name, ofs) \ + lj_ctype_getfieldq((cts), (ct), (name), (ofs), NULL) +LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id); +LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id); +LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem); +LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp); +LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm); +LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name); +LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned); +LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size); +LJ_FUNC CTState *lj_ctype_init(lua_State *L); +LJ_FUNC void lj_ctype_freestate(global_State *g); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_debug.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_debug.c new file mode 100644 index 00000000000..3226d03b9e1 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_debug.c @@ -0,0 +1,699 @@ +/* +** Debugging and introspection. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_debug_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_buf.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_strfmt.h" +#if LJ_HASJIT +#include "lj_jit.h" +#endif + +/* -- Frames -------------------------------------------------------------- */ + +/* Get frame corresponding to a level. 
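+** The (frame, *size) pair is what the public API packs into lua_Debug.i_ci;
+** a typical caller looks roughly like lua_getstack() further below:
+**   int size; cTValue *frame = lj_debug_frame(L, level, &size);
+**   if (frame) ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack));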
*/ +cTValue *lj_debug_frame(lua_State *L, int level, int *size) +{ + cTValue *frame, *nextframe, *bot = tvref(L->stack)+LJ_FR2; + /* Traverse frames backwards. */ + for (nextframe = frame = L->base-1; frame > bot; ) { + if (frame_gc(frame) == obj2gco(L)) + level++; /* Skip dummy frames. See lj_err_optype_call(). */ + if (level-- == 0) { + *size = (int)(nextframe - frame); + return frame; /* Level found. */ + } + nextframe = frame; + if (frame_islua(frame)) { + frame = frame_prevl(frame); + } else { + if (frame_isvarg(frame)) + level++; /* Skip vararg pseudo-frame. */ + frame = frame_prevd(frame); + } + } + *size = level; + return NULL; /* Level not found. */ +} + +/* Invalid bytecode position. */ +#define NO_BCPOS (~(BCPos)0) + +/* Return bytecode position for function/frame or NO_BCPOS. */ +static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe) +{ + const BCIns *ins; + GCproto *pt; + BCPos pos; + lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD); + if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */ + return NO_BCPOS; + } else if (nextframe == NULL) { /* Lua function on top. */ + void *cf = cframe_raw(L->cframe); + if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf)) + return NO_BCPOS; + ins = cframe_pc(cf); /* Only happens during error/hook handling. */ + } else { + if (frame_islua(nextframe)) { + ins = frame_pc(nextframe); + } else if (frame_iscont(nextframe)) { + ins = frame_contpc(nextframe); + } else { + /* Lua function below errfunc/gc/hook: find cframe to get the PC. */ + void *cf = cframe_raw(L->cframe); + TValue *f = L->base-1; + for (;;) { + if (cf == NULL) + return NO_BCPOS; + while (cframe_nres(cf) < 0) { + if (f >= restorestack(L, -cframe_nres(cf))) + break; + cf = cframe_raw(cframe_prev(cf)); + if (cf == NULL) + return NO_BCPOS; + } + if (f < nextframe) + break; + if (frame_islua(f)) { + f = frame_prevl(f); + } else { + if (frame_isc(f) || (frame_iscont(f) && frame_iscont_fficb(f))) + cf = cframe_raw(cframe_prev(cf)); + f = frame_prevd(f); + } + } + ins = cframe_pc(cf); + } + } + pt = funcproto(fn); + pos = proto_bcpos(pt, ins) - 1; +#if LJ_HASJIT + if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */ + GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins)); + lua_assert(bc_isret(bc_op(ins[-1]))); + pos = proto_bcpos(pt, mref(T->startpc, const BCIns)); + } +#endif + return pos; +} + +/* -- Line numbers -------------------------------------------------------- */ + +/* Get line number for a bytecode position. */ +BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc) +{ + const void *lineinfo = proto_lineinfo(pt); + if (pc <= pt->sizebc && lineinfo) { + BCLine first = pt->firstline; + if (pc == pt->sizebc) return first + pt->numline; + if (pc-- == 0) return first; + if (pt->numline < 256) + return first + (BCLine)((const uint8_t *)lineinfo)[pc]; + else if (pt->numline < 65536) + return first + (BCLine)((const uint16_t *)lineinfo)[pc]; + else + return first + (BCLine)((const uint32_t *)lineinfo)[pc]; + } + return 0; +} + +/* Get line number for function/frame. */ +static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe) +{ + BCPos pc = debug_framepc(L, fn, nextframe); + if (pc != NO_BCPOS) { + GCproto *pt = funcproto(fn); + lua_assert(pc <= pt->sizebc); + return lj_debug_line(pt, pc); + } + return -1; +} + +/* -- Variable names ------------------------------------------------------ */ + +/* Get name of a local variable from slot number and PC. 
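+** The variable info stream consists of entries that are either a VARNAME_*
+** byte or a NUL-terminated name, each followed by a ULEB128-encoded startpc
+** delta and extent; the list is terminated by VARNAME_END, and entries whose
+** range covers the given pc are counted until the requested slot is reached.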
*/ +static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot) +{ + const char *p = (const char *)proto_varinfo(pt); + if (p) { + BCPos lastpc = 0; + for (;;) { + const char *name = p; + uint32_t vn = *(const uint8_t *)p; + BCPos startpc, endpc; + if (vn < VARNAME__MAX) { + if (vn == VARNAME_END) break; /* End of varinfo. */ + } else { + do { p++; } while (*(const uint8_t *)p); /* Skip over variable name. */ + } + p++; + lastpc = startpc = lastpc + lj_buf_ruleb128(&p); + if (startpc > pc) break; + endpc = startpc + lj_buf_ruleb128(&p); + if (pc < endpc && slot-- == 0) { + if (vn < VARNAME__MAX) { +#define VARNAMESTR(name, str) str "\0" + name = VARNAMEDEF(VARNAMESTR); +#undef VARNAMESTR + if (--vn) while (*name++ || --vn) ; + } + return name; + } + } + } + return NULL; +} + +/* Get name of local variable from 1-based slot number and function/frame. */ +static TValue *debug_localname(lua_State *L, const lua_Debug *ar, + const char **name, BCReg slot1) +{ + uint32_t offset = (uint32_t)ar->i_ci & 0xffff; + uint32_t size = (uint32_t)ar->i_ci >> 16; + TValue *frame = tvref(L->stack) + offset; + TValue *nextframe = size ? frame + size : NULL; + GCfunc *fn = frame_func(frame); + BCPos pc = debug_framepc(L, fn, nextframe); + if (!nextframe) nextframe = L->top+LJ_FR2; + if ((int)slot1 < 0) { /* Negative slot number is for varargs. */ + if (pc != NO_BCPOS) { + GCproto *pt = funcproto(fn); + if ((pt->flags & PROTO_VARARG)) { + slot1 = pt->numparams + (BCReg)(-(int)slot1); + if (frame_isvarg(frame)) { /* Vararg frame has been set up? (pc!=0) */ + nextframe = frame; + frame = frame_prevd(frame); + } + if (frame + slot1+LJ_FR2 < nextframe) { + *name = "(*vararg)"; + return frame+slot1; + } + } + } + return NULL; + } + if (pc != NO_BCPOS && + (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL) + ; + else if (slot1 > 0 && frame + slot1+LJ_FR2 < nextframe) + *name = "(*temporary)"; + return frame+slot1; +} + +/* Get name of upvalue. */ +const char *lj_debug_uvname(GCproto *pt, uint32_t idx) +{ + const uint8_t *p = proto_uvinfo(pt); + lua_assert(idx < pt->sizeuv); + if (!p) return ""; + if (idx) while (*p++ || --idx) ; + return (const char *)p; +} + +/* Get name and value of upvalue. */ +const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp) +{ + if (tvisfunc(o)) { + GCfunc *fn = funcV(o); + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + if (idx < pt->sizeuv) { + *tvp = uvval(&gcref(fn->l.uvptr[idx])->uv); + return lj_debug_uvname(pt, idx); + } + } else { + if (idx < fn->c.nupvalues) { + *tvp = &fn->c.upvalue[idx]; + return ""; + } + } + } + return NULL; +} + +/* Deduce name of an object from slot number and PC. 
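+** Walks the bytecode backwards from ip looking for the instruction that
+** wrote the slot and classifies it, e.g. BC_GGET yields "global" plus the
+** key string, BC_UGET yields "upvalue", and a BC_TGETS whose object was
+** loaded by a matching BC_MOV is reported as "method" instead of "field".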
*/ +const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot, + const char **name) +{ + const char *lname; +restart: + lname = debug_varname(pt, proto_bcpos(pt, ip), slot); + if (lname != NULL) { *name = lname; return "local"; } + while (--ip > proto_bc(pt)) { + BCIns ins = *ip; + BCOp op = bc_op(ins); + BCReg ra = bc_a(ins); + if (bcmode_a(op) == BCMbase) { + if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins))) + return NULL; + } else if (bcmode_a(op) == BCMdst && ra == slot) { + switch (bc_op(ins)) { + case BC_MOV: + if (ra == slot) { slot = bc_d(ins); goto restart; } + break; + case BC_GGET: + *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins)))); + return "global"; + case BC_TGETS: + *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins)))); + if (ip > proto_bc(pt)) { + BCIns insp = ip[-1]; + if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1+LJ_FR2 && + bc_d(insp) == bc_b(ins)) + return "method"; + } + return "field"; + case BC_UGET: + *name = lj_debug_uvname(pt, bc_d(ins)); + return "upvalue"; + default: + return NULL; + } + } + } + return NULL; +} + +/* Deduce function name from caller of a frame. */ +const char *lj_debug_funcname(lua_State *L, cTValue *frame, const char **name) +{ + cTValue *pframe; + GCfunc *fn; + BCPos pc; + if (frame <= tvref(L->stack)+LJ_FR2) + return NULL; + if (frame_isvarg(frame)) + frame = frame_prevd(frame); + pframe = frame_prev(frame); + fn = frame_func(pframe); + pc = debug_framepc(L, fn, frame); + if (pc != NO_BCPOS) { + GCproto *pt = funcproto(fn); + const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)]; + MMS mm = bcmode_mm(bc_op(*ip)); + if (mm == MM_call) { + BCReg slot = bc_a(*ip); + if (bc_op(*ip) == BC_ITERC) slot -= 3; + return lj_debug_slotname(pt, ip, slot, name); + } else if (mm != MM__MAX) { + *name = strdata(mmname_str(G(L), mm)); + return "metamethod"; + } + } + return NULL; +} + +/* -- Source code locations ----------------------------------------------- */ + +/* Generate shortened source name. */ +void lj_debug_shortname(char *out, GCstr *str, BCLine line) +{ + const char *src = strdata(str); + if (*src == '=') { + strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */ + out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */ + } else if (*src == '@') { /* Output "source", or "...source". */ + size_t len = str->len-1; + src++; /* Skip the `@' */ + if (len >= LUA_IDSIZE) { + src += len-(LUA_IDSIZE-4); /* Get last part of file name. */ + *out++ = '.'; *out++ = '.'; *out++ = '.'; + } + strcpy(out, src); + } else { /* Output [string "string"] or [builtin:name]. */ + size_t len; /* Length, up to first control char. */ + for (len = 0; len < LUA_IDSIZE-12; len++) + if (((const unsigned char *)src)[len] < ' ') break; + strcpy(out, line == ~(BCLine)0 ? "[builtin:" : "[string \""); out += 9; + if (src[len] != '\0') { /* Must truncate? */ + if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15; + strncpy(out, src, len); out += len; + strcpy(out, "..."); out += 3; + } else { + strcpy(out, src); out += len; + } + strcpy(out, line == ~(BCLine)0 ? "]" : "\"]"); + } +} + +/* Add current location of a frame to error message. 
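+** For a Lua frame the message is pushed as "short_src:line: msg" (e.g.
+** "test.lua:42: oops"); for C frames or when no line is available the bare
+** message is pushed unchanged.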
*/ +void lj_debug_addloc(lua_State *L, const char *msg, + cTValue *frame, cTValue *nextframe) +{ + if (frame) { + GCfunc *fn = frame_func(frame); + if (isluafunc(fn)) { + BCLine line = debug_frameline(L, fn, nextframe); + if (line >= 0) { + GCproto *pt = funcproto(fn); + char buf[LUA_IDSIZE]; + lj_debug_shortname(buf, proto_chunkname(pt), pt->firstline); + lj_strfmt_pushf(L, "%s:%d: %s", buf, line, msg); + return; + } + } + } + lj_strfmt_pushf(L, "%s", msg); +} + +/* Push location string for a bytecode position to Lua stack. */ +void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc) +{ + GCstr *name = proto_chunkname(pt); + const char *s = strdata(name); + MSize i, len = name->len; + BCLine line = lj_debug_line(pt, pc); + if (pt->firstline == ~(BCLine)0) { + lj_strfmt_pushf(L, "builtin:%s", s); + } else if (*s == '@') { + s++; len--; + for (i = len; i > 0; i--) + if (s[i] == '/' || s[i] == '\\') { + s += i+1; + break; + } + lj_strfmt_pushf(L, "%s:%d", s, line); + } else if (len > 40) { + lj_strfmt_pushf(L, "%p:%d", pt, line); + } else if (*s == '=') { + lj_strfmt_pushf(L, "%s:%d", s+1, line); + } else { + lj_strfmt_pushf(L, "\"%s\":%d", s, line); + } +} + +/* -- Public debug API ---------------------------------------------------- */ + +/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */ + +LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n) +{ + const char *name = NULL; + if (ar) { + TValue *o = debug_localname(L, ar, &name, (BCReg)n); + if (name) { + copyTV(L, L->top, o); + incr_top(L); + } + } else if (tvisfunc(L->top-1) && isluafunc(funcV(L->top-1))) { + name = debug_varname(funcproto(funcV(L->top-1)), 0, (BCReg)n-1); + } + return name; +} + +LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n) +{ + const char *name = NULL; + TValue *o = debug_localname(L, ar, &name, (BCReg)n); + if (name) + copyTV(L, o, L->top-1); + L->top--; + return name; +} + +int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext) +{ + int opt_f = 0, opt_L = 0; + TValue *frame = NULL; + TValue *nextframe = NULL; + GCfunc *fn; + if (*what == '>') { + TValue *func = L->top - 1; + api_check(L, tvisfunc(func)); + fn = funcV(func); + L->top--; + what++; + } else { + uint32_t offset = (uint32_t)ar->i_ci & 0xffff; + uint32_t size = (uint32_t)ar->i_ci >> 16; + lua_assert(offset != 0); + frame = tvref(L->stack) + offset; + if (size) nextframe = frame + size; + lua_assert(frame <= tvref(L->maxstack) && + (!nextframe || nextframe <= tvref(L->maxstack))); + fn = frame_func(frame); + lua_assert(fn->c.gct == ~LJ_TFUNC); + } + for (; *what; what++) { + if (*what == 'S') { + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + BCLine firstline = pt->firstline; + GCstr *name = proto_chunkname(pt); + ar->source = strdata(name); + lj_debug_shortname(ar->short_src, name, pt->firstline); + ar->linedefined = (int)firstline; + ar->lastlinedefined = (int)(firstline + pt->numline); + ar->what = (firstline || !pt->numline) ? "Lua" : "main"; + } else { + ar->source = "=[C]"; + ar->short_src[0] = '['; + ar->short_src[1] = 'C'; + ar->short_src[2] = ']'; + ar->short_src[3] = '\0'; + ar->linedefined = -1; + ar->lastlinedefined = -1; + ar->what = "C"; + } + } else if (*what == 'l') { + ar->currentline = frame ? 
debug_frameline(L, fn, nextframe) : -1; + } else if (*what == 'u') { + ar->nups = fn->c.nupvalues; + if (ext) { + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + ar->nparams = pt->numparams; + ar->isvararg = !!(pt->flags & PROTO_VARARG); + } else { + ar->nparams = 0; + ar->isvararg = 1; + } + } + } else if (*what == 'n') { + ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL; + if (ar->namewhat == NULL) { + ar->namewhat = ""; + ar->name = NULL; + } + } else if (*what == 'f') { + opt_f = 1; + } else if (*what == 'L') { + opt_L = 1; + } else { + return 0; /* Bad option. */ + } + } + if (opt_f) { + setfuncV(L, L->top, fn); + incr_top(L); + } + if (opt_L) { + if (isluafunc(fn)) { + GCtab *t = lj_tab_new(L, 0, 0); + GCproto *pt = funcproto(fn); + const void *lineinfo = proto_lineinfo(pt); + if (lineinfo) { + BCLine first = pt->firstline; + int sz = pt->numline < 256 ? 1 : pt->numline < 65536 ? 2 : 4; + MSize i, szl = pt->sizebc-1; + for (i = 0; i < szl; i++) { + BCLine line = first + + (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] : + sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] : + (BCLine)((const uint32_t *)lineinfo)[i]); + setboolV(lj_tab_setint(L, t, line), 1); + } + } + settabV(L, L->top, t); + } else { + setnilV(L->top); + } + incr_top(L); + } + return 1; /* Ok. */ +} + +LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar) +{ + return lj_debug_getinfo(L, what, (lj_Debug *)ar, 0); +} + +LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar) +{ + int size; + cTValue *frame = lj_debug_frame(L, level, &size); + if (frame) { + ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack)); + return 1; + } else { + ar->i_ci = level - size; + return 0; + } +} + +#if LJ_HASPROFILE +/* Put the chunkname into a buffer. */ +static int debug_putchunkname(SBuf *sb, GCproto *pt, int pathstrip) +{ + GCstr *name = proto_chunkname(pt); + const char *p = strdata(name); + if (pt->firstline == ~(BCLine)0) { + lj_buf_putmem(sb, "[builtin:", 9); + lj_buf_putstr(sb, name); + lj_buf_putb(sb, ']'); + return 0; + } + if (*p == '=' || *p == '@') { + MSize len = name->len-1; + p++; + if (pathstrip) { + int i; + for (i = len-1; i >= 0; i--) + if (p[i] == '/' || p[i] == '\\') { + len -= i+1; + p = p+i+1; + break; + } + } + lj_buf_putmem(sb, p, len); + } else { + lj_buf_putmem(sb, "[string]", 8); + } + return 1; +} + +/* Put a compact stack dump into a buffer. */ +void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt, int depth) +{ + int level = 0, dir = 1, pathstrip = 1; + MSize lastlen = 0; + if (depth < 0) { level = ~depth; depth = dir = -1; } /* Reverse frames. */ + while (level != depth) { /* Loop through all frame. */ + int size; + cTValue *frame = lj_debug_frame(L, level, &size); + if (frame) { + cTValue *nextframe = size ? frame+size : NULL; + GCfunc *fn = frame_func(frame); + const uint8_t *p = (const uint8_t *)fmt; + int c; + while ((c = *p++)) { + switch (c) { + case 'p': /* Preserve full path. */ + pathstrip = 0; + break; + case 'F': case 'f': { /* Dump function name. */ + const char *name; + const char *what = lj_debug_funcname(L, frame, &name); + if (what) { + if (c == 'F' && isluafunc(fn)) { /* Dump module:name for 'F'. */ + GCproto *pt = funcproto(fn); + if (pt->firstline != ~(BCLine)0) { /* Not a bytecode builtin. */ + debug_putchunkname(sb, pt, pathstrip); + lj_buf_putb(sb, ':'); + } + } + lj_buf_putmem(sb, name, (MSize)strlen(name)); + break; + } /* else: can't derive a name, dump module:line. 
*/ + } + /* fallthrough */ + case 'l': /* Dump module:line. */ + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + if (debug_putchunkname(sb, pt, pathstrip)) { + /* Regular Lua function. */ + BCLine line = c == 'l' ? debug_frameline(L, fn, nextframe) : + pt->firstline; + lj_buf_putb(sb, ':'); + lj_strfmt_putint(sb, line >= 0 ? line : pt->firstline); + } + } else if (isffunc(fn)) { /* Dump numbered builtins. */ + lj_buf_putmem(sb, "[builtin#", 9); + lj_strfmt_putint(sb, fn->c.ffid); + lj_buf_putb(sb, ']'); + } else { /* Dump C function address. */ + lj_buf_putb(sb, '@'); + lj_strfmt_putptr(sb, fn->c.f); + } + break; + case 'Z': /* Zap trailing separator. */ + lastlen = sbuflen(sb); + break; + default: + lj_buf_putb(sb, c); + break; + } + } + } else if (dir == 1) { + break; + } else { + level -= size; /* Reverse frame order: quickly skip missing level. */ + } + level += dir; + } + if (lastlen) + setsbufP(sb, sbufB(sb) + lastlen); /* Zap trailing separator. */ +} +#endif + +/* Number of frames for the leading and trailing part of a traceback. */ +#define TRACEBACK_LEVELS1 12 +#define TRACEBACK_LEVELS2 10 + +LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg, + int level) +{ + int top = (int)(L->top - L->base); + int lim = TRACEBACK_LEVELS1; + lua_Debug ar; + if (msg) lua_pushfstring(L, "%s\n", msg); + lua_pushliteral(L, "stack traceback:"); + while (lua_getstack(L1, level++, &ar)) { + GCfunc *fn; + if (level > lim) { + if (!lua_getstack(L1, level + TRACEBACK_LEVELS2, &ar)) { + level--; + } else { + lua_pushliteral(L, "\n\t..."); + lua_getstack(L1, -10, &ar); + level = ar.i_ci - TRACEBACK_LEVELS2; + } + lim = 2147483647; + continue; + } + lua_getinfo(L1, "Snlf", &ar); + fn = funcV(L1->top-1); L1->top--; + if (isffunc(fn) && !*ar.namewhat) + lua_pushfstring(L, "\n\t[builtin#%d]:", fn->c.ffid); + else + lua_pushfstring(L, "\n\t%s:", ar.short_src); + if (ar.currentline > 0) + lua_pushfstring(L, "%d:", ar.currentline); + if (*ar.namewhat) { + lua_pushfstring(L, " in function " LUA_QS, ar.name); + } else { + if (*ar.what == 'm') { + lua_pushliteral(L, " in main chunk"); + } else if (*ar.what == 'C') { + lua_pushfstring(L, " at %p", fn->c.f); + } else { + lua_pushfstring(L, " in function <%s:%d>", + ar.short_src, ar.linedefined); + } + } + if ((int)(L->top - L->base) - top >= 15) + lua_concat(L, (int)(L->top - L->base) - top); + } + lua_concat(L, (int)(L->top - L->base) - top); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_debug.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_debug.h new file mode 100644 index 00000000000..11d308a41b3 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_debug.h @@ -0,0 +1,65 @@ +/* +** Debugging and introspection. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_DEBUG_H +#define _LJ_DEBUG_H + +#include "lj_obj.h" + +typedef struct lj_Debug { + /* Common fields. Must be in the same order as in lua.h. */ + int event; + const char *name; + const char *namewhat; + const char *what; + const char *source; + int currentline; + int nups; + int linedefined; + int lastlinedefined; + char short_src[LUA_IDSIZE]; + int i_ci; + /* Extended fields. 
Only valid if lj_debug_getinfo() is called with ext = 1.*/ + int nparams; + int isvararg; +} lj_Debug; + +LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size); +LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc); +LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx); +LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp); +LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc, + BCReg slot, const char **name); +LJ_FUNC const char *lj_debug_funcname(lua_State *L, cTValue *frame, + const char **name); +LJ_FUNC void lj_debug_shortname(char *out, GCstr *str, BCLine line); +LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg, + cTValue *frame, cTValue *nextframe); +LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc); +LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, + int ext); +#if LJ_HASPROFILE +LJ_FUNC void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt, + int depth); +#endif + +/* Fixed internal variable names. */ +#define VARNAMEDEF(_) \ + _(FOR_IDX, "(for index)") \ + _(FOR_STOP, "(for limit)") \ + _(FOR_STEP, "(for step)") \ + _(FOR_GEN, "(for generator)") \ + _(FOR_STATE, "(for state)") \ + _(FOR_CTL, "(for control)") + +enum { + VARNAME_END, +#define VARNAMEENUM(name, str) VARNAME_##name, + VARNAMEDEF(VARNAMEENUM) +#undef VARNAMEENUM + VARNAME__MAX +}; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_def.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_def.h new file mode 100644 index 00000000000..04bf00c89be --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_def.h @@ -0,0 +1,365 @@ +/* +** LuaJIT common internal definitions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_DEF_H +#define _LJ_DEF_H + +#include "lua.h" + +#if defined(_MSC_VER) +/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */ +typedef __int8 int8_t; +typedef __int16 int16_t; +typedef __int32 int32_t; +typedef __int64 int64_t; +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#ifdef _WIN64 +typedef __int64 intptr_t; +typedef unsigned __int64 uintptr_t; +#else +typedef __int32 intptr_t; +typedef unsigned __int32 uintptr_t; +#endif +#elif defined(__symbian__) +/* Cough. */ +typedef signed char int8_t; +typedef short int int16_t; +typedef int int32_t; +typedef long long int64_t; +typedef unsigned char uint8_t; +typedef unsigned short int uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef int intptr_t; +typedef unsigned int uintptr_t; +#else +#include +#endif + +/* Needed everywhere. */ +#include +#include + +/* Various VM limits. */ +#define LJ_MAX_MEM32 0x7fffff00 /* Max. 32 bit memory allocation. */ +#define LJ_MAX_MEM64 ((uint64_t)1<<47) /* Max. 64 bit memory allocation. */ +/* Max. total memory allocation. */ +#define LJ_MAX_MEM (LJ_GC64 ? LJ_MAX_MEM64 : LJ_MAX_MEM32) +#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */ +#define LJ_MAX_STR LJ_MAX_MEM32 /* Max. string length. */ +#define LJ_MAX_BUF LJ_MAX_MEM32 /* Max. buffer length. */ +#define LJ_MAX_UDATA LJ_MAX_MEM32 /* Max. userdata length. */ + +#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */ +#define LJ_MAX_HBITS 26 /* Max. hash bits. */ +#define LJ_MAX_ABITS 28 /* Max. bits of array key. */ +#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. 
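+** i.e. (1<<27)+1 = 134217729 entries with LJ_MAX_ABITS = 28.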
*/ +#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */ + +#define LJ_MAX_LINE LJ_MAX_MEM32 /* Max. source code line number. */ +#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */ +#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */ +#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */ +#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */ +#define LJ_MAX_UPVAL 249 /* Max. # of upvalues. */ + +#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */ +#define LJ_STACK_EXTRA (5+2*LJ_FR2) /* Extra stack space (metamethods). */ + +#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */ + +/* Minimum table/buffer sizes. */ +#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */ +#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */ +#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */ +#define LJ_MIN_SBUF 32 /* Min. string buffer length. */ +#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */ +#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */ +#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */ + +/* JIT compiler limits. */ +#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */ +#define LJ_MAX_PHI 64 /* Max. # of PHIs for a loop. */ +#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */ + +/* Various macros. */ +#ifndef UNUSED +#define UNUSED(x) ((void)(x)) /* to avoid warnings */ +#endif + +#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo) +#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p)) +#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p)) + +#define checki8(x) ((x) == (int32_t)(int8_t)(x)) +#define checku8(x) ((x) == (int32_t)(uint8_t)(x)) +#define checki16(x) ((x) == (int32_t)(int16_t)(x)) +#define checku16(x) ((x) == (int32_t)(uint16_t)(x)) +#define checki32(x) ((x) == (int32_t)(x)) +#define checku32(x) ((x) == (uint32_t)(x)) +#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x)) +#define checkptr47(x) (((uint64_t)(x) >> 47) == 0) +#if LJ_GC64 +#define checkptrGC(x) (checkptr47((x))) +#elif LJ_64 +#define checkptrGC(x) (checkptr32((x))) +#else +#define checkptrGC(x) 1 +#endif + +/* Every half-decent C compiler transforms this into a rotate instruction. */ +#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1)))) +#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n))) + +/* A really naive Bloom filter. But sufficient for our needs. */ +typedef uintptr_t BloomFilter; +#define BLOOM_MASK (8*sizeof(BloomFilter) - 1) +#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK)) +#define bloomset(b, x) ((b) |= bloombit((x))) +#define bloomtest(b, x) ((b) & bloombit((x))) + +#if defined(__GNUC__) || defined(__psp2__) + +#define LJ_NORET __attribute__((noreturn)) +#define LJ_ALIGN(n) __attribute__((aligned(n))) +#define LJ_INLINE inline +#define LJ_AINLINE inline __attribute__((always_inline)) +#define LJ_NOINLINE __attribute__((noinline)) + +#if defined(__ELF__) || defined(__MACH__) || defined(__psp2__) +#if !((defined(__sun__) && defined(__svr4__)) || defined(__CELLOS_LV2__)) +#define LJ_NOAPI extern __attribute__((visibility("hidden"))) +#endif +#endif + +/* Note: it's only beneficial to use fastcall on x86 and then only for up to +** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only +** indirect calls and related tail-called C functions are marked as fastcall. 
+*/ +#if defined(__i386__) +#define LJ_FASTCALL __attribute__((fastcall)) +#endif + +#define LJ_LIKELY(x) __builtin_expect(!!(x), 1) +#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0) + +#define lj_ffs(x) ((uint32_t)__builtin_ctz(x)) +/* Don't ask ... */ +#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__)) +static LJ_AINLINE uint32_t lj_fls(uint32_t x) +{ + uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r; +} +#else +#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31)) +#endif + +#if defined(__arm__) +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ +#if defined(__psp2__) + return __builtin_rev(x); +#else + uint32_t r; +#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\ + __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ + __asm__("rev %0, %1" : "=r" (r) : "r" (x)); + return r; +#else +#ifdef __thumb__ + r = x ^ lj_ror(x, 16); +#else + __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x)); +#endif + return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8); +#endif +#endif +} + +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); +} +#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ + return (uint32_t)__builtin_bswap32((int32_t)x); +} + +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return (uint64_t)__builtin_bswap64((int64_t)x); +} +#elif defined(__i386__) || defined(__x86_64__) +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ + uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r; +} + +#if defined(__i386__) +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); +} +#else +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r; +} +#endif +#else +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ + return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24); +} + +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return (uint64_t)lj_bswap((uint32_t)(x >> 32)) | + ((uint64_t)lj_bswap((uint32_t)x) << 32); +} +#endif + +typedef union __attribute__((packed)) Unaligned16 { + uint16_t u; + uint8_t b[2]; +} Unaligned16; + +typedef union __attribute__((packed)) Unaligned32 { + uint32_t u; + uint8_t b[4]; +} Unaligned32; + +/* Unaligned load of uint16_t. */ +static LJ_AINLINE uint16_t lj_getu16(const void *p) +{ + return ((const Unaligned16 *)p)->u; +} + +/* Unaligned load of uint32_t. 
*/ +static LJ_AINLINE uint32_t lj_getu32(const void *p) +{ + return ((const Unaligned32 *)p)->u; +} + +#elif defined(_MSC_VER) + +#define LJ_NORET __declspec(noreturn) +#define LJ_ALIGN(n) __declspec(align(n)) +#define LJ_INLINE __inline +#define LJ_AINLINE __forceinline +#define LJ_NOINLINE __declspec(noinline) +#if defined(_M_IX86) +#define LJ_FASTCALL __fastcall +#endif + +#ifdef _M_PPC +unsigned int _CountLeadingZeros(long); +#pragma intrinsic(_CountLeadingZeros) +static LJ_AINLINE uint32_t lj_fls(uint32_t x) +{ + return _CountLeadingZeros(x) ^ 31; +} +#else +unsigned char _BitScanForward(uint32_t *, unsigned long); +unsigned char _BitScanReverse(uint32_t *, unsigned long); +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) + +static LJ_AINLINE uint32_t lj_ffs(uint32_t x) +{ + uint32_t r; _BitScanForward(&r, x); return r; +} + +static LJ_AINLINE uint32_t lj_fls(uint32_t x) +{ + uint32_t r; _BitScanReverse(&r, x); return r; +} +#endif + +unsigned long _byteswap_ulong(unsigned long); +uint64_t _byteswap_uint64(uint64_t); +#define lj_bswap(x) (_byteswap_ulong((x))) +#define lj_bswap64(x) (_byteswap_uint64((x))) + +#if defined(_M_PPC) && defined(LUAJIT_NO_UNALIGNED) +/* +** Replacement for unaligned loads on Xbox 360. Disabled by default since it's +** usually more costly than the occasional stall when crossing a cache-line. +*/ +static LJ_AINLINE uint16_t lj_getu16(const void *v) +{ + const uint8_t *p = (const uint8_t *)v; + return (uint16_t)((p[0]<<8) | p[1]); +} +static LJ_AINLINE uint32_t lj_getu32(const void *v) +{ + const uint8_t *p = (const uint8_t *)v; + return (uint32_t)((p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]); +} +#else +/* Unaligned loads are generally ok on x86/x64. */ +#define lj_getu16(p) (*(uint16_t *)(p)) +#define lj_getu32(p) (*(uint32_t *)(p)) +#endif + +#else +#error "missing defines for your compiler" +#endif + +/* Optional defines. */ +#ifndef LJ_FASTCALL +#define LJ_FASTCALL +#endif +#ifndef LJ_NORET +#define LJ_NORET +#endif +#ifndef LJ_NOAPI +#define LJ_NOAPI extern +#endif +#ifndef LJ_LIKELY +#define LJ_LIKELY(x) (x) +#define LJ_UNLIKELY(x) (x) +#endif + +/* Attributes for internal functions. */ +#define LJ_DATA LJ_NOAPI +#define LJ_DATADEF +#define LJ_ASMF LJ_NOAPI +#define LJ_FUNCA LJ_NOAPI +#if defined(ljamalg_c) +#define LJ_FUNC static +#else +#define LJ_FUNC LJ_NOAPI +#endif +#define LJ_FUNC_NORET LJ_FUNC LJ_NORET +#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET +#define LJ_ASMF_NORET LJ_ASMF LJ_NORET + +/* Runtime assertions. */ +#ifdef lua_assert +#define check_exp(c, e) (lua_assert(c), (e)) +#define api_check(l, e) lua_assert(e) +#else +#define lua_assert(c) ((void)0) +#define check_exp(c, e) (e) +#define api_check luai_apicheck +#endif + +/* Static assertions. */ +#define LJ_ASSERT_NAME2(name, line) name ## line +#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line) +#ifdef __COUNTER__ +#define LJ_STATIC_ASSERT(cond) \ + extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) +#else +#define LJ_STATIC_ASSERT(cond) \ + extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.c new file mode 100644 index 00000000000..1a07371cacf --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.c @@ -0,0 +1,557 @@ +/* +** Instruction dispatch handling. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_dispatch_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_func.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_debug.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ff.h" +#include "lj_strfmt.h" +#if LJ_HASJIT +#include "lj_jit.h" +#endif +#if LJ_HASFFI +#include "lj_ccallback.h" +#endif +#include "lj_trace.h" +#include "lj_dispatch.h" +#if LJ_HASPROFILE +#include "lj_profile.h" +#endif +#include "lj_vm.h" +#include "luajit.h" + +/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */ +LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC); + +/* -- Dispatch table management ------------------------------------------- */ + +#if LJ_TARGET_MIPS +#include +LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, + lua_State *co); +#if !LJ_HASJIT +#define lj_dispatch_stitch lj_dispatch_ins +#endif +#if !LJ_HASPROFILE +#define lj_dispatch_profile lj_dispatch_ins +#endif + +#define GOTFUNC(name) (ASMFunction)name, +static const ASMFunction dispatch_got[] = { + GOTDEF(GOTFUNC) +}; +#undef GOTFUNC +#endif + +/* Initialize instruction dispatch table and hot counters. */ +void lj_dispatch_init(GG_State *GG) +{ + uint32_t i; + ASMFunction *disp = GG->dispatch; + for (i = 0; i < GG_LEN_SDISP; i++) + disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]); + for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) + disp[i] = makeasmfunc(lj_bc_ofs[i]); + /* The JIT engine is off by default. luaopen_jit() turns it on. */ + disp[BC_FORL] = disp[BC_IFORL]; + disp[BC_ITERL] = disp[BC_IITERL]; + disp[BC_LOOP] = disp[BC_ILOOP]; + disp[BC_FUNCF] = disp[BC_IFUNCF]; + disp[BC_FUNCV] = disp[BC_IFUNCV]; + GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0); + for (i = 0; i < GG_NUM_ASMFF; i++) + GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0); +#if LJ_TARGET_MIPS + memcpy(GG->got, dispatch_got, LJ_GOT__MAX*4); +#endif +} + +#if LJ_HASJIT +/* Initialize hotcount table. */ +void lj_dispatch_init_hotcount(global_State *g) +{ + int32_t hotloop = G2J(g)->param[JIT_P_hotloop]; + HotCount start = (HotCount)(hotloop*HOTCOUNT_LOOP - 1); + HotCount *hotcount = G2GG(g)->hotcount; + uint32_t i; + for (i = 0; i < HOTCOUNT_SIZE; i++) + hotcount[i] = start; +} +#endif + +/* Internal dispatch mode bits. */ +#define DISPMODE_CALL 0x01 /* Override call dispatch. */ +#define DISPMODE_RET 0x02 /* Override return dispatch. */ +#define DISPMODE_INS 0x04 /* Override instruction dispatch. */ +#define DISPMODE_JIT 0x10 /* JIT compiler on. */ +#define DISPMODE_REC 0x20 /* Recording active. */ +#define DISPMODE_PROF 0x40 /* Profiling active. */ + +/* Update dispatch table depending on various flags. */ +void lj_dispatch_update(global_State *g) +{ + uint8_t oldmode = g->dispatchmode; + uint8_t mode = 0; +#if LJ_HASJIT + mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0; + mode |= G2J(g)->state != LJ_TRACE_IDLE ? + (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0; +#endif +#if LJ_HASPROFILE + mode |= (g->hookmask & HOOK_PROFILE) ? (DISPMODE_PROF|DISPMODE_INS) : 0; +#endif + mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0; + mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0; + mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0; + if (oldmode != mode) { /* Mode changed? 
*/ + ASMFunction *disp = G2GG(g)->dispatch; + ASMFunction f_forl, f_iterl, f_loop, f_funcf, f_funcv; + g->dispatchmode = mode; + + /* Hotcount if JIT is on, but not while recording. */ + if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) { + f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]); + f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]); + f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]); + f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]); + f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]); + } else { /* Otherwise use the non-hotcounting instructions. */ + f_forl = disp[GG_LEN_DDISP+BC_IFORL]; + f_iterl = disp[GG_LEN_DDISP+BC_IITERL]; + f_loop = disp[GG_LEN_DDISP+BC_ILOOP]; + f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]); + f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]); + } + /* Init static counting instruction dispatch first (may be copied below). */ + disp[GG_LEN_DDISP+BC_FORL] = f_forl; + disp[GG_LEN_DDISP+BC_ITERL] = f_iterl; + disp[GG_LEN_DDISP+BC_LOOP] = f_loop; + + /* Set dynamic instruction dispatch. */ + if ((oldmode ^ mode) & (DISPMODE_PROF|DISPMODE_REC|DISPMODE_INS)) { + /* Need to update the whole table. */ + if (!(mode & DISPMODE_INS)) { /* No ins dispatch? */ + /* Copy static dispatch table to dynamic dispatch table. */ + memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction)); + /* Overwrite with dynamic return dispatch. */ + if ((mode & DISPMODE_RET)) { + disp[BC_RETM] = lj_vm_rethook; + disp[BC_RET] = lj_vm_rethook; + disp[BC_RET0] = lj_vm_rethook; + disp[BC_RET1] = lj_vm_rethook; + } + } else { + /* The recording dispatch also checks for hooks. */ + ASMFunction f = (mode & DISPMODE_PROF) ? lj_vm_profhook : + (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook; + uint32_t i; + for (i = 0; i < GG_LEN_SDISP; i++) + disp[i] = f; + } + } else if (!(mode & DISPMODE_INS)) { + /* Otherwise set dynamic counting ins. */ + disp[BC_FORL] = f_forl; + disp[BC_ITERL] = f_iterl; + disp[BC_LOOP] = f_loop; + /* Set dynamic return dispatch. */ + if ((mode & DISPMODE_RET)) { + disp[BC_RETM] = lj_vm_rethook; + disp[BC_RET] = lj_vm_rethook; + disp[BC_RET0] = lj_vm_rethook; + disp[BC_RET1] = lj_vm_rethook; + } else { + disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM]; + disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET]; + disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0]; + disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1]; + } + } + + /* Set dynamic call dispatch. */ + if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */ + uint32_t i; + if ((mode & DISPMODE_CALL) == 0) { /* No call hooks? */ + for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) + disp[i] = makeasmfunc(lj_bc_ofs[i]); + } else { + for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) + disp[i] = lj_vm_callhook; + } + } + if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */ + disp[BC_FUNCF] = f_funcf; + disp[BC_FUNCV] = f_funcv; + } + +#if LJ_HASJIT + /* Reset hotcounts for JIT off to on transition. */ + if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT)) + lj_dispatch_init_hotcount(g); +#endif + } +} + +/* -- JIT mode setting ---------------------------------------------------- */ + +#if LJ_HASJIT +/* Set JIT mode for a single prototype. */ +static void setptmode(global_State *g, GCproto *pt, int mode) +{ + if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */ + pt->flags &= ~PROTO_NOJIT; + lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */ + } else { /* Flush and/or disable JIT compilation. 
*/ + if (!(mode & LUAJIT_MODE_FLUSH)) + pt->flags |= PROTO_NOJIT; + lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */ + } +} + +/* Recursively set the JIT mode for all children of a prototype. */ +static void setptmode_all(global_State *g, GCproto *pt, int mode) +{ + ptrdiff_t i; + if (!(pt->flags & PROTO_CHILD)) return; + for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) { + GCobj *o = proto_kgc(pt, i); + if (o->gch.gct == ~LJ_TPROTO) { + setptmode(g, gco2pt(o), mode); + setptmode_all(g, gco2pt(o), mode); + } + } +} +#endif + +/* Public API function: control the JIT engine. */ +int luaJIT_setmode(lua_State *L, int idx, int mode) +{ + global_State *g = G(L); + int mm = mode & LUAJIT_MODE_MASK; + lj_trace_abort(g); /* Abort recording on any state change. */ + /* Avoid pulling the rug from under our own feet. */ + if ((g->hookmask & HOOK_GC)) + lj_err_caller(L, LJ_ERR_NOGCMM); + switch (mm) { +#if LJ_HASJIT + case LUAJIT_MODE_ENGINE: + if ((mode & LUAJIT_MODE_FLUSH)) { + lj_trace_flushall(L); + } else { + if (!(mode & LUAJIT_MODE_ON)) + G2J(g)->flags &= ~(uint32_t)JIT_F_ON; +#if LJ_TARGET_X86ORX64 + else if ((G2J(g)->flags & JIT_F_SSE2)) + G2J(g)->flags |= (uint32_t)JIT_F_ON; + else + return 0; /* Don't turn on JIT compiler without SSE2 support. */ +#else + else + G2J(g)->flags |= (uint32_t)JIT_F_ON; +#endif + lj_dispatch_update(g); + } + break; + case LUAJIT_MODE_FUNC: + case LUAJIT_MODE_ALLFUNC: + case LUAJIT_MODE_ALLSUBFUNC: { + cTValue *tv = idx == 0 ? frame_prev(L->base-1) : + idx > 0 ? L->base + (idx-1) : L->top + idx; + GCproto *pt; + if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn)) + pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */ + else if (tvisproto(tv)) + pt = protoV(tv); + else + return 0; /* Failed. */ + if (mm != LUAJIT_MODE_ALLSUBFUNC) + setptmode(g, pt, mode); + if (mm != LUAJIT_MODE_FUNC) + setptmode_all(g, pt, mode); + break; + } + case LUAJIT_MODE_TRACE: + if (!(mode & LUAJIT_MODE_FLUSH)) + return 0; /* Failed. */ + lj_trace_flush(G2J(g), idx); + break; +#else + case LUAJIT_MODE_ENGINE: + case LUAJIT_MODE_FUNC: + case LUAJIT_MODE_ALLFUNC: + case LUAJIT_MODE_ALLSUBFUNC: + UNUSED(idx); + if ((mode & LUAJIT_MODE_ON)) + return 0; /* Failed. */ + break; +#endif + case LUAJIT_MODE_WRAPCFUNC: + if ((mode & LUAJIT_MODE_ON)) { + if (idx != 0) { + cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx; + if (tvislightud(tv)) + g->wrapf = (lua_CFunction)lightudV(tv); + else + return 0; /* Failed. */ + } else { + return 0; /* Failed. */ + } + g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0); + } else { + g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0); + } + break; + default: + return 0; /* Failed. */ + } + return 1; /* OK. */ +} + +/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */ +LUA_API void LUAJIT_VERSION_SYM(void) +{ +} + +/* -- Hooks --------------------------------------------------------------- */ + +/* This function can be called asynchronously (e.g. during a signal). */ +LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count) +{ + global_State *g = G(L); + mask &= HOOK_EVENTMASK; + if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */ + g->hookf = func; + g->hookcount = g->hookcstart = (int32_t)count; + g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask); + lj_trace_abort(g); /* Abort recording on any hook change. 
*/ + lj_dispatch_update(g); + return 1; +} + +LUA_API lua_Hook lua_gethook(lua_State *L) +{ + return G(L)->hookf; +} + +LUA_API int lua_gethookmask(lua_State *L) +{ + return G(L)->hookmask & HOOK_EVENTMASK; +} + +LUA_API int lua_gethookcount(lua_State *L) +{ + return (int)G(L)->hookcstart; +} + +/* Call a hook. */ +static void callhook(lua_State *L, int event, BCLine line) +{ + global_State *g = G(L); + lua_Hook hookf = g->hookf; + if (hookf && !hook_active(g)) { + lua_Debug ar; + lj_trace_abort(g); /* Abort recording on any hook call. */ + ar.event = event; + ar.currentline = line; + /* Top frame, nextframe = NULL. */ + ar.i_ci = (int)((L->base-1) - tvref(L->stack)); + lj_state_checkstack(L, 1+LUA_MINSTACK); +#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF + lj_profile_hook_enter(g); +#else + hook_enter(g); +#endif + hookf(L, &ar); + lua_assert(hook_active(g)); + setgcref(g->cur_L, obj2gco(L)); +#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF + lj_profile_hook_leave(g); +#else + hook_leave(g); +#endif + } +} + +/* -- Dispatch callbacks -------------------------------------------------- */ + +/* Calculate number of used stack slots in the current frame. */ +static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres) +{ + BCIns ins = pc[-1]; + if (bc_op(ins) == BC_UCLO) + ins = pc[bc_j(ins)]; + switch (bc_op(ins)) { + case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1+LJ_FR2; + case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1; + case BC_TSETM: return bc_a(ins) + nres-1; + default: return pt->framesize; + } +} + +/* Instruction dispatch. Used by instr/line/return hooks or when recording. */ +void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc) +{ + ERRNO_SAVE + GCfunc *fn = curr_func(L); + GCproto *pt = funcproto(fn); + void *cf = cframe_raw(L->cframe); + const BCIns *oldpc = cframe_pc(cf); + global_State *g = G(L); + BCReg slots; + setcframe_pc(cf, pc); + slots = cur_topslot(pt, pc, cframe_multres_n(cf)); + L->top = L->base + slots; /* Fix top. */ +#if LJ_HASJIT + { + jit_State *J = G2J(g); + if (J->state != LJ_TRACE_IDLE) { +#ifdef LUA_USE_ASSERT + ptrdiff_t delta = L->top - L->base; +#endif + J->L = L; + lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ + lua_assert(L->top - L->base == delta); + } + } +#endif + if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) { + g->hookcount = g->hookcstart; + callhook(L, LUA_HOOKCOUNT, -1); + L->top = L->base + slots; /* Fix top again. */ + } + if ((g->hookmask & LUA_MASKLINE)) { + BCPos npc = proto_bcpos(pt, pc) - 1; + BCPos opc = proto_bcpos(pt, oldpc) - 1; + BCLine line = lj_debug_line(pt, npc); + if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) { + callhook(L, LUA_HOOKLINE, line); + L->top = L->base + slots; /* Fix top again. */ + } + } + if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1]))) + callhook(L, LUA_HOOKRET, -1); + ERRNO_RESTORE +} + +/* Initialize call. Ensure stack space and return # of missing parameters. */ +static int call_init(lua_State *L, GCfunc *fn) +{ + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + int numparams = pt->numparams; + int gotparams = (int)(L->top - L->base); + int need = pt->framesize; + if ((pt->flags & PROTO_VARARG)) need += 1+gotparams; + lj_state_checkstack(L, (MSize)need); + numparams -= gotparams; + return numparams >= 0 ? numparams : 0; + } else { + lj_state_checkstack(L, LUA_MINSTACK); + return 0; + } +} + +/* Call dispatch. Used by call hooks, hot calls or when recording. 
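+** A hot call is signalled by the VM via bit 0 of the passed PC, roughly:
+**   if ((uintptr_t)pc & 1) {
+**     pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
+**     lj_trace_hot(J, pc);
+**   }
+** Non-hot calls instead go through the recorder and/or the call hook; in all
+** cases the static dispatch target for the FUNC* bytecode is returned.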
*/ +ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc) +{ + ERRNO_SAVE + GCfunc *fn = curr_func(L); + BCOp op; + global_State *g = G(L); +#if LJ_HASJIT + jit_State *J = G2J(g); +#endif + int missing = call_init(L, fn); +#if LJ_HASJIT + J->L = L; + if ((uintptr_t)pc & 1) { /* Marker for hot call. */ +#ifdef LUA_USE_ASSERT + ptrdiff_t delta = L->top - L->base; +#endif + pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1); + lj_trace_hot(J, pc); + lua_assert(L->top - L->base == delta); + goto out; + } else if (J->state != LJ_TRACE_IDLE && + !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) { +#ifdef LUA_USE_ASSERT + ptrdiff_t delta = L->top - L->base; +#endif + /* Record the FUNC* bytecodes, too. */ + lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ + lua_assert(L->top - L->base == delta); + } +#endif + if ((g->hookmask & LUA_MASKCALL)) { + int i; + for (i = 0; i < missing; i++) /* Add missing parameters. */ + setnilV(L->top++); + callhook(L, LUA_HOOKCALL, -1); + /* Preserve modifications of missing parameters by lua_setlocal(). */ + while (missing-- > 0 && tvisnil(L->top - 1)) + L->top--; + } +#if LJ_HASJIT +out: +#endif + op = bc_op(pc[-1]); /* Get FUNC* op. */ +#if LJ_HASJIT + /* Use the non-hotcounting variants if JIT is off or while recording. */ + if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) && + (op == BC_FUNCF || op == BC_FUNCV)) + op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF); +#endif + ERRNO_RESTORE + return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */ +} + +#if LJ_HASJIT +/* Stitch a new trace. */ +void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc) +{ + ERRNO_SAVE + lua_State *L = J->L; + void *cf = cframe_raw(L->cframe); + const BCIns *oldpc = cframe_pc(cf); + setcframe_pc(cf, pc); + /* Before dispatch, have to bias PC by 1. */ + L->top = L->base + cur_topslot(curr_proto(L), pc+1, cframe_multres_n(cf)); + lj_trace_stitch(J, pc-1); /* Point to the CALL instruction. */ + setcframe_pc(cf, oldpc); + ERRNO_RESTORE +} +#endif + +#if LJ_HASPROFILE +/* Profile dispatch. */ +void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc) +{ + ERRNO_SAVE + GCfunc *fn = curr_func(L); + GCproto *pt = funcproto(fn); + void *cf = cframe_raw(L->cframe); + const BCIns *oldpc = cframe_pc(cf); + global_State *g; + setcframe_pc(cf, pc); + L->top = L->base + cur_topslot(pt, pc, cframe_multres_n(cf)); + lj_profile_interpreter(L); + setcframe_pc(cf, oldpc); + g = G(L); + setgcref(g->cur_L, obj2gco(L)); + setvmstate(g, INTERP); + ERRNO_RESTORE +} +#endif + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.h new file mode 100644 index 00000000000..1e247e38286 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_dispatch.h @@ -0,0 +1,138 @@ +/* +** Instruction dispatch handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_DISPATCH_H +#define _LJ_DISPATCH_H + +#include "lj_obj.h" +#include "lj_bc.h" +#if LJ_HASJIT +#include "lj_jit.h" +#endif + +#if LJ_TARGET_MIPS +/* Need our own global offset table for the dreaded MIPS calling conventions. 
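+** Each GOTDEF() entry is expanded twice: once into an LJ_GOT_* enum index
+** below and once into the matching function pointer, roughly
+**   GOTDEF(GOTENUM) --> LJ_GOT_floor, LJ_GOT_ceil, ...
+**   GOTDEF(GOTFUNC) --> (ASMFunction)floor, (ASMFunction)ceil, ...
+** (the latter fills dispatch_got[] in lj_dispatch.c).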
*/ +#if LJ_HASJIT +#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot) +#else +#define JITGOTDEF(_) +#endif +#if LJ_HASFFI +#define FFIGOTDEF(_) \ + _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave) +#else +#define FFIGOTDEF(_) +#endif +#define GOTDEF(_) \ + _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \ + _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \ + _(pow) _(fmod) _(ldexp) \ + _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_dispatch_stitch) \ + _(lj_dispatch_profile) _(lj_err_throw) \ + _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \ + _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \ + _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \ + _(lj_meta_for) _(lj_meta_istype) _(lj_meta_len) _(lj_meta_tget) \ + _(lj_meta_tset) _(lj_state_growstack) _(lj_strfmt_num) \ + _(lj_str_new) _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) \ + _(lj_tab_new) _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \ + _(lj_tab_setinth) _(lj_buf_putstr_reverse) _(lj_buf_putstr_lower) \ + _(lj_buf_putstr_upper) _(lj_buf_tostr) JITGOTDEF(_) FFIGOTDEF(_) + +enum { +#define GOTENUM(name) LJ_GOT_##name, +GOTDEF(GOTENUM) +#undef GOTENUM + LJ_GOT__MAX +}; +#endif + +/* Type of hot counter. Must match the code in the assembler VM. */ +/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */ +typedef uint16_t HotCount; + +/* Number of hot counter hash table entries (must be a power of two). */ +#define HOTCOUNT_SIZE 64 +#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount)) + +/* Hotcount decrements. */ +#define HOTCOUNT_LOOP 2 +#define HOTCOUNT_CALL 1 + +/* This solves a circular dependency problem -- bump as needed. Sigh. */ +#define GG_NUM_ASMFF 57 + +#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF) +#define GG_LEN_SDISP BC_FUNCF +#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP) + +/* Global state, main thread and extra fields are allocated together. */ +typedef struct GG_State { + lua_State L; /* Main thread. */ + global_State g; /* Global state. */ +#if LJ_TARGET_MIPS + ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */ +#endif +#if LJ_HASJIT + jit_State J; /* JIT state. */ + HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */ +#endif + ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */ + BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */ +} GG_State; + +#define GG_OFS(field) ((int)offsetof(GG_State, field)) +#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g))) +#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J))) +#define L2GG(L) (G2GG(G(L))) +#define J2G(J) (&J2GG(J)->g) +#define G2J(gl) (&G2GG(gl)->J) +#define L2J(L) (&L2GG(L)->J) +#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g)) +#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch)) +#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch)) +#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch)) +#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction)) + +#define hotcount_get(gg, pc) \ + (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)] +#define hotcount_set(gg, pc, val) \ + (hotcount_get((gg), (pc)) = (HotCount)(val)) + +/* Dispatch table management. */ +LJ_FUNC void lj_dispatch_init(GG_State *GG); +#if LJ_HASJIT +LJ_FUNC void lj_dispatch_init_hotcount(global_State *g); +#endif +LJ_FUNC void lj_dispatch_update(global_State *g); + +/* Instruction dispatch callback for hooks or when recording. 
*/ +LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc); +LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc); +#if LJ_HASJIT +LJ_FUNCA void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc); +#endif +#if LJ_HASPROFILE +LJ_FUNCA void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc); +#endif + +#if LJ_HASFFI && !defined(_BUILDVM_H) +/* Save/restore errno and GetLastError() around hooks, exits and recording. */ +#include +#if LJ_TARGET_WINDOWS +#define WIN32_LEAN_AND_MEAN +#include +#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError(); +#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr); +#else +#define ERRNO_SAVE int olderr = errno; +#define ERRNO_RESTORE errno = olderr; +#endif +#else +#define ERRNO_SAVE +#define ERRNO_RESTORE +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_arm.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_arm.h new file mode 100644 index 00000000000..45ce519ef6b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_arm.h @@ -0,0 +1,356 @@ +/* +** ARM instruction emitter. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Constant encoding --------------------------------------------------- */ + +static uint8_t emit_invai[16] = { + /* AND */ (ARMI_AND^ARMI_BIC) >> 21, + /* EOR */ 0, + /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21, + /* RSB */ 0, + /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21, + /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21, + /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21, + /* RSC */ 0, + /* TST */ 0, + /* TEQ */ 0, + /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21, + /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21, + /* ORR */ 0, + /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21, + /* BIC */ (ARMI_BIC^ARMI_AND) >> 21, + /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21 +}; + +/* Encode constant in K12 format for data processing instructions. */ +static uint32_t emit_isk12(ARMIns ai, int32_t n) +{ + uint32_t invai, i, m = (uint32_t)n; + /* K12: unsigned 8 bit value, rotated in steps of two bits. */ + for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2)) + if (m <= 255) return ARMI_K12|m|i; + /* Otherwise try negation/complement with the inverse instruction. */ + invai = emit_invai[((ai >> 21) & 15)]; + if (!invai) return 0; /* Failed. No inverse instruction. */ + m = ~(uint32_t)n; + if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) || + invai == (ARMI_CMP^ARMI_CMN) >> 21) m++; + for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2)) + if (m <= 255) return ARMI_K12|(invai<<21)|m|i; + return 0; /* Failed. 
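+** E.g. 0x00000101 cannot be encoded: its two set bits are nine positions
+** apart, more than one rotated 8-bit immediate can span, and neither the
+** negated nor the complemented form helps; 0x0000ff00, by contrast, is
+** accepted above as 0xff rotated right by 24.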
*/ +} + +/* -- Emit basic instructions --------------------------------------------- */ + +static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm) +{ + *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm); +} + +static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm) +{ + *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm); +} + +static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn) +{ + *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn); +} + +static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm) +{ + *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm); +} + +static void emit_d(ASMState *as, ARMIns ai, Reg rd) +{ + *--as->mcp = ai | ARMF_D(rd); +} + +static void emit_n(ASMState *as, ARMIns ai, Reg rn) +{ + *--as->mcp = ai | ARMF_N(rn); +} + +static void emit_m(ASMState *as, ARMIns ai, Reg rm) +{ + *--as->mcp = ai | ARMF_M(rm); +} + +static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) +{ + lua_assert(ofs >= -255 && ofs <= 255); + if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; + *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) | + ((ofs & 0xf0) << 4) | (ofs & 0x0f); +} + +static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) +{ + lua_assert(ofs >= -4095 && ofs <= 4095); + /* Combine LDR/STR pairs to LDRD/STRD. */ + if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) && + (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn && + (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) && + as->mcp != as->mcloop) { + as->mcp++; + emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4); + return; + } + if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; + *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs; +} + +#if !LJ_SOFTFP +static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) +{ + lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0); + if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; + *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2); +} +#endif + +/* -- Emit loads/stores --------------------------------------------------- */ + +/* Prefer spills of BASE/L. */ +#define emit_canremat(ref) ((ref) < ASMREF_L) + +/* Try to find a one step delta relative to another constant. */ +static int emit_kdelta1(ASMState *as, Reg d, int32_t i) +{ + RegSet work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_picktop(work); + IRRef ref = regcost_ref(as->cost[r]); + lua_assert(r != d); + if (emit_canremat(ref)) { + int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); + uint32_t k = emit_isk12(ARMI_ADD, delta); + if (k) { + if (k == ARMI_K12) + emit_dm(as, ARMI_MOV, d, r); + else + emit_dn(as, ARMI_ADD^k, d, r); + return 1; + } + } + rset_clear(work, r); + } + return 0; /* Failed. */ +} + +/* Try to find a two step delta relative to another constant. */ +static int emit_kdelta2(ASMState *as, Reg d, int32_t i) +{ + RegSet work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_picktop(work); + IRRef ref = regcost_ref(as->cost[r]); + lua_assert(r != d); + if (emit_canremat(ref)) { + int32_t other = ra_iskref(ref) ? 
ra_krefk(as, ref) : IR(ref)->i; + if (other) { + int32_t delta = i - other; + uint32_t sh, inv = 0, k2, k; + if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; } + sh = lj_ffs(delta) & ~1; + k2 = emit_isk12(0, delta & (255 << sh)); + k = emit_isk12(0, delta & ~(255 << sh)); + if (k) { + emit_dn(as, ARMI_ADD^k2^inv, d, d); + emit_dn(as, ARMI_ADD^k^inv, d, r); + return 1; + } + } + } + rset_clear(work, r); + } + return 0; /* Failed. */ +} + +/* Load a 32 bit constant into a GPR. */ +static void emit_loadi(ASMState *as, Reg r, int32_t i) +{ + uint32_t k = emit_isk12(ARMI_MOV, i); + lua_assert(rset_test(as->freeset, r) || r == RID_TMP); + if (k) { + /* Standard K12 constant. */ + emit_d(as, ARMI_MOV^k, r); + } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) { + /* 16 bit loword constant for ARMv6T2. */ + emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); + } else if (emit_kdelta1(as, r, i)) { + /* One step delta relative to another constant. */ + } else if ((as->flags & JIT_F_ARMV6T2)) { + /* 32 bit hiword/loword constant for ARMv6T2. */ + emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r); + emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); + } else if (emit_kdelta2(as, r, i)) { + /* Two step delta relative to another constant. */ + } else { + /* Otherwise construct the constant with up to 4 instructions. */ + /* NYI: use mvn+bic, use pc-relative loads. */ + for (;;) { + uint32_t sh = lj_ffs(i) & ~1; + int32_t m = i & (255 << sh); + i &= ~(255 << sh); + if (i == 0) { + emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r); + break; + } + emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r); + } + } +} + +#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) + +static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); + +/* Get/set from constant pointer. */ +static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p) +{ + int32_t i = i32ptr(p); + emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)), + (i & 4095)); +} + +#if !LJ_SOFTFP +/* Load a number constant into an FPR. */ +static void emit_loadn(ASMState *as, Reg r, cTValue *tv) +{ + int32_t i; + if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) { + uint32_t hi = tv->u32.hi; + uint32_t b = ((hi >> 22) & 0x1ff); + if (!(hi & 0xffff) && (b == 0x100 || b == 0x0ff)) { + *--as->mcp = ARMI_VMOVI_D | ARMF_D(r & 15) | + ((tv->u32.hi >> 12) & 0x00080000) | + ((tv->u32.hi >> 4) & 0x00070000) | + ((tv->u32.hi >> 16) & 0x0000000f); + return; + } + } + i = i32ptr(tv); + emit_vlso(as, ARMI_VLDR_D, r, + ra_allock(as, (i & ~1020), RSET_GPR), (i & 1020)); +} +#endif + +/* Get/set global_State fields. */ +#define emit_getgl(as, r, field) \ + emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field) +#define emit_setgl(as, r, field) \ + emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field) + +/* Trace number is determined from pc of exit instruction. */ +#define emit_setvmstate(as, i) UNUSED(i) + +/* -- Emit control-flow instructions -------------------------------------- */ + +/* Label for internal jumps. */ +typedef MCode *MCLabel; + +/* Return label pointing to current PC. 
*/ +#define emit_label(as) ((as)->mcp) + +static void emit_branch(ASMState *as, ARMIns ai, MCode *target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = (target - p) - 1; + lua_assert(((delta + 0x00800000) >> 24) == 0); + *--p = ai | ((uint32_t)delta & 0x00ffffffu); + as->mcp = p; +} + +#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target)) + +static void emit_call(ASMState *as, void *target) +{ + MCode *p = --as->mcp; + ptrdiff_t delta = ((char *)target - (char *)p) - 8; + if ((((delta>>2) + 0x00800000) >> 24) == 0) { + if ((delta & 1)) + *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 27); + else + *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu); + } else { /* Target out of range: need indirect call. But don't use R0-R3. */ + Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1)); + *p = ARMI_BLXr | ARMF_M(r); + } +} + +/* -- Emit generic operations --------------------------------------------- */ + +/* Generic move between two regs. */ +static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) +{ +#if LJ_SOFTFP + lua_assert(!irt_isnum(ir->t)); UNUSED(ir); +#else + if (dst >= RID_MAX_GPR) { + emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S, + (dst & 15), (src & 15)); + return; + } +#endif + if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */ + MCode ins = *as->mcp, swp = (src^dst); + if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) { + if (!((ins ^ (dst << 16)) & 0x000f0000)) + *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */ + if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000)) + *as->mcp = ins ^ (swp << 12); /* Swap D in store. */ + } + } + emit_dm(as, ARMI_MOV, dst, src); +} + +/* Generic load of register with base and (small) offset address. */ +static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ +#if LJ_SOFTFP + lua_assert(!irt_isnum(ir->t)); UNUSED(ir); +#else + if (r >= RID_MAX_GPR) + emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, base, ofs); + else +#endif + emit_lso(as, ARMI_LDR, r, base, ofs); +} + +/* Generic store of register with base and (small) offset address. */ +static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ +#if LJ_SOFTFP + lua_assert(!irt_isnum(ir->t)); UNUSED(ir); +#else + if (r >= RID_MAX_GPR) + emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, base, ofs); + else +#endif + emit_lso(as, ARMI_STR, r, base, ofs); +} + +/* Emit an arithmetic/logic operation with a constant operand. */ +static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src, + int32_t i, RegSet allow) +{ + uint32_t k = emit_isk12(ai, i); + if (k) + emit_dn(as, ai^k, dest, src); + else + emit_dnm(as, ai, dest, src, ra_allock(as, i, allow)); +} + +/* Add offset to pointer. */ +static void emit_addptr(ASMState *as, Reg r, int32_t ofs) +{ + if (ofs) + emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r)); +} + +#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs)) + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_mips.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_mips.h new file mode 100644 index 00000000000..8e7ee66fb1a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_mips.h @@ -0,0 +1,211 @@ +/* +** MIPS instruction emitter. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +/* -- Emit basic instructions --------------------------------------------- */ + +static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt) +{ + *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt); +} + +static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a) +{ + *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a); +} + +#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0) +#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt)) + +static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i) +{ + *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff); +} + +#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i)) +#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i)) + +static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh) +{ + *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31); +} + +#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0) + +static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift) +{ + if ((as->flags & JIT_F_MIPS32R2)) { + emit_dta(as, MIPSI_ROTR, dest, src, shift); + } else { + emit_dst(as, MIPSI_OR, dest, dest, tmp); + emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31); + emit_dta(as, MIPSI_SRL, tmp, src, shift); + } +} + +/* -- Emit loads/stores --------------------------------------------------- */ + +/* Prefer rematerialization of BASE/L from global_State over spills. */ +#define emit_canremat(ref) ((ref) <= REF_BASE) + +/* Try to find a one step delta relative to another constant. */ +static int emit_kdelta1(ASMState *as, Reg t, int32_t i) +{ + RegSet work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_picktop(work); + IRRef ref = regcost_ref(as->cost[r]); + lua_assert(r != t); + if (ref < ASMREF_L) { + int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); + if (checki16(delta)) { + emit_tsi(as, MIPSI_ADDIU, t, r, delta); + return 1; + } + } + rset_clear(work, r); + } + return 0; /* Failed. */ +} + +/* Load a 32 bit constant into a GPR. */ +static void emit_loadi(ASMState *as, Reg r, int32_t i) +{ + if (checki16(i)) { + emit_ti(as, MIPSI_LI, r, i); + } else { + if ((i & 0xffff)) { + int32_t jgl = i32ptr(J2G(as->J)); + if ((uint32_t)(i-jgl) < 65536) { + emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768); + return; + } else if (emit_kdelta1(as, r, i)) { + return; + } else if ((i >> 16) == 0) { + emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i); + return; + } + emit_tsi(as, MIPSI_ORI, r, r, i); + } + emit_ti(as, MIPSI_LUI, r, (i >> 16)); + } +} + +#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) + +static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); +static void ra_allockreg(ASMState *as, int32_t k, Reg r); + +/* Get/set from constant pointer. */ +static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow) +{ + int32_t jgl = i32ptr(J2G(as->J)); + int32_t i = i32ptr(p); + Reg base; + if ((uint32_t)(i-jgl) < 65536) { + i = i-jgl-32768; + base = RID_JGL; + } else { + base = ra_allock(as, i-(int16_t)i, allow); + } + emit_tsi(as, mi, r, base, i); +} + +#define emit_loadn(as, r, tv) \ + emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR) + +/* Get/set global_State fields. 
*/ +static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs) +{ + emit_tsi(as, mi, r, RID_JGL, ofs-32768); +} + +#define emit_getgl(as, r, field) \ + emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field)) +#define emit_setgl(as, r, field) \ + emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field)) + +/* Trace number is determined from per-trace exit stubs. */ +#define emit_setvmstate(as, i) UNUSED(i) + +/* -- Emit control-flow instructions -------------------------------------- */ + +/* Label for internal jumps. */ +typedef MCode *MCLabel; + +/* Return label pointing to current PC. */ +#define emit_label(as) ((as)->mcp) + +static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = target - p; + lua_assert(((delta + 0x8000) >> 16) == 0); + *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu); + as->mcp = p; +} + +static void emit_jmp(ASMState *as, MCode *target) +{ + *--as->mcp = MIPSI_NOP; + emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target)); +} + +static void emit_call(ASMState *as, void *target) +{ + MCode *p = as->mcp; + *--p = MIPSI_NOP; + if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0) + *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu); + else /* Target out of range: need indirect call. */ + *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR); + as->mcp = p; + ra_allockreg(as, i32ptr(target), RID_CFUNCADDR); +} + +/* -- Emit generic operations --------------------------------------------- */ + +#define emit_move(as, dst, src) \ + emit_ds(as, MIPSI_MOVE, (dst), (src)) + +/* Generic move between two regs. */ +static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) +{ + if (dst < RID_MAX_GPR) + emit_move(as, dst, src); + else + emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src); +} + +/* Generic load of register with base and (small) offset address. */ +static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_tsi(as, MIPSI_LW, r, base, ofs); + else + emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1, + (r & 31), base, ofs); +} + +/* Generic store of register with base and (small) offset address. */ +static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_tsi(as, MIPSI_SW, r, base, ofs); + else + emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1, + (r&31), base, ofs); +} + +/* Add offset to pointer. */ +static void emit_addptr(ASMState *as, Reg r, int32_t ofs) +{ + if (ofs) { + lua_assert(checki16(ofs)); + emit_tsi(as, MIPSI_ADDIU, r, r, ofs); + } +} + +#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs)) + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_ppc.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_ppc.h new file mode 100644 index 00000000000..087860ed382 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_ppc.h @@ -0,0 +1,238 @@ +/* +** PPC instruction emitter. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +/* -- Emit basic instructions --------------------------------------------- */ + +static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb) +{ + *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb); +} + +#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb)) +#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0) +#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb)) + +static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i) +{ + *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff); +} + +#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i)) +#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i)) +#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i)) + +#define emit_fab(as, pi, rf, ra, rb) \ + emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31) +#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31) +#define emit_fac(as, pi, rf, ra, rc) \ + emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0) +#define emit_facb(as, pi, rf, ra, rc, rb) \ + emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31) +#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i)) + +static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs, + int32_t n, int32_t b, int32_t e) +{ + *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) | + PPCF_MB(b) | PPCF_ME(e); +} + +static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n) +{ + lua_assert(n >= 0 && n < 32); + emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n); +} + +static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n) +{ + lua_assert(n >= 0 && n < 32); + emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31); +} + +/* -- Emit loads/stores --------------------------------------------------- */ + +/* Prefer rematerialization of BASE/L from global_State over spills. */ +#define emit_canremat(ref) ((ref) <= REF_BASE) + +/* Try to find a one step delta relative to another constant. */ +static int emit_kdelta1(ASMState *as, Reg t, int32_t i) +{ + RegSet work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_picktop(work); + IRRef ref = regcost_ref(as->cost[r]); + lua_assert(r != t); + if (ref < ASMREF_L) { + int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); + if (checki16(delta)) { + emit_tai(as, PPCI_ADDI, t, r, delta); + return 1; + } + } + rset_clear(work, r); + } + return 0; /* Failed. */ +} + +/* Load a 32 bit constant into a GPR. */ +static void emit_loadi(ASMState *as, Reg r, int32_t i) +{ + if (checki16(i)) { + emit_ti(as, PPCI_LI, r, i); + } else { + if ((i & 0xffff)) { + int32_t jgl = i32ptr(J2G(as->J)); + if ((uint32_t)(i-jgl) < 65536) { + emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768); + return; + } else if (emit_kdelta1(as, r, i)) { + return; + } + emit_asi(as, PPCI_ORI, r, r, i); + } + emit_ti(as, PPCI_LIS, r, (i >> 16)); + } +} + +#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) + +static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); + +/* Get/set from constant pointer. 
*/ +static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow) +{ + int32_t jgl = i32ptr(J2G(as->J)); + int32_t i = i32ptr(p); + Reg base; + if ((uint32_t)(i-jgl) < 65536) { + i = i-jgl-32768; + base = RID_JGL; + } else { + base = ra_allock(as, i-(int16_t)i, allow); + } + emit_tai(as, pi, r, base, i); +} + +#define emit_loadn(as, r, tv) \ + emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR) + +/* Get/set global_State fields. */ +static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs) +{ + emit_tai(as, pi, r, RID_JGL, ofs-32768); +} + +#define emit_getgl(as, r, field) \ + emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field)) +#define emit_setgl(as, r, field) \ + emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field)) + +/* Trace number is determined from per-trace exit stubs. */ +#define emit_setvmstate(as, i) UNUSED(i) + +/* -- Emit control-flow instructions -------------------------------------- */ + +/* Label for internal jumps. */ +typedef MCode *MCLabel; + +/* Return label pointing to current PC. */ +#define emit_label(as) ((as)->mcp) + +static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target) +{ + MCode *p = --as->mcp; + ptrdiff_t delta = (char *)target - (char *)p; + lua_assert(((delta + 0x8000) >> 16) == 0); + pi ^= (delta & 0x8000) * (PPCF_Y/0x8000); + *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu); +} + +static void emit_jmp(ASMState *as, MCode *target) +{ + MCode *p = --as->mcp; + ptrdiff_t delta = (char *)target - (char *)p; + *p = PPCI_B | (delta & 0x03fffffcu); +} + +static void emit_call(ASMState *as, void *target) +{ + MCode *p = --as->mcp; + ptrdiff_t delta = (char *)target - (char *)p; + if ((((delta>>2) + 0x00800000) >> 24) == 0) { + *p = PPCI_BL | (delta & 0x03fffffcu); + } else { /* Target out of range: need indirect call. Don't use arg reg. */ + RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1); + Reg r = ra_allock(as, i32ptr(target), allow); + *p = PPCI_BCTRL; + p[-1] = PPCI_MTCTR | PPCF_T(r); + as->mcp = p-1; + } +} + +/* -- Emit generic operations --------------------------------------------- */ + +#define emit_mr(as, dst, src) \ + emit_asb(as, PPCI_MR, (dst), (src), (src)) + +/* Generic move between two regs. */ +static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) +{ + UNUSED(ir); + if (dst < RID_MAX_GPR) + emit_mr(as, dst, src); + else + emit_fb(as, PPCI_FMR, dst, src); +} + +/* Generic load of register with base and (small) offset address. */ +static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_tai(as, PPCI_LWZ, r, base, ofs); + else + emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, base, ofs); +} + +/* Generic store of register with base and (small) offset address. */ +static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_tai(as, PPCI_STW, r, base, ofs); + else + emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, base, ofs); +} + +/* Emit a compare (for equality) with a constant operand. */ +static void emit_cmpi(ASMState *as, Reg r, int32_t k) +{ + if (checki16(k)) { + emit_ai(as, PPCI_CMPWI, r, k); + } else if (checku16(k)) { + emit_ai(as, PPCI_CMPLWI, r, k); + } else { + emit_ai(as, PPCI_CMPLWI, RID_TMP, k); + emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16)); + } +} + +/* Add offset to pointer. 
*/ +static void emit_addptr(ASMState *as, Reg r, int32_t ofs) +{ + if (ofs) { + emit_tai(as, PPCI_ADDI, r, r, ofs); + if (!checki16(ofs)) + emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16); + } +} + +static void emit_spsub(ASMState *as, int32_t ofs) +{ + if (ofs) { + emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs); + emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP, + CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0)); + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_x86.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_x86.h new file mode 100644 index 00000000000..ac42db3e20f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_emit_x86.h @@ -0,0 +1,462 @@ +/* +** x86/x64 instruction emitter. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Emit basic instructions --------------------------------------------- */ + +#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7))) + +#if LJ_64 +#define REXRB(p, rr, rb) \ + { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \ + if (rex != 0x40) *--(p) = rex; } +#define FORCE_REX 0x200 +#define REX_64 (FORCE_REX|0x080000) +#else +#define REXRB(p, rr, rb) ((void)0) +#define FORCE_REX 0 +#define REX_64 0 +#endif + +#define emit_i8(as, i) (*--as->mcp = (MCode)(i)) +#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4) +#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4) + +#define emit_x87op(as, xo) \ + (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2) + +/* op */ +static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx, + MCode *p, int delta) +{ + int n = (int8_t)xo; +#if defined(__GNUC__) + if (__builtin_constant_p(xo) && n == -2) + p[delta-2] = (MCode)(xo >> 24); + else if (__builtin_constant_p(xo) && n == -3) + *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16); + else +#endif + *(uint32_t *)(p+delta-5) = (uint32_t)xo; + p += n + delta; +#if LJ_64 + { + uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1); + if (rex != 0x40) { + rex |= (rr >> 16); + if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); } + else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; } + *--p = (MCode)rex; + } + } +#else + UNUSED(rr); UNUSED(rb); UNUSED(rx); +#endif + return p; +} + +/* op + modrm */ +#define emit_opm(xo, mode, rr, rb, p, delta) \ + (p[(delta)-1] = MODRM((mode), (rr), (rb)), \ + emit_op((xo), (rr), (rb), 0, (p), (delta))) + +/* op + modrm + sib */ +#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \ + (p[-1] = MODRM((scale), (rx), (rb)), \ + p[-2] = MODRM((mode), (rr), RID_ESP), \ + emit_op((xo), (rr), (rb), (rx), (p), -1)) + +/* op r1, r2 */ +static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2) +{ + MCode *p = as->mcp; + as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0); +} + +#if LJ_64 && defined(LUA_USE_ASSERT) +/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). 
*/ +static int32_t ptr2addr(const void *p) +{ + lua_assert((uintptr_t)p < (uintptr_t)0x80000000); + return i32ptr(p); +} +#else +#define ptr2addr(p) (i32ptr((p))) +#endif + +/* op r, [addr] */ +static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr) +{ + MCode *p = as->mcp; + *(int32_t *)(p-4) = ptr2addr(addr); +#if LJ_64 + p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); + as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5); +#else + as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4); +#endif +} + +/* op r, [base+ofs] */ +static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs) +{ + MCode *p = as->mcp; + x86Mode mode; + if (ra_hasreg(rb)) { + if (ofs == 0 && (rb&7) != RID_EBP) { + mode = XM_OFS0; + } else if (checki8(ofs)) { + *--p = (MCode)ofs; + mode = XM_OFS8; + } else { + p -= 4; + *(int32_t *)p = ofs; + mode = XM_OFS32; + } + if ((rb&7) == RID_ESP) + *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + } else { + *(int32_t *)(p-4) = ofs; +#if LJ_64 + p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); + p -= 5; + rb = RID_ESP; +#else + p -= 4; + rb = RID_EBP; +#endif + mode = XM_OFS0; + } + as->mcp = emit_opm(xo, mode, rr, rb, p, 0); +} + +/* op r, [base+idx*scale+ofs] */ +static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx, + x86Mode scale, int32_t ofs) +{ + MCode *p = as->mcp; + x86Mode mode; + if (ofs == 0 && (rb&7) != RID_EBP) { + mode = XM_OFS0; + } else if (checki8(ofs)) { + mode = XM_OFS8; + *--p = (MCode)ofs; + } else { + mode = XM_OFS32; + p -= 4; + *(int32_t *)p = ofs; + } + as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p); +} + +/* op r, i */ +static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i) +{ + MCode *p = as->mcp; + x86Op xo; + if (checki8(i)) { + *--p = (MCode)i; + xo = XG_TOXOi8(xg); + } else { + p -= 4; + *(int32_t *)p = i; + xo = XG_TOXOi(xg); + } + as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0); +} + +/* op [base+ofs], i */ +static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs, + int32_t i) +{ + x86Op xo; + if (checki8(i)) { + emit_i8(as, i); + xo = XG_TOXOi8(xg); + } else { + emit_i32(as, i); + xo = XG_TOXOi(xg); + } + emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs); +} + +#define emit_shifti(as, xg, r, i) \ + (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r))) + +/* op r, rm/mrm */ +static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb) +{ + MCode *p = as->mcp; + x86Mode mode = XM_REG; + if (rb == RID_MRM) { + rb = as->mrm.base; + if (rb == RID_NONE) { + rb = RID_EBP; + mode = XM_OFS0; + p -= 4; + *(int32_t *)p = as->mrm.ofs; + if (as->mrm.idx != RID_NONE) + goto mrmidx; +#if LJ_64 + *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP); + rb = RID_ESP; +#endif + } else { + if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) { + mode = XM_OFS0; + } else if (checki8(as->mrm.ofs)) { + *--p = (MCode)as->mrm.ofs; + mode = XM_OFS8; + } else { + p -= 4; + *(int32_t *)p = as->mrm.ofs; + mode = XM_OFS32; + } + if (as->mrm.idx != RID_NONE) { + mrmidx: + as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p); + return; + } + if ((rb&7) == RID_ESP) + *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + } + } + as->mcp = emit_opm(xo, mode, rr, rb, p, 0); +} + +/* op rm/mrm, i */ +static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i) +{ + x86Op xo; + if (checki8(i)) { + emit_i8(as, i); + xo = XG_TOXOi8(xg); + } else { + emit_i32(as, i); + xo = XG_TOXOi(xg); + } + emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64)); +} + +/* -- Emit loads/stores 
--------------------------------------------------- */ + +/* mov [base+ofs], i */ +static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i) +{ + emit_i32(as, i); + emit_rmro(as, XO_MOVmi, 0, base, ofs); +} + +/* mov [base+ofs], r */ +#define emit_movtomro(as, r, base, ofs) \ + emit_rmro(as, XO_MOVto, (r), (base), (ofs)) + +/* Get/set global_State fields. */ +#define emit_opgl(as, xo, r, field) \ + emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field) +#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field) +#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field) + +#define emit_setvmstate(as, i) \ + (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate)) + +/* mov r, i / xor r, r */ +static void emit_loadi(ASMState *as, Reg r, int32_t i) +{ + /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */ + if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP || + (as->curins+1 < as->T->nins && + IR(as->curins+1)->o == IR_HIOP)))) { + emit_rr(as, XO_ARITH(XOg_XOR), r, r); + } else { + MCode *p = as->mcp; + *(int32_t *)(p-4) = i; + p[-5] = (MCode)(XI_MOVri+(r&7)); + p -= 5; + REXRB(p, 0, r); + as->mcp = p; + } +} + +/* mov r, addr */ +#define emit_loada(as, r, addr) \ + emit_loadi(as, (r), ptr2addr((addr))) + +#if LJ_64 +/* mov r, imm64 or shorter 32 bit extended load. */ +static void emit_loadu64(ASMState *as, Reg r, uint64_t u64) +{ + if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */ + emit_loadi(as, r, (int32_t)u64); + } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */ + MCode *p = as->mcp; + *(int32_t *)(p-4) = (int32_t)u64; + as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4); + } else { /* Full-size 64 bit load. */ + MCode *p = as->mcp; + *(uint64_t *)(p-8) = u64; + p[-9] = (MCode)(XI_MOVri+(r&7)); + p[-10] = 0x48 + ((r>>3)&1); + p -= 10; + as->mcp = p; + } +} +#endif + +/* movsd r, [&tv->n] / xorps r, r */ +static void emit_loadn(ASMState *as, Reg r, cTValue *tv) +{ + if (tvispzero(tv)) /* Use xor only for +0. */ + emit_rr(as, XO_XORPS, r, r); + else + emit_rma(as, XO_MOVSD, r, &tv->n); +} + +/* -- Emit control-flow instructions -------------------------------------- */ + +/* Label for short jumps. */ +typedef MCode *MCLabel; + +#if LJ_32 && LJ_HASFFI +/* jmp short target */ +static void emit_sjmp(ASMState *as, MCLabel target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = target - p; + lua_assert(delta == (int8_t)delta); + p[-1] = (MCode)(int8_t)delta; + p[-2] = XI_JMPs; + as->mcp = p - 2; +} +#endif + +/* jcc short target */ +static void emit_sjcc(ASMState *as, int cc, MCLabel target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = target - p; + lua_assert(delta == (int8_t)delta); + p[-1] = (MCode)(int8_t)delta; + p[-2] = (MCode)(XI_JCCs+(cc&15)); + as->mcp = p - 2; +} + +/* jcc short (pending target) */ +static MCLabel emit_sjcc_label(ASMState *as, int cc) +{ + MCode *p = as->mcp; + p[-1] = 0; + p[-2] = (MCode)(XI_JCCs+(cc&15)); + as->mcp = p - 2; + return p; +} + +/* Fixup jcc short target. */ +static void emit_sfixup(ASMState *as, MCLabel source) +{ + source[-1] = (MCode)(as->mcp-source); +} + +/* Return label pointing to current PC. */ +#define emit_label(as) ((as)->mcp) + +/* Compute relative 32 bit offset for jump and call instructions. 
*/ +static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) +{ + ptrdiff_t delta = target - p; + lua_assert(delta == (int32_t)delta); + return (int32_t)delta; +} + +/* jcc target */ +static void emit_jcc(ASMState *as, int cc, MCode *target) +{ + MCode *p = as->mcp; + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = (MCode)(XI_JCCn+(cc&15)); + p[-6] = 0x0f; + as->mcp = p - 6; +} + +/* jmp target */ +static void emit_jmp(ASMState *as, MCode *target) +{ + MCode *p = as->mcp; + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = XI_JMP; + as->mcp = p - 5; +} + +/* call target */ +static void emit_call_(ASMState *as, MCode *target) +{ + MCode *p = as->mcp; +#if LJ_64 + if (target-p != (int32_t)(target-p)) { + /* Assumes RID_RET is never an argument to calls and always clobbered. */ + emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET); + emit_loadu64(as, RID_RET, (uint64_t)target); + return; + } +#endif + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = XI_CALL; + as->mcp = p - 5; +} + +#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f)) + +/* -- Emit generic operations --------------------------------------------- */ + +/* Use 64 bit operations to handle 64 bit IR types. */ +#if LJ_64 +#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0)) +#else +#define REX_64IR(ir, r) (r) +#endif + +/* Generic move between two regs. */ +static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) +{ + UNUSED(ir); + if (dst < RID_MAX_GPR) + emit_rr(as, XO_MOV, REX_64IR(ir, dst), src); + else + emit_rr(as, XO_MOVAPS, dst, src); +} + +/* Generic load of register with base and (small) offset address. */ +static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_rmro(as, XO_MOV, REX_64IR(ir, r), base, ofs); + else + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, r, base, ofs); +} + +/* Generic store of register with base and (small) offset address. */ +static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_rmro(as, XO_MOVto, REX_64IR(ir, r), base, ofs); + else + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, base, ofs); +} + +/* Add offset to pointer. */ +static void emit_addptr(ASMState *as, Reg r, int32_t ofs) +{ + if (ofs) { + if ((as->flags & JIT_F_LEA_AGU)) + emit_rmro(as, XO_LEA, r, r, ofs); + else + emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs); + } +} + +#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs)) + +/* Prefer rematerialization of BASE/L from global_State over spills. */ +#define emit_canremat(ref) ((ref) <= REF_BASE) + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_err.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_err.c new file mode 100644 index 00000000000..9ac0c988fc9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_err.c @@ -0,0 +1,796 @@ +/* +** Error handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_err_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_func.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_ff.h" +#include "lj_trace.h" +#include "lj_vm.h" +#include "lj_strfmt.h" + +/* +** LuaJIT can either use internal or external frame unwinding: +** +** - Internal frame unwinding (INT) is free-standing and doesn't require +** any OS or library support. +** +** - External frame unwinding (EXT) uses the system-provided unwind handler. 
+** +** Pros and Cons: +** +** - EXT requires unwind tables for *all* functions on the C stack between +** the pcall/catch and the error/throw. This is the default on x64, +** but needs to be manually enabled on x86/PPC for non-C++ code. +** +** - INT is faster when actually throwing errors (but this happens rarely). +** Setting up error handlers is zero-cost in any case. +** +** - EXT provides full interoperability with C++ exceptions. You can throw +** Lua errors or C++ exceptions through a mix of Lua frames and C++ frames. +** C++ destructors are called as needed. C++ exceptions caught by pcall +** are converted to the string "C++ exception". Lua errors can be caught +** with catch (...) in C++. +** +** - INT has only limited support for automatically catching C++ exceptions +** on POSIX systems using DWARF2 stack unwinding. Other systems may use +** the wrapper function feature. Lua errors thrown through C++ frames +** cannot be caught by C++ code and C++ destructors are not run. +** +** EXT is the default on x64 systems, INT is the default on all other systems. +** +** EXT can be manually enabled on POSIX systems using GCC and DWARF2 stack +** unwinding with -DLUAJIT_UNWIND_EXTERNAL. *All* C code must be compiled +** with -funwind-tables (or -fexceptions). This includes LuaJIT itself (set +** TARGET_CFLAGS), all of your C/Lua binding code, all loadable C modules +** and all C libraries that have callbacks which may be used to call back +** into Lua. C++ code must *not* be compiled with -fno-exceptions. +** +** EXT cannot be enabled on WIN32 since system exceptions use code-driven SEH. +** EXT is mandatory on WIN64 since the calling convention has an abundance +** of callee-saved registers (rbx, rbp, rsi, rdi, r12-r15, xmm6-xmm15). +** The POSIX/x64 interpreter only saves r12/r13 for INT (e.g. PS4). +*/ + +#if defined(__GNUC__) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL)) && !LJ_NO_UNWIND +#define LJ_UNWIND_EXT 1 +#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS +#define LJ_UNWIND_EXT 1 +#endif + +/* -- Error messages ------------------------------------------------------ */ + +/* Error message strings. */ +LJ_DATADEF const char *lj_err_allmsg = +#define ERRDEF(name, msg) msg "\0" +#include "lj_errmsg.h" +; + +/* -- Internal frame unwinding -------------------------------------------- */ + +/* Unwind Lua stack and move error message to new top. */ +LJ_NOINLINE static void unwindstack(lua_State *L, TValue *top) +{ + lj_func_closeuv(L, top); + if (top < L->top-1) { + copyTV(L, top, L->top-1); + L->top = top+1; + } + lj_state_relimitstack(L); +} + +/* Unwind until stop frame. Optionally cleanup frames. */ +static void *err_unwind(lua_State *L, void *stopcf, int errcode) +{ + TValue *frame = L->base-1; + void *cf = L->cframe; + while (cf) { + int32_t nres = cframe_nres(cframe_raw(cf)); + if (nres < 0) { /* C frame without Lua frame? */ + TValue *top = restorestack(L, -nres); + if (frame < top) { /* Frame reached? */ + if (errcode) { + L->base = frame+1; + L->cframe = cframe_prev(cf); + unwindstack(L, top); + } + return cf; + } + } + if (frame <= tvref(L->stack)+LJ_FR2) + break; + switch (frame_typep(frame)) { + case FRAME_LUA: /* Lua frame. */ + case FRAME_LUAP: + frame = frame_prevl(frame); + break; + case FRAME_C: /* C frame. 
*/ + unwind_c: +#if LJ_UNWIND_EXT + if (errcode) { + L->base = frame_prevd(frame) + 1; + L->cframe = cframe_prev(cf); + unwindstack(L, frame - LJ_FR2); + } else if (cf != stopcf) { + cf = cframe_prev(cf); + frame = frame_prevd(frame); + break; + } + return NULL; /* Continue unwinding. */ +#else + UNUSED(stopcf); + cf = cframe_prev(cf); + frame = frame_prevd(frame); + break; +#endif + case FRAME_CP: /* Protected C frame. */ + if (cframe_canyield(cf)) { /* Resume? */ + if (errcode) { + hook_leave(G(L)); /* Assumes nobody uses coroutines inside hooks. */ + L->cframe = NULL; + L->status = (uint8_t)errcode; + } + return cf; + } + if (errcode) { + L->base = frame_prevd(frame) + 1; + L->cframe = cframe_prev(cf); + unwindstack(L, frame - LJ_FR2); + } + return cf; + case FRAME_CONT: /* Continuation frame. */ + if (frame_iscont_fficb(frame)) + goto unwind_c; + case FRAME_VARG: /* Vararg frame. */ + frame = frame_prevd(frame); + break; + case FRAME_PCALL: /* FF pcall() frame. */ + case FRAME_PCALLH: /* FF pcall() frame inside hook. */ + if (errcode) { + if (errcode == LUA_YIELD) { + frame = frame_prevd(frame); + break; + } + if (frame_typep(frame) == FRAME_PCALL) + hook_leave(G(L)); + L->base = frame_prevd(frame) + 1; + L->cframe = cf; + unwindstack(L, L->base); + } + return (void *)((intptr_t)cf | CFRAME_UNWIND_FF); + } + } + /* No C frame. */ + if (errcode) { + L->base = tvref(L->stack)+1+LJ_FR2; + L->cframe = NULL; + unwindstack(L, L->base); + if (G(L)->panic) + G(L)->panic(L); + exit(EXIT_FAILURE); + } + return L; /* Anything non-NULL will do. */ +} + +/* -- External frame unwinding -------------------------------------------- */ + +#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_ABI_WIN + +/* +** We have to use our own definitions instead of the mandatory (!) unwind.h, +** since various OS, distros and compilers mess up the header installation. +*/ + +typedef struct _Unwind_Exception +{ + uint64_t exclass; + void (*excleanup)(int, struct _Unwind_Exception *); + uintptr_t p1, p2; +} __attribute__((__aligned__)) _Unwind_Exception; + +typedef struct _Unwind_Context _Unwind_Context; + +#define _URC_OK 0 +#define _URC_FATAL_PHASE1_ERROR 3 +#define _URC_HANDLER_FOUND 6 +#define _URC_INSTALL_CONTEXT 7 +#define _URC_CONTINUE_UNWIND 8 +#define _URC_FAILURE 9 + +#if !LJ_TARGET_ARM + +extern uintptr_t _Unwind_GetCFA(_Unwind_Context *); +extern void _Unwind_SetGR(_Unwind_Context *, int, uintptr_t); +extern void _Unwind_SetIP(_Unwind_Context *, uintptr_t); +extern void _Unwind_DeleteException(_Unwind_Exception *); +extern int _Unwind_RaiseException(_Unwind_Exception *); + +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 + +#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */ +#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (uint64_t)(c)) +#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff) +#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff)) + +/* DWARF2 personality handler referenced from interpreter .eh_frame. 
*/ +LJ_FUNCA int lj_err_unwind_dwarf(int version, int actions, + uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx) +{ + void *cf; + lua_State *L; + if (version != 1) + return _URC_FATAL_PHASE1_ERROR; + UNUSED(uexclass); + cf = (void *)_Unwind_GetCFA(ctx); + L = cframe_L(cf); + if ((actions & _UA_SEARCH_PHASE)) { +#if LJ_UNWIND_EXT + if (err_unwind(L, cf, 0) == NULL) + return _URC_CONTINUE_UNWIND; +#endif + if (!LJ_UEXCLASS_CHECK(uexclass)) { + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); + } + return _URC_HANDLER_FOUND; + } + if ((actions & _UA_CLEANUP_PHASE)) { + int errcode; + if (LJ_UEXCLASS_CHECK(uexclass)) { + errcode = LJ_UEXCLASS_ERRCODE(uexclass); + } else { + if ((actions & _UA_HANDLER_FRAME)) + _Unwind_DeleteException(uex); + errcode = LUA_ERRRUN; + } +#if LJ_UNWIND_EXT + cf = err_unwind(L, cf, errcode); + if ((actions & _UA_FORCE_UNWIND)) { + return _URC_CONTINUE_UNWIND; + } else if (cf) { + _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode); + _Unwind_SetIP(ctx, (uintptr_t)(cframe_unwind_ff(cf) ? + lj_vm_unwind_ff_eh : + lj_vm_unwind_c_eh)); + return _URC_INSTALL_CONTEXT; + } +#if LJ_TARGET_X86ORX64 + else if ((actions & _UA_HANDLER_FRAME)) { + /* Workaround for ancient libgcc bug. Still present in RHEL 5.5. :-/ + ** Real fix: http://gcc.gnu.org/viewcvs/trunk/gcc/unwind-dw2.c?r1=121165&r2=124837&pathrev=153877&diff_format=h + */ + _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode); + _Unwind_SetIP(ctx, (uintptr_t)lj_vm_unwind_rethrow); + return _URC_INSTALL_CONTEXT; + } +#endif +#else + /* This is not the proper way to escape from the unwinder. We get away with + ** it on non-x64 because the interpreter restores all callee-saved regs. + */ + lj_err_throw(L, errcode); +#endif + } + return _URC_CONTINUE_UNWIND; +} + +#if LJ_UNWIND_EXT +#if LJ_TARGET_OSX || defined(__OpenBSD__) +/* Sorry, no thread safety for OSX. Complain to Apple, not me. */ +static _Unwind_Exception static_uex; +#else +static __thread _Unwind_Exception static_uex; +#endif + +/* Raise DWARF2 exception. */ +static void err_raise_ext(int errcode) +{ + static_uex.exclass = LJ_UEXCLASS_MAKE(errcode); + static_uex.excleanup = NULL; + _Unwind_RaiseException(&static_uex); +} +#endif + +#else + +extern void _Unwind_DeleteException(void *); +extern int __gnu_unwind_frame (void *, _Unwind_Context *); +extern int _Unwind_VRS_Set(_Unwind_Context *, int, uint32_t, int, void *); +extern int _Unwind_VRS_Get(_Unwind_Context *, int, uint32_t, int, void *); + +static inline uint32_t _Unwind_GetGR(_Unwind_Context *ctx, int r) +{ + uint32_t v; + _Unwind_VRS_Get(ctx, 0, r, 0, &v); + return v; +} + +static inline void _Unwind_SetGR(_Unwind_Context *ctx, int r, uint32_t v) +{ + _Unwind_VRS_Set(ctx, 0, r, 0, &v); +} + +#define _US_VIRTUAL_UNWIND_FRAME 0 +#define _US_UNWIND_FRAME_STARTING 1 +#define _US_ACTION_MASK 3 +#define _US_FORCE_UNWIND 8 + +/* ARM unwinder personality handler referenced from interpreter .ARM.extab. 
*/
+LJ_FUNCA int lj_err_unwind_arm(int state, void *ucb, _Unwind_Context *ctx)
+{
+ void *cf = (void *)_Unwind_GetGR(ctx, 13);
+ lua_State *L = cframe_L(cf);
+ if ((state & _US_ACTION_MASK) == _US_VIRTUAL_UNWIND_FRAME) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ return _URC_HANDLER_FOUND;
+ }
+ if ((state&(_US_ACTION_MASK|_US_FORCE_UNWIND)) == _US_UNWIND_FRAME_STARTING) {
+ _Unwind_DeleteException(ucb);
+ _Unwind_SetGR(ctx, 15, (uint32_t)(void *)lj_err_throw);
+ _Unwind_SetGR(ctx, 0, (uint32_t)L);
+ _Unwind_SetGR(ctx, 1, (uint32_t)LUA_ERRRUN);
+ return _URC_INSTALL_CONTEXT;
+ }
+ if (__gnu_unwind_frame(ucb, ctx) != _URC_OK)
+ return _URC_FAILURE;
+ return _URC_CONTINUE_UNWIND;
+}
+
+#endif
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+
+/*
+** Someone in Redmond owes me several days of my life. A lot of this is
+** undocumented or just plain wrong on MSDN. Some of it can be gathered
+** from 3rd party docs or must be found by trial-and-error. They really
+** don't want you to write your own language-specific exception handler
+** or to interact gracefully with MSVC. :-(
+**
+** Apparently MSVC doesn't call C++ destructors for foreign exceptions
+** unless you compile your C++ code with /EHa. Unfortunately this means
+** catch (...) also catches things like access violations. The use of
+** _set_se_translator doesn't really help, because it requires /EHa, too.
+*/
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/* Taken from: http://www.nynaeve.net/?p=99 */
+typedef struct UndocumentedDispatcherContext {
+ ULONG64 ControlPc;
+ ULONG64 ImageBase;
+ PRUNTIME_FUNCTION FunctionEntry;
+ ULONG64 EstablisherFrame;
+ ULONG64 TargetIp;
+ PCONTEXT ContextRecord;
+ void (*LanguageHandler)(void);
+ PVOID HandlerData;
+ PUNWIND_HISTORY_TABLE HistoryTable;
+ ULONG ScopeIndex;
+ ULONG Fill0;
+} UndocumentedDispatcherContext;
+
+/* Another wild guess. */
+extern void __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow);
+
+#ifdef MINGW_SDK_INIT
+/* Workaround for broken MinGW64 declaration. */
+VOID RtlUnwindEx_FIXED(PVOID,PVOID,PVOID,PVOID,PVOID,PVOID) asm("RtlUnwindEx");
+#define RtlUnwindEx RtlUnwindEx_FIXED
+#endif
+
+#define LJ_MSVC_EXCODE ((DWORD)0xe06d7363)
+#define LJ_GCC_EXCODE ((DWORD)0x20474343)
+
+#define LJ_EXCODE ((DWORD)0xe24c4a00)
+#define LJ_EXCODE_MAKE(c) (LJ_EXCODE | (DWORD)(c))
+#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff)
+#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff))
+
+/* Win64 exception handler for interpreter frame. */
+LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec,
+ void *cf, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch)
+{
+ lua_State *L = cframe_L(cf);
+ int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ?
+ LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN;
+ if ((rec->ExceptionFlags & 6)) { /* EH_UNWINDING|EH_EXIT_UNWIND */
+ /* Unwind internal frames. */
+ err_unwind(L, cf, errcode);
+ } else {
+ void *cf2 = err_unwind(L, cf, 0);
+ if (cf2) { /* We catch it, so start unwinding the upper frames. */
+ if (rec->ExceptionCode == LJ_MSVC_EXCODE ||
+ rec->ExceptionCode == LJ_GCC_EXCODE) {
+#if LJ_TARGET_WINDOWS
+ __DestructExceptionObject(rec, 1);
+#endif
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) {
+ /* Don't catch access violations etc. */
+ return ExceptionContinueSearch;
+ }
+ /* Unwind the stack and call all handlers for all lower C frames
+ ** (including ourselves) again with EH_UNWINDING set.
Then set + ** rsp = cf, rax = errcode and jump to the specified target. + */ + RtlUnwindEx(cf, (void *)((cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ? + lj_vm_unwind_ff_eh : + lj_vm_unwind_c_eh), + rec, (void *)(uintptr_t)errcode, ctx, dispatch->HistoryTable); + /* RtlUnwindEx should never return. */ + } + } + return ExceptionContinueSearch; +} + +/* Raise Windows exception. */ +static void err_raise_ext(int errcode) +{ + RaiseException(LJ_EXCODE_MAKE(errcode), 1 /* EH_NONCONTINUABLE */, 0, NULL); +} + +#endif + +/* -- Error handling ------------------------------------------------------ */ + +/* Throw error. Find catch frame, unwind stack and continue. */ +LJ_NOINLINE void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode) +{ + global_State *g = G(L); + lj_trace_abort(g); + setmref(g->jit_base, NULL); + L->status = 0; +#if LJ_UNWIND_EXT + err_raise_ext(errcode); + /* + ** A return from this function signals a corrupt C stack that cannot be + ** unwound. We have no choice but to call the panic function and exit. + ** + ** Usually this is caused by a C function without unwind information. + ** This should never happen on x64, but may happen if you've manually + ** enabled LUAJIT_UNWIND_EXTERNAL and forgot to recompile *every* + ** non-C++ file with -funwind-tables. + */ + if (G(L)->panic) + G(L)->panic(L); +#else + { + void *cf = err_unwind(L, NULL, errcode); + if (cframe_unwind_ff(cf)) + lj_vm_unwind_ff(cframe_raw(cf)); + else + lj_vm_unwind_c(cframe_raw(cf), errcode); + } +#endif + exit(EXIT_FAILURE); +} + +/* Return string object for error message. */ +LJ_NOINLINE GCstr *lj_err_str(lua_State *L, ErrMsg em) +{ + return lj_str_newz(L, err2msg(em)); +} + +/* Out-of-memory error. */ +LJ_NOINLINE void lj_err_mem(lua_State *L) +{ + if (L->status == LUA_ERRERR+1) /* Don't touch the stack during lua_open. */ + lj_vm_unwind_c(L->cframe, LUA_ERRMEM); + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRMEM)); + lj_err_throw(L, LUA_ERRMEM); +} + +/* Find error function for runtime errors. Requires an extra stack traversal. */ +static ptrdiff_t finderrfunc(lua_State *L) +{ + cTValue *frame = L->base-1, *bot = tvref(L->stack)+LJ_FR2; + void *cf = L->cframe; + while (frame > bot && cf) { + while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */ + if (frame >= restorestack(L, -cframe_nres(cf))) + break; + if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */ + return cframe_errfunc(cf); + cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */ + if (cf == NULL) + return 0; + } + switch (frame_typep(frame)) { + case FRAME_LUA: + case FRAME_LUAP: + frame = frame_prevl(frame); + break; + case FRAME_C: + cf = cframe_prev(cf); + /* fallthrough */ + case FRAME_VARG: + frame = frame_prevd(frame); + break; + case FRAME_CONT: + if (frame_iscont_fficb(frame)) + cf = cframe_prev(cf); + frame = frame_prevd(frame); + break; + case FRAME_CP: + if (cframe_canyield(cf)) return 0; + if (cframe_errfunc(cf) >= 0) + return cframe_errfunc(cf); + frame = frame_prevd(frame); + break; + case FRAME_PCALL: + case FRAME_PCALLH: + if (frame_func(frame_prevd(frame))->c.ffid == FF_xpcall) + return savestack(L, frame_prevd(frame)+1); /* xpcall's errorfunc. */ + return 0; + default: + lua_assert(0); + return 0; + } + } + return 0; +} + +/* Runtime error. 
*/ +LJ_NOINLINE void lj_err_run(lua_State *L) +{ + ptrdiff_t ef = finderrfunc(L); + if (ef) { + TValue *errfunc = restorestack(L, ef); + TValue *top = L->top; + lj_trace_abort(G(L)); + if (!tvisfunc(errfunc) || L->status == LUA_ERRERR) { + setstrV(L, top-1, lj_err_str(L, LJ_ERR_ERRERR)); + lj_err_throw(L, LUA_ERRERR); + } + L->status = LUA_ERRERR; + copyTV(L, top+LJ_FR2, top-1); + copyTV(L, top-1, errfunc); + if (LJ_FR2) setnilV(top++); + L->top = top+1; + lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */ + } + lj_err_throw(L, LUA_ERRRUN); +} + +/* Formatted runtime error message. */ +LJ_NORET LJ_NOINLINE static void err_msgv(lua_State *L, ErrMsg em, ...) +{ + const char *msg; + va_list argp; + va_start(argp, em); + if (curr_funcisL(L)) L->top = curr_topL(L); + msg = lj_strfmt_pushvf(L, err2msg(em), argp); + va_end(argp); + lj_debug_addloc(L, msg, L->base-1, NULL); + lj_err_run(L); +} + +/* Non-vararg variant for better calling conventions. */ +LJ_NOINLINE void lj_err_msg(lua_State *L, ErrMsg em) +{ + err_msgv(L, em); +} + +/* Lexer error. */ +LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok, + BCLine line, ErrMsg em, va_list argp) +{ + char buff[LUA_IDSIZE]; + const char *msg; + lj_debug_shortname(buff, src, line); + msg = lj_strfmt_pushvf(L, err2msg(em), argp); + msg = lj_strfmt_pushf(L, "%s:%d: %s", buff, line, msg); + if (tok) + lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok); + lj_err_throw(L, LUA_ERRSYNTAX); +} + +/* Typecheck error for operands. */ +LJ_NOINLINE void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm) +{ + const char *tname = lj_typename(o); + const char *opname = err2msg(opm); + if (curr_funcisL(L)) { + GCproto *pt = curr_proto(L); + const BCIns *pc = cframe_Lpc(L) - 1; + const char *oname = NULL; + const char *kind = lj_debug_slotname(pt, pc, (BCReg)(o-L->base), &oname); + if (kind) + err_msgv(L, LJ_ERR_BADOPRT, opname, kind, oname, tname); + } + err_msgv(L, LJ_ERR_BADOPRV, opname, tname); +} + +/* Typecheck error for ordered comparisons. */ +LJ_NOINLINE void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2) +{ + const char *t1 = lj_typename(o1); + const char *t2 = lj_typename(o2); + err_msgv(L, t1 == t2 ? LJ_ERR_BADCMPV : LJ_ERR_BADCMPT, t1, t2); + /* This assumes the two "boolean" entries are commoned by the C compiler. */ +} + +/* Typecheck error for __call. */ +LJ_NOINLINE void lj_err_optype_call(lua_State *L, TValue *o) +{ + /* Gross hack if lua_[p]call or pcall/xpcall fail for a non-callable object: + ** L->base still points to the caller. So add a dummy frame with L instead + ** of a function. See lua_getstack(). + */ + const BCIns *pc = cframe_Lpc(L); + if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) { + const char *tname = lj_typename(o); + if (LJ_FR2) o++; + setframe_pc(o, pc); + setframe_gc(o, obj2gco(L), LJ_TTHREAD); + L->top = L->base = o+1; + err_msgv(L, LJ_ERR_BADCALL, tname); + } + lj_err_optype(L, o, LJ_ERR_OPCALL); +} + +/* Error in context of caller. */ +LJ_NOINLINE void lj_err_callermsg(lua_State *L, const char *msg) +{ + TValue *frame = L->base-1; + TValue *pframe = NULL; + if (frame_islua(frame)) { + pframe = frame_prevl(frame); + } else if (frame_iscont(frame)) { + if (frame_iscont_fficb(frame)) { + pframe = frame; + frame = NULL; + } else { + pframe = frame_prevd(frame); +#if LJ_HASFFI + /* Remove frame for FFI metamethods. 
*/ + if (frame_func(frame)->c.ffid >= FF_ffi_meta___index && + frame_func(frame)->c.ffid <= FF_ffi_meta___tostring) { + L->base = pframe+1; + L->top = frame; + setcframe_pc(cframe_raw(L->cframe), frame_contpc(frame)); + } +#endif + } + } + lj_debug_addloc(L, msg, pframe, frame); + lj_err_run(L); +} + +/* Formatted error in context of caller. */ +LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...) +{ + const char *msg; + va_list argp; + va_start(argp, em); + msg = lj_strfmt_pushvf(L, err2msg(em), argp); + va_end(argp); + lj_err_callermsg(L, msg); +} + +/* Error in context of caller. */ +LJ_NOINLINE void lj_err_caller(lua_State *L, ErrMsg em) +{ + lj_err_callermsg(L, err2msg(em)); +} + +/* Argument error message. */ +LJ_NORET LJ_NOINLINE static void err_argmsg(lua_State *L, int narg, + const char *msg) +{ + const char *fname = "?"; + const char *ftype = lj_debug_funcname(L, L->base - 1, &fname); + if (narg < 0 && narg > LUA_REGISTRYINDEX) + narg = (int)(L->top - L->base) + narg + 1; + if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */ + msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg); + else + msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg); + lj_err_callermsg(L, msg); +} + +/* Formatted argument error. */ +LJ_NOINLINE void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...) +{ + const char *msg; + va_list argp; + va_start(argp, em); + msg = lj_strfmt_pushvf(L, err2msg(em), argp); + va_end(argp); + err_argmsg(L, narg, msg); +} + +/* Argument error. */ +LJ_NOINLINE void lj_err_arg(lua_State *L, int narg, ErrMsg em) +{ + err_argmsg(L, narg, err2msg(em)); +} + +/* Typecheck error for arguments. */ +LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname) +{ + const char *tname, *msg; + if (narg <= LUA_REGISTRYINDEX) { + if (narg >= LUA_GLOBALSINDEX) { + tname = lj_obj_itypename[~LJ_TTAB]; + } else { + GCfunc *fn = curr_func(L); + int idx = LUA_GLOBALSINDEX - narg; + if (idx <= fn->c.nupvalues) + tname = lj_typename(&fn->c.upvalue[idx-1]); + else + tname = lj_obj_typename[0]; + } + } else { + TValue *o = narg < 0 ? L->top + narg : L->base + narg-1; + tname = o < L->top ? lj_typename(o) : lj_obj_typename[0]; + } + msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname); + err_argmsg(L, narg, msg); +} + +/* Typecheck error for arguments. */ +LJ_NOINLINE void lj_err_argt(lua_State *L, int narg, int tt) +{ + lj_err_argtype(L, narg, lj_obj_typename[tt+1]); +} + +/* -- Public error handling API ------------------------------------------- */ + +LUA_API lua_CFunction lua_atpanic(lua_State *L, lua_CFunction panicf) +{ + lua_CFunction old = G(L)->panic; + G(L)->panic = panicf; + return old; +} + +/* Forwarders for the public API (C calling convention and no LJ_NORET). */ +LUA_API int lua_error(lua_State *L) +{ + lj_err_run(L); + return 0; /* unreachable */ +} + +LUALIB_API int luaL_argerror(lua_State *L, int narg, const char *msg) +{ + err_argmsg(L, narg, msg); + return 0; /* unreachable */ +} + +LUALIB_API int luaL_typerror(lua_State *L, int narg, const char *xname) +{ + lj_err_argtype(L, narg, xname); + return 0; /* unreachable */ +} + +LUALIB_API void luaL_where(lua_State *L, int level) +{ + int size; + cTValue *frame = lj_debug_frame(L, level, &size); + lj_debug_addloc(L, "", frame, size ? frame+size : NULL); +} + +LUALIB_API int luaL_error(lua_State *L, const char *fmt, ...) 
+{ + const char *msg; + va_list argp; + va_start(argp, fmt); + msg = lj_strfmt_pushvf(L, fmt, argp); + va_end(argp); + lj_err_callermsg(L, msg); + return 0; /* unreachable */ +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_err.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_err.h new file mode 100644 index 00000000000..03a56f0362c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_err.h @@ -0,0 +1,41 @@ +/* +** Error handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_ERR_H +#define _LJ_ERR_H + +#include + +#include "lj_obj.h" + +typedef enum { +#define ERRDEF(name, msg) \ + LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1, +#include "lj_errmsg.h" + LJ_ERR__MAX +} ErrMsg; + +LJ_DATA const char *lj_err_allmsg; +#define err2msg(em) (lj_err_allmsg+(int)(em)) + +LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em); +LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode); +LJ_FUNC_NORET void lj_err_mem(lua_State *L); +LJ_FUNC_NORET void lj_err_run(lua_State *L); +LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em); +LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok, + BCLine line, ErrMsg em, va_list argp); +LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm); +LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2); +LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o); +LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg); +LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...); +LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em); +LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em); +LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...); +LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname); +LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_errmsg.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_errmsg.h new file mode 100644 index 00000000000..7717665b038 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_errmsg.h @@ -0,0 +1,190 @@ +/* +** VM error messages. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* This file may be included multiple times with different ERRDEF macros. */ + +/* Basic error handling. */ +ERRDEF(ERRMEM, "not enough memory") +ERRDEF(ERRERR, "error in error handling") +ERRDEF(ERRCPP, "C++ exception") + +/* Allocations. */ +ERRDEF(STROV, "string length overflow") +ERRDEF(UDATAOV, "userdata length overflow") +ERRDEF(STKOV, "stack overflow") +ERRDEF(STKOVM, "stack overflow (%s)") +ERRDEF(TABOV, "table overflow") + +/* Table indexing. */ +ERRDEF(NANIDX, "table index is NaN") +ERRDEF(NILIDX, "table index is nil") +ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next")) + +/* Metamethod resolving. */ +ERRDEF(BADCALL, "attempt to call a %s value") +ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)") +ERRDEF(BADOPRV, "attempt to %s a %s value") +ERRDEF(BADCMPT, "attempt to compare %s with %s") +ERRDEF(BADCMPV, "attempt to compare two %s values") +ERRDEF(GETLOOP, "loop in gettable") +ERRDEF(SETLOOP, "loop in settable") +ERRDEF(OPCALL, "call") +ERRDEF(OPINDEX, "index") +ERRDEF(OPARITH, "perform arithmetic on") +ERRDEF(OPCAT, "concatenate") +ERRDEF(OPLEN, "get length of") + +/* Type checks. 
*/ +ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)") +ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)") +ERRDEF(BADTYPE, "%s expected, got %s") +ERRDEF(BADVAL, "invalid value") +ERRDEF(NOVAL, "value expected") +ERRDEF(NOCORO, "coroutine expected") +ERRDEF(NOTABN, "nil or table expected") +ERRDEF(NOLFUNC, "Lua function expected") +ERRDEF(NOFUNCL, "function or level expected") +ERRDEF(NOSFT, "string/function/table expected") +ERRDEF(NOPROXY, "boolean or proxy expected") +ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number") +ERRDEF(FORLIM, LUA_QL("for") " limit must be a number") +ERRDEF(FORSTEP, LUA_QL("for") " step must be a number") + +/* C API checks. */ +ERRDEF(NOENV, "no calling environment") +ERRDEF(CYIELD, "attempt to yield across C-call boundary") +ERRDEF(BADLU, "bad light userdata pointer") +ERRDEF(NOGCMM, "bad action while in __gc metamethod") +#if LJ_TARGET_WINDOWS +ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)") +#endif + +/* Standard library function errors. */ +ERRDEF(ASSERT, "assertion failed!") +ERRDEF(PROTMT, "cannot change a protected metatable") +ERRDEF(UNPACK, "too many results to unpack") +ERRDEF(RDRSTR, "reader function must return a string") +ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print")) +ERRDEF(IDXRNG, "index out of range") +ERRDEF(BASERNG, "base out of range") +ERRDEF(LVLRNG, "level out of range") +ERRDEF(INVLVL, "invalid level") +ERRDEF(INVOPT, "invalid option") +ERRDEF(INVOPTM, "invalid option " LUA_QS) +ERRDEF(INVFMT, "invalid format") +ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object") +ERRDEF(CORUN, "cannot resume running coroutine") +ERRDEF(CODEAD, "cannot resume dead coroutine") +ERRDEF(COSUSP, "cannot resume non-suspended coroutine") +ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert")) +ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat")) +ERRDEF(TABSORT, "invalid order function for sorting") +ERRDEF(IOCLFL, "attempt to use a closed file") +ERRDEF(IOSTDCL, "standard file is closed") +ERRDEF(OSUNIQF, "unable to generate a unique filename") +ERRDEF(OSDATEF, "field " LUA_QS " missing in date table") +ERRDEF(STRDUMP, "unable to dump given function") +ERRDEF(STRSLC, "string slice too long") +ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern") +ERRDEF(STRPATC, "invalid pattern capture") +ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")") +ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")") +ERRDEF(STRPATU, "unbalanced pattern") +ERRDEF(STRPATX, "pattern too complex") +ERRDEF(STRCAPI, "invalid capture index") +ERRDEF(STRCAPN, "too many captures") +ERRDEF(STRCAPU, "unfinished capture") +ERRDEF(STRFMT, "invalid option " LUA_QS " to " LUA_QL("format")) +ERRDEF(STRGSRV, "invalid replacement value (a %s)") +ERRDEF(BADMODN, "name conflict for module " LUA_QS) +#if LJ_HASJIT +ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?") +#if LJ_TARGET_X86ORX64 +ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2") +#else +ERRDEF(NOJIT, "JIT compiler disabled") +#endif +#elif defined(LJ_ARCH_NOJIT) +ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)") +#else +ERRDEF(NOJIT, "JIT compiler permanently disabled by build option") +#endif +ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS) + +/* Lexer/parser errors. 
*/ +ERRDEF(XMODE, "attempt to load chunk with wrong mode") +ERRDEF(XNEAR, "%s near " LUA_QS) +ERRDEF(XLINES, "chunk has too many lines") +ERRDEF(XLEVELS, "chunk has too many syntax levels") +ERRDEF(XNUMBER, "malformed number") +ERRDEF(XLSTR, "unfinished long string") +ERRDEF(XLCOM, "unfinished long comment") +ERRDEF(XSTR, "unfinished string") +ERRDEF(XESC, "invalid escape sequence") +ERRDEF(XLDELIM, "invalid long string delimiter") +ERRDEF(XTOKEN, LUA_QS " expected") +ERRDEF(XJUMP, "control structure too long") +ERRDEF(XSLOTS, "function or expression too complex") +ERRDEF(XLIMC, "chunk has more than %d local variables") +ERRDEF(XLIMM, "main function has more than %d %s") +ERRDEF(XLIMF, "function at line %d has more than %d %s") +ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)") +ERRDEF(XFIXUP, "function too long for return fixup") +ERRDEF(XPARAM, " or " LUA_QL("...") " expected") +#if !LJ_52 +ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)") +#endif +ERRDEF(XFUNARG, "function arguments expected") +ERRDEF(XSYMBOL, "unexpected symbol") +ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function") +ERRDEF(XSYNTAX, "syntax error") +ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected") +ERRDEF(XBREAK, "no loop to break") +ERRDEF(XLUNDEF, "undefined label " LUA_QS) +ERRDEF(XLDUP, "duplicate label " LUA_QS) +ERRDEF(XGSCOPE, " jumps into the scope of local " LUA_QS) + +/* Bytecode reader errors. */ +ERRDEF(BCFMT, "cannot load incompatible bytecode") +ERRDEF(BCBAD, "cannot load malformed bytecode") + +#if LJ_HASFFI +/* FFI errors. */ +ERRDEF(FFI_INVTYPE, "invalid C type") +ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large") +ERRDEF(FFI_BADSCL, "bad storage class") +ERRDEF(FFI_DECLSPEC, "declaration specifier expected") +ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS) +ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS) +ERRDEF(FFI_NUMPARAM, "wrong number of type parameters") +ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS) +ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS) +ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS) +ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS) +ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS) +ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS) +ERRDEF(FFI_BADCALL, LUA_QS " is not callable") +ERRDEF(FFI_NUMARG, "wrong number of arguments for function call") +ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS) +ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed") +ERRDEF(FFI_BADIDXW, LUA_QS " cannot be indexed with " LUA_QS) +ERRDEF(FFI_BADMM, LUA_QS " has no " LUA_QS " metamethod") +ERRDEF(FFI_WRCONST, "attempt to write to constant location") +ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS) +ERRDEF(FFI_BADCBACK, "bad callback") +#if LJ_OS_NOJIT +ERRDEF(FFI_CBACKOV, "no support for callbacks on this OS") +#else +ERRDEF(FFI_CBACKOV, "too many callbacks") +#endif +ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields") +ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)") +#endif + +#undef ERRDEF + +/* Detecting unused error messages: + awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh +*/ diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ff.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ff.h new file mode 100644 index 00000000000..73dad9695a6 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ff.h @@ -0,0 +1,18 @@ +/* 
+** Fast function IDs. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FF_H +#define _LJ_FF_H + +/* Fast function ID. */ +typedef enum { + FF_LUA_ = FF_LUA, /* Lua function (must be 0). */ + FF_C_ = FF_C, /* Regular C function (must be 1). */ +#define FFDEF(name) FF_##name, +#include "lj_ffdef.h" + FF__MAX +} FastFunc; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.c new file mode 100644 index 00000000000..a08113ca386 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.c @@ -0,0 +1,1247 @@ +/* +** Fast function call recorder. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_ffrecord_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ff.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_record.h" +#include "lj_ffrecord.h" +#include "lj_crecord.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* -- Fast function recording handlers ------------------------------------ */ + +/* Conventions for fast function call handlers: +** +** The argument slots start at J->base[0]. All of them are guaranteed to be +** valid and type-specialized references. J->base[J->maxslot] is set to 0 +** as a sentinel. The runtime argument values start at rd->argv[0]. +** +** In general fast functions should check for presence of all of their +** arguments and for the correct argument types. Some simplifications +** are allowed if the interpreter throws instead. But even if recording +** is aborted, the generated IR must be consistent (no zero-refs). +** +** The number of results in rd->nres is set to 1. Handlers that return +** a different number of results need to override it. A negative value +** prevents return processing (e.g. for pending calls). +** +** Results need to be stored starting at J->base[0]. Return processing +** moves them to the right slots later. +** +** The per-ffid auxiliary data is the value of the 2nd part of the +** LJLIB_REC() annotation. This allows handling similar functionality +** in a common handler. +*/ + +/* Type of handler to record a fast function. */ +typedef void (LJ_FASTCALL *RecordFunc)(jit_State *J, RecordFFData *rd); + +/* Get runtime value of int argument. */ +static int32_t argv2int(jit_State *J, TValue *o) +{ + if (!lj_strscan_numberobj(o)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + return tvisint(o) ? intV(o) : lj_num2int(numV(o)); +} + +/* Get runtime value of string argument. */ +static GCstr *argv2str(jit_State *J, TValue *o) +{ + if (LJ_LIKELY(tvisstr(o))) { + return strV(o); + } else { + GCstr *s; + if (!tvisnumber(o)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + s = lj_strfmt_number(J->L, o); + setstrV(J->L, o, s); + return s; + } +} + +/* Return number of results wanted by caller. 
*/ +static ptrdiff_t results_wanted(jit_State *J) +{ + TValue *frame = J->L->base-1; + if (frame_islua(frame)) + return (ptrdiff_t)bc_b(frame_pc(frame)[-1]) - 1; + else + return -1; +} + +#ifdef LUAJIT_TRACE_STITCHING +/* This feature is disabled for now due to a design mistake. Sorry. +** +** It causes unpredictable behavior and crashes when a full trace flush +** happens with a stitching continuation still in the stack somewhere. +*/ + +/* Trace stitching: add continuation below frame to start a new trace. */ +static void recff_stitch(jit_State *J) +{ + ASMFunction cont = lj_cont_stitch; + TraceNo traceno = J->cur.traceno; + lua_State *L = J->L; + TValue *base = L->base; + const BCIns *pc = frame_pc(base-1); + TValue *pframe = frame_prevl(base-1); + TRef trcont; + + lua_assert(!LJ_FR2); /* TODO_FR2: handle frame shift. */ + /* Move func + args up in Lua stack and insert continuation. */ + memmove(&base[1], &base[-1], sizeof(TValue)*(J->maxslot+1)); + setframe_ftsz(base+1, ((char *)(base+1) - (char *)pframe) + FRAME_CONT); + setcont(base, cont); + setframe_pc(base, pc); + if (LJ_DUALNUM) setintV(base-1, traceno); else base[-1].u64 = traceno; + L->base += 2; + L->top += 2; + + /* Ditto for the IR. */ + memmove(&J->base[1], &J->base[-1], sizeof(TRef)*(J->maxslot+1)); +#if LJ_64 + trcont = lj_ir_kptr(J, (void *)((int64_t)cont-(int64_t)lj_vm_asm_begin)); +#else + trcont = lj_ir_kptr(J, (void *)cont); +#endif + J->base[0] = trcont | TREF_CONT; + J->base[-1] = LJ_DUALNUM ? lj_ir_kint(J,traceno) : lj_ir_knum_u64(J,traceno); + J->base += 2; + J->baseslot += 2; + J->framedepth++; + + lj_record_stop(J, LJ_TRLINK_STITCH, 0); + + /* Undo Lua stack changes. */ + memmove(&base[-1], &base[1], sizeof(TValue)*(J->maxslot+1)); + setframe_pc(base-1, pc); + L->base -= 2; + L->top -= 2; +} + +/* Fallback handler for fast functions that are not recorded (yet). */ +static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd) +{ + if (J->cur.nins < (IRRef)J->param[JIT_P_minstitch] + REF_BASE) { + lj_trace_err_info(J, LJ_TRERR_TRACEUV); + } else { + /* Can only stitch from Lua call. */ + if (J->framedepth && frame_islua(J->L->base-1)) { + BCOp op = bc_op(*frame_pc(J->L->base-1)); + /* Stitched trace cannot start with *M op with variable # of args. */ + if (!(op == BC_CALLM || op == BC_CALLMT || + op == BC_RETM || op == BC_TSETM)) { + switch (J->fn->c.ffid) { + case FF_error: + case FF_debug_sethook: + case FF_jit_flush: + break; /* Don't stitch across special builtins. */ + default: + recff_stitch(J); /* Use trace stitching. */ + rd->nres = -1; + return; + } + } + } + /* Otherwise stop trace and return to interpreter. */ + lj_record_stop(J, LJ_TRLINK_RETURN, 0); + rd->nres = -1; + } +} + +/* Fallback handler for unsupported variants of fast functions. */ +#define recff_nyiu recff_nyi + +/* Must stop the trace for classic C functions with arbitrary side-effects. */ +#define recff_c recff_nyi +#else +/* Fallback handler for fast functions that are not recorded (yet). */ +static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd) +{ + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYIFF); + UNUSED(rd); +} + +/* Throw error for unsupported variant of fast function. */ +LJ_NORET static void recff_nyiu(jit_State *J, RecordFFData *rd) +{ + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYIFFU); + UNUSED(rd); +} + +/* Must abort the trace for classic C functions with arbitrary side-effects. 
*/ +static void LJ_FASTCALL recff_c(jit_State *J, RecordFFData *rd) +{ + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYICF); + UNUSED(rd); +} +#endif + +/* Emit BUFHDR for the global temporary buffer. */ +static TRef recff_bufhdr(jit_State *J) +{ + return emitir(IRT(IR_BUFHDR, IRT_P32), + lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET); +} + +/* -- Base library fast functions ----------------------------------------- */ + +static void LJ_FASTCALL recff_assert(jit_State *J, RecordFFData *rd) +{ + /* Arguments already specialized. The interpreter throws for nil/false. */ + rd->nres = J->maxslot; /* Pass through all arguments. */ +} + +static void LJ_FASTCALL recff_type(jit_State *J, RecordFFData *rd) +{ + /* Arguments already specialized. Result is a constant string. Neat, huh? */ + uint32_t t; + if (tvisnumber(&rd->argv[0])) + t = ~LJ_TNUMX; + else if (LJ_64 && !LJ_GC64 && tvislightud(&rd->argv[0])) + t = ~LJ_TLIGHTUD; + else + t = ~itype(&rd->argv[0]); + J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[t])); + UNUSED(rd); +} + +static void LJ_FASTCALL recff_getmetatable(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tr) { + RecordIndex ix; + ix.tab = tr; + copyTV(J->L, &ix.tabv, &rd->argv[0]); + if (lj_record_mm_lookup(J, &ix, MM_metatable)) + J->base[0] = ix.mobj; + else + J->base[0] = ix.mt; + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_setmetatable(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + TRef mt = J->base[1]; + if (tref_istab(tr) && (tref_istab(mt) || (mt && tref_isnil(mt)))) { + TRef fref, mtref; + RecordIndex ix; + ix.tab = tr; + copyTV(J->L, &ix.tabv, &rd->argv[0]); + lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */ + fref = emitir(IRT(IR_FREF, IRT_P32), tr, IRFL_TAB_META); + mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt; + emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref); + if (!tref_isnil(mt)) + emitir(IRT(IR_TBAR, IRT_TAB), tr, 0); + J->base[0] = tr; + J->needsnap = 1; + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_rawget(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; ix.key = J->base[1]; + if (tref_istab(ix.tab) && ix.key) { + ix.val = 0; ix.idxchain = 0; + settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); + copyTV(J->L, &ix.keyv, &rd->argv[1]); + J->base[0] = lj_record_idx(J, &ix); + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_rawset(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; ix.key = J->base[1]; ix.val = J->base[2]; + if (tref_istab(ix.tab) && ix.key && ix.val) { + ix.idxchain = 0; + settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); + copyTV(J->L, &ix.keyv, &rd->argv[1]); + copyTV(J->L, &ix.valv, &rd->argv[2]); + lj_record_idx(J, &ix); + /* Pass through table at J->base[0] as result. */ + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_rawequal(jit_State *J, RecordFFData *rd) +{ + TRef tra = J->base[0]; + TRef trb = J->base[1]; + if (tra && trb) { + int diff = lj_record_objcmp(J, tra, trb, &rd->argv[0], &rd->argv[1]); + J->base[0] = diff ? TREF_FALSE : TREF_TRUE; + } /* else: Interpreter will throw. */ +} + +#if LJ_52 +static void LJ_FASTCALL recff_rawlen(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tref_isstr(tr)) + J->base[0] = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN); + else if (tref_istab(tr)) + J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, tr); + /* else: Interpreter will throw. 
*/ + UNUSED(rd); +} +#endif + +/* Determine mode of select() call. */ +int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv) +{ + if (tref_isstr(tr) && *strVdata(tv) == '#') { /* select('#', ...) */ + if (strV(tv)->len == 1) { + emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv))); + } else { + TRef trptr = emitir(IRT(IR_STRREF, IRT_P32), tr, lj_ir_kint(J, 0)); + TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY); + emitir(IRTG(IR_EQ, IRT_INT), trchar, lj_ir_kint(J, '#')); + } + return 0; + } else { /* select(n, ...) */ + int32_t start = argv2int(J, tv); + if (start == 0) lj_trace_err(J, LJ_TRERR_BADTYPE); /* A bit misleading. */ + return start; + } +} + +static void LJ_FASTCALL recff_select(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tr) { + ptrdiff_t start = lj_ffrecord_select_mode(J, tr, &rd->argv[0]); + if (start == 0) { /* select('#', ...) */ + J->base[0] = lj_ir_kint(J, J->maxslot - 1); + } else if (tref_isk(tr)) { /* select(k, ...) */ + ptrdiff_t n = (ptrdiff_t)J->maxslot; + if (start < 0) start += n; + else if (start > n) start = n; + rd->nres = n - start; + if (start >= 1) { + ptrdiff_t i; + for (i = 0; i < n - start; i++) + J->base[i] = J->base[start+i]; + } /* else: Interpreter will throw. */ + } else { + recff_nyiu(J, rd); + return; + } + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_tonumber(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + TRef base = J->base[1]; + if (tr && !tref_isnil(base)) { + base = lj_opt_narrow_toint(J, base); + if (!tref_isk(base) || IR(tref_ref(base))->i != 10) { + recff_nyiu(J, rd); + return; + } + } + if (tref_isnumber_str(tr)) { + if (tref_isstr(tr)) { + TValue tmp; + if (!lj_strscan_num(strV(&rd->argv[0]), &tmp)) { + recff_nyiu(J, rd); /* Would need an inverted STRTO for this case. */ + return; + } + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + } +#if LJ_HASFFI + } else if (tref_iscdata(tr)) { + lj_crecord_tonumber(J, rd); + return; +#endif + } else { + tr = TREF_NIL; + } + J->base[0] = tr; + UNUSED(rd); +} + +static TValue *recff_metacall_cp(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + lj_record_tailcall(J, 0, 1); + UNUSED(L); UNUSED(dummy); + return NULL; +} + +static int recff_metacall(jit_State *J, RecordFFData *rd, MMS mm) +{ + RecordIndex ix; + ix.tab = J->base[0]; + copyTV(J->L, &ix.tabv, &rd->argv[0]); + if (lj_record_mm_lookup(J, &ix, mm)) { /* Has metamethod? */ + int errcode; + TValue argv0; + /* Temporarily insert metamethod below object. */ + J->base[1] = J->base[0]; + J->base[0] = ix.mobj; + copyTV(J->L, &argv0, &rd->argv[0]); + copyTV(J->L, &rd->argv[1], &rd->argv[0]); + copyTV(J->L, &rd->argv[0], &ix.mobjv); + /* Need to protect lj_record_tailcall because it may throw. */ + errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp); + /* Always undo Lua stack changes to avoid confusing the interpreter. */ + copyTV(J->L, &rd->argv[0], &argv0); + if (errcode) + lj_err_throw(J->L, errcode); /* Propagate errors. */ + rd->nres = -1; /* Pending call. */ + return 1; /* Tailcalled to metamethod. */ + } + return 0; +} + +static void LJ_FASTCALL recff_tostring(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tref_isstr(tr)) { + /* Ignore __tostring in the string base metatable. */ + /* Pass on result in J->base[0]. */ + } else if (tr && !recff_metacall(J, rd, MM_tostring)) { + if (tref_isnumber(tr)) { + J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr, + tref_isnum(tr) ? 
IRTOSTR_NUM : IRTOSTR_INT); + } else if (tref_ispri(tr)) { + J->base[0] = lj_ir_kstr(J, lj_strfmt_obj(J->L, &rd->argv[0])); + } else { + recff_nyiu(J, rd); + return; + } + } +} + +static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; + if (tref_istab(ix.tab)) { + if (!tvisnumber(&rd->argv[1])) /* No support for string coercion. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); + setintV(&ix.keyv, numberVint(&rd->argv[1])+1); + settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); + ix.val = 0; ix.idxchain = 0; + ix.key = lj_opt_narrow_toint(J, J->base[1]); + J->base[0] = ix.key = emitir(IRTI(IR_ADD), ix.key, lj_ir_kint(J, 1)); + J->base[1] = lj_record_idx(J, &ix); + rd->nres = tref_isnil(J->base[1]) ? 0 : 2; + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_xpairs(jit_State *J, RecordFFData *rd) +{ + if (!(LJ_52 && recff_metacall(J, rd, MM_ipairs))) { + TRef tab = J->base[0]; + if (tref_istab(tab)) { + J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0])); + J->base[1] = tab; + J->base[2] = rd->data ? lj_ir_kint(J, 0) : TREF_NIL; + rd->nres = 3; + } /* else: Interpreter will throw. */ + } +} + +static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd) +{ + if (J->maxslot >= 1) { + lj_record_call(J, 0, J->maxslot - 1); + rd->nres = -1; /* Pending call. */ + } /* else: Interpreter will throw. */ +} + +static TValue *recff_xpcall_cp(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + lj_record_call(J, 1, J->maxslot - 2); + UNUSED(L); UNUSED(dummy); + return NULL; +} + +static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd) +{ + if (J->maxslot >= 2) { + TValue argv0, argv1; + TRef tmp; + int errcode; + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + /* Swap function and traceback. */ + tmp = J->base[0]; J->base[0] = J->base[1]; J->base[1] = tmp; + copyTV(J->L, &argv0, &rd->argv[0]); + copyTV(J->L, &argv1, &rd->argv[1]); + copyTV(J->L, &rd->argv[0], &argv1); + copyTV(J->L, &rd->argv[1], &argv0); + /* Need to protect lj_record_call because it may throw. */ + errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp); + /* Always undo Lua stack swap to avoid confusing the interpreter. */ + copyTV(J->L, &rd->argv[0], &argv0); + copyTV(J->L, &rd->argv[1], &argv1); + if (errcode) + lj_err_throw(J->L, errcode); /* Propagate errors. */ + rd->nres = -1; /* Pending call. */ + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_getfenv(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + /* Only support getfenv(0) for now. */ + if (tref_isint(tr) && tref_isk(tr) && IR(tref_ref(tr))->i == 0) { + TRef trl = emitir(IRT(IR_LREF, IRT_THREAD), 0, 0); + J->base[0] = emitir(IRT(IR_FLOAD, IRT_TAB), trl, IRFL_THREAD_ENV); + return; + } + recff_nyiu(J, rd); +} + +/* -- Math library fast functions ----------------------------------------- */ + +static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_knum_abs(J)); + UNUSED(rd); +} + +/* Record rounding functions math.floor and math.ceil. */ +static void LJ_FASTCALL recff_math_round(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (!tref_isinteger(tr)) { /* Pass through integers unmodified. */ + tr = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, tr), rd->data); + /* Result is integral (or NaN/Inf), but may not fit an int32_t. 
*/ + if (LJ_DUALNUM) { /* Try to narrow using a guarded conversion to int. */ + lua_Number n = lj_vm_foldfpm(numberVnum(&rd->argv[0]), rd->data); + if (n == (lua_Number)lj_num2int(n)) + tr = emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_CHECK); + } + J->base[0] = tr; + } +} + +/* Record unary math.* functions, mapped to IR_FPMATH opcode. */ +static void LJ_FASTCALL recff_math_unary(jit_State *J, RecordFFData *rd) +{ + J->base[0] = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, J->base[0]), rd->data); +} + +/* Record math.log. */ +static void LJ_FASTCALL recff_math_log(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + if (J->base[1]) { +#ifdef LUAJIT_NO_LOG2 + uint32_t fpm = IRFPM_LOG; +#else + uint32_t fpm = IRFPM_LOG2; +#endif + TRef trb = lj_ir_tonum(J, J->base[1]); + tr = emitir(IRTN(IR_FPMATH), tr, fpm); + trb = emitir(IRTN(IR_FPMATH), trb, fpm); + trb = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), trb); + tr = emitir(IRTN(IR_MUL), tr, trb); + } else { + tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_LOG); + } + J->base[0] = tr; + UNUSED(rd); +} + +/* Record math.atan2. */ +static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + TRef tr2 = lj_ir_tonum(J, J->base[1]); + J->base[0] = emitir(IRTN(IR_ATAN2), tr, tr2); + UNUSED(rd); +} + +/* Record math.ldexp. */ +static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); +#if LJ_TARGET_X86ORX64 + TRef tr2 = lj_ir_tonum(J, J->base[1]); +#else + TRef tr2 = lj_opt_narrow_toint(J, J->base[1]); +#endif + J->base[0] = emitir(IRTN(IR_LDEXP), tr, tr2); + UNUSED(rd); +} + +/* Record math.asin, math.acos, math.atan. */ +static void LJ_FASTCALL recff_math_atrig(jit_State *J, RecordFFData *rd) +{ + TRef y = lj_ir_tonum(J, J->base[0]); + TRef x = lj_ir_knum_one(J); + uint32_t ffid = rd->data; + if (ffid != FF_math_atan) { + TRef tmp = emitir(IRTN(IR_MUL), y, y); + tmp = emitir(IRTN(IR_SUB), x, tmp); + tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_SQRT); + if (ffid == FF_math_asin) { x = tmp; } else { x = y; y = tmp; } + } + J->base[0] = emitir(IRTN(IR_ATAN2), y, x); +} + +static void LJ_FASTCALL recff_math_htrig(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + J->base[0] = emitir(IRTN(IR_CALLN), tr, rd->data); +} + +static void LJ_FASTCALL recff_math_modf(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tref_isinteger(tr)) { + J->base[0] = tr; + J->base[1] = lj_ir_kint(J, 0); + } else { + TRef trt; + tr = lj_ir_tonum(J, tr); + trt = emitir(IRTN(IR_FPMATH), tr, IRFPM_TRUNC); + J->base[0] = trt; + J->base[1] = emitir(IRTN(IR_SUB), tr, trt); + } + rd->nres = 2; +} + +static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + if (!tref_isnumber_str(J->base[1])) + lj_trace_err(J, LJ_TRERR_BADTYPE); + J->base[0] = lj_opt_narrow_pow(J, tr, J->base[1], &rd->argv[1]); + UNUSED(rd); +} + +static void LJ_FASTCALL recff_math_minmax(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonumber(J, J->base[0]); + uint32_t op = rd->data; + BCReg i; + for (i = 1; J->base[i] != 0; i++) { + TRef tr2 = lj_ir_tonumber(J, J->base[i]); + IRType t = IRT_INT; + if (!(tref_isinteger(tr) && tref_isinteger(tr2))) { + if (tref_isinteger(tr)) tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); + if (tref_isinteger(tr2)) tr2 = emitir(IRTN(IR_CONV), tr2, IRCONV_NUM_INT); + t = IRT_NUM; + } + tr = emitir(IRT(op, t), tr, tr2); + } + J->base[0] = tr; +} + +static void 
LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd) +{ + GCudata *ud = udataV(&J->fn->c.upvalue[0]); + TRef tr, one; + lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */ + tr = lj_ir_call(J, IRCALL_lj_math_random_step, lj_ir_kptr(J, uddata(ud))); + one = lj_ir_knum_one(J); + tr = emitir(IRTN(IR_SUB), tr, one); + if (J->base[0]) { + TRef tr1 = lj_ir_tonum(J, J->base[0]); + if (J->base[1]) { /* d = floor(d*(r2-r1+1.0)) + r1 */ + TRef tr2 = lj_ir_tonum(J, J->base[1]); + tr2 = emitir(IRTN(IR_SUB), tr2, tr1); + tr2 = emitir(IRTN(IR_ADD), tr2, one); + tr = emitir(IRTN(IR_MUL), tr, tr2); + tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR); + tr = emitir(IRTN(IR_ADD), tr, tr1); + } else { /* d = floor(d*r1) + 1.0 */ + tr = emitir(IRTN(IR_MUL), tr, tr1); + tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR); + tr = emitir(IRTN(IR_ADD), tr, one); + } + } + J->base[0] = tr; + UNUSED(rd); +} + +/* -- Bit library fast functions ------------------------------------------ */ + +/* Record bit.tobit. */ +static void LJ_FASTCALL recff_bit_tobit(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; +#if LJ_HASFFI + if (tref_iscdata(tr)) { recff_bit64_tobit(J, rd); return; } +#endif + J->base[0] = lj_opt_narrow_tobit(J, tr); + UNUSED(rd); +} + +/* Record unary bit.bnot, bit.bswap. */ +static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd) +{ +#if LJ_HASFFI + if (recff_bit64_unary(J, rd)) + return; +#endif + J->base[0] = emitir(IRTI(rd->data), lj_opt_narrow_tobit(J, J->base[0]), 0); +} + +/* Record N-ary bit.band, bit.bor, bit.bxor. */ +static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd) +{ +#if LJ_HASFFI + if (recff_bit64_nary(J, rd)) + return; +#endif + { + TRef tr = lj_opt_narrow_tobit(J, J->base[0]); + uint32_t ot = IRTI(rd->data); + BCReg i; + for (i = 1; J->base[i] != 0; i++) + tr = emitir(ot, tr, lj_opt_narrow_tobit(J, J->base[i])); + J->base[0] = tr; + } +} + +/* Record bit shifts. */ +static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd) +{ +#if LJ_HASFFI + if (recff_bit64_shift(J, rd)) + return; +#endif + { + TRef tr = lj_opt_narrow_tobit(J, J->base[0]); + TRef tsh = lj_opt_narrow_tobit(J, J->base[1]); + IROp op = (IROp)rd->data; + if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && + !tref_isk(tsh)) + tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31)); +#ifdef LJ_TARGET_UNIFYROT + if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) { + op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR; + tsh = emitir(IRTI(IR_NEG), tsh, tsh); + } +#endif + J->base[0] = emitir(IRTI(op), tr, tsh); + } +} + +static void LJ_FASTCALL recff_bit_tohex(jit_State *J, RecordFFData *rd) +{ +#if LJ_HASFFI + TRef hdr = recff_bufhdr(J); + TRef tr = recff_bit64_tohex(J, rd, hdr); + J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); +#else + recff_nyiu(J, rd); /* Don't bother working around this NYI. */ +#endif +} + +/* -- String library fast functions --------------------------------------- */ + +/* Specialize to relative starting position for string. */ +static TRef recff_string_start(jit_State *J, GCstr *s, int32_t *st, TRef tr, + TRef trlen, TRef tr0) +{ + int32_t start = *st; + if (start < 0) { + emitir(IRTGI(IR_LT), tr, tr0); + tr = emitir(IRTI(IR_ADD), trlen, tr); + start = start + (int32_t)s->len; + emitir(start < 0 ? 
IRTGI(IR_LT) : IRTGI(IR_GE), tr, tr0); + if (start < 0) { + tr = tr0; + start = 0; + } + } else if (start == 0) { + emitir(IRTGI(IR_EQ), tr, tr0); + tr = tr0; + } else { + tr = emitir(IRTI(IR_ADD), tr, lj_ir_kint(J, -1)); + emitir(IRTGI(IR_GE), tr, tr0); + start--; + } + *st = start; + return tr; +} + +/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */ +static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd) +{ + TRef trstr = lj_ir_tostr(J, J->base[0]); + TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN); + TRef tr0 = lj_ir_kint(J, 0); + TRef trstart, trend; + GCstr *str = argv2str(J, &rd->argv[0]); + int32_t start, end; + if (rd->data) { /* string.sub(str, start [,end]) */ + start = argv2int(J, &rd->argv[1]); + trstart = lj_opt_narrow_toint(J, J->base[1]); + trend = J->base[2]; + if (tref_isnil(trend)) { + trend = lj_ir_kint(J, -1); + end = -1; + } else { + trend = lj_opt_narrow_toint(J, trend); + end = argv2int(J, &rd->argv[2]); + } + } else { /* string.byte(str, [,start [,end]]) */ + if (tref_isnil(J->base[1])) { + start = 1; + trstart = lj_ir_kint(J, 1); + } else { + start = argv2int(J, &rd->argv[1]); + trstart = lj_opt_narrow_toint(J, J->base[1]); + } + if (J->base[1] && !tref_isnil(J->base[2])) { + trend = lj_opt_narrow_toint(J, J->base[2]); + end = argv2int(J, &rd->argv[2]); + } else { + trend = trstart; + end = start; + } + } + if (end < 0) { + emitir(IRTGI(IR_LT), trend, tr0); + trend = emitir(IRTI(IR_ADD), emitir(IRTI(IR_ADD), trlen, trend), + lj_ir_kint(J, 1)); + end = end+(int32_t)str->len+1; + } else if ((MSize)end <= str->len) { + emitir(IRTGI(IR_ULE), trend, trlen); + } else { + emitir(IRTGI(IR_UGT), trend, trlen); + end = (int32_t)str->len; + trend = trlen; + } + trstart = recff_string_start(J, str, &start, trstart, trlen, tr0); + if (rd->data) { /* Return string.sub result. */ + if (end - start >= 0) { + /* Also handle empty range here, to avoid extra traces. */ + TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart); + emitir(IRTGI(IR_GE), trslen, tr0); + trptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart); + J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen); + } else { /* Range underflow: return empty string. */ + emitir(IRTGI(IR_LT), trend, trstart); + J->base[0] = lj_ir_kstr(J, &J2G(J)->strempty); + } + } else { /* Return string.byte result(s). */ + ptrdiff_t i, len = end - start; + if (len > 0) { + TRef trslen = emitir(IRTI(IR_SUB), trend, trstart); + emitir(IRTGI(IR_EQ), trslen, lj_ir_kint(J, (int32_t)len)); + if (J->baseslot + len > LJ_MAX_JSLOTS) + lj_trace_err_info(J, LJ_TRERR_STACKOV); + rd->nres = len; + for (i = 0; i < len; i++) { + TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i)); + tmp = emitir(IRT(IR_STRREF, IRT_P32), trstr, tmp); + J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY); + } + } else { /* Empty range or range underflow: return no results. */ + emitir(IRTGI(IR_LE), trend, trstart); + rd->nres = 0; + } + } +} + +static void LJ_FASTCALL recff_string_char(jit_State *J, RecordFFData *rd) +{ + TRef k255 = lj_ir_kint(J, 255); + BCReg i; + for (i = 0; J->base[i] != 0; i++) { /* Convert char values to strings. */ + TRef tr = lj_opt_narrow_toint(J, J->base[i]); + emitir(IRTGI(IR_ULE), tr, k255); + J->base[i] = emitir(IRT(IR_TOSTR, IRT_STR), tr, IRTOSTR_CHAR); + } + if (i > 1) { /* Concatenate the strings, if there's more than one. 
*/ + TRef hdr = recff_bufhdr(J), tr = hdr; + for (i = 0; J->base[i] != 0; i++) + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, J->base[i]); + J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); + } + UNUSED(rd); +} + +static void LJ_FASTCALL recff_string_rep(jit_State *J, RecordFFData *rd) +{ + TRef str = lj_ir_tostr(J, J->base[0]); + TRef rep = lj_opt_narrow_toint(J, J->base[1]); + TRef hdr, tr, str2 = 0; + if (!tref_isnil(J->base[2])) { + TRef sep = lj_ir_tostr(J, J->base[2]); + int32_t vrep = argv2int(J, &rd->argv[1]); + emitir(IRTGI(vrep > 1 ? IR_GT : IR_LE), rep, lj_ir_kint(J, 1)); + if (vrep > 1) { + TRef hdr2 = recff_bufhdr(J); + TRef tr2 = emitir(IRT(IR_BUFPUT, IRT_P32), hdr2, sep); + tr2 = emitir(IRT(IR_BUFPUT, IRT_P32), tr2, str); + str2 = emitir(IRT(IR_BUFSTR, IRT_STR), tr2, hdr2); + } + } + tr = hdr = recff_bufhdr(J); + if (str2) { + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, str); + str = str2; + rep = emitir(IRTI(IR_ADD), rep, lj_ir_kint(J, -1)); + } + tr = lj_ir_call(J, IRCALL_lj_buf_putstr_rep, tr, str, rep); + J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); +} + +static void LJ_FASTCALL recff_string_op(jit_State *J, RecordFFData *rd) +{ + TRef str = lj_ir_tostr(J, J->base[0]); + TRef hdr = recff_bufhdr(J); + TRef tr = lj_ir_call(J, rd->data, hdr, str); + J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); +} + +static void LJ_FASTCALL recff_string_find(jit_State *J, RecordFFData *rd) +{ + TRef trstr = lj_ir_tostr(J, J->base[0]); + TRef trpat = lj_ir_tostr(J, J->base[1]); + TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN); + TRef tr0 = lj_ir_kint(J, 0); + TRef trstart; + GCstr *str = argv2str(J, &rd->argv[0]); + GCstr *pat = argv2str(J, &rd->argv[1]); + int32_t start; + J->needsnap = 1; + if (tref_isnil(J->base[2])) { + trstart = lj_ir_kint(J, 1); + start = 1; + } else { + trstart = lj_opt_narrow_toint(J, J->base[2]); + start = argv2int(J, &rd->argv[2]); + } + trstart = recff_string_start(J, str, &start, trstart, trlen, tr0); + if ((MSize)start <= str->len) { + emitir(IRTGI(IR_ULE), trstart, trlen); + } else { + emitir(IRTGI(IR_UGT), trstart, trlen); +#if LJ_52 + J->base[0] = TREF_NIL; + return; +#else + trstart = trlen; + start = str->len; +#endif + } + /* Fixed arg or no pattern matching chars? (Specialized to pattern string.) */ + if ((J->base[2] && tref_istruecond(J->base[3])) || + (emitir(IRTG(IR_EQ, IRT_STR), trpat, lj_ir_kstr(J, pat)), + !lj_str_haspattern(pat))) { /* Search for fixed string. */ + TRef trsptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart); + TRef trpptr = emitir(IRT(IR_STRREF, IRT_P32), trpat, tr0); + TRef trslen = emitir(IRTI(IR_SUB), trlen, trstart); + TRef trplen = emitir(IRTI(IR_FLOAD), trpat, IRFL_STR_LEN); + TRef tr = lj_ir_call(J, IRCALL_lj_str_find, trsptr, trpptr, trslen, trplen); + TRef trp0 = lj_ir_kkptr(J, NULL); + if (lj_str_find(strdata(str)+(MSize)start, strdata(pat), + str->len-(MSize)start, pat->len)) { + TRef pos; + emitir(IRTG(IR_NE, IRT_P32), tr, trp0); + pos = emitir(IRTI(IR_SUB), tr, emitir(IRT(IR_STRREF, IRT_P32), trstr, tr0)); + J->base[0] = emitir(IRTI(IR_ADD), pos, lj_ir_kint(J, 1)); + J->base[1] = emitir(IRTI(IR_ADD), pos, trplen); + rd->nres = 2; + } else { + emitir(IRTG(IR_EQ, IRT_P32), tr, trp0); + J->base[0] = TREF_NIL; + } + } else { /* Search for pattern. 
*/ + recff_nyiu(J, rd); + return; + } +} + +static void LJ_FASTCALL recff_string_format(jit_State *J, RecordFFData *rd) +{ + TRef trfmt = lj_ir_tostr(J, J->base[0]); + GCstr *fmt = argv2str(J, &rd->argv[0]); + int arg = 1; + TRef hdr, tr; + FormatState fs; + SFormat sf; + /* Specialize to the format string. */ + emitir(IRTG(IR_EQ, IRT_STR), trfmt, lj_ir_kstr(J, fmt)); + tr = hdr = recff_bufhdr(J); + lj_strfmt_init(&fs, strdata(fmt), fmt->len); + while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { /* Parse format. */ + TRef tra = sf == STRFMT_LIT ? 0 : J->base[arg++]; + TRef trsf = lj_ir_kint(J, (int32_t)sf); + IRCallID id; + switch (STRFMT_TYPE(sf)) { + case STRFMT_LIT: + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, + lj_ir_kstr(J, lj_str_new(J->L, fs.str, fs.len))); + break; + case STRFMT_INT: + id = IRCALL_lj_strfmt_putfnum_int; + handle_int: + if (!tref_isinteger(tra)) + goto handle_num; + if (sf == STRFMT_INT) { /* Shortcut for plain %d. */ + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, + emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_INT)); + } else { +#if LJ_HASFFI + tra = emitir(IRT(IR_CONV, IRT_U64), tra, + (IRT_INT|(IRT_U64<<5)|IRCONV_SEXT)); + tr = lj_ir_call(J, IRCALL_lj_strfmt_putfxint, tr, trsf, tra); + lj_needsplit(J); +#else + recff_nyiu(J, rd); /* Don't bother working around this NYI. */ + return; +#endif + } + break; + case STRFMT_UINT: + id = IRCALL_lj_strfmt_putfnum_uint; + goto handle_int; + case STRFMT_NUM: + id = IRCALL_lj_strfmt_putfnum; + handle_num: + tra = lj_ir_tonum(J, tra); + tr = lj_ir_call(J, id, tr, trsf, tra); + if (LJ_SOFTFP) lj_needsplit(J); + break; + case STRFMT_STR: + if (!tref_isstr(tra)) { + recff_nyiu(J, rd); /* NYI: __tostring and non-string types for %s. */ + return; + } + if (sf == STRFMT_STR) /* Shortcut for plain %s. */ + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, tra); + else if ((sf & STRFMT_T_QUOTED)) + tr = lj_ir_call(J, IRCALL_lj_strfmt_putquoted, tr, tra); + else + tr = lj_ir_call(J, IRCALL_lj_strfmt_putfstr, tr, trsf, tra); + break; + case STRFMT_CHAR: + tra = lj_opt_narrow_toint(J, tra); + if (sf == STRFMT_CHAR) /* Shortcut for plain %c. */ + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, + emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_CHAR)); + else + tr = lj_ir_call(J, IRCALL_lj_strfmt_putfchar, tr, trsf, tra); + break; + case STRFMT_PTR: /* NYI */ + case STRFMT_ERR: + default: + recff_nyiu(J, rd); + return; + } + } + J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); +} + +/* -- Table library fast functions ---------------------------------------- */ + +static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; + ix.val = J->base[1]; + rd->nres = 0; + if (tref_istab(ix.tab) && ix.val) { + if (!J->base[2]) { /* Simple push: t[#t+1] = v */ + TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, ix.tab); + GCtab *t = tabV(&rd->argv[0]); + ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); + settabV(J->L, &ix.tabv, t); + setintV(&ix.keyv, lj_tab_len(t) + 1); + ix.idxchain = 0; + lj_record_idx(J, &ix); /* Set new value. */ + } else { /* Complex case: insert in the middle. */ + recff_nyiu(J, rd); + return; + } + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_table_concat(jit_State *J, RecordFFData *rd) +{ + TRef tab = J->base[0]; + if (tref_istab(tab)) { + TRef sep = !tref_isnil(J->base[1]) ? + lj_ir_tostr(J, J->base[1]) : lj_ir_knull(J, IRT_STR); + TRef tri = (J->base[1] && !tref_isnil(J->base[2])) ? 
+ lj_opt_narrow_toint(J, J->base[2]) : lj_ir_kint(J, 1); + TRef tre = (J->base[1] && J->base[2] && !tref_isnil(J->base[3])) ? + lj_opt_narrow_toint(J, J->base[3]) : + lj_ir_call(J, IRCALL_lj_tab_len, tab); + TRef hdr = recff_bufhdr(J); + TRef tr = lj_ir_call(J, IRCALL_lj_buf_puttab, hdr, tab, sep, tri, tre); + emitir(IRTG(IR_NE, IRT_PTR), tr, lj_ir_kptr(J, NULL)); + J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); + } /* else: Interpreter will throw. */ + UNUSED(rd); +} + +static void LJ_FASTCALL recff_table_new(jit_State *J, RecordFFData *rd) +{ + TRef tra = lj_opt_narrow_toint(J, J->base[0]); + TRef trh = lj_opt_narrow_toint(J, J->base[1]); + J->base[0] = lj_ir_call(J, IRCALL_lj_tab_new_ah, tra, trh); + UNUSED(rd); +} + +static void LJ_FASTCALL recff_table_clear(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tref_istab(tr)) { + rd->nres = 0; + lj_ir_call(J, IRCALL_lj_tab_clear, tr); + J->needsnap = 1; + } /* else: Interpreter will throw. */ +} + +/* -- I/O library fast functions ------------------------------------------ */ + +/* Get FILE* for I/O function. Any I/O error aborts recording, so there's +** no need to encode the alternate cases for any of the guards. +*/ +static TRef recff_io_fp(jit_State *J, TRef *udp, int32_t id) +{ + TRef tr, ud, fp; + if (id) { /* io.func() */ + tr = lj_ir_kptr(J, &J2G(J)->gcroot[id]); + ud = emitir(IRT(IR_XLOAD, IRT_UDATA), tr, 0); + } else { /* fp:method() */ + ud = J->base[0]; + if (!tref_isudata(ud)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + tr = emitir(IRT(IR_FLOAD, IRT_U8), ud, IRFL_UDATA_UDTYPE); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE)); + } + *udp = ud; + fp = emitir(IRT(IR_FLOAD, IRT_PTR), ud, IRFL_UDATA_FILE); + emitir(IRTG(IR_NE, IRT_PTR), fp, lj_ir_knull(J, IRT_PTR)); + return fp; +} + +static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd) +{ + TRef ud, fp = recff_io_fp(J, &ud, rd->data); + TRef zero = lj_ir_kint(J, 0); + TRef one = lj_ir_kint(J, 1); + ptrdiff_t i = rd->data == 0 ? 1 : 0; + for (; J->base[i]; i++) { + TRef str = lj_ir_tostr(J, J->base[i]); + TRef buf = emitir(IRT(IR_STRREF, IRT_P32), str, zero); + TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN); + if (tref_isk(len) && IR(tref_ref(len))->i == 1) { + IRIns *irs = IR(tref_ref(str)); + TRef tr = (irs->o == IR_TOSTR && irs->op2 == IRTOSTR_CHAR) ? + irs->op1 : + emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY); + tr = lj_ir_call(J, IRCALL_fputc, tr, fp); + if (results_wanted(J) != 0) /* Check result only if not ignored. */ + emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1)); + } else { + TRef tr = lj_ir_call(J, IRCALL_fwrite, buf, one, len, fp); + if (results_wanted(J) != 0) /* Check result only if not ignored. */ + emitir(IRTGI(IR_EQ), tr, len); + } + } + J->base[0] = LJ_52 ? ud : TREF_TRUE; +} + +static void LJ_FASTCALL recff_io_flush(jit_State *J, RecordFFData *rd) +{ + TRef ud, fp = recff_io_fp(J, &ud, rd->data); + TRef tr = lj_ir_call(J, IRCALL_fflush, fp); + if (results_wanted(J) != 0) /* Check result only if not ignored. 
*/ + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0)); + J->base[0] = TREF_TRUE; +} + +/* -- Debug library fast functions ---------------------------------------- */ + +static void LJ_FASTCALL recff_debug_getmetatable(jit_State *J, RecordFFData *rd) +{ + GCtab *mt; + TRef mtref; + TRef tr = J->base[0]; + if (tref_istab(tr)) { + mt = tabref(tabV(&rd->argv[0])->metatable); + mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_TAB_META); + } else if (tref_isudata(tr)) { + mt = tabref(udataV(&rd->argv[0])->metatable); + mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_UDATA_META); + } else { + mt = tabref(basemt_obj(J2G(J), &rd->argv[0])); + J->base[0] = mt ? lj_ir_ktab(J, mt) : TREF_NIL; + return; + } + emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB)); + J->base[0] = mt ? mtref : TREF_NIL; +} + +/* -- Record calls to fast functions -------------------------------------- */ + +#include "lj_recdef.h" + +static uint32_t recdef_lookup(GCfunc *fn) +{ + if (fn->c.ffid < sizeof(recff_idmap)/sizeof(recff_idmap[0])) + return recff_idmap[fn->c.ffid]; + else + return 0; +} + +/* Record entry to a fast function or C function. */ +void lj_ffrecord_func(jit_State *J) +{ + RecordFFData rd; + uint32_t m = recdef_lookup(J->fn); + rd.data = m & 0xff; + rd.nres = 1; /* Default is one result. */ + rd.argv = J->L->base; + J->base[J->maxslot] = 0; /* Mark end of arguments. */ + (recff_func[m >> 8])(J, &rd); /* Call recff_* handler. */ + if (rd.nres >= 0) { + if (J->postproc == LJ_POST_NONE) J->postproc = LJ_POST_FFRETRY; + lj_record_ret(J, 0, rd.nres); + } +} + +#undef IR +#undef emitir + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.h new file mode 100644 index 00000000000..f858ca29cfc --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ffrecord.h @@ -0,0 +1,24 @@ +/* +** Fast function call recorder. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FFRECORD_H +#define _LJ_FFRECORD_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +/* Data used by handlers to record a fast function. */ +typedef struct RecordFFData { + TValue *argv; /* Runtime argument values. */ + ptrdiff_t nres; /* Number of returned results (defaults to 1). */ + uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */ +} RecordFFData; + +LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv); +LJ_FUNC void lj_ffrecord_func(jit_State *J); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_frame.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_frame.h new file mode 100644 index 00000000000..a86c36be7e5 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_frame.h @@ -0,0 +1,259 @@ +/* +** Stack frames. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FRAME_H +#define _LJ_FRAME_H + +#include "lj_obj.h" +#include "lj_bc.h" + +/* -- Lua stack frame ----------------------------------------------------- */ + +/* Frame type markers in LSB of PC (4-byte aligned) or delta (8-byte aligned: +** +** PC 00 Lua frame +** delta 001 C frame +** delta 010 Continuation frame +** delta 011 Lua vararg frame +** delta 101 cpcall() frame +** delta 110 ff pcall() frame +** delta 111 ff pcall() frame with active hook +*/ +enum { + FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG, + FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH +}; +#define FRAME_TYPE 3 +#define FRAME_P 4 +#define FRAME_TYPEP (FRAME_TYPE|FRAME_P) + +/* Macros to access and modify Lua frames. */ +#if LJ_FR2 +/* Two-slot frame info, required for 64 bit PC/GCRef: +** +** base-2 base-1 | base base+1 ... +** [func PC/delta/ft] | [slots ...] +** ^-- frame | ^-- base ^-- top +** +** Continuation frames: +** +** base-4 base-3 base-2 base-1 | base base+1 ... +** [cont PC ] [func PC/delta/ft] | [slots ...] +** ^-- frame | ^-- base ^-- top +*/ +#define frame_gc(f) (gcval((f)-1)) +#define frame_ftsz(f) ((ptrdiff_t)(f)->ftsz) +#define frame_pc(f) ((const BCIns *)frame_ftsz(f)) +#define setframe_gc(f, p, tp) (setgcVraw((f)-1, (p), (tp))) +#define setframe_ftsz(f, sz) ((f)->ftsz = (sz)) +#define setframe_pc(f, pc) ((f)->ftsz = (int64_t)(intptr_t)(pc)) +#else +/* One-slot frame info, sufficient for 32 bit PC/GCRef: +** +** base-1 | base base+1 ... +** lo hi | +** [func | PC/delta/ft] | [slots ...] +** ^-- frame | ^-- base ^-- top +** +** Continuation frames: +** +** base-2 base-1 | base base+1 ... +** lo hi lo hi | +** [cont | PC] [func | PC/delta/ft] | [slots ...] +** ^-- frame | ^-- base ^-- top +*/ +#define frame_gc(f) (gcref((f)->fr.func)) +#define frame_ftsz(f) ((ptrdiff_t)(f)->fr.tp.ftsz) +#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns)) +#define setframe_gc(f, p, tp) (setgcref((f)->fr.func, (p)), UNUSED(tp)) +#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (int32_t)(sz)) +#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc))) +#endif + +#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE) +#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP) +#define frame_islua(f) (frame_type(f) == FRAME_LUA) +#define frame_isc(f) (frame_type(f) == FRAME_C) +#define frame_iscont(f) (frame_typep(f) == FRAME_CONT) +#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG) +#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL) + +#define frame_func(f) (&frame_gc(f)->fn) +#define frame_delta(f) (frame_ftsz(f) >> 3) +#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP) + +enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. 
*/ + +#if LJ_FR2 +#define frame_contpc(f) (frame_pc((f)-2)) +#define frame_contv(f) (((f)-3)->u64) +#else +#define frame_contpc(f) (frame_pc((f)-1)) +#define frame_contv(f) (((f)-1)->u32.lo) +#endif +#if LJ_FR2 +#define frame_contf(f) ((ASMFunction)(uintptr_t)((f)-3)->u64) +#elif LJ_64 +#define frame_contf(f) \ + ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \ + (intptr_t)(int32_t)((f)-1)->u32.lo)) +#else +#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void)) +#endif +#define frame_iscont_fficb(f) \ + (LJ_HASFFI && frame_contv(f) == LJ_CONT_FFI_CALLBACK) + +#define frame_prevl(f) ((f) - (1+LJ_FR2+bc_a(frame_pc(f)[-1]))) +#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f))) +#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f)) +/* Note: this macro does not skip over FRAME_VARG. */ + +/* -- C stack frame ------------------------------------------------------- */ + +/* Macros to access and modify the C stack frame chain. */ + +/* These definitions must match with the arch-specific *.dasc files. */ +#if LJ_TARGET_X86 +#define CFRAME_OFS_ERRF (15*4) +#define CFRAME_OFS_NRES (14*4) +#define CFRAME_OFS_PREV (13*4) +#define CFRAME_OFS_L (12*4) +#define CFRAME_OFS_PC (6*4) +#define CFRAME_OFS_MULTRES (5*4) +#define CFRAME_SIZE (12*4) +#define CFRAME_SHIFT_MULTRES 0 +#elif LJ_TARGET_X64 +#if LJ_ABI_WIN +#define CFRAME_OFS_PREV (13*8) +#if LJ_GC64 +#define CFRAME_OFS_PC (12*8) +#define CFRAME_OFS_L (11*8) +#define CFRAME_OFS_ERRF (21*4) +#define CFRAME_OFS_NRES (20*4) +#define CFRAME_OFS_MULTRES (8*4) +#else +#define CFRAME_OFS_PC (25*4) +#define CFRAME_OFS_L (24*4) +#define CFRAME_OFS_ERRF (23*4) +#define CFRAME_OFS_NRES (22*4) +#define CFRAME_OFS_MULTRES (21*4) +#endif +#define CFRAME_SIZE (10*8) +#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8) +#define CFRAME_SHIFT_MULTRES 0 +#else +#define CFRAME_OFS_PREV (4*8) +#if LJ_GC64 +#define CFRAME_OFS_PC (3*8) +#define CFRAME_OFS_L (2*8) +#define CFRAME_OFS_ERRF (3*4) +#define CFRAME_OFS_NRES (2*4) +#define CFRAME_OFS_MULTRES (0*4) +#else +#define CFRAME_OFS_PC (7*4) +#define CFRAME_OFS_L (6*4) +#define CFRAME_OFS_ERRF (5*4) +#define CFRAME_OFS_NRES (4*4) +#define CFRAME_OFS_MULTRES (1*4) +#endif +#if LJ_NO_UNWIND +#define CFRAME_SIZE (12*8) +#else +#define CFRAME_SIZE (10*8) +#endif +#define CFRAME_SIZE_JIT (CFRAME_SIZE + 16) +#define CFRAME_SHIFT_MULTRES 0 +#endif +#elif LJ_TARGET_ARM +#define CFRAME_OFS_ERRF 24 +#define CFRAME_OFS_NRES 20 +#define CFRAME_OFS_PREV 16 +#define CFRAME_OFS_L 12 +#define CFRAME_OFS_PC 8 +#define CFRAME_OFS_MULTRES 4 +#if LJ_ARCH_HASFPU +#define CFRAME_SIZE 128 +#else +#define CFRAME_SIZE 64 +#endif +#define CFRAME_SHIFT_MULTRES 3 +#elif LJ_TARGET_ARM64 +#define CFRAME_OFS_ERRF 196 +#define CFRAME_OFS_NRES 200 +#define CFRAME_OFS_PREV 160 +#define CFRAME_OFS_L 176 +#define CFRAME_OFS_PC 168 +#define CFRAME_OFS_MULTRES 192 +#define CFRAME_SIZE 208 +#define CFRAME_SHIFT_MULTRES 3 +#elif LJ_TARGET_PPC +#if LJ_TARGET_XBOX360 +#define CFRAME_OFS_ERRF 424 +#define CFRAME_OFS_NRES 420 +#define CFRAME_OFS_PREV 400 +#define CFRAME_OFS_L 416 +#define CFRAME_OFS_PC 412 +#define CFRAME_OFS_MULTRES 408 +#define CFRAME_SIZE 384 +#define CFRAME_SHIFT_MULTRES 3 +#elif LJ_ARCH_PPC32ON64 +#define CFRAME_OFS_ERRF 472 +#define CFRAME_OFS_NRES 468 +#define CFRAME_OFS_PREV 448 +#define CFRAME_OFS_L 464 +#define CFRAME_OFS_PC 460 +#define CFRAME_OFS_MULTRES 456 +#define CFRAME_SIZE 400 +#define CFRAME_SHIFT_MULTRES 3 +#else +#define CFRAME_OFS_ERRF 48 +#define CFRAME_OFS_NRES 44 +#define 
CFRAME_OFS_PREV 40 +#define CFRAME_OFS_L 36 +#define CFRAME_OFS_PC 32 +#define CFRAME_OFS_MULTRES 28 +#define CFRAME_SIZE 272 +#define CFRAME_SHIFT_MULTRES 3 +#endif +#elif LJ_TARGET_MIPS +#define CFRAME_OFS_ERRF 124 +#define CFRAME_OFS_NRES 120 +#define CFRAME_OFS_PREV 116 +#define CFRAME_OFS_L 112 +#define CFRAME_OFS_PC 20 +#define CFRAME_OFS_MULTRES 16 +#define CFRAME_SIZE 112 +#define CFRAME_SHIFT_MULTRES 3 +#else +#error "Missing CFRAME_* definitions for this architecture" +#endif + +#ifndef CFRAME_SIZE_JIT +#define CFRAME_SIZE_JIT CFRAME_SIZE +#endif + +#define CFRAME_RESUME 1 +#define CFRAME_UNWIND_FF 2 /* Only used in unwinder. */ +#define CFRAME_RAWMASK (~(intptr_t)(CFRAME_RESUME|CFRAME_UNWIND_FF)) + +#define cframe_errfunc(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_ERRF)) +#define cframe_nres(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_NRES)) +#define cframe_prev(cf) (*(void **)(((char *)(cf))+CFRAME_OFS_PREV)) +#define cframe_multres(cf) (*(uint32_t *)(((char *)(cf))+CFRAME_OFS_MULTRES)) +#define cframe_multres_n(cf) (cframe_multres((cf)) >> CFRAME_SHIFT_MULTRES) +#define cframe_L(cf) \ + (&gcref(*(GCRef *)(((char *)(cf))+CFRAME_OFS_L))->th) +#define cframe_pc(cf) \ + (mref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), const BCIns)) +#define setcframe_L(cf, L) \ + (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_L), (L))) +#define setcframe_pc(cf, pc) \ + (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), (pc))) +#define cframe_canyield(cf) ((intptr_t)(cf) & CFRAME_RESUME) +#define cframe_unwind_ff(cf) ((intptr_t)(cf) & CFRAME_UNWIND_FF) +#define cframe_raw(cf) ((void *)((intptr_t)(cf) & CFRAME_RAWMASK)) +#define cframe_Lpc(L) cframe_pc(cframe_raw(L->cframe)) + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_func.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_func.c new file mode 100644 index 00000000000..eb8a9dbbfd8 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_func.c @@ -0,0 +1,185 @@ +/* +** Function handling (prototypes, functions and upvalues). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_func_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_func.h" +#include "lj_trace.h" +#include "lj_vm.h" + +/* -- Prototypes ---------------------------------------------------------- */ + +void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt) +{ + lj_mem_free(g, pt, pt->sizept); +} + +/* -- Upvalues ------------------------------------------------------------ */ + +static void unlinkuv(GCupval *uv) +{ + lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); + setgcrefr(uvnext(uv)->prev, uv->prev); + setgcrefr(uvprev(uv)->next, uv->next); +} + +/* Find existing open upvalue for a stack slot or create a new one. */ +static GCupval *func_finduv(lua_State *L, TValue *slot) +{ + global_State *g = G(L); + GCRef *pp = &L->openupval; + GCupval *p; + GCupval *uv; + /* Search the sorted list of open upvalues. */ + while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) { + lua_assert(!p->closed && uvval(p) != &p->tv); + if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */ + if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */ + flipwhite(obj2gco(p)); + return p; + } + pp = &p->nextgc; + } + /* No matching upvalue found. Create a new one. 
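**
** (Illustrative note, a sketch of the invariant this search relies on:
** L->openupval is kept sorted by stack slot, highest slot first, e.g.
**
**   L->openupval -> uv(slot 7) -> uv(slot 4) -> uv(slot 2) -> NULL
**
** The slot numbers are made up; the point is that the scan above may stop
** as soon as uvval(p) < slot, and that lj_func_closeuv() below can close
** every upvalue at or above a given stack level by popping entries from
** the head of the list.)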
*/ + uv = lj_mem_newt(L, sizeof(GCupval), GCupval); + newwhite(g, uv); + uv->gct = ~LJ_TUPVAL; + uv->closed = 0; /* Still open. */ + setmref(uv->v, slot); /* Pointing to the stack slot. */ + /* NOBARRIER: The GCupval is new (marked white) and open. */ + setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */ + setgcref(*pp, obj2gco(uv)); + setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */ + setgcrefr(uv->next, g->uvhead.next); + setgcref(uvnext(uv)->prev, obj2gco(uv)); + setgcref(g->uvhead.next, obj2gco(uv)); + lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); + return uv; +} + +/* Create an empty and closed upvalue. */ +static GCupval *func_emptyuv(lua_State *L) +{ + GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval)); + uv->gct = ~LJ_TUPVAL; + uv->closed = 1; + setnilV(&uv->tv); + setmref(uv->v, &uv->tv); + return uv; +} + +/* Close all open upvalues pointing to some stack level or above. */ +void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level) +{ + GCupval *uv; + global_State *g = G(L); + while (gcref(L->openupval) != NULL && + uvval((uv = gco2uv(gcref(L->openupval)))) >= level) { + GCobj *o = obj2gco(uv); + lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv); + setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */ + if (isdead(g, o)) { + lj_func_freeuv(g, uv); + } else { + unlinkuv(uv); + lj_gc_closeuv(g, uv); + } + } +} + +void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv) +{ + if (!uv->closed) + unlinkuv(uv); + lj_mem_freet(g, uv); +} + +/* -- Functions (closures) ------------------------------------------------ */ + +GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env) +{ + GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems)); + fn->c.gct = ~LJ_TFUNC; + fn->c.ffid = FF_C; + fn->c.nupvalues = (uint8_t)nelems; + /* NOBARRIER: The GCfunc is new (marked white). */ + setmref(fn->c.pc, &G(L)->bc_cfunc_ext); + setgcref(fn->c.env, obj2gco(env)); + return fn; +} + +static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env) +{ + uint32_t count; + GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv)); + fn->l.gct = ~LJ_TFUNC; + fn->l.ffid = FF_LUA; + fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */ + /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */ + setmref(fn->l.pc, proto_bc(pt)); + setgcref(fn->l.env, obj2gco(env)); + /* Saturating 3 bit counter (0..7) for created closures. */ + count = (uint32_t)pt->flags + PROTO_CLCOUNT; + pt->flags = (uint8_t)(count - ((count >> PROTO_CLC_BITS) & PROTO_CLCOUNT)); + return fn; +} + +/* Create a new Lua function with empty upvalues. */ +GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env) +{ + GCfunc *fn = func_newL(L, pt, env); + MSize i, nuv = pt->sizeuv; + /* NOBARRIER: The GCfunc is new (marked white). */ + for (i = 0; i < nuv; i++) { + GCupval *uv = func_emptyuv(L); + uv->dhash = (uint32_t)(uintptr_t)pt ^ ((uint32_t)proto_uv(pt)[i] << 24); + setgcref(fn->l.uvptr[i], obj2gco(uv)); + } + fn->l.nupvalues = (uint8_t)nuv; + return fn; +} + +/* Do a GC check and create a new Lua function with inherited upvalues. */ +GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent) +{ + GCfunc *fn; + GCRef *puv; + MSize i, nuv; + TValue *base; + lj_gc_check_fixtop(L); + fn = func_newL(L, pt, tabref(parent->env)); + /* NOBARRIER: The GCfunc is new (marked white). 
*/ + puv = parent->uvptr; + nuv = pt->sizeuv; + base = L->base; + for (i = 0; i < nuv; i++) { + uint32_t v = proto_uv(pt)[i]; + GCupval *uv; + if ((v & PROTO_UV_LOCAL)) { + uv = func_finduv(L, base + (v & 0xff)); + uv->immutable = ((v / PROTO_UV_IMMUTABLE) & 1); + uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24); + } else { + uv = &gcref(puv[v])->uv; + } + setgcref(fn->l.uvptr[i], obj2gco(uv)); + } + fn->l.nupvalues = (uint8_t)nuv; + return fn; +} + +void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn) +{ + MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : + sizeCfunc((MSize)fn->c.nupvalues); + lj_mem_free(g, fn, size); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_func.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_func.h new file mode 100644 index 00000000000..a6e534e7739 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_func.h @@ -0,0 +1,24 @@ +/* +** Function handling (prototypes, functions and upvalues). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FUNC_H +#define _LJ_FUNC_H + +#include "lj_obj.h" + +/* Prototypes. */ +LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt); + +/* Upvalues. */ +LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level); +LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv); + +/* Functions (closures). */ +LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env); +LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env); +LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent); +LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_gc.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_gc.c new file mode 100644 index 00000000000..99d664aa2a1 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_gc.c @@ -0,0 +1,845 @@ +/* +** Garbage collector. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_gc_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_udata.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#endif +#include "lj_trace.h" +#include "lj_vm.h" + +#define GCSTEPSIZE 1024u +#define GCSWEEPMAX 40 +#define GCSWEEPCOST 10 +#define GCFINALIZECOST 100 + +/* Macros to set GCobj colors and flags. */ +#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES) +#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK) +#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED) + +/* -- Mark phase ---------------------------------------------------------- */ + +/* Mark a TValue (if needed). */ +#define gc_marktv(g, tv) \ + { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \ + if (tviswhite(tv)) gc_mark(g, gcV(tv)); } + +/* Mark a GCobj (if needed). */ +#define gc_markobj(g, o) \ + { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); } + +/* Mark a string object. */ +#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES) + +/* Mark a white GCobj. 
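**
** (Worked example, as a note on the assertion in gc_marktv() above: the
** gch.gct field holds the one's complement of the TValue tag.  A closure,
** for instance, is created with fn->c.gct = ~LJ_TFUNC in lj_func.c, and a
** TValue referencing it has itype(tv) == LJ_TFUNC, so ~itype(tv) equals
** gcval(tv)->gch.gct, which is exactly what gc_marktv() asserts before
** marking.)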
*/ +static void gc_mark(global_State *g, GCobj *o) +{ + int gct = o->gch.gct; + lua_assert(iswhite(o) && !isdead(g, o)); + white2gray(o); + if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) { + GCtab *mt = tabref(gco2ud(o)->metatable); + gray2black(o); /* Userdata are never gray. */ + if (mt) gc_markobj(g, mt); + gc_markobj(g, tabref(gco2ud(o)->env)); + } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) { + GCupval *uv = gco2uv(o); + gc_marktv(g, uvval(uv)); + if (uv->closed) + gray2black(o); /* Closed upvalues are never gray. */ + } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) { + lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB || + gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO); + setgcrefr(o->gch.gclist, g->gc.gray); + setgcref(g->gc.gray, o); + } +} + +/* Mark GC roots. */ +static void gc_mark_gcroot(global_State *g) +{ + ptrdiff_t i; + for (i = 0; i < GCROOT_MAX; i++) + if (gcref(g->gcroot[i]) != NULL) + gc_markobj(g, gcref(g->gcroot[i])); +} + +/* Start a GC cycle and mark the root set. */ +static void gc_mark_start(global_State *g) +{ + setgcrefnull(g->gc.gray); + setgcrefnull(g->gc.grayagain); + setgcrefnull(g->gc.weak); + gc_markobj(g, mainthread(g)); + gc_markobj(g, tabref(mainthread(g)->env)); + gc_marktv(g, &g->registrytv); + gc_mark_gcroot(g); + g->gc.state = GCSpropagate; +} + +/* Mark open upvalues. */ +static void gc_mark_uv(global_State *g) +{ + GCupval *uv; + for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { + lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); + if (isgray(obj2gco(uv))) + gc_marktv(g, uvval(uv)); + } +} + +/* Mark userdata in mmudata list. */ +static void gc_mark_mmudata(global_State *g) +{ + GCobj *root = gcref(g->gc.mmudata); + GCobj *u = root; + if (u) { + do { + u = gcnext(u); + makewhite(g, u); /* Could be from previous GC. */ + gc_mark(g, u); + } while (u != root); + } +} + +/* Separate userdata objects to be finalized to mmudata list. */ +size_t lj_gc_separateudata(global_State *g, int all) +{ + size_t m = 0; + GCRef *p = &mainthread(g)->nextgc; + GCobj *o; + while ((o = gcref(*p)) != NULL) { + if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) { + p = &o->gch.nextgc; /* Nothing to do. */ + } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) { + markfinalized(o); /* Done, as there's no __gc metamethod. */ + p = &o->gch.nextgc; + } else { /* Otherwise move userdata to be finalized to mmudata list. */ + m += sizeudata(gco2ud(o)); + markfinalized(o); + *p = o->gch.nextgc; + if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */ + GCobj *root = gcref(g->gc.mmudata); + setgcrefr(o->gch.nextgc, root->gch.nextgc); + setgcref(root->gch.nextgc, o); + setgcref(g->gc.mmudata, o); + } else { /* Create circular list. */ + setgcref(o->gch.nextgc, o); + setgcref(g->gc.mmudata, o); + } + } + } + return m; +} + +/* -- Propagation phase --------------------------------------------------- */ + +/* Traverse a table. */ +static int gc_traverse_tab(global_State *g, GCtab *t) +{ + int weak = 0; + cTValue *mode; + GCtab *mt = tabref(t->metatable); + if (mt) + gc_markobj(g, mt); + mode = lj_meta_fastg(g, mt, MM_mode); + if (mode && tvisstr(mode)) { /* Valid __mode field? */ + const char *modestr = strVdata(mode); + int c; + while ((c = *modestr++)) { + if (c == 'k') weak |= LJ_GC_WEAKKEY; + else if (c == 'v') weak |= LJ_GC_WEAKVAL; + else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL); + } + if (weak > 0) { /* Weak tables are cleared in the atomic phase. 
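**
** (Worked example of the __mode parsing above:
**   __mode = "k"  -> weak == LJ_GC_WEAKKEY  (hash keys are not marked),
**   __mode = "v"  -> weak == LJ_GC_WEAKVAL  (values are not marked),
**   __mode = "kv" -> weak == LJ_GC_WEAK     (only the metatable is marked;
**                                            the early return below skips
**                                            the array and hash parts).)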
*/ + t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak); + setgcrefr(t->gclist, g->gc.weak); + setgcref(g->gc.weak, obj2gco(t)); + } + } + if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */ + return 1; + if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */ + MSize i, asize = t->asize; + for (i = 0; i < asize; i++) + gc_marktv(g, arrayslot(t, i)); + } + if (t->hmask > 0) { /* Mark hash part. */ + Node *node = noderef(t->node); + MSize i, hmask = t->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ + lua_assert(!tvisnil(&n->key)); + if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); + if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); + } + } + } + return weak; +} + +/* Traverse a function. */ +static void gc_traverse_func(global_State *g, GCfunc *fn) +{ + gc_markobj(g, tabref(fn->c.env)); + if (isluafunc(fn)) { + uint32_t i; + lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv); + gc_markobj(g, funcproto(fn)); + for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ + gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); + } else { + uint32_t i; + for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */ + gc_marktv(g, &fn->c.upvalue[i]); + } +} + +#if LJ_HASJIT +/* Mark a trace. */ +static void gc_marktrace(global_State *g, TraceNo traceno) +{ + GCobj *o = obj2gco(traceref(G2J(g), traceno)); + lua_assert(traceno != G2J(g)->cur.traceno); + if (iswhite(o)) { + white2gray(o); + setgcrefr(o->gch.gclist, g->gc.gray); + setgcref(g->gc.gray, o); + } +} + +/* Traverse a trace. */ +static void gc_traverse_trace(global_State *g, GCtrace *T) +{ + IRRef ref; + if (T->traceno == 0) return; + for (ref = T->nk; ref < REF_TRUE; ref++) { + IRIns *ir = &T->ir[ref]; + if (ir->o == IR_KGC) + gc_markobj(g, ir_kgc(ir)); + } + if (T->link) gc_marktrace(g, T->link); + if (T->nextroot) gc_marktrace(g, T->nextroot); + if (T->nextside) gc_marktrace(g, T->nextside); + gc_markobj(g, gcref(T->startpt)); +} + +/* The current trace is a GC root while not anchored in the prototype (yet). */ +#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur) +#else +#define gc_traverse_curtrace(g) UNUSED(g) +#endif + +/* Traverse a prototype. */ +static void gc_traverse_proto(global_State *g, GCproto *pt) +{ + ptrdiff_t i; + gc_mark_str(proto_chunkname(pt)); + for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */ + gc_markobj(g, proto_kgc(pt, i)); +#if LJ_HASJIT + if (pt->trace) gc_marktrace(g, pt->trace); +#endif +} + +/* Traverse the frame structure of a stack. */ +static MSize gc_traverse_frames(global_State *g, lua_State *th) +{ + TValue *frame, *top = th->top-1, *bot = tvref(th->stack); + /* Note: extra vararg frame not skipped, marks function twice (harmless). */ + for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) { + GCfunc *fn = frame_func(frame); + TValue *ftop = frame; + if (isluafunc(fn)) ftop += funcproto(fn)->framesize; + if (ftop > top) top = ftop; + if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */ + } + top++; /* Correct bias of -1 (frame == base-1). */ + if (top > tvref(th->maxstack)) top = tvref(th->maxstack); + return (MSize)(top - bot); /* Return minimum needed stack size. */ +} + +/* Traverse a thread object. 
*/ +static void gc_traverse_thread(global_State *g, lua_State *th) +{ + TValue *o, *top = th->top; + for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++) + gc_marktv(g, o); + if (g->gc.state == GCSatomic) { + top = tvref(th->stack) + th->stacksize; + for (; o < top; o++) /* Clear unmarked slots. */ + setnilV(o); + } + gc_markobj(g, tabref(th->env)); + lj_state_shrinkstack(th, gc_traverse_frames(g, th)); +} + +/* Propagate one gray object. Traverse it and turn it black. */ +static size_t propagatemark(global_State *g) +{ + GCobj *o = gcref(g->gc.gray); + int gct = o->gch.gct; + lua_assert(isgray(o)); + gray2black(o); + setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ + if (LJ_LIKELY(gct == ~LJ_TTAB)) { + GCtab *t = gco2tab(o); + if (gc_traverse_tab(g, t) > 0) + black2gray(o); /* Keep weak tables gray. */ + return sizeof(GCtab) + sizeof(TValue) * t->asize + + sizeof(Node) * (t->hmask + 1); + } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) { + GCfunc *fn = gco2func(o); + gc_traverse_func(g, fn); + return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : + sizeCfunc((MSize)fn->c.nupvalues); + } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) { + GCproto *pt = gco2pt(o); + gc_traverse_proto(g, pt); + return pt->sizept; + } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) { + lua_State *th = gco2th(o); + setgcrefr(th->gclist, g->gc.grayagain); + setgcref(g->gc.grayagain, o); + black2gray(o); /* Threads are never black. */ + gc_traverse_thread(g, th); + return sizeof(lua_State) + sizeof(TValue) * th->stacksize; + } else { +#if LJ_HASJIT + GCtrace *T = gco2trace(o); + gc_traverse_trace(g, T); + return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + + T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); +#else + lua_assert(0); + return 0; +#endif + } +} + +/* Propagate all gray objects. */ +static size_t gc_propagate_gray(global_State *g) +{ + size_t m = 0; + while (gcref(g->gc.gray) != NULL) + m += propagatemark(g); + return m; +} + +/* -- Sweep phase --------------------------------------------------------- */ + +/* Type of GC free functions. */ +typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o); + +/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */ +static const GCFreeFunc gc_freefunc[] = { + (GCFreeFunc)lj_str_free, + (GCFreeFunc)lj_func_freeuv, + (GCFreeFunc)lj_state_free, + (GCFreeFunc)lj_func_freeproto, + (GCFreeFunc)lj_func_free, +#if LJ_HASJIT + (GCFreeFunc)lj_trace_free, +#else + (GCFreeFunc)0, +#endif +#if LJ_HASFFI + (GCFreeFunc)lj_cdata_free, +#else + (GCFreeFunc)0, +#endif + (GCFreeFunc)lj_tab_free, + (GCFreeFunc)lj_udata_free +}; + +/* Full sweep of a GC list. */ +#define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0) + +/* Partial sweep of a GC list. */ +static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim) +{ + /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */ + int ow = otherwhite(g); + GCobj *o; + while ((o = gcref(*p)) != NULL && lim-- > 0) { + if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ + gc_fullsweep(g, &gco2th(o)->openupval); + if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ + lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED)); + makewhite(g, o); /* Value is alive, change to the current white. */ + p = &o->gch.nextgc; + } else { /* Otherwise value is dead, free it. */ + lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED); + setgcrefr(*p, o->gch.nextgc); + if (o == gcref(g->gc.root)) + setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. 
*/ + gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o); + } + } + return p; +} + +/* Check whether we can clear a key or a value slot from a table. */ +static int gc_mayclear(cTValue *o, int val) +{ + if (tvisgcv(o)) { /* Only collectable objects can be weak references. */ + if (tvisstr(o)) { /* But strings cannot be used as weak references. */ + gc_mark_str(strV(o)); /* And need to be marked. */ + return 0; + } + if (iswhite(gcV(o))) + return 1; /* Object is about to be collected. */ + if (tvisudata(o) && val && isfinalized(udataV(o))) + return 1; /* Finalized userdata is dropped only from values. */ + } + return 0; /* Cannot clear. */ +} + +/* Clear collected entries from weak tables. */ +static void gc_clearweak(GCobj *o) +{ + while (o) { + GCtab *t = gco2tab(o); + lua_assert((t->marked & LJ_GC_WEAK)); + if ((t->marked & LJ_GC_WEAKVAL)) { + MSize i, asize = t->asize; + for (i = 0; i < asize; i++) { + /* Clear array slot when value is about to be collected. */ + TValue *tv = arrayslot(t, i); + if (gc_mayclear(tv, 1)) + setnilV(tv); + } + } + if (t->hmask > 0) { + Node *node = noderef(t->node); + MSize i, hmask = t->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + /* Clear hash slot when key or value is about to be collected. */ + if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) || + gc_mayclear(&n->val, 1))) + setnilV(&n->val); + } + } + o = gcref(t->gclist); + } +} + +/* Call a userdata or cdata finalizer. */ +static void gc_call_finalizer(global_State *g, lua_State *L, + cTValue *mo, GCobj *o) +{ + /* Save and restore lots of state around the __gc callback. */ + uint8_t oldh = hook_save(g); + GCSize oldt = g->gc.threshold; + int errcode; + TValue *top; + lj_trace_abort(g); + hook_entergc(g); /* Disable hooks and new traces during __gc. */ + g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */ + top = L->top; + copyTV(L, top++, mo); + if (LJ_FR2) setnilV(top++); + setgcV(L, top, o, ~o->gch.gct); + L->top = top+1; + errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */ + hook_restore(g, oldh); + g->gc.threshold = oldt; /* Restore GC threshold. */ + if (errcode) + lj_err_throw(L, errcode); /* Propagate errors. */ +} + +/* Finalize one userdata or cdata object from the mmudata list. */ +static void gc_finalize(lua_State *L) +{ + global_State *g = G(L); + GCobj *o = gcnext(gcref(g->gc.mmudata)); + cTValue *mo; + lua_assert(tvref(g->jit_base) == NULL); /* Must not be called on trace. */ + /* Unchain from list of userdata to be finalized. */ + if (o == gcref(g->gc.mmudata)) + setgcrefnull(g->gc.mmudata); + else + setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc); +#if LJ_HASFFI + if (o->gch.gct == ~LJ_TCDATA) { + TValue tmp, *tv; + /* Add cdata back to the GC list and make it white. */ + setgcrefr(o->gch.nextgc, g->gc.root); + setgcref(g->gc.root, o); + makewhite(g, o); + o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; + /* Resolve finalizer. */ + setcdataV(L, &tmp, gco2cd(o)); + tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp); + if (!tvisnil(tv)) { + g->gc.nocdatafin = 0; + copyTV(L, &tmp, tv); + setnilV(tv); /* Clear entry in finalizer table. */ + gc_call_finalizer(g, L, &tmp, o); + } + return; + } +#endif + /* Add userdata back to the main userdata list and make it white. */ + setgcrefr(o->gch.nextgc, mainthread(g)->nextgc); + setgcref(mainthread(g)->nextgc, o); + makewhite(g, o); + /* Resolve the __gc metamethod. 
*/ + mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc); + if (mo) + gc_call_finalizer(g, L, mo, o); +} + +/* Finalize all userdata objects from mmudata list. */ +void lj_gc_finalize_udata(lua_State *L) +{ + while (gcref(G(L)->gc.mmudata) != NULL) + gc_finalize(L); +} + +#if LJ_HASFFI +/* Finalize all cdata objects from finalizer table. */ +void lj_gc_finalize_cdata(lua_State *L) +{ + global_State *g = G(L); + CTState *cts = ctype_ctsG(g); + if (cts) { + GCtab *t = cts->finalizer; + Node *node = noderef(t->node); + ptrdiff_t i; + setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */ + for (i = (ptrdiff_t)t->hmask; i >= 0; i--) + if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) { + GCobj *o = gcV(&node[i].key); + TValue tmp; + makewhite(g, o); + o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; + copyTV(L, &tmp, &node[i].val); + setnilV(&node[i].val); + gc_call_finalizer(g, L, &tmp, o); + } + } +} +#endif + +/* Free all remaining GC objects. */ +void lj_gc_freeall(global_State *g) +{ + MSize i, strmask; + /* Free everything, except super-fixed objects (the main thread). */ + g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED; + gc_fullsweep(g, &g->gc.root); + strmask = g->strmask; + for (i = 0; i <= strmask; i++) /* Free all string hash chains. */ + gc_fullsweep(g, &g->strhash[i]); +} + +/* -- Collector ----------------------------------------------------------- */ + +/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */ +static void atomic(global_State *g, lua_State *L) +{ + size_t udsize; + + gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */ + gc_propagate_gray(g); /* Propagate any left-overs. */ + + setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ + setgcrefnull(g->gc.weak); + lua_assert(!iswhite(obj2gco(mainthread(g)))); + gc_markobj(g, L); /* Mark running thread. */ + gc_traverse_curtrace(g); /* Traverse current trace. */ + gc_mark_gcroot(g); /* Mark GC roots (again). */ + gc_propagate_gray(g); /* Propagate all of the above. */ + + setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */ + setgcrefnull(g->gc.grayagain); + gc_propagate_gray(g); /* Propagate it. */ + + udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */ + gc_mark_mmudata(g); /* Mark them. */ + udsize += gc_propagate_gray(g); /* And propagate the marks. */ + + /* All marking done, clear weak tables. */ + gc_clearweak(gcref(g->gc.weak)); + + lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */ + + /* Prepare for sweep phase. */ + g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */ + g->strempty.marked = g->gc.currentwhite; + setmref(g->gc.sweep, &g->gc.root); + g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */ +} + +/* GC state machine. Returns a cost estimate for each step performed. */ +static size_t gc_onestep(lua_State *L) +{ + global_State *g = G(L); + switch (g->gc.state) { + case GCSpause: + gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */ + return 0; + case GCSpropagate: + if (gcref(g->gc.gray) != NULL) + return propagatemark(g); /* Propagate one gray object. */ + g->gc.state = GCSatomic; /* End of mark phase. */ + return 0; + case GCSatomic: + if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */ + return LJ_MAX_MEM; + atomic(g, L); + g->gc.state = GCSsweepstring; /* Start of sweep phase. 
*/ + g->gc.sweepstr = 0; + return 0; + case GCSsweepstring: { + GCSize old = g->gc.total; + gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */ + if (g->gc.sweepstr > g->strmask) + g->gc.state = GCSsweep; /* All string hash chains sweeped. */ + lua_assert(old >= g->gc.total); + g->gc.estimate -= old - g->gc.total; + return GCSWEEPCOST; + } + case GCSsweep: { + GCSize old = g->gc.total; + setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); + lua_assert(old >= g->gc.total); + g->gc.estimate -= old - g->gc.total; + if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { + if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1) + lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */ + if (gcref(g->gc.mmudata)) { /* Need any finalizations? */ + g->gc.state = GCSfinalize; +#if LJ_HASFFI + g->gc.nocdatafin = 1; +#endif + } else { /* Otherwise skip this phase to help the JIT. */ + g->gc.state = GCSpause; /* End of GC cycle. */ + g->gc.debt = 0; + } + } + return GCSWEEPMAX*GCSWEEPCOST; + } + case GCSfinalize: + if (gcref(g->gc.mmudata) != NULL) { + if (tvref(g->jit_base)) /* Don't call finalizers on trace. */ + return LJ_MAX_MEM; + gc_finalize(L); /* Finalize one userdata object. */ + if (g->gc.estimate > GCFINALIZECOST) + g->gc.estimate -= GCFINALIZECOST; + return GCFINALIZECOST; + } +#if LJ_HASFFI + if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer); +#endif + g->gc.state = GCSpause; /* End of GC cycle. */ + g->gc.debt = 0; + return 0; + default: + lua_assert(0); + return 0; + } +} + +/* Perform a limited amount of incremental GC steps. */ +int LJ_FASTCALL lj_gc_step(lua_State *L) +{ + global_State *g = G(L); + GCSize lim; + int32_t ostate = g->vmstate; + setvmstate(g, GC); + lim = (GCSTEPSIZE/100) * g->gc.stepmul; + if (lim == 0) + lim = LJ_MAX_MEM; + if (g->gc.total > g->gc.threshold) + g->gc.debt += g->gc.total - g->gc.threshold; + do { + lim -= (GCSize)gc_onestep(L); + if (g->gc.state == GCSpause) { + g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; + g->vmstate = ostate; + return 1; /* Finished a GC cycle. */ + } + } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0)); + if (g->gc.debt < GCSTEPSIZE) { + g->gc.threshold = g->gc.total + GCSTEPSIZE; + g->vmstate = ostate; + return -1; + } else { + g->gc.debt -= GCSTEPSIZE; + g->gc.threshold = g->gc.total; + g->vmstate = ostate; + return 0; + } +} + +/* Ditto, but fix the stack top first. */ +void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L) +{ + if (curr_funcisL(L)) L->top = curr_topL(L); + lj_gc_step(L); +} + +#if LJ_HASJIT +/* Perform multiple GC steps. Called from JIT-compiled code. */ +int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps) +{ + lua_State *L = gco2th(gcref(g->cur_L)); + L->base = tvref(G(L)->jit_base); + L->top = curr_topL(L); + while (steps-- > 0 && lj_gc_step(L) == 0) + ; + /* Return 1 to force a trace exit. */ + return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize); +} +#endif + +/* Perform a full GC cycle. */ +void lj_gc_fullgc(lua_State *L) +{ + global_State *g = G(L); + int32_t ostate = g->vmstate; + setvmstate(g, GC); + if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */ + setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */ + setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */ + setgcrefnull(g->gc.grayagain); + setgcrefnull(g->gc.weak); + g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. 
*/ + g->gc.sweepstr = 0; + } + while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) + gc_onestep(L); /* Finish sweep. */ + lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause); + /* Now perform a full GC. */ + g->gc.state = GCSpause; + do { gc_onestep(L); } while (g->gc.state != GCSpause); + g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; + g->vmstate = ostate; +} + +/* -- Write barriers ------------------------------------------------------ */ + +/* Move the GC propagation frontier forward. */ +void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) +{ + lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); + lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); + lua_assert(o->gch.gct != ~LJ_TTAB); + /* Preserve invariant during propagation. Otherwise it doesn't matter. */ + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) + gc_mark(g, v); /* Move frontier forward. */ + else + makewhite(g, o); /* Make it white to avoid the following barrier. */ +} + +/* Specialized barrier for closed upvalue. Pass &uv->tv. */ +void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv) +{ +#define TV2MARKED(x) \ + (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked))) + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) + gc_mark(g, gcV(tv)); + else + TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g); +#undef TV2MARKED +} + +/* Close upvalue. Also needs a write barrier. */ +void lj_gc_closeuv(global_State *g, GCupval *uv) +{ + GCobj *o = obj2gco(uv); + /* Copy stack slot to upvalue itself and point to the copy. */ + copyTV(mainthread(g), &uv->tv, uvval(uv)); + setmref(uv->v, &uv->tv); + uv->closed = 1; + setgcrefr(o->gch.nextgc, g->gc.root); + setgcref(g->gc.root, o); + if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */ + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) { + gray2black(o); /* Make it black and preserve invariant. */ + if (tviswhite(&uv->tv)) + lj_gc_barrierf(g, o, gcV(&uv->tv)); + } else { + makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */ + lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); + } + } +} + +#if LJ_HASJIT +/* Mark a trace if it's saved during the propagation phase. */ +void lj_gc_barriertrace(global_State *g, uint32_t traceno) +{ + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) + gc_marktrace(g, traceno); +} +#endif + +/* -- Allocator ----------------------------------------------------------- */ + +/* Call pluggable memory allocator to allocate or resize a fragment. */ +void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz) +{ + global_State *g = G(L); + lua_assert((osz == 0) == (p == NULL)); + p = g->allocf(g->allocd, p, osz, nsz); + if (p == NULL && nsz > 0) + lj_err_mem(L); + lua_assert((nsz == 0) == (p == NULL)); + lua_assert(checkptrGC(p)); + g->gc.total = (g->gc.total - osz) + nsz; + return p; +} + +/* Allocate new GC object and link it to the root set. */ +void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size) +{ + global_State *g = G(L); + GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size); + if (o == NULL) + lj_err_mem(L); + lua_assert(checkptrGC(o)); + g->gc.total += size; + setgcrefr(o->gch.nextgc, g->gc.root); + setgcref(g->gc.root, o); + newwhite(g, o); + return o; +} + +/* Resize growable vector. 
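**
** (Usage sketch, not code from this patch: callers normally go through
** the lj_mem_growvec() wrapper declared in lj_gc.h, which follows in this
** patch.  `L` is assumed to be a valid lua_State; the element type and
** the 65536 element limit are stand-ins.
**
**   MSize n = 0, sz = 0;
**   int32_t *v = NULL;
**   ...
**   if (LJ_UNLIKELY(n >= sz))     /* Doubles sz, at least LJ_MIN_VECSZ. */
**     lj_mem_growvec(L, v, sz, (MSize)65536, int32_t);
**   v[n++] = 42;
**
** The vector is released again with lj_mem_freevec(g, v, sz, int32_t).)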
*/ +void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz) +{ + MSize sz = (*szp) << 1; + if (sz < LJ_MIN_VECSZ) + sz = LJ_MIN_VECSZ; + if (sz > lim) + sz = lim; + p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz); + *szp = sz; + return p; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_gc.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_gc.h new file mode 100644 index 00000000000..847eb7835d9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_gc.h @@ -0,0 +1,134 @@ +/* +** Garbage collector. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_GC_H +#define _LJ_GC_H + +#include "lj_obj.h" + +/* Garbage collector states. Order matters. */ +enum { + GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize +}; + +/* Bitmasks for marked field of GCobj. */ +#define LJ_GC_WHITE0 0x01 +#define LJ_GC_WHITE1 0x02 +#define LJ_GC_BLACK 0x04 +#define LJ_GC_FINALIZED 0x08 +#define LJ_GC_WEAKKEY 0x08 +#define LJ_GC_WEAKVAL 0x10 +#define LJ_GC_CDATA_FIN 0x10 +#define LJ_GC_FIXED 0x20 +#define LJ_GC_SFIXED 0x40 + +#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1) +#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK) +#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL) + +/* Macros to test and set GCobj colors. */ +#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES) +#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK) +#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES))) +#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x))) +#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES) +#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES) + +#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES) +#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g)) +#define makewhite(g, x) \ + ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g)) +#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES) +#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK) +#define fixstring(s) ((s)->marked |= LJ_GC_FIXED) +#define markfinalized(x) ((x)->gch.marked |= LJ_GC_FINALIZED) + +/* Collector. */ +LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all); +LJ_FUNC void lj_gc_finalize_udata(lua_State *L); +#if LJ_HASFFI +LJ_FUNC void lj_gc_finalize_cdata(lua_State *L); +#else +#define lj_gc_finalize_cdata(L) UNUSED(L) +#endif +LJ_FUNC void lj_gc_freeall(global_State *g); +LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L); +LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L); +#if LJ_HASJIT +LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps); +#endif +LJ_FUNC void lj_gc_fullgc(lua_State *L); + +/* GC check: drive collector forward if the GC threshold has been reached. */ +#define lj_gc_check(L) \ + { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \ + lj_gc_step(L); } +#define lj_gc_check_fixtop(L) \ + { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \ + lj_gc_step_fixtop(L); } + +/* Write barriers. */ +LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v); +LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv); +LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv); +#if LJ_HASJIT +LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno); +#endif + +/* Move the GC propagation frontier back for tables (make it gray again). 
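**
** (Usage sketch for the barrier macros that follow, not code from this
** patch: after storing a collectable value into a table slot, e.g. in the
** C API's raw table stores, the write is paired with a barrier,
**
**   copyTV(L, dst, src);        /* dst is a slot inside table t. */
**   lj_gc_anybarriert(L, t);    /* Re-grays t if it is already black. */
**
** so that a black table never keeps pointing at a white object behind the
** collector's back.)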
*/ +static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t) +{ + GCobj *o = obj2gco(t); + lua_assert(isblack(o) && !isdead(g, o)); + lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); + black2gray(o); + setgcrefr(t->gclist, g->gc.grayagain); + setgcref(g->gc.grayagain, o); +} + +/* Barrier for stores to table objects. TValue and GCobj variant. */ +#define lj_gc_anybarriert(L, t) \ + { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); } +#define lj_gc_barriert(L, t, tv) \ + { if (tviswhite(tv) && isblack(obj2gco(t))) \ + lj_gc_barrierback(G(L), (t)); } +#define lj_gc_objbarriert(L, t, o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \ + lj_gc_barrierback(G(L), (t)); } + +/* Barrier for stores to any other object. TValue and GCobj variant. */ +#define lj_gc_barrier(L, p, tv) \ + { if (tviswhite(tv) && isblack(obj2gco(p))) \ + lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); } +#define lj_gc_objbarrier(L, p, o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \ + lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); } + +/* Allocator. */ +LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz); +LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size); +LJ_FUNC void *lj_mem_grow(lua_State *L, void *p, + MSize *szp, MSize lim, MSize esz); + +#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s)) + +static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize) +{ + g->gc.total -= (GCSize)osize; + g->allocf(g->allocd, p, osize, 0); +} + +#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (GCSize)((n)*sizeof(t)))) +#define lj_mem_reallocvec(L, p, on, n, t) \ + ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (GCSize)((n)*sizeof(t)))) +#define lj_mem_growvec(L, p, n, m, t) \ + ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t))) +#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t)) + +#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t))) +#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s))) +#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p))) + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.c new file mode 100644 index 00000000000..9b95e525840 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.c @@ -0,0 +1,787 @@ +/* +** Client for the GDB JIT API. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_gdbjit_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_frame.h" +#include "lj_buf.h" +#include "lj_strfmt.h" +#include "lj_jit.h" +#include "lj_dispatch.h" + +/* This is not compiled in by default. +** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything. +*/ +#ifdef LUAJIT_USE_GDBJIT + +/* The GDB JIT API allows JIT compilers to pass debug information about +** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher +** to see it in action. +** +** This is a passive API, so it works even when not running under GDB +** or when attaching to an already running process. Alas, this implies +** enabling it always has a non-negligible overhead -- do not use in +** release mode! +** +** The LuaJIT GDB JIT client is rather minimal at the moment. It gives +** each trace a symbol name and adds a source location and frame unwind +** information. 
Obviously LuaJIT itself and any embedding C application +** should be compiled with debug symbols, too (see the Makefile). +** +** Traces are named TRACE_1, TRACE_2, ... these correspond to the trace +** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc. +** to set breakpoints on specific traces (even ahead of their creation). +** +** The source location for each trace allows listing the corresponding +** source lines with the GDB command "list" (but only if the Lua source +** has been loaded from a file). Currently this is always set to the +** location where the trace has been started. +** +** Frame unwind information can be inspected with the GDB command +** "info frame". This also allows proper backtraces across JIT-compiled +** code with the GDB command "bt". +** +** You probably want to add the following settings to a .gdbinit file +** (or add them to ~/.gdbinit): +** set disassembly-flavor intel +** set breakpoint pending on +** +** Here's a sample GDB session: +** ------------------------------------------------------------------------ + +$ cat >x.lua +for outer=1,100 do + for inner=1,100 do end +end +^D + +$ luajit -jv x.lua +[TRACE 1 x.lua:2] +[TRACE 2 (1/3) x.lua:1 -> 1] + +$ gdb --quiet --args luajit x.lua +(gdb) tbreak TRACE_1 +Function "TRACE_1" not defined. +Temporary breakpoint 1 (TRACE_1) pending. +(gdb) run +Starting program: luajit x.lua + +Temporary breakpoint 1, TRACE_1 () at x.lua:2 +2 for inner=1,100 do end +(gdb) list +1 for outer=1,100 do +2 for inner=1,100 do end +3 end +(gdb) bt +#0 TRACE_1 () at x.lua:2 +#1 0x08053690 in lua_pcall [...] +[...] +#7 0x0806ff90 in main [...] +(gdb) disass TRACE_1 +Dump of assembler code for function TRACE_1: +0xf7fd9fba : mov DWORD PTR ds:0xf7e0e2a0,0x1 +0xf7fd9fc4 : movsd xmm7,QWORD PTR [edx+0x20] +[...] +0xf7fd9ff8 : jmp 0xf7fd2014 +End of assembler dump. +(gdb) tbreak TRACE_2 +Function "TRACE_2" not defined. +Temporary breakpoint 2 (TRACE_2) pending. +(gdb) cont +Continuing. + +Temporary breakpoint 2, TRACE_2 () at x.lua:1 +1 for outer=1,100 do +(gdb) info frame +Stack level 0, frame at 0xffffd7c0: + eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690 + called by frame at 0xffffd7e0 + source language unknown. + Arglist at 0xffffd78c, args: + Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0 + Saved registers: + ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4, + eip at 0xffffd7bc +(gdb) + +** ------------------------------------------------------------------------ +*/ + +/* -- GDB JIT API --------------------------------------------------------- */ + +/* GDB JIT actions. */ +enum { + GDBJIT_NOACTION = 0, + GDBJIT_REGISTER, + GDBJIT_UNREGISTER +}; + +/* GDB JIT entry. */ +typedef struct GDBJITentry { + struct GDBJITentry *next_entry; + struct GDBJITentry *prev_entry; + const char *symfile_addr; + uint64_t symfile_size; +} GDBJITentry; + +/* GDB JIT descriptor. */ +typedef struct GDBJITdesc { + uint32_t version; + uint32_t action_flag; + GDBJITentry *relevant_entry; + GDBJITentry *first_entry; +} GDBJITdesc; + +GDBJITdesc __jit_debug_descriptor = { + 1, GDBJIT_NOACTION, NULL, NULL +}; + +/* GDB sets a breakpoint at this function. */ +void LJ_NOINLINE __jit_debug_register_code() +{ + __asm__ __volatile__(""); +}; + +/* -- In-memory ELF object definitions ------------------------------------ */ + +/* ELF definitions. 
*/ +typedef struct ELFheader { + uint8_t emagic[4]; + uint8_t eclass; + uint8_t eendian; + uint8_t eversion; + uint8_t eosabi; + uint8_t eabiversion; + uint8_t epad[7]; + uint16_t type; + uint16_t machine; + uint32_t version; + uintptr_t entry; + uintptr_t phofs; + uintptr_t shofs; + uint32_t flags; + uint16_t ehsize; + uint16_t phentsize; + uint16_t phnum; + uint16_t shentsize; + uint16_t shnum; + uint16_t shstridx; +} ELFheader; + +typedef struct ELFsectheader { + uint32_t name; + uint32_t type; + uintptr_t flags; + uintptr_t addr; + uintptr_t ofs; + uintptr_t size; + uint32_t link; + uint32_t info; + uintptr_t align; + uintptr_t entsize; +} ELFsectheader; + +#define ELFSECT_IDX_ABS 0xfff1 + +enum { + ELFSECT_TYPE_PROGBITS = 1, + ELFSECT_TYPE_SYMTAB = 2, + ELFSECT_TYPE_STRTAB = 3, + ELFSECT_TYPE_NOBITS = 8 +}; + +#define ELFSECT_FLAGS_WRITE 1 +#define ELFSECT_FLAGS_ALLOC 2 +#define ELFSECT_FLAGS_EXEC 4 + +typedef struct ELFsymbol { +#if LJ_64 + uint32_t name; + uint8_t info; + uint8_t other; + uint16_t sectidx; + uintptr_t value; + uint64_t size; +#else + uint32_t name; + uintptr_t value; + uint32_t size; + uint8_t info; + uint8_t other; + uint16_t sectidx; +#endif +} ELFsymbol; + +enum { + ELFSYM_TYPE_FUNC = 2, + ELFSYM_TYPE_FILE = 4, + ELFSYM_BIND_LOCAL = 0 << 4, + ELFSYM_BIND_GLOBAL = 1 << 4, +}; + +/* DWARF definitions. */ +#define DW_CIE_VERSION 1 + +enum { + DW_CFA_nop = 0x0, + DW_CFA_offset_extended = 0x5, + DW_CFA_def_cfa = 0xc, + DW_CFA_def_cfa_offset = 0xe, + DW_CFA_offset_extended_sf = 0x11, + DW_CFA_advance_loc = 0x40, + DW_CFA_offset = 0x80 +}; + +enum { + DW_EH_PE_udata4 = 3, + DW_EH_PE_textrel = 0x20 +}; + +enum { + DW_TAG_compile_unit = 0x11 +}; + +enum { + DW_children_no = 0, + DW_children_yes = 1 +}; + +enum { + DW_AT_name = 0x03, + DW_AT_stmt_list = 0x10, + DW_AT_low_pc = 0x11, + DW_AT_high_pc = 0x12 +}; + +enum { + DW_FORM_addr = 0x01, + DW_FORM_data4 = 0x06, + DW_FORM_string = 0x08 +}; + +enum { + DW_LNS_extended_op = 0, + DW_LNS_copy = 1, + DW_LNS_advance_pc = 2, + DW_LNS_advance_line = 3 +}; + +enum { + DW_LNE_end_sequence = 1, + DW_LNE_set_address = 2 +}; + +enum { +#if LJ_TARGET_X86 + DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX, + DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI, + DW_REG_RA, +#elif LJ_TARGET_X64 + /* Yes, the order is strange, but correct. */ + DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX, + DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP, + DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11, + DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15, + DW_REG_RA, +#elif LJ_TARGET_ARM + DW_REG_SP = 13, + DW_REG_RA = 14, +#elif LJ_TARGET_PPC + DW_REG_SP = 1, + DW_REG_RA = 65, + DW_REG_CR = 70, +#elif LJ_TARGET_MIPS + DW_REG_SP = 29, + DW_REG_RA = 31, +#else +#error "Unsupported target architecture" +#endif +}; + +/* Minimal list of sections for the in-memory ELF object. */ +enum { + GDBJIT_SECT_NULL, + GDBJIT_SECT_text, + GDBJIT_SECT_eh_frame, + GDBJIT_SECT_shstrtab, + GDBJIT_SECT_strtab, + GDBJIT_SECT_symtab, + GDBJIT_SECT_debug_info, + GDBJIT_SECT_debug_abbrev, + GDBJIT_SECT_debug_line, + GDBJIT_SECT__MAX +}; + +enum { + GDBJIT_SYM_UNDEF, + GDBJIT_SYM_FILE, + GDBJIT_SYM_FUNC, + GDBJIT_SYM__MAX +}; + +/* In-memory ELF object. */ +typedef struct GDBJITobj { + ELFheader hdr; /* ELF header. */ + ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */ + ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */ + uint8_t space[4096]; /* Space for various section data. */ +} GDBJITobj; + +/* Combined structure for GDB JIT entry and ELF object. 
*/ +typedef struct GDBJITentryobj { + GDBJITentry entry; + size_t sz; + GDBJITobj obj; +} GDBJITentryobj; + +/* Template for in-memory ELF header. */ +static const ELFheader elfhdr_template = { + .emagic = { 0x7f, 'E', 'L', 'F' }, + .eclass = LJ_64 ? 2 : 1, + .eendian = LJ_ENDIAN_SELECT(1, 2), + .eversion = 1, +#if LJ_TARGET_LINUX + .eosabi = 0, /* Nope, it's not 3. */ +#elif defined(__FreeBSD__) + .eosabi = 9, +#elif defined(__NetBSD__) + .eosabi = 2, +#elif defined(__OpenBSD__) + .eosabi = 12, +#elif defined(__DragonFly__) + .eosabi = 0, +#elif (defined(__sun__) && defined(__svr4__)) + .eosabi = 6, +#else + .eosabi = 0, +#endif + .eabiversion = 0, + .epad = { 0, 0, 0, 0, 0, 0, 0 }, + .type = 1, +#if LJ_TARGET_X86 + .machine = 3, +#elif LJ_TARGET_X64 + .machine = 62, +#elif LJ_TARGET_ARM + .machine = 40, +#elif LJ_TARGET_PPC + .machine = 20, +#elif LJ_TARGET_MIPS + .machine = 8, +#else +#error "Unsupported target architecture" +#endif + .version = 1, + .entry = 0, + .phofs = 0, + .shofs = offsetof(GDBJITobj, sect), + .flags = 0, + .ehsize = sizeof(ELFheader), + .phentsize = 0, + .phnum = 0, + .shentsize = sizeof(ELFsectheader), + .shnum = GDBJIT_SECT__MAX, + .shstridx = GDBJIT_SECT_shstrtab +}; + +/* -- In-memory ELF object generation ------------------------------------- */ + +/* Context for generating the ELF object for the GDB JIT API. */ +typedef struct GDBJITctx { + uint8_t *p; /* Pointer to next address in obj.space. */ + uint8_t *startp; /* Pointer to start address in obj.space. */ + GCtrace *T; /* Generate symbols for this trace. */ + uintptr_t mcaddr; /* Machine code address. */ + MSize szmcode; /* Size of machine code. */ + MSize spadjp; /* Stack adjustment for parent trace or interpreter. */ + MSize spadj; /* Stack adjustment for trace itself. */ + BCLine lineno; /* Starting line number. */ + const char *filename; /* Starting file name. */ + size_t objsize; /* Final size of ELF object. */ + GDBJITobj obj; /* In-memory ELF object. */ +} GDBJITctx; + +/* Add a zero-terminated string. */ +static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str) +{ + uint8_t *p = ctx->p; + uint32_t ofs = (uint32_t)(p - ctx->startp); + do { + *p++ = (uint8_t)*str; + } while (*str++); + ctx->p = p; + return ofs; +} + +/* Append a decimal number. */ +static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n) +{ + if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); } + *ctx->p++ = '0' + n; +} + +/* Add a SLEB128 value. */ +static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v) +{ + uint8_t *p = ctx->p; + for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7) + *p++ = (uint8_t)((v & 0x7f) | 0x80); + *p++ = (uint8_t)(v & 0x7f); + ctx->p = p; +} + +/* Shortcuts to generate DWARF structures. */ +#define DB(x) (*p++ = (x)) +#define DI8(x) (*(int8_t *)p = (x), p++) +#define DU16(x) (*(uint16_t *)p = (x), p += 2) +#define DU32(x) (*(uint32_t *)p = (x), p += 4) +#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t)) +#define DUV(x) (p = (uint8_t *)lj_strfmt_wuleb128((char *)p, (x))) +#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p) +#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p) +#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop +#define DSECT(name, stmt) \ + { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \ + *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); } \ + +/* Initialize ELF section headers. 
*/ +static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx) +{ + ELFsectheader *sect; + + *ctx->p++ = '\0'; /* Empty string at start of string table. */ + +#define SECTDEF(id, tp, al) \ + sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \ + sect->name = gdbjit_strz(ctx, "." #id); \ + sect->type = ELFSECT_TYPE_##tp; \ + sect->align = (al) + + SECTDEF(text, NOBITS, 16); + sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC; + sect->addr = ctx->mcaddr; + sect->ofs = 0; + sect->size = ctx->szmcode; + + SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t)); + sect->flags = ELFSECT_FLAGS_ALLOC; + + SECTDEF(shstrtab, STRTAB, 1); + SECTDEF(strtab, STRTAB, 1); + + SECTDEF(symtab, SYMTAB, sizeof(uintptr_t)); + sect->ofs = offsetof(GDBJITobj, sym); + sect->size = sizeof(ctx->obj.sym); + sect->link = GDBJIT_SECT_strtab; + sect->entsize = sizeof(ELFsymbol); + sect->info = GDBJIT_SYM_FUNC; + + SECTDEF(debug_info, PROGBITS, 1); + SECTDEF(debug_abbrev, PROGBITS, 1); + SECTDEF(debug_line, PROGBITS, 1); + +#undef SECTDEF +} + +/* Initialize symbol table. */ +static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx) +{ + ELFsymbol *sym; + + *ctx->p++ = '\0'; /* Empty string at start of string table. */ + + sym = &ctx->obj.sym[GDBJIT_SYM_FILE]; + sym->name = gdbjit_strz(ctx, "JIT mcode"); + sym->sectidx = ELFSECT_IDX_ABS; + sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL; + + sym = &ctx->obj.sym[GDBJIT_SYM_FUNC]; + sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--; + gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0'; + sym->sectidx = GDBJIT_SECT_text; + sym->value = 0; + sym->size = ctx->szmcode; + sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL; +} + +/* Initialize .eh_frame section. */ +static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + uint8_t *framep = p; + + /* Emit DWARF EH CIE. */ + DSECT(CIE, + DU32(0); /* Offset to CIE itself. */ + DB(DW_CIE_VERSION); + DSTR("zR"); /* Augmentation. */ + DUV(1); /* Code alignment factor. */ + DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */ + DB(DW_REG_RA); /* Return address register. */ + DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */ + DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t)); +#if LJ_TARGET_PPC + DB(DW_CFA_offset_extended_sf); DB(DW_REG_RA); DSV(-1); +#else + DB(DW_CFA_offset|DW_REG_RA); DUV(1); +#endif + DALIGNNOP(sizeof(uintptr_t)); + ) + + /* Emit DWARF EH FDE. */ + DSECT(FDE, + DU32((uint32_t)(p-framep)); /* Offset to CIE. */ + DU32(0); /* Machine code offset relative to .text. */ + DU32(ctx->szmcode); /* Machine code length. */ + DB(0); /* Augmentation data. */ + /* Registers saved in CFRAME. */ +#if LJ_TARGET_X86 + DB(DW_CFA_offset|DW_REG_BP); DUV(2); + DB(DW_CFA_offset|DW_REG_DI); DUV(3); + DB(DW_CFA_offset|DW_REG_SI); DUV(4); + DB(DW_CFA_offset|DW_REG_BX); DUV(5); +#elif LJ_TARGET_X64 + DB(DW_CFA_offset|DW_REG_BP); DUV(2); + DB(DW_CFA_offset|DW_REG_BX); DUV(3); + DB(DW_CFA_offset|DW_REG_15); DUV(4); + DB(DW_CFA_offset|DW_REG_14); DUV(5); + /* Extra registers saved for JIT-compiled code. */ + DB(DW_CFA_offset|DW_REG_13); DUV(LJ_GC64 ? 10 : 9); + DB(DW_CFA_offset|DW_REG_12); DUV(LJ_GC64 ? 
11 : 10); +#elif LJ_TARGET_ARM + { + int i; + for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); } + } +#elif LJ_TARGET_PPC + { + int i; + DB(DW_CFA_offset_extended); DB(DW_REG_CR); DUV(55); + for (i = 14; i <= 31; i++) { + DB(DW_CFA_offset|i); DUV(37+(31-i)); + DB(DW_CFA_offset|32|i); DUV(2+2*(31-i)); + } + } +#elif LJ_TARGET_MIPS + { + int i; + DB(DW_CFA_offset|30); DUV(2); + for (i = 23; i >= 16; i--) { DB(DW_CFA_offset|i); DUV(26-i); } + for (i = 30; i >= 20; i -= 2) { DB(DW_CFA_offset|32|i); DUV(42-i); } + } +#else +#error "Unsupported target architecture" +#endif + if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */ + DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp); + DB(DW_CFA_advance_loc|1); /* Only an approximation. */ + } + DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */ + DALIGNNOP(sizeof(uintptr_t)); + ) + + ctx->p = p; +} + +/* Initialize .debug_info section. */ +static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + + DSECT(info, + DU16(2); /* DWARF version. */ + DU32(0); /* Abbrev offset. */ + DB(sizeof(uintptr_t)); /* Pointer size. */ + + DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */ + DSTR(ctx->filename); /* DW_AT_name. */ + DADDR(ctx->mcaddr); /* DW_AT_low_pc. */ + DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */ + DU32(0); /* DW_AT_stmt_list. */ + ) + + ctx->p = p; +} + +/* Initialize .debug_abbrev section. */ +static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + + /* Abbrev #1: DW_TAG_compile_unit. */ + DUV(1); DUV(DW_TAG_compile_unit); + DB(DW_children_no); + DUV(DW_AT_name); DUV(DW_FORM_string); + DUV(DW_AT_low_pc); DUV(DW_FORM_addr); + DUV(DW_AT_high_pc); DUV(DW_FORM_addr); + DUV(DW_AT_stmt_list); DUV(DW_FORM_data4); + DB(0); DB(0); + + ctx->p = p; +} + +#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op))) + +/* Initialize .debug_line section. */ +static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + + DSECT(line, + DU16(2); /* DWARF version. */ + DSECT(header, + DB(1); /* Minimum instruction length. */ + DB(1); /* is_stmt. */ + DI8(0); /* Line base for special opcodes. */ + DB(2); /* Line range for special opcodes. */ + DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */ + DB(0); DB(1); DB(1); /* Standard opcode lengths. */ + /* Directory table. */ + DB(0); + /* File name table. */ + DSTR(ctx->filename); DUV(0); DUV(0); DUV(0); + DB(0); + ) + + DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr); + if (ctx->lineno) { + DB(DW_LNS_advance_line); DSV(ctx->lineno-1); + } + DB(DW_LNS_copy); + DB(DW_LNS_advance_pc); DUV(ctx->szmcode); + DLNE(DW_LNE_end_sequence, 0); + ) + + ctx->p = p; +} + +#undef DLNE + +/* Undef shortcuts. */ +#undef DB +#undef DI8 +#undef DU16 +#undef DU32 +#undef DADDR +#undef DUV +#undef DSV +#undef DSTR +#undef DALIGNNOP +#undef DSECT + +/* Type of a section initializer callback. */ +typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx); + +/* Call section initializer and set the section offset and size. */ +static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf) +{ + ctx->startp = ctx->p; + ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj); + initf(ctx); + ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp); +} + +#define SECTALIGN(p, a) \ + ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1))) + +/* Build in-memory ELF object. 
*/ +static void gdbjit_buildobj(GDBJITctx *ctx) +{ + GDBJITobj *obj = &ctx->obj; + /* Fill in ELF header and clear structures. */ + memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader)); + memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX); + memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX); + /* Initialize sections. */ + ctx->p = obj->space; + gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr); + gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab); + gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo); + gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev); + gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline); + SECTALIGN(ctx->p, sizeof(uintptr_t)); + gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe); + ctx->objsize = (size_t)((char *)ctx->p - (char *)obj); + lua_assert(ctx->objsize < sizeof(GDBJITobj)); +} + +#undef SECTALIGN + +/* -- Interface to GDB JIT API -------------------------------------------- */ + +/* Add new entry to GDB JIT symbol chain. */ +static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx) +{ + /* Allocate memory for GDB JIT entry and ELF object. */ + MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize); + GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj); + memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */ + eo->sz = sz; + ctx->T->gdbjit_entry = (void *)eo; + /* Link new entry to chain and register it. */ + eo->entry.prev_entry = NULL; + eo->entry.next_entry = __jit_debug_descriptor.first_entry; + if (eo->entry.next_entry) + eo->entry.next_entry->prev_entry = &eo->entry; + eo->entry.symfile_addr = (const char *)&eo->obj; + eo->entry.symfile_size = ctx->objsize; + __jit_debug_descriptor.first_entry = &eo->entry; + __jit_debug_descriptor.relevant_entry = &eo->entry; + __jit_debug_descriptor.action_flag = GDBJIT_REGISTER; + __jit_debug_register_code(); +} + +/* Add debug info for newly compiled trace and notify GDB. */ +void lj_gdbjit_addtrace(jit_State *J, GCtrace *T) +{ + GDBJITctx ctx; + GCproto *pt = &gcref(T->startpt)->pt; + TraceNo parent = T->ir[REF_BASE].op1; + const BCIns *startpc = mref(T->startpc, const BCIns); + ctx.T = T; + ctx.mcaddr = (uintptr_t)T->mcode; + ctx.szmcode = T->szmcode; + ctx.spadjp = CFRAME_SIZE_JIT + + (MSize)(parent ? traceref(J, parent)->spadjust : 0); + ctx.spadj = CFRAME_SIZE_JIT + T->spadjust; + lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); + ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); + ctx.filename = proto_chunknamestr(pt); + if (*ctx.filename == '@' || *ctx.filename == '=') + ctx.filename++; + else + ctx.filename = "(string)"; + gdbjit_buildobj(&ctx); + gdbjit_newentry(J->L, &ctx); +} + +/* Delete debug info for trace and notify GDB. 
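** Before the delete path, the registration handshake in isolation: a minimal,
** hypothetical mirror of the GDB JIT interface (the real symbols are
** __jit_debug_descriptor and __jit_debug_register_code, used by
** gdbjit_newentry() above). The Demo* names are illustrative only and not
** part of LuaJIT. */

#include <stdint.h>
#include <stddef.h>

enum { DEMO_NOACTION = 0, DEMO_REGISTER = 1, DEMO_UNREGISTER = 2 };

typedef struct DemoEntry {
  struct DemoEntry *next_entry, *prev_entry;
  const char *symfile_addr;    /* In-memory ELF image with debug info. */
  uint64_t symfile_size;
} DemoEntry;

typedef struct DemoDescriptor {
  uint32_t version, action_flag;
  DemoEntry *relevant_entry, *first_entry;
} DemoDescriptor;

static DemoDescriptor demo_descriptor = { 1, DEMO_NOACTION, NULL, NULL };

static void demo_register_code(void)
{
  /* Deliberately empty: the debugger sets a breakpoint on this symbol. */
}

/* Link a new entry at the head of the chain and notify the debugger. */
static void demo_register(DemoEntry *e, const char *elf, uint64_t sz)
{
  e->symfile_addr = elf;
  e->symfile_size = sz;
  e->prev_entry = NULL;
  e->next_entry = demo_descriptor.first_entry;
  if (e->next_entry) e->next_entry->prev_entry = e;
  demo_descriptor.first_entry = e;
  demo_descriptor.relevant_entry = e;
  demo_descriptor.action_flag = DEMO_REGISTER;
  demo_register_code();
}

/* Unregistering reverses this: unlink the entry, set the action flag and
** notify GDB again, which is what lj_gdbjit_deltrace() does next.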
*/ +void lj_gdbjit_deltrace(jit_State *J, GCtrace *T) +{ + GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry; + if (eo) { + if (eo->entry.prev_entry) + eo->entry.prev_entry->next_entry = eo->entry.next_entry; + else + __jit_debug_descriptor.first_entry = eo->entry.next_entry; + if (eo->entry.next_entry) + eo->entry.next_entry->prev_entry = eo->entry.prev_entry; + __jit_debug_descriptor.relevant_entry = &eo->entry; + __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER; + __jit_debug_register_code(); + lj_mem_free(J2G(J), eo, eo->sz); + } +} + +#endif +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.h new file mode 100644 index 00000000000..49c586301a8 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_gdbjit.h @@ -0,0 +1,22 @@ +/* +** Client for the GDB JIT API. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_GDBJIT_H +#define _LJ_GDBJIT_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT) + +LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T); +LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T); + +#else +#define lj_gdbjit_addtrace(J, T) UNUSED(T) +#define lj_gdbjit_deltrace(J, T) UNUSED(T) +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ir.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_ir.c new file mode 100644 index 00000000000..9682e05e577 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ir.c @@ -0,0 +1,505 @@ +/* +** SSA IR (Intermediate Representation) emitter. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_ir_c +#define LUA_CORE + +/* For pointers to libc/libm functions. */ +#include +#include + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lj_carith.h" +#endif +#include "lj_vm.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" +#include "lj_lib.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* -- IR tables ----------------------------------------------------------- */ + +/* IR instruction modes. */ +LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = { +IRDEF(IRMODE) + 0 +}; + +/* IR type sizes. */ +LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = { +#define IRTSIZE(name, size) size, +IRTDEF(IRTSIZE) +#undef IRTSIZE + 0 +}; + +/* C call info for CALL* instructions. */ +LJ_DATADEF const CCallInfo lj_ir_callinfo[] = { +#define IRCALLCI(cond, name, nargs, kind, type, flags) \ + { (ASMFunction)IRCALLCOND_##cond(name), \ + (nargs)|(CCI_CALL_##kind)|(IRT_##type<irbuf + J->irbotlim; + MSize szins = J->irtoplim - J->irbotlim; + if (szins) { + baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns), + 2*szins*sizeof(IRIns)); + J->irtoplim = J->irbotlim + 2*szins; + } else { + baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns)); + J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4; + J->irtoplim = J->irbotlim + LJ_MIN_IRSZ; + } + J->cur.ir = J->irbuf = baseir - J->irbotlim; +} + +/* Grow IR buffer at the bottom or shift it up. 
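** The policy, in isolation: if more than half of the buffer is free on top,
** the live range is shifted up by a quarter of the buffer; otherwise the
** buffer is doubled and the new space split between both ends. A stripped-down
** model of that policy (plain malloc/memmove instead of LuaJIT's tracked
** allocator; names are illustrative and assume a non-empty buffer): */

#include <stdlib.h>
#include <string.h>

typedef struct DemoBuf {
  int *buf;           /* Start of the allocation. */
  size_t size;        /* Total number of slots. */
  size_t bot, top;    /* Live range is [bot, top). */
} DemoBuf;

static int demo_growbot(DemoBuf *b)
{
  size_t used = b->top - b->bot;
  if (b->size - b->top > b->size/2) {
    /* More than half of the buffer is free on top: shift up by a quarter. */
    size_t ofs = b->size/4;
    memmove(b->buf + b->bot + ofs, b->buf + b->bot, used*sizeof(int));
    b->bot += ofs; b->top += ofs;
  } else {
    /* Double the buffer; put half of the new space below the live range. */
    size_t ofs = b->size/2;
    int *nbuf = malloc(2*b->size*sizeof(int));
    if (!nbuf) return 0;
    memcpy(nbuf + b->bot + ofs, b->buf + b->bot, used*sizeof(int));
    free(b->buf);
    b->buf = nbuf; b->size *= 2; b->bot += ofs; b->top += ofs;
  }
  return 1;   /* There is now room below b->bot. */
}

/* The real version, which operates on the biased IR buffer and uses LuaJIT's
** own allocator: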
*/ +static void lj_ir_growbot(jit_State *J) +{ + IRIns *baseir = J->irbuf + J->irbotlim; + MSize szins = J->irtoplim - J->irbotlim; + lua_assert(szins != 0); + lua_assert(J->cur.nk == J->irbotlim); + if (J->cur.nins + (szins >> 1) < J->irtoplim) { + /* More than half of the buffer is free on top: shift up by a quarter. */ + MSize ofs = szins >> 2; + memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns)); + J->irbotlim -= ofs; + J->irtoplim -= ofs; + J->cur.ir = J->irbuf = baseir - J->irbotlim; + } else { + /* Double the buffer size, but split the growth amongst top/bottom. */ + IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns); + MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */ + memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns)); + lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns)); + J->irbotlim -= ofs; + J->irtoplim = J->irbotlim + 2*szins; + J->cur.ir = J->irbuf = newbase - J->irbotlim; + } +} + +/* Emit IR without any optimizations. */ +TRef LJ_FASTCALL lj_ir_emit(jit_State *J) +{ + IRRef ref = lj_ir_nextins(J); + IRIns *ir = IR(ref); + IROp op = fins->o; + ir->prev = J->chain[op]; + J->chain[op] = (IRRef1)ref; + ir->o = op; + ir->op1 = fins->op1; + ir->op2 = fins->op2; + J->guardemit.irt |= fins->t.irt; + return TREF(ref, irt_t((ir->t = fins->t))); +} + +/* Emit call to a C function. */ +TRef lj_ir_call(jit_State *J, IRCallID id, ...) +{ + const CCallInfo *ci = &lj_ir_callinfo[id]; + uint32_t n = CCI_NARGS(ci); + TRef tr = TREF_NIL; + va_list argp; + va_start(argp, id); + if ((ci->flags & CCI_L)) n--; + if (n > 0) + tr = va_arg(argp, IRRef); + while (n-- > 1) + tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef)); + va_end(argp); + if (CCI_OP(ci) == IR_CALLS) + J->needsnap = 1; /* Need snapshot after call with side effect. */ + return emitir(CCI_OPTYPE(ci), tr, id); +} + +/* -- Interning of constants ---------------------------------------------- */ + +/* +** IR instructions for constants are kept between J->cur.nk >= ref < REF_BIAS. +** They are chained like all other instructions, but grow downwards. +** The are interned (like strings in the VM) to facilitate reference +** comparisons. The same constant must get the same reference. +*/ + +/* Get ref of next IR constant and optionally grow IR. +** Note: this may invalidate all IRIns *! +*/ +static LJ_AINLINE IRRef ir_nextk(jit_State *J) +{ + IRRef ref = J->cur.nk; + if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J); + J->cur.nk = --ref; + return ref; +} + +/* Intern int32_t constant. */ +TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev) + if (cir[ref].i == k) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + ir->i = k; + ir->t.irt = IRT_INT; + ir->o = IR_KINT; + ir->prev = J->chain[IR_KINT]; + J->chain[IR_KINT] = (IRRef1)ref; +found: + return TREF(ref, IRT_INT); +} + +/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the +** 64 bit constant. The constants themselves are stored in a chained array +** and shared across traces. +** +** Rationale for choosing this data structure: +** - The address of the constants is embedded in the generated machine code +** and must never move. A resizable array or hash table wouldn't work. +** - Most apps need very few non-32 bit integer constants (less than a dozen). +** - Linear search is hard to beat in terms of speed and low complexity. 
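** As a minimal illustration of that scheme (illustrative names, plain calloc
** in place of LuaJIT's allocator): constants live in fixed-size chunks that
** are chained together, so a returned address stays valid forever; lookup is
** a linear scan over the chain, and a miss appends to the last chunk. */

#include <stdint.h>
#include <stdlib.h>

enum { DEMO_K64SZ = 16 };              /* Constants per chunk. */

typedef struct DemoK64 {
  struct DemoK64 *next;                /* Next chunk in the chain. */
  size_t numk;                         /* Used slots in this chunk. */
  uint64_t k[DEMO_K64SZ];              /* The constants themselves. */
} DemoK64;

/* Find an interned constant or add it; the returned address never moves. */
static uint64_t *demo_k64_find(DemoK64 **head, uint64_t u64)
{
  DemoK64 *c, *last = NULL;
  for (c = *head; c; c = c->next) {    /* Search the whole chain. */
    size_t i;
    last = c;
    for (i = 0; i < c->numk; i++)
      if (c->k[i] == u64)
        return &c->k[i];               /* Hit: reuse the slot. */
  }
  if (!last || last->numk == DEMO_K64SZ) {  /* Need a fresh chunk. */
    DemoK64 *nc = (DemoK64 *)calloc(1, sizeof(DemoK64));
    if (!nc) return NULL;
    if (last) last->next = nc; else *head = nc;
    last = nc;
  }
  last->k[last->numk] = u64;           /* Miss: append. */
  return &last->k[last->numk++];
}

/* The real chained array, holding TValues and shared across traces: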
+*/ +typedef struct K64Array { + MRef next; /* Pointer to next list. */ + MSize numk; /* Number of used elements in this array. */ + TValue k[LJ_MIN_K64SZ]; /* Array of constants. */ +} K64Array; + +/* Free all chained arrays. */ +void lj_ir_k64_freeall(jit_State *J) +{ + K64Array *k; + for (k = mref(J->k64, K64Array); k; ) { + K64Array *next = mref(k->next, K64Array); + lj_mem_free(J2G(J), k, sizeof(K64Array)); + k = next; + } +} + +/* Find 64 bit constant in chained array or add it. */ +cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64) +{ + K64Array *k, *kp = NULL; + TValue *ntv; + MSize idx; + /* Search for the constant in the whole chain of arrays. */ + for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) { + kp = k; /* Remember previous element in list. */ + for (idx = 0; idx < k->numk; idx++) { /* Search one array. */ + TValue *tv = &k->k[idx]; + if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */ + return tv; + } + } + /* Constant was not found, need to add it. */ + if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */ + K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array); + setmref(kn->next, NULL); + kn->numk = 0; + if (kp) + setmref(kp->next, kn); /* Chain to the end of the list. */ + else + setmref(J->k64, kn); /* Link first array. */ + kp = kn; + } + ntv = &kp->k[kp->numk++]; /* Add to current array. */ + ntv->u64 = u64; + return ntv; +} + +/* Intern 64 bit constant, given by its address. */ +TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64; + for (ref = J->chain[op]; ref; ref = cir[ref].prev) + if (ir_k64(&cir[ref]) == tv) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + lua_assert(checkptrGC(tv)); + setmref(ir->ptr, tv); + ir->t.irt = t; + ir->o = op; + ir->prev = J->chain[op]; + J->chain[op] = (IRRef1)ref; +found: + return TREF(ref, t); +} + +/* Intern FP constant, given by its 64 bit pattern. */ +TRef lj_ir_knum_u64(jit_State *J, uint64_t u64) +{ + return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64)); +} + +/* Intern 64 bit integer constant. */ +TRef lj_ir_kint64(jit_State *J, uint64_t u64) +{ + return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64)); +} + +/* Check whether a number is int and return it. -0 is NOT considered an int. */ +static int numistrueint(lua_Number n, int32_t *kp) +{ + int32_t k = lj_num2int(n); + if (n == (lua_Number)k) { + if (kp) *kp = k; + if (k == 0) { /* Special check for -0. */ + TValue tv; + setnumV(&tv, n); + if (tv.u32.hi != 0) + return 0; + } + return 1; + } + return 0; +} + +/* Intern number as int32_t constant if possible, otherwise as FP constant. */ +TRef lj_ir_knumint(jit_State *J, lua_Number n) +{ + int32_t k; + if (numistrueint(n, &k)) + return lj_ir_kint(J, k); + else + return lj_ir_knum(J, n); +} + +/* Intern GC object "constant". */ +TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + lua_assert(!LJ_GC64); /* TODO_GC64: major changes required. */ + lua_assert(!isdead(J2G(J), o)); + for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) + if (ir_kgc(&cir[ref]) == o) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + /* NOBARRIER: Current trace is a GC root. */ + setgcref(ir->gcr, o); + ir->t.irt = (uint8_t)t; + ir->o = IR_KGC; + ir->prev = J->chain[IR_KGC]; + J->chain[IR_KGC] = (IRRef1)ref; +found: + return TREF(ref, t); +} + +/* Intern 32 bit pointer constant. 
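** First an aside on numistrueint() above: -0 must not be treated as an
** integer constant, because 0.0 and -0.0 are distinct doubles that compare
** equal; only the sign bit (the high word checked via tv.u32.hi) tells them
** apart. A small standalone check, using only the C standard library: */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  union { double d; uint64_t u; } pz, nz;
  pz.d = 0.0;
  nz.d = -0.0;
  /* Equal as numbers, different as bit patterns (sign bit set for -0.0). */
  printf("%d\n", pz.d == nz.d);                        /* 1 */
  printf("%016llx %016llx\n",
         (unsigned long long)pz.u, (unsigned long long)nz.u);
  /* 0000000000000000 8000000000000000 */
  return 0;
}

/* Back to interning: a 32 bit pointer constant goes through the same
** search-the-chain-or-append pattern as KINT above.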
*/ +TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr); + for (ref = J->chain[op]; ref; ref = cir[ref].prev) + if (mref(cir[ref].ptr, void) == ptr) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + setmref(ir->ptr, ptr); + ir->t.irt = IRT_P32; + ir->o = op; + ir->prev = J->chain[op]; + J->chain[op] = (IRRef1)ref; +found: + return TREF(ref, IRT_P32); +} + +/* Intern typed NULL constant. */ +TRef lj_ir_knull(jit_State *J, IRType t) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev) + if (irt_t(cir[ref].t) == t) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + ir->i = 0; + ir->t.irt = (uint8_t)t; + ir->o = IR_KNULL; + ir->prev = J->chain[IR_KNULL]; + J->chain[IR_KNULL] = (IRRef1)ref; +found: + return TREF(ref, t); +} + +/* Intern key slot. */ +TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot); + IRRef ref; + /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */ + lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot); + for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev) + if (cir[ref].op12 == op12) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + ir->op12 = op12; + ir->t.irt = IRT_P32; + ir->o = IR_KSLOT; + ir->prev = J->chain[IR_KSLOT]; + J->chain[IR_KSLOT] = (IRRef1)ref; +found: + return TREF(ref, IRT_P32); +} + +/* -- Access to IR constants ---------------------------------------------- */ + +/* Copy value of IR constant. */ +void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) +{ + UNUSED(L); + lua_assert(ir->o != IR_KSLOT); /* Common mistake. */ + switch (ir->o) { + case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break; + case IR_KINT: setintV(tv, ir->i); break; + case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break; + case IR_KPTR: case IR_KKPTR: case IR_KNULL: + setlightudV(tv, mref(ir->ptr, void)); + break; + case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break; +#if LJ_HASFFI + case IR_KINT64: { + GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8); + *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64; + setcdataV(L, tv, cd); + break; + } +#endif + default: lua_assert(0); break; + } +} + +/* -- Convert IR operand types -------------------------------------------- */ + +/* Convert from string to number. */ +TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr) +{ + if (!tref_isnumber(tr)) { + if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + else + lj_trace_err(J, LJ_TRERR_BADTYPE); + } + return tr; +} + +/* Convert from integer or string to number. */ +TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr) +{ + if (!tref_isnum(tr)) { + if (tref_isinteger(tr)) + tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); + else if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + else + lj_trace_err(J, LJ_TRERR_BADTYPE); + } + return tr; +} + +/* Convert from integer or number to string. */ +TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr) +{ + if (!tref_isstr(tr)) { + if (!tref_isnumber(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, + tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT); + } + return tr; +} + +/* -- Miscellaneous IR ops ------------------------------------------------ */ + +/* Evaluate numeric comparison. 
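** The unordered variants below are what make this non-trivial: IR_ULT is
** defined as !(a >= b), which is true when either operand is NaN, whereas
** IR_LT is false in that case. A one-line standalone check of the difference
** (standard C only): */

#include <math.h>
#include <stdio.h>

int main(void)
{
  double x = NAN;
  /* Ordered:   x < 1.0    -> 0 (any ordered comparison with NaN is false). */
  /* Unordered: !(x >= 1.0) -> 1 (unordered-or-less: true for NaN operands). */
  printf("%d %d\n", x < 1.0, !(x >= 1.0));   /* prints: 0 1 */
  return 0;
}

/* The evaluator itself: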
*/ +int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op) +{ + switch (op) { + case IR_EQ: return (a == b); + case IR_NE: return (a != b); + case IR_LT: return (a < b); + case IR_GE: return (a >= b); + case IR_LE: return (a <= b); + case IR_GT: return (a > b); + case IR_ULT: return !(a >= b); + case IR_UGE: return !(a < b); + case IR_ULE: return !(a > b); + case IR_UGT: return !(a <= b); + default: lua_assert(0); return 0; + } +} + +/* Evaluate string comparison. */ +int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op) +{ + int res = lj_str_cmp(a, b); + switch (op) { + case IR_LT: return (res < 0); + case IR_GE: return (res >= 0); + case IR_LE: return (res <= 0); + case IR_GT: return (res > 0); + default: lua_assert(0); return 0; + } +} + +/* Rollback IR to previous state. */ +void lj_ir_rollback(jit_State *J, IRRef ref) +{ + IRRef nins = J->cur.nins; + while (nins > ref) { + IRIns *ir; + nins--; + ir = IR(nins); + J->chain[ir->o] = ir->prev; + } + J->cur.nins = nins; +} + +#undef IR +#undef fins +#undef emitir + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ir.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ir.h new file mode 100644 index 00000000000..56e19774c97 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ir.h @@ -0,0 +1,577 @@ +/* +** SSA IR (Intermediate Representation) format. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_IR_H +#define _LJ_IR_H + +#include "lj_obj.h" + +/* -- IR instructions ----------------------------------------------------- */ + +/* IR instruction definition. Order matters, see below. ORDER IR */ +#define IRDEF(_) \ + /* Guarded assertions. */ \ + /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \ + _(LT, N , ref, ref) \ + _(GE, N , ref, ref) \ + _(LE, N , ref, ref) \ + _(GT, N , ref, ref) \ + \ + _(ULT, N , ref, ref) \ + _(UGE, N , ref, ref) \ + _(ULE, N , ref, ref) \ + _(UGT, N , ref, ref) \ + \ + _(EQ, C , ref, ref) \ + _(NE, C , ref, ref) \ + \ + _(ABC, N , ref, ref) \ + _(RETF, S , ref, ref) \ + \ + /* Miscellaneous ops. */ \ + _(NOP, N , ___, ___) \ + _(BASE, N , lit, lit) \ + _(PVAL, N , lit, ___) \ + _(GCSTEP, S , ___, ___) \ + _(HIOP, S , ref, ref) \ + _(LOOP, S , ___, ___) \ + _(USE, S , ref, ___) \ + _(PHI, S , ref, ref) \ + _(RENAME, S , ref, lit) \ + _(PROF, S , ___, ___) \ + \ + /* Constants. */ \ + _(KPRI, N , ___, ___) \ + _(KINT, N , cst, ___) \ + _(KGC, N , cst, ___) \ + _(KPTR, N , cst, ___) \ + _(KKPTR, N , cst, ___) \ + _(KNULL, N , cst, ___) \ + _(KNUM, N , cst, ___) \ + _(KINT64, N , cst, ___) \ + _(KSLOT, N , ref, lit) \ + \ + /* Bit ops. */ \ + _(BNOT, N , ref, ___) \ + _(BSWAP, N , ref, ___) \ + _(BAND, C , ref, ref) \ + _(BOR, C , ref, ref) \ + _(BXOR, C , ref, ref) \ + _(BSHL, N , ref, ref) \ + _(BSHR, N , ref, ref) \ + _(BSAR, N , ref, ref) \ + _(BROL, N , ref, ref) \ + _(BROR, N , ref, ref) \ + \ + /* Arithmetic ops. ORDER ARITH */ \ + _(ADD, C , ref, ref) \ + _(SUB, N , ref, ref) \ + _(MUL, C , ref, ref) \ + _(DIV, N , ref, ref) \ + _(MOD, N , ref, ref) \ + _(POW, N , ref, ref) \ + _(NEG, N , ref, ref) \ + \ + _(ABS, N , ref, ref) \ + _(ATAN2, N , ref, ref) \ + _(LDEXP, N , ref, ref) \ + _(MIN, C , ref, ref) \ + _(MAX, C , ref, ref) \ + _(FPMATH, N , ref, lit) \ + \ + /* Overflow-checking arithmetic ops. */ \ + _(ADDOV, CW, ref, ref) \ + _(SUBOV, NW, ref, ref) \ + _(MULOV, CW, ref, ref) \ + \ + /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \ + \ + /* Memory references. 
*/ \ + _(AREF, R , ref, ref) \ + _(HREFK, R , ref, ref) \ + _(HREF, L , ref, ref) \ + _(NEWREF, S , ref, ref) \ + _(UREFO, LW, ref, lit) \ + _(UREFC, LW, ref, lit) \ + _(FREF, R , ref, lit) \ + _(STRREF, N , ref, ref) \ + _(LREF, L , ___, ___) \ + \ + /* Loads and Stores. These must be in the same order. */ \ + _(ALOAD, L , ref, ___) \ + _(HLOAD, L , ref, ___) \ + _(ULOAD, L , ref, ___) \ + _(FLOAD, L , ref, lit) \ + _(XLOAD, L , ref, lit) \ + _(SLOAD, L , lit, lit) \ + _(VLOAD, L , ref, ___) \ + \ + _(ASTORE, S , ref, ref) \ + _(HSTORE, S , ref, ref) \ + _(USTORE, S , ref, ref) \ + _(FSTORE, S , ref, ref) \ + _(XSTORE, S , ref, ref) \ + \ + /* Allocations. */ \ + _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \ + _(XSNEW, A , ref, ref) \ + _(TNEW, AW, lit, lit) \ + _(TDUP, AW, ref, ___) \ + _(CNEW, AW, ref, ref) \ + _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \ + \ + /* Buffer operations. */ \ + _(BUFHDR, L , ref, lit) \ + _(BUFPUT, L , ref, ref) \ + _(BUFSTR, A , ref, ref) \ + \ + /* Barriers. */ \ + _(TBAR, S , ref, ___) \ + _(OBAR, S , ref, ref) \ + _(XBAR, S , ___, ___) \ + \ + /* Type conversions. */ \ + _(CONV, NW, ref, lit) \ + _(TOBIT, N , ref, ref) \ + _(TOSTR, N , ref, lit) \ + _(STRTO, N , ref, ___) \ + \ + /* Calls. */ \ + _(CALLN, N , ref, lit) \ + _(CALLA, A , ref, lit) \ + _(CALLL, L , ref, lit) \ + _(CALLS, S , ref, lit) \ + _(CALLXS, S , ref, ref) \ + _(CARG, N , ref, ref) \ + \ + /* End of list. */ + +/* IR opcodes (max. 256). */ +typedef enum { +#define IRENUM(name, m, m1, m2) IR_##name, +IRDEF(IRENUM) +#undef IRENUM + IR__MAX +} IROp; + +/* Stored opcode. */ +typedef uint8_t IROp1; + +LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE); +LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE); +LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT); +LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT); +LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT); + +/* Delta between xLOAD and xSTORE. */ +#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD) + +LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE); +LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE); +LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE); +LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE); + +/* -- Named IR literals --------------------------------------------------- */ + +/* FPMATH sub-functions. ORDER FPM. */ +#define IRFPMDEF(_) \ + _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \ + _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \ + _(SIN) _(COS) _(TAN) \ + _(OTHER) + +typedef enum { +#define FPMENUM(name) IRFPM_##name, +IRFPMDEF(FPMENUM) +#undef FPMENUM + IRFPM__MAX +} IRFPMathOp; + +/* FLOAD fields. 
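** First, a note on the opcode layout that the LJ_STATIC_ASSERTs above pin
** down: keeping LT/GE, LE/GT and EQ/NE adjacent lets a guard be inverted with
** op^1, and op^4 switches a comparison between its ordered and unordered
** form. A toy enum with the same layout (hypothetical D_* names) shows the
** arithmetic: */

#include <assert.h>

/* Hypothetical mirror of the comparison block's ordering (ORDER IR). */
enum {
  D_LT, D_GE, D_LE, D_GT,          /* Ordered comparisons.   */
  D_ULT, D_UGE, D_ULE, D_UGT,      /* Unordered comparisons. */
  D_EQ, D_NE                       /* (In)equality.          */
};

int main(void)
{
  assert((D_LT ^ 1) == D_GE);      /* Invert a guard.        */
  assert((D_LE ^ 1) == D_GT);
  assert((D_EQ ^ 1) == D_NE);
  assert((D_LT ^ 3) == D_GT);      /* Swap operand order.    */
  assert((D_LT ^ 4) == D_ULT);     /* Ordered <-> unordered. */
  assert((D_GT ^ 4) == D_UGT);
  return 0;
}

/* FLOAD fields: the IRFL_* IDs and the byte offsets loaded by the FLOAD
** instruction.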
*/ +#define IRFLDEF(_) \ + _(STR_LEN, offsetof(GCstr, len)) \ + _(FUNC_ENV, offsetof(GCfunc, l.env)) \ + _(FUNC_PC, offsetof(GCfunc, l.pc)) \ + _(FUNC_FFID, offsetof(GCfunc, l.ffid)) \ + _(THREAD_ENV, offsetof(lua_State, env)) \ + _(TAB_META, offsetof(GCtab, metatable)) \ + _(TAB_ARRAY, offsetof(GCtab, array)) \ + _(TAB_NODE, offsetof(GCtab, node)) \ + _(TAB_ASIZE, offsetof(GCtab, asize)) \ + _(TAB_HMASK, offsetof(GCtab, hmask)) \ + _(TAB_NOMM, offsetof(GCtab, nomm)) \ + _(UDATA_META, offsetof(GCudata, metatable)) \ + _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \ + _(UDATA_FILE, sizeof(GCudata)) \ + _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \ + _(CDATA_PTR, sizeof(GCcdata)) \ + _(CDATA_INT, sizeof(GCcdata)) \ + _(CDATA_INT64, sizeof(GCcdata)) \ + _(CDATA_INT64_4, sizeof(GCcdata) + 4) + +typedef enum { +#define FLENUM(name, ofs) IRFL_##name, +IRFLDEF(FLENUM) +#undef FLENUM + IRFL__MAX +} IRFieldID; + +/* SLOAD mode bits, stored in op2. */ +#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */ +#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */ +#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */ +#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */ +#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */ +#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */ + +/* XLOAD mode, stored in op2. */ +#define IRXLOAD_READONLY 1 /* Load from read-only data. */ +#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */ +#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */ + +/* BUFHDR mode, stored in op2. */ +#define IRBUFHDR_RESET 0 /* Reset buffer. */ +#define IRBUFHDR_APPEND 1 /* Append to buffer. */ + +/* CONV mode, stored in op2. */ +#define IRCONV_SRCMASK 0x001f /* Source IRType. */ +#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */ +#define IRCONV_DSH 5 +#define IRCONV_NUM_INT ((IRT_NUM<>2)&3)) +#define irm_iscomm(m) ((m) & IRM_C) +#define irm_kind(m) ((m) & IRM_S) + +#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W), + +LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1]; + +/* -- IR instruction types ------------------------------------------------ */ + +/* Map of itypes to non-negative numbers. ORDER LJ_T. +** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for +** IRT_P32 and IRT_P64, which never escape the IR. +** The various integers are only used in the IR and can only escape to +** a TValue after implicit or explicit conversion. Their types must be +** contiguous and next to IRT_NUM (see the typerange macros below). +*/ +#define IRTDEF(_) \ + _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \ + _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \ + _(TAB, 4) _(UDATA, 4) \ + _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \ + _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \ + _(SOFTFP, 4) /* There is room for 9 more types. */ + +/* IR result type and flags (8 bit). */ +typedef enum { +#define IRTENUM(name, size) IRT_##name, +IRTDEF(IRTENUM) +#undef IRTENUM + IRT__MAX, + + /* Native pointer type and the corresponding integer type. */ + IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32, + IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT, + IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32, + /* TODO_GC64: major changes required for all uses of IRT_P32. */ + + /* Additional flags. */ + IRT_MARK = 0x20, /* Marker for misc. purposes. */ + IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */ + IRT_GUARD = 0x80, /* Instruction is a guard. */ + + /* Masks. 
*/ + IRT_TYPE = 0x1f, + IRT_T = 0xff +} IRType; + +#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE) + +/* Stored IRType. */ +typedef struct IRType1 { uint8_t irt; } IRType1; + +#define IRT(o, t) ((uint32_t)(((o)<<8) | (t))) +#define IRTI(o) (IRT((o), IRT_INT)) +#define IRTN(o) (IRT((o), IRT_NUM)) +#define IRTG(o, t) (IRT((o), IRT_GUARD|(t))) +#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT)) + +#define irt_t(t) ((IRType)(t).irt) +#define irt_type(t) ((IRType)((t).irt & IRT_TYPE)) +#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0) +#define irt_typerange(t, first, last) \ + ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first)) + +#define irt_isnil(t) (irt_type(t) == IRT_NIL) +#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE) +#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD) +#define irt_isstr(t) (irt_type(t) == IRT_STR) +#define irt_istab(t) (irt_type(t) == IRT_TAB) +#define irt_iscdata(t) (irt_type(t) == IRT_CDATA) +#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT) +#define irt_isnum(t) (irt_type(t) == IRT_NUM) +#define irt_isint(t) (irt_type(t) == IRT_INT) +#define irt_isi8(t) (irt_type(t) == IRT_I8) +#define irt_isu8(t) (irt_type(t) == IRT_U8) +#define irt_isi16(t) (irt_type(t) == IRT_I16) +#define irt_isu16(t) (irt_type(t) == IRT_U16) +#define irt_isu32(t) (irt_type(t) == IRT_U32) +#define irt_isi64(t) (irt_type(t) == IRT_I64) +#define irt_isu64(t) (irt_type(t) == IRT_U64) + +#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t)) +#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT)) +#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA)) +#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA)) +#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64)) + +#if LJ_GC64 +#define IRT_IS64 \ + ((1u<> irt_type(t)) & 1) +#define irt_is64orfp(t) (((IRT_IS64|(1u<>irt_type(t)) & 1) + +#define irt_size(t) (lj_ir_type_size[irt_t((t))]) + +LJ_DATA const uint8_t lj_ir_type_size[]; + +static LJ_AINLINE IRType itype2irt(const TValue *tv) +{ + if (tvisint(tv)) + return IRT_INT; + else if (tvisnum(tv)) + return IRT_NUM; +#if LJ_64 && !LJ_GC64 + else if (tvislightud(tv)) + return IRT_LIGHTUD; +#endif + else + return (IRType)~itype(tv); +} + +static LJ_AINLINE uint32_t irt_toitype_(IRType t) +{ + lua_assert(!LJ_64 || t != IRT_LIGHTUD); + if (LJ_DUALNUM && t > IRT_NUM) { + return LJ_TISNUM; + } else { + lua_assert(t <= IRT_NUM); + return ~(uint32_t)t; + } +} + +#define irt_toitype(t) irt_toitype_(irt_type((t))) + +#define irt_isguard(t) ((t).irt & IRT_GUARD) +#define irt_ismarked(t) ((t).irt & IRT_MARK) +#define irt_setmark(t) ((t).irt |= IRT_MARK) +#define irt_clearmark(t) ((t).irt &= ~IRT_MARK) +#define irt_isphi(t) ((t).irt & IRT_ISPHI) +#define irt_setphi(t) ((t).irt |= IRT_ISPHI) +#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI) + +/* Stored combined IR opcode and type. */ +typedef uint16_t IROpT; + +/* -- IR references ------------------------------------------------------- */ + +/* IR references. */ +typedef uint16_t IRRef1; /* One stored reference. */ +typedef uint32_t IRRef2; /* Two stored references. */ +typedef uint32_t IRRef; /* Used to pass around references. */ + +/* Fixed references. */ +enum { + REF_BIAS = 0x8000, + REF_TRUE = REF_BIAS-3, + REF_FALSE = REF_BIAS-2, + REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */ + REF_BASE = REF_BIAS, /* /--- IR grows upwards. */ + REF_FIRST = REF_BIAS+1, + REF_DROP = 0xffff +}; + +/* Note: IRMlit operands must be < REF_BIAS, too! 
+** This allows for fast and uniform manipulation of all operands +** without looking up the operand mode in lj_ir_mode: +** - CSE calculates the maximum reference of two operands. +** This must work with mixed reference/literal operands, too. +** - DCE marking only checks for operand >= REF_BIAS. +** - LOOP needs to substitute reference operands. +** Constant references and literals must not be modified. +*/ + +#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16)) + +#define irref_isk(ref) ((ref) < REF_BIAS) + +/* Tagged IR references (32 bit). +** +** +-------+-------+---------------+ +** | irt | flags | ref | +** +-------+-------+---------------+ +** +** The tag holds a copy of the IRType and speeds up IR type checks. +*/ +typedef uint32_t TRef; + +#define TREF_REFMASK 0x0000ffff +#define TREF_FRAME 0x00010000 +#define TREF_CONT 0x00020000 + +#define TREF(ref, t) ((TRef)((ref) + ((t)<<24))) + +#define tref_ref(tr) ((IRRef1)(tr)) +#define tref_t(tr) ((IRType)((tr)>>24)) +#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE)) +#define tref_typerange(tr, first, last) \ + ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first)) + +#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24)) +#define tref_isnil(tr) (tref_istype((tr), IRT_NIL)) +#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE)) +#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE)) +#define tref_islightud(tr) (tref_istype((tr), IRT_LIGHTUD)) +#define tref_isstr(tr) (tref_istype((tr), IRT_STR)) +#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC)) +#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA)) +#define tref_istab(tr) (tref_istype((tr), IRT_TAB)) +#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA)) +#define tref_isnum(tr) (tref_istype((tr), IRT_NUM)) +#define tref_isint(tr) (tref_istype((tr), IRT_INT)) + +#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE)) +#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE)) +#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE)) +#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT)) +#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT)) +#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr))) +#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA)) + +#define tref_isk(tr) (irref_isk(tref_ref((tr)))) +#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2)))) + +#define TREF_PRI(t) (TREF(REF_NIL-(t), (t))) +#define TREF_NIL (TREF_PRI(IRT_NIL)) +#define TREF_FALSE (TREF_PRI(IRT_FALSE)) +#define TREF_TRUE (TREF_PRI(IRT_TRUE)) + +/* -- IR format ----------------------------------------------------------- */ + +/* IR instruction format (64 bit). +** +** 16 16 8 8 8 8 +** +-------+-------+---+---+---+---+ +** | op1 | op2 | t | o | r | s | +** +-------+-------+---+---+---+---+ +** | op12/i/gco | ot | prev | (alternative fields in union) +** +---------------+-------+-------+ +** 32 16 16 +** +** prev is only valid prior to register allocation and then reused for r + s. +*/ + +typedef union IRIns { + struct { + LJ_ENDIAN_LOHI( + IRRef1 op1; /* IR operand 1. */ + , IRRef1 op2; /* IR operand 2. */ + ) + IROpT ot; /* IR opcode and type (overlaps t and o). */ + IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */ + }; + struct { + IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */ + LJ_ENDIAN_LOHI( + IRType1 t; /* IR type. */ + , IROp1 o; /* IR opcode. */ + ) + LJ_ENDIAN_LOHI( + uint8_t r; /* Register allocation (overlaps prev). 
*/ + , uint8_t s; /* Spill slot allocation (overlaps prev). */ + ) + }; + int32_t i; /* 32 bit signed integer literal (overlaps op12). */ + GCRef gcr; /* GCobj constant (overlaps op12). */ + MRef ptr; /* Pointer constant (overlaps op12). */ +} IRIns; + +/* TODO_GC64: major changes required. */ +#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr)) +#define ir_kstr(ir) (gco2str(ir_kgc((ir)))) +#define ir_ktab(ir) (gco2tab(ir_kgc((ir)))) +#define ir_kfunc(ir) (gco2func(ir_kgc((ir)))) +#define ir_kcdata(ir) (gco2cd(ir_kgc((ir)))) +#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue)) +#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) +#define ir_k64(ir) \ + check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) +#define ir_kptr(ir) \ + check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void)) + +/* A store or any other op with a non-weak guard has a side-effect. */ +static LJ_AINLINE int ir_sideeff(IRIns *ir) +{ + return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S); +} + +LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_ircall.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_ircall.h new file mode 100644 index 00000000000..84e41ecfccb --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_ircall.h @@ -0,0 +1,321 @@ +/* +** IR CALL* instruction definitions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_IRCALL_H +#define _LJ_IRCALL_H + +#include "lj_obj.h" +#include "lj_ir.h" +#include "lj_jit.h" + +/* C call info for CALL* instructions. */ +typedef struct CCallInfo { + ASMFunction func; /* Function pointer. */ + uint32_t flags; /* Number of arguments and flags. */ +} CCallInfo; + +#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* # of args. */ +#define CCI_NARGS_MAX 32 /* Max. # of args. */ + +#define CCI_OTSHIFT 16 +#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */ +#define CCI_OPSHIFT 24 +#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */ + +#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT) +#define CCI_CALL_A (IR_CALLA << CCI_OPSHIFT) +#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT) +#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT) +#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL) +#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL) +#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL) + +/* C call info flags. */ +#define CCI_L 0x0100 /* Implicit L arg. */ +#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */ +#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */ +#define CCI_VARARG 0x0800 /* Vararg function. */ + +#define CCI_CC_MASK 0x3000 /* Calling convention mask. */ +#define CCI_CC_SHIFT 12 +/* ORDER CC */ +#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */ +#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */ +#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */ +#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */ + +/* Extra args for SOFTFP, SPLIT 64 bit. */ +#define CCI_XARGS_SHIFT 14 +#define CCI_XARGS(ci) (((ci)->flags >> CCI_XARGS_SHIFT) & 3) +#define CCI_XA (1u << CCI_XARGS_SHIFT) + +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) +#define CCI_XNARGS(ci) (CCI_NARGS((ci)) + CCI_XARGS((ci))) +#else +#define CCI_XNARGS(ci) CCI_NARGS((ci)) +#endif + +/* Helpers for conditional function definitions. 
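** Before those, a quick illustration of how the CCallInfo flags word above is
** packed: the argument count sits in the low byte, the IR opcode and result
** type in the upper 16 bits. The numbers below are arbitrary stand-ins, not
** real IR or IRType values: */

#include <assert.h>
#include <stdint.h>

int main(void)
{
  uint32_t op = 85, type = 13, nargs = 3;            /* Arbitrary values. */
  uint32_t flags = nargs | (type << 16) | (op << 24);
  assert((flags & 0xff) == nargs);                   /* CCI_NARGS  */
  assert((flags >> 24) == op);                       /* CCI_OP     */
  assert((flags >> 16) == ((op << 8) | type));       /* CCI_OPTYPE */
  return 0;
}

/* Helpers for conditional function definitions: they blank out IRCALLDEF
** entries that do not apply to the current target configuration.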
*/ +#define IRCALLCOND_ANY(x) x + +#if LJ_TARGET_X86ORX64 +#define IRCALLCOND_FPMATH(x) NULL +#else +#define IRCALLCOND_FPMATH(x) x +#endif + +#if LJ_SOFTFP +#define IRCALLCOND_SOFTFP(x) x +#if LJ_HASFFI +#define IRCALLCOND_SOFTFP_FFI(x) x +#else +#define IRCALLCOND_SOFTFP_FFI(x) NULL +#endif +#else +#define IRCALLCOND_SOFTFP(x) NULL +#define IRCALLCOND_SOFTFP_FFI(x) NULL +#endif + +#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS) + +#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64) +#define IRCALLCOND_FP64_FFI(x) x +#else +#define IRCALLCOND_FP64_FFI(x) NULL +#endif + +#if LJ_HASFFI +#define IRCALLCOND_FFI(x) x +#if LJ_32 +#define IRCALLCOND_FFI32(x) x +#else +#define IRCALLCOND_FFI32(x) NULL +#endif +#else +#define IRCALLCOND_FFI(x) NULL +#define IRCALLCOND_FFI32(x) NULL +#endif + +#if LJ_TARGET_X86 +#define CCI_RANDFPR 0 /* Clang on OSX/x86 is overzealous. */ +#else +#define CCI_RANDFPR CCI_NOFPRCLOBBER +#endif + +#if LJ_SOFTFP +#define XA_FP CCI_XA +#define XA2_FP (CCI_XA+CCI_XA) +#else +#define XA_FP 0 +#define XA2_FP 0 +#endif + +#if LJ_32 +#define XA_64 CCI_XA +#define XA2_64 (CCI_XA+CCI_XA) +#else +#define XA_64 0 +#define XA2_64 0 +#endif + +/* Function definitions for CALL* instructions. */ +#define IRCALLDEF(_) \ + _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \ + _(ANY, lj_str_find, 4, N, P32, 0) \ + _(ANY, lj_str_new, 3, S, STR, CCI_L) \ + _(ANY, lj_strscan_num, 2, FN, INT, 0) \ + _(ANY, lj_strfmt_int, 2, FN, STR, CCI_L) \ + _(ANY, lj_strfmt_num, 2, FN, STR, CCI_L) \ + _(ANY, lj_strfmt_char, 2, FN, STR, CCI_L) \ + _(ANY, lj_strfmt_putint, 2, FL, P32, 0) \ + _(ANY, lj_strfmt_putnum, 2, FL, P32, 0) \ + _(ANY, lj_strfmt_putquoted, 2, FL, P32, 0) \ + _(ANY, lj_strfmt_putfxint, 3, L, P32, XA_64) \ + _(ANY, lj_strfmt_putfnum_int, 3, L, P32, XA_FP) \ + _(ANY, lj_strfmt_putfnum_uint, 3, L, P32, XA_FP) \ + _(ANY, lj_strfmt_putfnum, 3, L, P32, XA_FP) \ + _(ANY, lj_strfmt_putfstr, 3, L, P32, 0) \ + _(ANY, lj_strfmt_putfchar, 3, L, P32, 0) \ + _(ANY, lj_buf_putmem, 3, S, P32, 0) \ + _(ANY, lj_buf_putstr, 2, FL, P32, 0) \ + _(ANY, lj_buf_putchar, 2, FL, P32, 0) \ + _(ANY, lj_buf_putstr_reverse, 2, FL, P32, 0) \ + _(ANY, lj_buf_putstr_lower, 2, FL, P32, 0) \ + _(ANY, lj_buf_putstr_upper, 2, FL, P32, 0) \ + _(ANY, lj_buf_putstr_rep, 3, L, P32, 0) \ + _(ANY, lj_buf_puttab, 5, L, P32, 0) \ + _(ANY, lj_buf_tostr, 1, FL, STR, 0) \ + _(ANY, lj_tab_new_ah, 3, A, TAB, CCI_L) \ + _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \ + _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \ + _(ANY, lj_tab_clear, 1, FS, NIL, 0) \ + _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \ + _(ANY, lj_tab_len, 1, FL, INT, 0) \ + _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \ + _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \ + _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \ + _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_RANDFPR)\ + _(ANY, lj_vm_modi, 2, FN, INT, 0) \ + _(ANY, sinh, 1, N, NUM, XA_FP) \ + _(ANY, cosh, 1, N, NUM, XA_FP) \ + _(ANY, tanh, 1, N, NUM, XA_FP) \ + _(ANY, fputc, 2, S, INT, 0) \ + _(ANY, fwrite, 4, S, INT, 0) \ + _(ANY, fflush, 1, S, INT, 0) \ + /* ORDER FPM */ \ + _(FPMATH, lj_vm_floor, 1, N, NUM, XA_FP) \ + _(FPMATH, lj_vm_ceil, 1, N, NUM, XA_FP) \ + _(FPMATH, lj_vm_trunc, 1, N, NUM, XA_FP) \ + _(FPMATH, sqrt, 1, N, NUM, XA_FP) \ + _(ANY, exp, 1, N, NUM, XA_FP) \ + _(ANY, lj_vm_exp2, 1, N, NUM, XA_FP) \ + _(ANY, log, 1, N, NUM, XA_FP) \ + _(ANY, lj_vm_log2, 1, N, NUM, XA_FP) \ + _(ANY, log10, 1, N, NUM, XA_FP) \ + _(ANY, sin, 1, N, NUM, XA_FP) \ + _(ANY, cos, 1, N, NUM, XA_FP) \ + _(ANY, tan, 1, N, NUM, 
XA_FP) \ + _(ANY, lj_vm_powi, 2, N, NUM, XA_FP) \ + _(ANY, pow, 2, N, NUM, XA2_FP) \ + _(ANY, atan2, 2, N, NUM, XA2_FP) \ + _(ANY, ldexp, 2, N, NUM, XA_FP) \ + _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \ + _(SOFTFP, softfp_add, 4, N, NUM, 0) \ + _(SOFTFP, softfp_sub, 4, N, NUM, 0) \ + _(SOFTFP, softfp_mul, 4, N, NUM, 0) \ + _(SOFTFP, softfp_div, 4, N, NUM, 0) \ + _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \ + _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \ + _(SOFTFP, softfp_d2i, 2, N, INT, 0) \ + _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \ + _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \ + _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \ + _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \ + _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \ + _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \ + _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \ + _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \ + _(FP64_FFI, fp64_l2d, 1, N, NUM, XA_64) \ + _(FP64_FFI, fp64_ul2d, 1, N, NUM, XA_64) \ + _(FP64_FFI, fp64_l2f, 1, N, FLOAT, XA_64) \ + _(FP64_FFI, fp64_ul2f, 1, N, FLOAT, XA_64) \ + _(FP64_FFI, fp64_d2l, 1, N, I64, XA_FP) \ + _(FP64_FFI, fp64_d2ul, 1, N, U64, XA_FP) \ + _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \ + _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \ + _(FFI, lj_carith_divi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI, lj_carith_divu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI, lj_carith_modi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI, lj_carith_modu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI, lj_carith_powi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI, lj_carith_powu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI, lj_cdata_newv, 4, S, CDATA, CCI_L) \ + _(FFI, lj_cdata_setfin, 4, S, NIL, CCI_L) \ + _(FFI, strlen, 1, L, INTP, 0) \ + _(FFI, memcpy, 3, S, PTR, 0) \ + _(FFI, memset, 3, S, PTR, 0) \ + _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \ + _(FFI32, lj_carith_mul64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \ + _(FFI32, lj_carith_shl64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ + _(FFI32, lj_carith_shr64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ + _(FFI32, lj_carith_sar64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ + _(FFI32, lj_carith_rol64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ + _(FFI32, lj_carith_ror64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \ + \ + /* End of list. */ + +typedef enum { +#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name, +IRCALLDEF(IRCALLENUM) +#undef IRCALLENUM + IRCALL__MAX +} IRCallID; + +LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...); + +LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1]; + +/* Soft-float declarations. 
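** Before them, a note on how the IRCALLDEF table above is consumed: it is an
** X-macro that is expanded twice, once into the IRCALL_* enum (here) and once
** into the lj_ir_callinfo[] array (in lj_ir.c). A toy table with invented
** entries (DEMO_* names are illustrative only) shows the pattern: */

#include <stddef.h>

/* Toy X-macro table. */
#define DEMO_CALLDEF(_) \
  _(demo_sin, 1) \
  _(demo_pow, 2)

/* Expansion 1: an enum of call IDs. */
typedef enum {
#define DEMO_ENUM(name, nargs) DEMOCALL_##name,
DEMO_CALLDEF(DEMO_ENUM)
#undef DEMO_ENUM
  DEMOCALL__MAX
} DemoCallID;

/* Expansion 2: a parallel info array, indexable by the same IDs. */
typedef struct { const char *name; int nargs; } DemoCallInfo;

static const DemoCallInfo demo_callinfo[DEMOCALL__MAX+1] = {
#define DEMO_CI(name, nargs) { #name, (nargs) },
DEMO_CALLDEF(DEMO_CI)
#undef DEMO_CI
  { NULL, 0 }
};

/* Soft-float declarations: the generic softfp_* and fp64_* names are mapped
** onto the target runtime's helper routines.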
*/ +#if LJ_SOFTFP +#if LJ_TARGET_ARM +#define softfp_add __aeabi_dadd +#define softfp_sub __aeabi_dsub +#define softfp_mul __aeabi_dmul +#define softfp_div __aeabi_ddiv +#define softfp_cmp __aeabi_cdcmple +#define softfp_i2d __aeabi_i2d +#define softfp_d2i __aeabi_d2iz +#define softfp_ui2d __aeabi_ui2d +#define softfp_f2d __aeabi_f2d +#define softfp_d2ui __aeabi_d2uiz +#define softfp_d2f __aeabi_d2f +#define softfp_i2f __aeabi_i2f +#define softfp_ui2f __aeabi_ui2f +#define softfp_f2i __aeabi_f2iz +#define softfp_f2ui __aeabi_f2uiz +#define fp64_l2d __aeabi_l2d +#define fp64_ul2d __aeabi_ul2d +#define fp64_l2f __aeabi_l2f +#define fp64_ul2f __aeabi_ul2f +#if LJ_TARGET_IOS +#define fp64_d2l __fixdfdi +#define fp64_d2ul __fixunsdfdi +#define fp64_f2l __fixsfdi +#define fp64_f2ul __fixunssfdi +#else +#define fp64_d2l __aeabi_d2lz +#define fp64_d2ul __aeabi_d2ulz +#define fp64_f2l __aeabi_f2lz +#define fp64_f2ul __aeabi_f2ulz +#endif +#else +#error "Missing soft-float definitions for target architecture" +#endif +extern double softfp_add(double a, double b); +extern double softfp_sub(double a, double b); +extern double softfp_mul(double a, double b); +extern double softfp_div(double a, double b); +extern void softfp_cmp(double a, double b); +extern double softfp_i2d(int32_t a); +extern int32_t softfp_d2i(double a); +#if LJ_HASFFI +extern double softfp_ui2d(uint32_t a); +extern double softfp_f2d(float a); +extern uint32_t softfp_d2ui(double a); +extern float softfp_d2f(double a); +extern float softfp_i2f(int32_t a); +extern float softfp_ui2f(uint32_t a); +extern int32_t softfp_f2i(float a); +extern uint32_t softfp_f2ui(float a); +#endif +#endif + +#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP) +#ifdef __GNUC__ +#define fp64_l2d __floatdidf +#define fp64_ul2d __floatundidf +#define fp64_l2f __floatdisf +#define fp64_ul2f __floatundisf +#define fp64_d2l __fixdfdi +#define fp64_d2ul __fixunsdfdi +#define fp64_f2l __fixsfdi +#define fp64_f2ul __fixunssfdi +#else +#error "Missing fp64 helper definitions for this compiler" +#endif +#endif + +#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64) +extern double fp64_l2d(int64_t a); +extern double fp64_ul2d(uint64_t a); +extern float fp64_l2f(int64_t a); +extern float fp64_ul2f(uint64_t a); +extern int64_t fp64_d2l(double a); +extern uint64_t fp64_d2ul(double a); +extern int64_t fp64_f2l(float a); +extern uint64_t fp64_f2ul(float a); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_iropt.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_iropt.h new file mode 100644 index 00000000000..4e424e70045 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_iropt.h @@ -0,0 +1,161 @@ +/* +** Common header for IR emitter and optimizations. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_IROPT_H +#define _LJ_IROPT_H + +#include + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +/* IR emitter. */ +LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J); + +/* Save current IR in J->fold.ins, but do not emit it (yet). */ +static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b) +{ + J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b; +} + +#define lj_ir_set(J, ot, a, b) \ + lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b)) + +/* Get ref of next IR instruction and optionally grow IR. +** Note: this may invalidate all IRIns*! 
+*/ +static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J) +{ + IRRef ref = J->cur.nins; + if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J); + J->cur.nins = ref + 1; + return ref; +} + +/* Interning of constants. */ +LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k); +LJ_FUNC void lj_ir_k64_freeall(jit_State *J); +LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv); +LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64); +LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64); +LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n); +LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64); +LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t); +LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr); +LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t); +LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot); + +#if LJ_64 +#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k)) +#else +#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k)) +#endif + +static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n) +{ + TValue tv; + tv.n = n; + return lj_ir_knum_u64(J, tv.u64); +} + +#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR) +#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB) +#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC) +#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr)) +#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr)) + +/* Special FP constants. */ +#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000)) +#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000)) +#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000)) + +/* Special 128 bit SIMD constants. */ +#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS)) +#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG)) + +/* Access to constants. */ +LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir); + +/* Convert IR operand types. */ +LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr); +LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr); +LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr); + +/* Miscellaneous IR ops. */ +LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op); +LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op); +LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref); + +/* Emit IR instructions with on-the-fly optimizations. */ +LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim); + +/* Special return values for the fold functions. */ +enum { + NEXTFOLD, /* Couldn't fold, pass on. */ + RETRYFOLD, /* Retry fold with modified fins. */ + KINTFOLD, /* Return ref for int constant in fins->i. */ + FAILFOLD, /* Guard would always fail. */ + DROPFOLD, /* Guard eliminated. */ + MAX_FOLD +}; + +#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD) +#define INT64FOLD(k) (lj_ir_kint64(J, (k))) +#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond)) +#define LEFTFOLD (J->fold.ins.op1) +#define RIGHTFOLD (J->fold.ins.op2) +#define CSEFOLD (lj_opt_cse(J)) +#define EMITFOLD (lj_ir_emit(J)) + +/* Load/store forwarding. 
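** Before the forwarding interface, a note on the special FP constants above:
** they are plain IEEE-754 bit patterns. U64x(3ff00000,00000000) is 1.0, and
** U64x(43380000,00000000) is 2^52+2^51, the bias behind the classic
** add-then-read-the-low-word number-to-integer trick used for TOBIT. A small
** standalone check (standard C only): */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  union { double d; uint64_t u; } one, bit;
  one.u = 0x3ff0000000000000ULL;          /* lj_ir_knum_one bit pattern. */
  printf("%g\n", one.d);                  /* 1 */

  bit.d = 6755399441055744.0 + 1234.0;    /* 2^52+2^51 (tobit bias) + x. */
  printf("%u\n", (unsigned)(uint32_t)bit.u);  /* 1234: x sits in the low 32 bits. */
  return 0;
}

/* Load/store forwarding: forwards values from earlier stores to later loads
** of the same location.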
*/ +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J); +LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J); +LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim); +LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref); + +/* Dead-store elimination. */ +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J); + +/* Narrowing. */ +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key); +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr); +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr); +#if LJ_HASFFI +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key); +#endif +LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc, + TValue *vb, TValue *vc, IROp op); +LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc); +LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc); +LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc); +LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase); + +/* Optimization passes. */ +LJ_FUNC void lj_opt_dce(jit_State *J); +LJ_FUNC int lj_opt_loop(jit_State *J); +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) +LJ_FUNC void lj_opt_split(jit_State *J); +#else +#define lj_opt_split(J) UNUSED(J) +#endif +LJ_FUNC void lj_opt_sink(jit_State *J); + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_jit.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_jit.h new file mode 100644 index 00000000000..1df56cae55e --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_jit.h @@ -0,0 +1,440 @@ +/* +** Common definitions for the JIT compiler. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_JIT_H +#define _LJ_JIT_H + +#include "lj_obj.h" +#include "lj_ir.h" + +/* JIT engine flags. */ +#define JIT_F_ON 0x00000001 + +/* CPU-specific JIT engine flags. */ +#if LJ_TARGET_X86ORX64 +#define JIT_F_SSE2 0x00000010 +#define JIT_F_SSE3 0x00000020 +#define JIT_F_SSE4_1 0x00000040 +#define JIT_F_PREFER_IMUL 0x00000080 +#define JIT_F_LEA_AGU 0x00000100 + +/* Names for the CPU-specific flags. Must match the order above. */ +#define JIT_F_CPU_FIRST JIT_F_SSE2 +#define JIT_F_CPUSTRING "\4SSE2\4SSE3\6SSE4.1\3AMD\4ATOM" +#elif LJ_TARGET_ARM +#define JIT_F_ARMV6_ 0x00000010 +#define JIT_F_ARMV6T2_ 0x00000020 +#define JIT_F_ARMV7 0x00000040 +#define JIT_F_VFPV2 0x00000080 +#define JIT_F_VFPV3 0x00000100 + +#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7) +#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7) +#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3) + +/* Names for the CPU-specific flags. Must match the order above. */ +#define JIT_F_CPU_FIRST JIT_F_ARMV6_ +#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3" +#elif LJ_TARGET_PPC +#define JIT_F_SQRT 0x00000010 +#define JIT_F_ROUND 0x00000020 + +/* Names for the CPU-specific flags. Must match the order above. 
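** These *_CPUSTRING lists (and JIT_F_OPTSTRING and JIT_PARAMDEF below) share
** one encoding: each name is preceded by a single length byte, written as an
** octal escape. A small illustrative decoder makes the format obvious: */

#include <stdio.h>

/* Print each length-prefixed name on its own line. */
static void demo_print_names(const char *s)
{
  while (*s) {
    int len = (unsigned char)*s++;   /* One length byte...              */
    printf("%.*s\n", len, s);        /* ...followed by that many chars. */
    s += len;
  }
}

int main(void)
{
  demo_print_names("\4SSE2\4SSE3\6SSE4.1");   /* SSE2, SSE3, SSE4.1 */
  return 0;
}

/* The PPC names, in the same encoding: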
*/ +#define JIT_F_CPU_FIRST JIT_F_SQRT +#define JIT_F_CPUSTRING "\4SQRT\5ROUND" +#elif LJ_TARGET_MIPS +#define JIT_F_MIPS32R2 0x00000010 + +/* Names for the CPU-specific flags. Must match the order above. */ +#define JIT_F_CPU_FIRST JIT_F_MIPS32R2 +#define JIT_F_CPUSTRING "\010MIPS32R2" +#else +#define JIT_F_CPU_FIRST 0 +#define JIT_F_CPUSTRING "" +#endif + +/* Optimization flags. */ +#define JIT_F_OPT_MASK 0x0fff0000 + +#define JIT_F_OPT_FOLD 0x00010000 +#define JIT_F_OPT_CSE 0x00020000 +#define JIT_F_OPT_DCE 0x00040000 +#define JIT_F_OPT_FWD 0x00080000 +#define JIT_F_OPT_DSE 0x00100000 +#define JIT_F_OPT_NARROW 0x00200000 +#define JIT_F_OPT_LOOP 0x00400000 +#define JIT_F_OPT_ABC 0x00800000 +#define JIT_F_OPT_SINK 0x01000000 +#define JIT_F_OPT_FUSE 0x02000000 + +/* Optimizations names for -O. Must match the order above. */ +#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD +#define JIT_F_OPTSTRING \ + "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse" + +/* Optimization levels set a fixed combination of flags. */ +#define JIT_F_OPT_0 0 +#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE) +#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP) +#define JIT_F_OPT_3 (JIT_F_OPT_2|\ + JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE) +#define JIT_F_OPT_DEFAULT JIT_F_OPT_3 + +#if LJ_TARGET_WINDOWS || LJ_64 +/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */ +#define JIT_P_sizemcode_DEFAULT 64 +#else +/* Could go as low as 4K, but the mmap() overhead would be rather high. */ +#define JIT_P_sizemcode_DEFAULT 32 +#endif + +/* Optimization parameters and their defaults. Length is a char in octal! */ +#define JIT_PARAMDEF(_) \ + _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \ + _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \ + _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \ + _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \ + _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \ + _(\011, minstitch, 0) /* Min. # of IR ins for a stitched trace. */ \ + \ + _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \ + _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \ + _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \ + \ + _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \ + _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \ + _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \ + _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \ + \ + /* Size of each machine code area (in KBytes). */ \ + _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \ + /* Max. total size of all machine code areas (in KBytes). */ \ + _(\010, maxmcode, 512) \ + /* End of list. */ + +enum { +#define JIT_PARAMENUM(len, name, value) JIT_P_##name, +JIT_PARAMDEF(JIT_PARAMENUM) +#undef JIT_PARAMENUM + JIT_P__MAX +}; + +#define JIT_PARAMSTR(len, name, value) #len #name +#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR) + +/* Trace compiler state. */ +typedef enum { + LJ_TRACE_IDLE, /* Trace compiler idle. */ + LJ_TRACE_ACTIVE = 0x10, + LJ_TRACE_RECORD, /* Bytecode recording active. */ + LJ_TRACE_START, /* New trace started. */ + LJ_TRACE_END, /* End of trace. */ + LJ_TRACE_ASM, /* Assemble trace. */ + LJ_TRACE_ERR /* Trace aborted with error. */ +} TraceState; + +/* Post-processing action. */ +typedef enum { + LJ_POST_NONE, /* No action. 
*/ + LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */ + LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */ + LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */ + LJ_POST_FIXBOOL, /* Fixup boolean result. */ + LJ_POST_FIXCONST, /* Fixup constant results. */ + LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */ +} PostProc; + +/* Machine code type. */ +#if LJ_TARGET_X86ORX64 +typedef uint8_t MCode; +#else +typedef uint32_t MCode; +#endif + +/* Stack snapshot header. */ +typedef struct SnapShot { + uint16_t mapofs; /* Offset into snapshot map. */ + IRRef1 ref; /* First IR ref for this snapshot. */ + uint8_t nslots; /* Number of valid slots. */ + uint8_t topslot; /* Maximum frame extent. */ + uint8_t nent; /* Number of compressed entries. */ + uint8_t count; /* Count of taken exits for this snapshot. */ +} SnapShot; + +#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */ + +/* Compressed snapshot entry. */ +typedef uint32_t SnapEntry; + +#define SNAP_FRAME 0x010000 /* Frame slot. */ +#define SNAP_CONT 0x020000 /* Continuation slot. */ +#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */ +#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */ +LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME); +LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT); + +#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref)) +#define SNAP_TR(slot, tr) \ + (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK))) +#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc)) +#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz)) +#define snap_ref(sn) ((sn) & 0xffff) +#define snap_slot(sn) ((BCReg)((sn) >> 24)) +#define snap_isframe(sn) ((sn) & SNAP_FRAME) +#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn)) +#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref)) + +/* Snapshot and exit numbers. */ +typedef uint32_t SnapNo; +typedef uint32_t ExitNo; + +/* Trace number. */ +typedef uint32_t TraceNo; /* Used to pass around trace numbers. */ +typedef uint16_t TraceNo1; /* Stored trace number. */ + +/* Type of link. ORDER LJ_TRLINK */ +typedef enum { + LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */ + LJ_TRLINK_ROOT, /* Link to other root trace. */ + LJ_TRLINK_LOOP, /* Loop to same trace. */ + LJ_TRLINK_TAILREC, /* Tail-recursion. */ + LJ_TRLINK_UPREC, /* Up-recursion. */ + LJ_TRLINK_DOWNREC, /* Down-recursion. */ + LJ_TRLINK_INTERP, /* Fallback to interpreter. */ + LJ_TRLINK_RETURN, /* Return to interpreter. */ + LJ_TRLINK_STITCH /* Trace stitching. */ +} TraceLink; + +/* Trace object. */ +typedef struct GCtrace { + GCHeader; + uint8_t topslot; /* Top stack slot already checked to be allocated. */ + uint8_t linktype; /* Type of link. */ + IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */ +#if LJ_GC64 + uint32_t unused_gc64; +#endif + GCRef gclist; + IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */ + IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */ + uint16_t nsnap; /* Number of snapshots. */ + uint16_t nsnapmap; /* Number of snapshot map elements. */ + SnapShot *snap; /* Snapshot array. */ + SnapEntry *snapmap; /* Snapshot map. */ + GCRef startpt; /* Starting prototype. */ + MRef startpc; /* Bytecode PC of starting instruction. */ + BCIns startins; /* Original bytecode of starting instruction. */ + MSize szmcode; /* Size of machine code. */ + MCode *mcode; /* Start of machine code. */ + MSize mcloop; /* Offset of loop start in machine code. 
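Each compressed SnapEntry packs the stack slot into bits 24-31, the flag bits into bits 16-23 and the 16-bit IR reference into the low half, which is exactly what the SNAP()/snap_ref()/snap_slot() macros express. A worked example with arbitrary values; the macros are repeated here in slightly simplified form (no BCReg cast):

#include <assert.h>
#include <stdint.h>

typedef uint32_t SnapEntry;
#define SNAP_FRAME 0x010000
#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
#define snap_ref(sn)  ((sn) & 0xffff)
#define snap_slot(sn) ((sn) >> 24)

int main(void)
{
  SnapEntry sn = SNAP(3, SNAP_FRAME, 0x8005); /* Slot 3, frame flag, IR ref 0x8005. */
  assert(sn == 0x03018005u);                  /* 0x03, 0x01 and 0x8005 packed together. */
  assert(snap_slot(sn) == 3 && snap_ref(sn) == 0x8005);
  assert((sn & SNAP_FRAME) != 0);             /* What snap_isframe() tests. */
  return 0;
}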
*/ + uint16_t nchild; /* Number of child traces (root trace only). */ + uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */ + TraceNo1 traceno; /* Trace number. */ + TraceNo1 link; /* Linked trace (or self for loops). */ + TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */ + TraceNo1 nextroot; /* Next root trace for same prototype. */ + TraceNo1 nextside; /* Next side trace of same root trace. */ + uint8_t sinktags; /* Trace has SINK tags. */ + uint8_t unused1; +#ifdef LUAJIT_USE_GDBJIT + void *gdbjit_entry; /* GDB JIT entry. */ +#endif +} GCtrace; + +#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o)) +#define traceref(J, n) \ + check_exp((n)>0 && (MSize)(n)sizetrace, (GCtrace *)gcref(J->trace[(n)])) + +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist)); + +static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap) +{ + if (snap+1 == &T->snap[T->nsnap]) + return T->nsnapmap; + else + return (snap+1)->mapofs; +} + +/* Round-robin penalty cache for bytecodes leading to aborted traces. */ +typedef struct HotPenalty { + MRef pc; /* Starting bytecode PC. */ + uint16_t val; /* Penalty value, i.e. hotcount start. */ + uint16_t reason; /* Abort reason (really TraceErr). */ +} HotPenalty; + +#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */ +#define PENALTY_MIN (36*2) /* Minimum penalty value. */ +#define PENALTY_MAX 60000 /* Maximum penalty value. */ +#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */ + +/* Round-robin backpropagation cache for narrowing conversions. */ +typedef struct BPropEntry { + IRRef1 key; /* Key: original reference. */ + IRRef1 val; /* Value: reference after conversion. */ + IRRef mode; /* Mode for this entry (currently IRCONV_*). */ +} BPropEntry; + +/* Number of slots for the backpropagation cache. Must be a power of 2. */ +#define BPROP_SLOTS 16 + +/* Scalar evolution analysis cache. */ +typedef struct ScEvEntry { + MRef pc; /* Bytecode PC of FORI. */ + IRRef1 idx; /* Index reference. */ + IRRef1 start; /* Constant start reference. */ + IRRef1 stop; /* Constant stop reference. */ + IRRef1 step; /* Constant step reference. */ + IRType1 t; /* Scalar type. */ + uint8_t dir; /* Direction. 1: +, 0: -. */ +} ScEvEntry; + +/* Reverse bytecode map (IRRef -> PC). Only for selected instructions. */ +typedef struct RBCHashEntry { + MRef pc; /* Bytecode PC. */ + GCRef pt; /* Prototype. */ + IRRef ref; /* IR reference. */ +} RBCHashEntry; + +/* Number of slots in the reverse bytecode hash table. Must be a power of 2. */ +#define RBCHASH_SLOTS 8 + +/* 128 bit SIMD constants. */ +enum { + LJ_KSIMD_ABS, + LJ_KSIMD_NEG, + LJ_KSIMD__MAX +}; + +/* Get 16 byte aligned pointer to SIMD constant. */ +#define LJ_KSIMD(J, n) \ + ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15)) + +/* Set/reset flag to activate the SPLIT pass for the current trace. */ +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) +#define lj_needsplit(J) (J->needsplit = 1) +#define lj_resetsplit(J) (J->needsplit = 0) +#else +#define lj_needsplit(J) UNUSED(J) +#define lj_resetsplit(J) UNUSED(J) +#endif + +/* Fold state is used to fold instructions on-the-fly. */ +typedef struct FoldState { + IRIns ins; /* Currently emitted instruction. */ + IRIns left; /* Instruction referenced by left operand. */ + IRIns right; /* Instruction referenced by right operand. */ +} FoldState; + +/* JIT compiler state. */ +typedef struct jit_State { + GCtrace cur; /* Current trace. 
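The LJ_KSIMD() macro relies on the standard round-up-to-16 trick, (addr + 15) & ~15; keeping two TValue slots per constant plus one spare (LJ_KSIMD__MAX*2+1 in jit_State below) guarantees an aligned 16-byte window always fits inside the array. The trick in isolation, with arbitrary example addresses:

#include <assert.h>
#include <stdint.h>

/* Round an address up to the next 16-byte boundary, as LJ_KSIMD() does. */
static uintptr_t align16(uintptr_t addr)
{
  return (addr + 15) & ~(uintptr_t)15;
}

int main(void)
{
  assert(align16(0x1000) == 0x1000);   /* Already aligned: unchanged. */
  assert(align16(0x1009) == 0x1010);   /* Rounded up to the next boundary. */
  return 0;
}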
*/ + + lua_State *L; /* Current Lua state. */ + const BCIns *pc; /* Current PC. */ + GCfunc *fn; /* Current function. */ + GCproto *pt; /* Current prototype. */ + TRef *base; /* Current frame base, points into J->slots. */ + + uint32_t flags; /* JIT engine flags. */ + BCReg maxslot; /* Relative to baseslot. */ + BCReg baseslot; /* Current frame base, offset into J->slots. */ + + uint8_t mergesnap; /* Allowed to merge with next snapshot. */ + uint8_t needsnap; /* Need snapshot before recording next bytecode. */ + IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */ + uint8_t bcskip; /* Number of bytecode instructions to skip. */ + + FoldState fold; /* Fold state. */ + + const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */ + MSize bc_extent; /* Extent of the range. */ + + TraceState state; /* Trace compiler state. */ + + int32_t instunroll; /* Unroll counter for instable loops. */ + int32_t loopunroll; /* Unroll counter for loop ops in side traces. */ + int32_t tailcalled; /* Number of successive tailcalls. */ + int32_t framedepth; /* Current frame depth. */ + int32_t retdepth; /* Return frame depth (count of RETF). */ + + MRef k64; /* Pointer to chained array of 64 bit constants. */ + TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */ + + IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */ + IRRef irtoplim; /* Upper limit of instuction buffer (biased). */ + IRRef irbotlim; /* Lower limit of instuction buffer (biased). */ + IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */ + + MSize sizesnap; /* Size of temp. snapshot buffer. */ + SnapShot *snapbuf; /* Temp. snapshot buffer. */ + SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */ + MSize sizesnapmap; /* Size of temp. snapshot map buffer. */ + + PostProc postproc; /* Required post-processing after execution. */ +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) + uint8_t needsplit; /* Need SPLIT pass. */ +#endif + uint8_t retryrec; /* Retry recording. */ + + GCRef *trace; /* Array of traces. */ + TraceNo freetrace; /* Start of scan for next free trace. */ + MSize sizetrace; /* Size of trace array. */ + + IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */ + TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */ + + int32_t param[JIT_P__MAX]; /* JIT engine parameters. */ + + MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */ + + HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */ + uint32_t penaltyslot; /* Round-robin index into penalty slots. */ + uint32_t prngstate; /* PRNG state. */ + +#ifdef LUAJIT_ENABLE_TABLE_BUMP + RBCHashEntry rbchash[RBCHASH_SLOTS]; /* Reverse bytecode map. */ +#endif + + BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */ + uint32_t bpropslot; /* Round-robin index into bpropcache slots. */ + + ScEvEntry scev; /* Scalar evolution analysis cache slots. */ + + const BCIns *startpc; /* Bytecode PC of starting instruction. */ + TraceNo parent; /* Parent of current side trace (0 for root traces). */ + ExitNo exitno; /* Exit number in parent of current side trace. */ + + BCIns *patchpc; /* PC for pending re-patch. */ + BCIns patchins; /* Instruction for pending re-patch. */ + + int mcprot; /* Protection of current mcode area. */ + MCode *mcarea; /* Base of current mcode area. */ + MCode *mctop; /* Top of current mcode area. */ + MCode *mcbot; /* Bottom of current mcode area. */ + size_t szmcarea; /* Size of current mcode area. 
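The prngstate field feeds LJ_PRNG_BITS(), the trivial linear congruential generator defined right after this structure; it only has to jitter hot-loop penalty values, and the top `bits` bits are returned because the low bits of such an LCG cycle with very short periods. The same recurrence in standalone form (the wrapper name is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Same LCG step as LJ_PRNG_BITS(): multiply, add, keep the top `bits` bits. */
static uint32_t prng_bits(uint32_t *state, int bits)
{
  *state = *state * 1103515245u + 12345u;
  return *state >> (32 - bits);
}

int main(void)
{
  uint32_t state = 1;
  int i;
  for (i = 0; i < 4; i++)
    printf("%u\n", prng_bits(&state, 4));   /* Four 4-bit values in 0..15. */
  return 0;
}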
*/ + size_t szallmcarea; /* Total size of all allocated mcode areas. */ + + TValue errinfo; /* Additional info element for trace errors. */ + +#if LJ_HASPROFILE + GCproto *prev_pt; /* Previous prototype. */ + BCLine prev_line; /* Previous line. */ + int prof_mode; /* Profiling mode: 0, 'f', 'l'. */ +#endif +} +#if LJ_TARGET_ARM +LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */ +#endif +jit_State; + +/* Trivial PRNG e.g. used for penalty randomization. */ +static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) +{ + /* Yes, this LCG is very weak, but that doesn't matter for our use case. */ + J->prngstate = J->prngstate * 1103515245 + 12345; + return J->prngstate >> (32-bits); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_lex.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_lex.c new file mode 100644 index 00000000000..8409cd78a71 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_lex.c @@ -0,0 +1,482 @@ +/* +** Lexical analyzer. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_lex_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#if LJ_HASFFI +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lualib.h" +#endif +#include "lj_state.h" +#include "lj_lex.h" +#include "lj_parse.h" +#include "lj_char.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" + +/* Lua lexer token names. */ +static const char *const tokennames[] = { +#define TKSTR1(name) #name, +#define TKSTR2(name, sym) #sym, +TKDEF(TKSTR1, TKSTR2) +#undef TKSTR1 +#undef TKSTR2 + NULL +}; + +/* -- Buffer handling ----------------------------------------------------- */ + +#define LEX_EOF (-1) +#define lex_iseol(ls) (ls->c == '\n' || ls->c == '\r') + +/* Get more input from reader. */ +static LJ_NOINLINE LexChar lex_more(LexState *ls) +{ + size_t sz; + const char *p = ls->rfunc(ls->L, ls->rdata, &sz); + if (p == NULL || sz == 0) return LEX_EOF; + ls->pe = p + sz; + ls->p = p + 1; + return (LexChar)(uint8_t)p[0]; +} + +/* Get next character. */ +static LJ_AINLINE LexChar lex_next(LexState *ls) +{ + return (ls->c = ls->p < ls->pe ? (LexChar)(uint8_t)*ls->p++ : lex_more(ls)); +} + +/* Save character. */ +static LJ_AINLINE void lex_save(LexState *ls, LexChar c) +{ + lj_buf_putb(&ls->sb, c); +} + +/* Save previous character and get next character. */ +static LJ_AINLINE LexChar lex_savenext(LexState *ls) +{ + lex_save(ls, ls->c); + return lex_next(ls); +} + +/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */ +static void lex_newline(LexState *ls) +{ + LexChar old = ls->c; + lua_assert(lex_iseol(ls)); + lex_next(ls); /* Skip "\n" or "\r". */ + if (lex_iseol(ls) && ls->c != old) lex_next(ls); /* Skip "\n\r" or "\r\n". */ + if (++ls->linenumber >= LJ_MAX_LINE) + lj_lex_error(ls, ls->tok, LJ_ERR_XLINES); +} + +/* -- Scanner for terminals ----------------------------------------------- */ + +/* Parse a number literal. */ +static void lex_number(LexState *ls, TValue *tv) +{ + StrScanFmt fmt; + LexChar c, xp = 'e'; + lua_assert(lj_char_isdigit(ls->c)); + if ((c = ls->c) == '0' && (lex_savenext(ls) | 0x20) == 'x') + xp = 'p'; + while (lj_char_isident(ls->c) || ls->c == '.' 
|| + ((ls->c == '-' || ls->c == '+') && (c | 0x20) == xp)) { + c = ls->c; + lex_savenext(ls); + } + lex_save(ls, '\0'); + fmt = lj_strscan_scan((const uint8_t *)sbufB(&ls->sb), tv, + (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) | + (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0)); + if (LJ_DUALNUM && fmt == STRSCAN_INT) { + setitype(tv, LJ_TISNUM); + } else if (fmt == STRSCAN_NUM) { + /* Already in correct format. */ +#if LJ_HASFFI + } else if (fmt != STRSCAN_ERROR) { + lua_State *L = ls->L; + GCcdata *cd; + lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG); + if (!ctype_ctsG(G(L))) { + ptrdiff_t oldtop = savestack(L, L->top); + luaopen_ffi(L); /* Load FFI library on-demand. */ + L->top = restorestack(L, oldtop); + } + if (fmt == STRSCAN_IMAG) { + cd = lj_cdata_new_(L, CTID_COMPLEX_DOUBLE, 2*sizeof(double)); + ((double *)cdataptr(cd))[0] = 0; + ((double *)cdataptr(cd))[1] = numV(tv); + } else { + cd = lj_cdata_new_(L, fmt==STRSCAN_I64 ? CTID_INT64 : CTID_UINT64, 8); + *(uint64_t *)cdataptr(cd) = tv->u64; + } + lj_parse_keepcdata(ls, tv, cd); +#endif + } else { + lua_assert(fmt == STRSCAN_ERROR); + lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER); + } +} + +/* Skip equal signs for "[=...=[" and "]=...=]" and return their count. */ +static int lex_skipeq(LexState *ls) +{ + int count = 0; + LexChar s = ls->c; + lua_assert(s == '[' || s == ']'); + while (lex_savenext(ls) == '=') + count++; + return (ls->c == s) ? count : (-count) - 1; +} + +/* Parse a long string or long comment (tv set to NULL). */ +static void lex_longstring(LexState *ls, TValue *tv, int sep) +{ + lex_savenext(ls); /* Skip second '['. */ + if (lex_iseol(ls)) /* Skip initial newline. */ + lex_newline(ls); + for (;;) { + switch (ls->c) { + case LEX_EOF: + lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM); + break; + case ']': + if (lex_skipeq(ls) == sep) { + lex_savenext(ls); /* Skip second ']'. */ + goto endloop; + } + break; + case '\n': + case '\r': + lex_save(ls, '\n'); + lex_newline(ls); + if (!tv) lj_buf_reset(&ls->sb); /* Don't waste space for comments. */ + break; + default: + lex_savenext(ls); + break; + } + } endloop: + if (tv) { + GCstr *str = lj_parse_keepstr(ls, sbufB(&ls->sb) + (2 + (MSize)sep), + sbuflen(&ls->sb) - 2*(2 + (MSize)sep)); + setstrV(ls->L, tv, str); + } +} + +/* Parse a string. */ +static void lex_string(LexState *ls, TValue *tv) +{ + LexChar delim = ls->c; /* Delimiter is '\'' or '"'. */ + lex_savenext(ls); + while (ls->c != delim) { + switch (ls->c) { + case LEX_EOF: + lj_lex_error(ls, TK_eof, LJ_ERR_XSTR); + continue; + case '\n': + case '\r': + lj_lex_error(ls, TK_string, LJ_ERR_XSTR); + continue; + case '\\': { + LexChar c = lex_next(ls); /* Skip the '\\'. */ + switch (c) { + case 'a': c = '\a'; break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'v': c = '\v'; break; + case 'x': /* Hexadecimal escape '\xXX'. */ + c = (lex_next(ls) & 15u) << 4; + if (!lj_char_isdigit(ls->c)) { + if (!lj_char_isxdigit(ls->c)) goto err_xesc; + c += 9 << 4; + } + c += (lex_next(ls) & 15u); + if (!lj_char_isdigit(ls->c)) { + if (!lj_char_isxdigit(ls->c)) goto err_xesc; + c += 9; + } + break; + case 'z': /* Skip whitespace. 
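The '\x' escape above decodes hex digits without a lookup table: in ASCII the low nibble of '0'..'9' already equals the digit value, and the low nibble of 'a'..'f'/'A'..'F' is 1..6, so adding 9 turns it into 10..15 (non-hex characters have already been rejected via lj_char_isxdigit at that point). The same idea as a standalone helper, shown only to make the trick explicit:

#include <assert.h>
#include <ctype.h>

/* Decode one hex digit the way lex_string() handles '\x' escapes.
** The caller must have checked isxdigit(c) already. */
static int hexdigit(int c)
{
  int v = c & 15;               /* Exact for '0'..'9'. */
  if (!isdigit(c)) v += 9;      /* Letters: low nibble 1..6 becomes 10..15. */
  return v;
}

int main(void)
{
  assert(hexdigit('7') == 7);
  assert(hexdigit('b') == 11 && hexdigit('B') == 11);
  assert(hexdigit('F') == 15);
  return 0;
}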
*/ + lex_next(ls); + while (lj_char_isspace(ls->c)) + if (lex_iseol(ls)) lex_newline(ls); else lex_next(ls); + continue; + case '\n': case '\r': lex_save(ls, '\n'); lex_newline(ls); continue; + case '\\': case '\"': case '\'': break; + case LEX_EOF: continue; + default: + if (!lj_char_isdigit(c)) + goto err_xesc; + c -= '0'; /* Decimal escape '\ddd'. */ + if (lj_char_isdigit(lex_next(ls))) { + c = c*10 + (ls->c - '0'); + if (lj_char_isdigit(lex_next(ls))) { + c = c*10 + (ls->c - '0'); + if (c > 255) { + err_xesc: + lj_lex_error(ls, TK_string, LJ_ERR_XESC); + } + lex_next(ls); + } + } + lex_save(ls, c); + continue; + } + lex_save(ls, c); + lex_next(ls); + continue; + } + default: + lex_savenext(ls); + break; + } + } + lex_savenext(ls); /* Skip trailing delimiter. */ + setstrV(ls->L, tv, + lj_parse_keepstr(ls, sbufB(&ls->sb)+1, sbuflen(&ls->sb)-2)); +} + +/* -- Main lexical scanner ------------------------------------------------ */ + +/* Get next lexical token. */ +static LexToken lex_scan(LexState *ls, TValue *tv) +{ + lj_buf_reset(&ls->sb); + for (;;) { + if (lj_char_isident(ls->c)) { + GCstr *s; + if (lj_char_isdigit(ls->c)) { /* Numeric literal. */ + lex_number(ls, tv); + return TK_number; + } + /* Identifier or reserved word. */ + do { + lex_savenext(ls); + } while (lj_char_isident(ls->c)); + s = lj_parse_keepstr(ls, sbufB(&ls->sb), sbuflen(&ls->sb)); + setstrV(ls->L, tv, s); + if (s->reserved > 0) /* Reserved word? */ + return TK_OFS + s->reserved; + return TK_name; + } + switch (ls->c) { + case '\n': + case '\r': + lex_newline(ls); + continue; + case ' ': + case '\t': + case '\v': + case '\f': + lex_next(ls); + continue; + case '-': + lex_next(ls); + if (ls->c != '-') return '-'; + lex_next(ls); + if (ls->c == '[') { /* Long comment "--[=*[...]=*]". */ + int sep = lex_skipeq(ls); + lj_buf_reset(&ls->sb); /* `lex_skipeq' may dirty the buffer */ + if (sep >= 0) { + lex_longstring(ls, NULL, sep); + lj_buf_reset(&ls->sb); + continue; + } + } + /* Short comment "--.*\n". */ + while (!lex_iseol(ls) && ls->c != LEX_EOF) + lex_next(ls); + continue; + case '[': { + int sep = lex_skipeq(ls); + if (sep >= 0) { + lex_longstring(ls, tv, sep); + return TK_string; + } else if (sep == -1) { + return '['; + } else { + lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM); + continue; + } + } + case '=': + lex_next(ls); + if (ls->c != '=') return '='; else { lex_next(ls); return TK_eq; } + case '<': + lex_next(ls); + if (ls->c != '=') return '<'; else { lex_next(ls); return TK_le; } + case '>': + lex_next(ls); + if (ls->c != '=') return '>'; else { lex_next(ls); return TK_ge; } + case '~': + lex_next(ls); + if (ls->c != '=') return '~'; else { lex_next(ls); return TK_ne; } + case ':': + lex_next(ls); + if (ls->c != ':') return ':'; else { lex_next(ls); return TK_label; } + case '"': + case '\'': + lex_string(ls, tv); + return TK_string; + case '.': + if (lex_savenext(ls) == '.') { + lex_next(ls); + if (ls->c == '.') { + lex_next(ls); + return TK_dots; /* ... */ + } + return TK_concat; /* .. */ + } else if (!lj_char_isdigit(ls->c)) { + return '.'; + } else { + lex_number(ls, tv); + return TK_number; + } + case LEX_EOF: + return TK_eof; + default: { + LexChar c = ls->c; + lex_next(ls); + return c; /* Single-char tokens (+ - / ...). */ + } + } + } +} + +/* -- Lexer API ----------------------------------------------------------- */ + +/* Setup lexer state. 
*/ +int lj_lex_setup(lua_State *L, LexState *ls) +{ + int header = 0; + ls->L = L; + ls->fs = NULL; + ls->pe = ls->p = NULL; + ls->vstack = NULL; + ls->sizevstack = 0; + ls->vtop = 0; + ls->bcstack = NULL; + ls->sizebcstack = 0; + ls->tok = 0; + ls->lookahead = TK_eof; /* No look-ahead token. */ + ls->linenumber = 1; + ls->lastline = 1; + lex_next(ls); /* Read-ahead first char. */ + if (ls->c == 0xef && ls->p + 2 <= ls->pe && (uint8_t)ls->p[0] == 0xbb && + (uint8_t)ls->p[1] == 0xbf) { /* Skip UTF-8 BOM (if buffered). */ + ls->p += 2; + lex_next(ls); + header = 1; + } + if (ls->c == '#') { /* Skip POSIX #! header line. */ + do { + lex_next(ls); + if (ls->c == LEX_EOF) return 0; + } while (!lex_iseol(ls)); + lex_newline(ls); + header = 1; + } + if (ls->c == LUA_SIGNATURE[0]) { /* Bytecode dump. */ + if (header) { + /* + ** Loading bytecode with an extra header is disabled for security + ** reasons. This may circumvent the usual check for bytecode vs. + ** Lua code by looking at the first char. Since this is a potential + ** security violation no attempt is made to echo the chunkname either. + */ + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCBAD)); + lj_err_throw(L, LUA_ERRSYNTAX); + } + return 1; + } + return 0; +} + +/* Cleanup lexer state. */ +void lj_lex_cleanup(lua_State *L, LexState *ls) +{ + global_State *g = G(L); + lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine); + lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo); + lj_buf_free(g, &ls->sb); +} + +/* Return next lexical token. */ +void lj_lex_next(LexState *ls) +{ + ls->lastline = ls->linenumber; + if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */ + ls->tok = lex_scan(ls, &ls->tokval); /* Get next token. */ + } else { /* Otherwise return lookahead token. */ + ls->tok = ls->lookahead; + ls->lookahead = TK_eof; + ls->tokval = ls->lookaheadval; + } +} + +/* Look ahead for the next token. */ +LexToken lj_lex_lookahead(LexState *ls) +{ + lua_assert(ls->lookahead == TK_eof); + ls->lookahead = lex_scan(ls, &ls->lookaheadval); + return ls->lookahead; +} + +/* Convert token to string. */ +const char *lj_lex_token2str(LexState *ls, LexToken tok) +{ + if (tok > TK_OFS) + return tokennames[tok-TK_OFS-1]; + else if (!lj_char_iscntrl(tok)) + return lj_strfmt_pushf(ls->L, "%c", tok); + else + return lj_strfmt_pushf(ls->L, "char(%d)", tok); +} + +/* Lexer error. */ +void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...) +{ + const char *tokstr; + va_list argp; + if (tok == 0) { + tokstr = NULL; + } else if (tok == TK_name || tok == TK_string || tok == TK_number) { + lex_save(ls, '\0'); + tokstr = sbufB(&ls->sb); + } else { + tokstr = lj_lex_token2str(ls, tok); + } + va_start(argp, em); + lj_err_lex(ls->L, ls->chunkname, tokstr, ls->linenumber, em, argp); + va_end(argp); +} + +/* Initialize strings for reserved words. */ +void lj_lex_init(lua_State *L) +{ + uint32_t i; + for (i = 0; i < TK_RESERVED; i++) { + GCstr *s = lj_str_newz(L, tokennames[i]); + fixstring(s); /* Reserved words are never collected. */ + s->reserved = (uint8_t)(i+1); + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_lex.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_lex.h new file mode 100644 index 00000000000..acd2285d789 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_lex.h @@ -0,0 +1,86 @@ +/* +** Lexical analyzer. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_LEX_H +#define _LJ_LEX_H + +#include + +#include "lj_obj.h" +#include "lj_err.h" + +/* Lua lexer tokens. 
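lj_lex_next() and lj_lex_lookahead() implement exactly one token of lookahead: TK_eof in ls->lookahead means "nothing buffered", lookahead fills the buffer without consuming it, and the next lj_lex_next() drains the buffer instead of scanning. A toy model of that protocol with a stub scanner; all names and the sentinel value here are illustrative, not LuaJIT API:

#include <assert.h>

/* Toy model of the one-token lookahead in lj_lex_next()/lj_lex_lookahead().
** scan() is a stub that hands out 100, 101, 102, ... as "tokens". */
enum { NO_TOKEN = -1 };

typedef struct MiniLex { int tok, lookahead, next; } MiniLex;

static int scan(MiniLex *ls) { return ls->next++; }

static void minilex_next(MiniLex *ls)
{
  if (ls->lookahead == NO_TOKEN) {   /* Nothing buffered? Scan a fresh token. */
    ls->tok = scan(ls);
  } else {                           /* Otherwise consume the buffered one. */
    ls->tok = ls->lookahead;
    ls->lookahead = NO_TOKEN;
  }
}

static int minilex_peek(MiniLex *ls)
{
  assert(ls->lookahead == NO_TOKEN); /* Only one token of lookahead exists. */
  return ls->lookahead = scan(ls);
}

int main(void)
{
  MiniLex ls = { NO_TOKEN, NO_TOKEN, 100 };
  minilex_next(&ls);                 /* tok == 100 */
  assert(minilex_peek(&ls) == 101);  /* Buffers 101 without consuming it. */
  minilex_next(&ls);
  assert(ls.tok == 101);             /* The buffered token comes out next. */
  return 0;
}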
*/ +#define TKDEF(_, __) \ + _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \ + _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \ + _(repeat) _(return) _(then) _(true) _(until) _(while) \ + __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \ + __(label, ::) __(number, ) __(name, ) __(string, ) \ + __(eof, ) + +enum { + TK_OFS = 256, +#define TKENUM1(name) TK_##name, +#define TKENUM2(name, sym) TK_##name, +TKDEF(TKENUM1, TKENUM2) +#undef TKENUM1 +#undef TKENUM2 + TK_RESERVED = TK_while - TK_OFS +}; + +typedef int LexChar; /* Lexical character. Unsigned ext. from char. */ +typedef int LexToken; /* Lexical token. */ + +/* Combined bytecode ins/line. Only used during bytecode generation. */ +typedef struct BCInsLine { + BCIns ins; /* Bytecode instruction. */ + BCLine line; /* Line number for this bytecode. */ +} BCInsLine; + +/* Info for local variables. Only used during bytecode generation. */ +typedef struct VarInfo { + GCRef name; /* Local variable name or goto/label name. */ + BCPos startpc; /* First point where the local variable is active. */ + BCPos endpc; /* First point where the local variable is dead. */ + uint8_t slot; /* Variable slot. */ + uint8_t info; /* Variable/goto/label info. */ +} VarInfo; + +/* Lua lexer state. */ +typedef struct LexState { + struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */ + struct lua_State *L; /* Lua state. */ + TValue tokval; /* Current token value. */ + TValue lookaheadval; /* Lookahead token value. */ + const char *p; /* Current position in input buffer. */ + const char *pe; /* End of input buffer. */ + LexChar c; /* Current character. */ + LexToken tok; /* Current token. */ + LexToken lookahead; /* Lookahead token. */ + SBuf sb; /* String buffer for tokens. */ + lua_Reader rfunc; /* Reader callback. */ + void *rdata; /* Reader callback data. */ + BCLine linenumber; /* Input line counter. */ + BCLine lastline; /* Line of last token. */ + GCstr *chunkname; /* Current chunk name (interned string). */ + const char *chunkarg; /* Chunk name argument. */ + const char *mode; /* Allow loading bytecode (b) and/or source text (t). */ + VarInfo *vstack; /* Stack for names and extents of local variables. */ + MSize sizevstack; /* Size of variable stack. */ + MSize vtop; /* Top of variable stack. */ + BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */ + MSize sizebcstack; /* Size of bytecode stack. */ + uint32_t level; /* Syntactical nesting level. */ +} LexState; + +LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls); +LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls); +LJ_FUNC void lj_lex_next(LexState *ls); +LJ_FUNC LexToken lj_lex_lookahead(LexState *ls); +LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken tok); +LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...); +LJ_FUNC void lj_lex_init(lua_State *L); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_lib.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_lib.c new file mode 100644 index 00000000000..b16d0564fa7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_lib.c @@ -0,0 +1,303 @@ +/* +** Library function support. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_lib_c +#define LUA_CORE + +#include "lauxlib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_bc.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" +#include "lj_lex.h" +#include "lj_bcdump.h" +#include "lj_lib.h" + +/* -- Library initialization ---------------------------------------------- */ + +static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize) +{ + if (libname) { + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); + lua_getfield(L, -1, libname); + if (!tvistab(L->top-1)) { + L->top--; + if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL) + lj_err_callerv(L, LJ_ERR_BADMODN, libname); + settabV(L, L->top, tabV(L->top-1)); + L->top++; + lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ + } + L->top--; + settabV(L, L->top-1, tabV(L->top)); + } else { + lua_createtable(L, 0, hsize); + } + return tabV(L->top-1); +} + +static const uint8_t *lib_read_lfunc(lua_State *L, const uint8_t *p, GCtab *tab) +{ + int len = *p++; + GCstr *name = lj_str_new(L, (const char *)p, len); + LexState ls; + GCproto *pt; + GCfunc *fn; + memset(&ls, 0, sizeof(ls)); + ls.L = L; + ls.p = (const char *)(p+len); + ls.pe = (const char *)~(uintptr_t)0; + ls.c = -1; + ls.level = (BCDUMP_F_STRIP|(LJ_BE*BCDUMP_F_BE)); + ls.chunkname = name; + pt = lj_bcread_proto(&ls); + pt->firstline = ~(BCLine)0; + fn = lj_func_newL_empty(L, pt, tabref(L->env)); + /* NOBARRIER: See below for common barrier. */ + setfuncV(L, lj_tab_setstr(L, tab, name), fn); + return (const uint8_t *)ls.p; +} + +void lj_lib_register(lua_State *L, const char *libname, + const uint8_t *p, const lua_CFunction *cf) +{ + GCtab *env = tabref(L->env); + GCfunc *ofn = NULL; + int ffid = *p++; + BCIns *bcff = &L2GG(L)->bcff[*p++]; + GCtab *tab = lib_create_table(L, libname, *p++); + ptrdiff_t tpos = L->top - L->base; + + /* Avoid barriers further down. */ + lj_gc_anybarriert(L, tab); + tab->nomm = 0; + + for (;;) { + uint32_t tag = *p++; + MSize len = tag & LIBINIT_LENMASK; + tag &= LIBINIT_TAGMASK; + if (tag != LIBINIT_STRING) { + const char *name; + MSize nuv = (MSize)(L->top - L->base - tpos); + GCfunc *fn = lj_func_newC(L, nuv, env); + if (nuv) { + L->top = L->base + tpos; + memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv); + } + fn->c.ffid = (uint8_t)(ffid++); + name = (const char *)p; + p += len; + if (tag == LIBINIT_CF) + setmref(fn->c.pc, &G(L)->bc_cfunc_int); + else + setmref(fn->c.pc, bcff++); + if (tag == LIBINIT_ASM_) + fn->c.f = ofn->c.f; /* Copy handler from previous function. */ + else + fn->c.f = *cf++; /* Get cf or handler from C function table. */ + if (len) { + /* NOBARRIER: See above for common barrier. */ + setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn); + } + ofn = fn; + } else { + switch (tag | len) { + case LIBINIT_LUA: + p = lib_read_lfunc(L, p, tab); + break; + case LIBINIT_SET: + L->top -= 2; + if (tvisstr(L->top+1) && strV(L->top+1)->len == 0) + env = tabV(L->top); + else /* NOBARRIER: See above for common barrier. 
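The registration loop in lj_lib_register() consumes a byte stream emitted by buildvm: after three header bytes (fast-function id, index into the bcff dispatch table, hash size for the library table), each entry starts with a byte whose top two bits are the LIBINIT_* tag and whose low six bits are a name length, followed by the name; byte values 0xf9..0xff select the special actions handled in the switch. The decoder below only illustrates that framing on a made-up stream, using the LIBINIT_* values from lj_lib.h; real init data carries more entry kinds and is generated, never hand-written:

#include <stdio.h>
#include <stdint.h>

#define LIBINIT_LENMASK 0x3f
#define LIBINIT_TAGMASK 0xc0
#define LIBINIT_CF      0x00
#define LIBINIT_END     0xff

/* Dump the names in a (made-up) buildvm-style init stream. */
static void dump_names(const uint8_t *p)
{
  for (;;) {
    uint32_t tag = *p++;
    uint32_t len = tag & LIBINIT_LENMASK;
    if (tag == LIBINIT_END) return;
    printf("%s '%.*s'\n",
           (tag & LIBINIT_TAGMASK) == LIBINIT_CF ? "cfunc" : "other",
           (int)len, (const char *)p);
    p += len;
  }
}

int main(void)
{
  static const uint8_t init[] = {
    LIBINIT_CF|3, 'f','o','o',     /* A C function registered as "foo". */
    LIBINIT_CF|3, 'b','a','r',     /* Another one registered as "bar". */
    LIBINIT_END
  };
  dump_names(init);
  return 0;
}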
*/ + copyTV(L, lj_tab_set(L, tab, L->top+1), L->top); + break; + case LIBINIT_NUMBER: + memcpy(&L->top->n, p, sizeof(double)); + L->top++; + p += sizeof(double); + break; + case LIBINIT_COPY: + copyTV(L, L->top, L->top - *p++); + L->top++; + break; + case LIBINIT_LASTCL: + setfuncV(L, L->top++, ofn); + break; + case LIBINIT_FFID: + ffid++; + break; + case LIBINIT_END: + return; + default: + setstrV(L, L->top++, lj_str_new(L, (const char *)p, len)); + p += len; + break; + } + } + } +} + +/* Push internal function on the stack. */ +GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n) +{ + GCfunc *fn; + lua_pushcclosure(L, f, n); + fn = funcV(L->top-1); + fn->c.ffid = (uint8_t)id; + setmref(fn->c.pc, &G(L)->bc_cfunc_int); + return fn; +} + +void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f, GCtab *env) +{ + luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4); + lua_pushcfunction(L, f); + /* NOBARRIER: The function is new (marked white). */ + setgcref(funcV(L->top-1)->c.env, obj2gco(env)); + lua_setfield(L, -2, name); + L->top--; +} + +int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id, const char *name) +{ + GCfunc *fn = lj_lib_pushcf(L, cf, id); + GCtab *t = tabref(curr_func(L)->c.env); /* Reference to parent table. */ + setfuncV(L, lj_tab_setstr(L, t, lj_str_newz(L, name)), fn); + lj_gc_anybarriert(L, t); + setfuncV(L, L->top++, fn); + return 1; +} + +/* -- Type checks --------------------------------------------------------- */ + +TValue *lj_lib_checkany(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (o >= L->top) + lj_err_arg(L, narg, LJ_ERR_NOVAL); + return o; +} + +GCstr *lj_lib_checkstr(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (o < L->top) { + if (LJ_LIKELY(tvisstr(o))) { + return strV(o); + } else if (tvisnumber(o)) { + GCstr *s = lj_strfmt_number(L, o); + setstrV(L, o, s); + return s; + } + } + lj_err_argt(L, narg, LUA_TSTRING); + return NULL; /* unreachable */ +} + +GCstr *lj_lib_optstr(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL; +} + +#if LJ_DUALNUM +void lj_lib_checknumber(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && lj_strscan_numberobj(o))) + lj_err_argt(L, narg, LUA_TNUMBER); +} +#endif + +lua_Number lj_lib_checknum(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && + (tvisnumber(o) || (tvisstr(o) && lj_strscan_num(strV(o), o))))) + lj_err_argt(L, narg, LUA_TNUMBER); + if (LJ_UNLIKELY(tvisint(o))) { + lua_Number n = (lua_Number)intV(o); + setnumV(o, n); + return n; + } else { + return numV(o); + } +} + +int32_t lj_lib_checkint(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && lj_strscan_numberobj(o))) + lj_err_argt(L, narg, LUA_TNUMBER); + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else { + int32_t i = lj_num2int(numV(o)); + if (LJ_DUALNUM) setintV(o, i); + return i; + } +} + +int32_t lj_lib_optint(lua_State *L, int narg, int32_t def) +{ + TValue *o = L->base + narg-1; + return (o < L->top && !tvisnil(o)) ? 
lj_lib_checkint(L, narg) : def; +} + +GCfunc *lj_lib_checkfunc(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && tvisfunc(o))) + lj_err_argt(L, narg, LUA_TFUNCTION); + return funcV(o); +} + +GCtab *lj_lib_checktab(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && tvistab(o))) + lj_err_argt(L, narg, LUA_TTABLE); + return tabV(o); +} + +GCtab *lj_lib_checktabornil(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (o < L->top) { + if (tvistab(o)) + return tabV(o); + else if (tvisnil(o)) + return NULL; + } + lj_err_arg(L, narg, LJ_ERR_NOTABN); + return NULL; /* unreachable */ +} + +int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst) +{ + GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg); + if (s) { + const char *opt = strdata(s); + MSize len = s->len; + int i; + for (i = 0; *(const uint8_t *)lst; i++) { + if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0) + return i; + lst += 1+*(const uint8_t *)lst; + } + lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt); + } + return def; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_lib.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_lib.h new file mode 100644 index 00000000000..3fa7aa17eff --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_lib.h @@ -0,0 +1,115 @@ +/* +** Library function support. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_LIB_H +#define _LJ_LIB_H + +#include "lj_obj.h" + +/* +** A fallback handler is called by the assembler VM if the fast path fails: +** +** - too few arguments: unrecoverable. +** - wrong argument type: recoverable, if coercion succeeds. +** - bad argument value: unrecoverable. +** - stack overflow: recoverable, if stack reallocation succeeds. +** - extra handling: recoverable. +** +** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(), +** lj_err_caller() or lj_err_callermsg(). +** The recoverable cases return 0 or the number of results + 1. +** The assembler VM retries the fast path only if 0 is returned. +** This time the fallback must not be called again or it gets stuck in a loop. +*/ + +/* Return values from fallback handler. */ +#define FFH_RETRY 0 +#define FFH_UNREACHABLE FFH_RETRY +#define FFH_RES(n) ((n)+1) +#define FFH_TAILCALL (-1) + +LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg); +LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg); +LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg); +#if LJ_DUALNUM +LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg); +#else +#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg)) +#endif +LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg); +LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg); +LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def); +LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg); +LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg); +LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg); +LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst); + +/* Avoid including lj_frame.h. 
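lj_lib_checkopt() expects `lst` in the same length-prefixed form: one length byte per option name, names back to back, and it returns the zero-based index of the match (or `def`, or an error when `def < 0`). A standalone restatement of the matching loop on a made-up option list:

#include <assert.h>
#include <string.h>

/* Match `opt` against a length-prefixed option list, as lj_lib_checkopt() does.
** Returns the option index or `def`; the real function throws when def < 0. */
static int checkopt(const char *opt, const char *lst, int def)
{
  size_t len = strlen(opt);
  int i;
  for (i = 0; *(const unsigned char *)lst; i++) {
    if (*(const unsigned char *)lst == len && memcmp(opt, lst+1, len) == 0)
      return i;
    lst += 1 + *(const unsigned char *)lst;
  }
  return def;
}

int main(void)
{
  assert(checkopt("two", "\3one\3two\5three", -1) == 1);  /* Second entry. */
  assert(checkopt("none", "\3one\3two\5three", 7) == 7);  /* No match: default. */
  return 0;
}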
*/ +#if LJ_GC64 +#define lj_lib_upvalue(L, n) \ + (&gcval(L->base-2)->fn.c.upvalue[(n)-1]) +#elif LJ_FR2 +#define lj_lib_upvalue(L, n) \ + (&gcref((L->base-2)->gcr)->fn.c.upvalue[(n)-1]) +#else +#define lj_lib_upvalue(L, n) \ + (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1]) +#endif + +#if LJ_TARGET_WINDOWS +#define lj_lib_checkfpu(L) \ + do { setnumV(L->top++, (lua_Number)1437217655); \ + if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \ + L->top--; } while (0) +#else +#define lj_lib_checkfpu(L) UNUSED(L) +#endif + +LJ_FUNC GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n); +#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0)) + +/* Library function declarations. Scanned by buildvm. */ +#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L) +#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L) +#define LJLIB_ASM_(name) +#define LJLIB_LUA(name) +#define LJLIB_SET(name) +#define LJLIB_PUSH(arg) +#define LJLIB_REC(handler) +#define LJLIB_NOREGUV +#define LJLIB_NOREG + +#define LJ_LIB_REG(L, regname, name) \ + lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name) + +LJ_FUNC void lj_lib_register(lua_State *L, const char *libname, + const uint8_t *init, const lua_CFunction *cf); +LJ_FUNC void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f, + GCtab *env); +LJ_FUNC int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id, + const char *name); + +/* Library init data tags. */ +#define LIBINIT_LENMASK 0x3f +#define LIBINIT_TAGMASK 0xc0 +#define LIBINIT_CF 0x00 +#define LIBINIT_ASM 0x40 +#define LIBINIT_ASM_ 0x80 +#define LIBINIT_STRING 0xc0 +#define LIBINIT_MAXSTR 0x38 +#define LIBINIT_LUA 0xf9 +#define LIBINIT_SET 0xfa +#define LIBINIT_NUMBER 0xfb +#define LIBINIT_COPY 0xfc +#define LIBINIT_LASTCL 0xfd +#define LIBINIT_FFID 0xfe +#define LIBINIT_END 0xff + +/* Exported library functions. */ + +typedef struct RandomState RandomState; +LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_load.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_load.c new file mode 100644 index 00000000000..95a6ab0d4de --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_load.c @@ -0,0 +1,168 @@ +/* +** Load and dump code. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include +#include + +#define lj_load_c +#define LUA_CORE + +#include "lua.h" +#include "lauxlib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_func.h" +#include "lj_frame.h" +#include "lj_vm.h" +#include "lj_lex.h" +#include "lj_bcdump.h" +#include "lj_parse.h" + +/* -- Load Lua source code and bytecode ----------------------------------- */ + +static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud) +{ + LexState *ls = (LexState *)ud; + GCproto *pt; + GCfunc *fn; + int bc; + UNUSED(dummy); + cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ + bc = lj_lex_setup(L, ls); + if (ls->mode && !strchr(ls->mode, bc ? 'b' : 't')) { + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_XMODE)); + lj_err_throw(L, LUA_ERRSYNTAX); + } + pt = bc ? lj_bcread(ls) : lj_parse(ls); + fn = lj_func_newL_empty(L, pt, tabref(L->env)); + /* Don't combine above/below into one statement. 
*/ + setfuncV(L, L->top++, fn); + return NULL; +} + +LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data, + const char *chunkname, const char *mode) +{ + LexState ls; + int status; + ls.rfunc = reader; + ls.rdata = data; + ls.chunkarg = chunkname ? chunkname : "?"; + ls.mode = mode; + lj_buf_init(L, &ls.sb); + status = lj_vm_cpcall(L, NULL, &ls, cpparser); + lj_lex_cleanup(L, &ls); + lj_gc_check(L); + return status; +} + +LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data, + const char *chunkname) +{ + return lua_loadx(L, reader, data, chunkname, NULL); +} + +typedef struct FileReaderCtx { + FILE *fp; + char buf[LUAL_BUFFERSIZE]; +} FileReaderCtx; + +static const char *reader_file(lua_State *L, void *ud, size_t *size) +{ + FileReaderCtx *ctx = (FileReaderCtx *)ud; + UNUSED(L); + if (feof(ctx->fp)) return NULL; + *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp); + return *size > 0 ? ctx->buf : NULL; +} + +LUALIB_API int luaL_loadfilex(lua_State *L, const char *filename, + const char *mode) +{ + FileReaderCtx ctx; + int status; + const char *chunkname; + if (filename) { + ctx.fp = fopen(filename, "rb"); + if (ctx.fp == NULL) { + lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno)); + return LUA_ERRFILE; + } + chunkname = lua_pushfstring(L, "@%s", filename); + } else { + ctx.fp = stdin; + chunkname = "=stdin"; + } + status = lua_loadx(L, reader_file, &ctx, chunkname, mode); + if (ferror(ctx.fp)) { + L->top -= filename ? 2 : 1; + lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno)); + if (filename) + fclose(ctx.fp); + return LUA_ERRFILE; + } + if (filename) { + L->top--; + copyTV(L, L->top-1, L->top); + fclose(ctx.fp); + } + return status; +} + +LUALIB_API int luaL_loadfile(lua_State *L, const char *filename) +{ + return luaL_loadfilex(L, filename, NULL); +} + +typedef struct StringReaderCtx { + const char *str; + size_t size; +} StringReaderCtx; + +static const char *reader_string(lua_State *L, void *ud, size_t *size) +{ + StringReaderCtx *ctx = (StringReaderCtx *)ud; + UNUSED(L); + if (ctx->size == 0) return NULL; + *size = ctx->size; + ctx->size = 0; + return ctx->str; +} + +LUALIB_API int luaL_loadbufferx(lua_State *L, const char *buf, size_t size, + const char *name, const char *mode) +{ + StringReaderCtx ctx; + ctx.str = buf; + ctx.size = size; + return lua_loadx(L, reader_string, &ctx, name, mode); +} + +LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size, + const char *name) +{ + return luaL_loadbufferx(L, buf, size, name, NULL); +} + +LUALIB_API int luaL_loadstring(lua_State *L, const char *s) +{ + return luaL_loadbuffer(L, s, strlen(s), s); +} + +/* -- Dump bytecode ------------------------------------------------------- */ + +LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data) +{ + cTValue *o = L->top-1; + api_check(L, L->top > L->base); + if (tvisfunc(o) && isluafunc(funcV(o))) + return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0); + else + return 1; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.c new file mode 100644 index 00000000000..d95ebeb1fe7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.c @@ -0,0 +1,386 @@ +/* +** Machine code management. +** Copyright (C) 2005-2015 Mike Pall. 
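lua_loadx()/lua_load() only pull chunks through the lua_Reader callback, so any source of bytes works; reader_file and reader_string are just two built-in readers. A minimal host-side sketch using the plain Lua/LuaJIT C API with a one-shot string reader (error handling trimmed; the reader name is made up):

#include <stdio.h>
#include <string.h>
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"

/* One-shot reader: hands back the whole string once, then NULL for EOF. */
static const char *one_shot(lua_State *L, void *ud, size_t *size)
{
  const char **src = (const char **)ud;
  const char *s = *src;
  (void)L;
  if (s == NULL) return NULL;
  *src = NULL;                   /* Next call reports end of input. */
  *size = strlen(s);
  return s;
}

int main(void)
{
  const char *src = "return 1+2";
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  if (lua_load(L, one_shot, &src, "=example") == 0 &&
      lua_pcall(L, 0, 1, 0) == 0)
    printf("result: %d\n", (int)lua_tointeger(L, -1));
  lua_close(L);
  return 0;
}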
See Copyright Notice in luajit.h +*/ + +#define lj_mcode_c +#define LUA_CORE + +#include "lj_obj.h" +#if LJ_HASJIT +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_jit.h" +#include "lj_mcode.h" +#include "lj_trace.h" +#include "lj_dispatch.h" +#endif +#if LJ_HASJIT || LJ_HASFFI +#include "lj_vm.h" +#endif + +/* -- OS-specific functions ----------------------------------------------- */ + +#if LJ_HASJIT || LJ_HASFFI + +/* Define this if you want to run LuaJIT with Valgrind. */ +#ifdef LUAJIT_USE_VALGRIND +#include +#endif + +#if LJ_TARGET_IOS +void sys_icache_invalidate(void *start, size_t len); +#endif + +/* Synchronize data/instruction cache. */ +void lj_mcode_sync(void *start, void *end) +{ +#ifdef LUAJIT_USE_VALGRIND + VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start); +#endif +#if LJ_TARGET_X86ORX64 + UNUSED(start); UNUSED(end); +#elif LJ_TARGET_IOS + sys_icache_invalidate(start, (char *)end-(char *)start); +#elif LJ_TARGET_PPC + lj_vm_cachesync(start, end); +#elif defined(__GNUC__) + __clear_cache(start, end); +#else +#error "Missing builtin to flush instruction cache" +#endif +} + +#endif + +#if LJ_HASJIT + +#if LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include + +#define MCPROT_RW PAGE_READWRITE +#define MCPROT_RX PAGE_EXECUTE_READ +#define MCPROT_RWX PAGE_EXECUTE_READWRITE + +static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot) +{ + void *p = VirtualAlloc((void *)hint, sz, + MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot); + if (!p && !hint) + lj_trace_err(J, LJ_TRERR_MCODEAL); + return p; +} + +static void mcode_free(jit_State *J, void *p, size_t sz) +{ + UNUSED(J); UNUSED(sz); + VirtualFree(p, 0, MEM_RELEASE); +} + +static int mcode_setprot(void *p, size_t sz, DWORD prot) +{ + DWORD oprot; + return !VirtualProtect(p, sz, prot, &oprot); +} + +#elif LJ_TARGET_POSIX + +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +#define MCPROT_RW (PROT_READ|PROT_WRITE) +#define MCPROT_RX (PROT_READ|PROT_EXEC) +#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC) + +static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) +{ + void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (p == MAP_FAILED) { + if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL); + p = NULL; + } + return p; +} + +static void mcode_free(jit_State *J, void *p, size_t sz) +{ + UNUSED(J); + munmap(p, sz); +} + +static int mcode_setprot(void *p, size_t sz, int prot) +{ + return mprotect(p, sz, prot); +} + +#elif LJ_64 + +#error "Missing OS support for explicit placement of executable memory" + +#else + +/* Fallback allocator. This will fail if memory is not executable by default. */ +#define LUAJIT_UNPROTECT_MCODE +#define MCPROT_RW 0 +#define MCPROT_RX 0 +#define MCPROT_RWX 0 + +static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) +{ + UNUSED(hint); UNUSED(prot); + return lj_mem_new(J->L, sz); +} + +static void mcode_free(jit_State *J, void *p, size_t sz) +{ + lj_mem_free(J2G(J), p, sz); +} + +#endif + +/* -- MCode area protection ----------------------------------------------- */ + +/* Define this ONLY if page protection twiddling becomes a bottleneck. */ +#ifdef LUAJIT_UNPROTECT_MCODE + +/* It's generally considered to be a potential security risk to have +** pages with simultaneous write *and* execute access in a process. +** +** Do not even think about using this mode for server processes or +** apps handling untrusted external data (such as a browser). 
+** +** The security risk is not in LuaJIT itself -- but if an adversary finds +** any *other* flaw in your C application logic, then any RWX memory page +** simplifies writing an exploit considerably. +*/ +#define MCPROT_GEN MCPROT_RWX +#define MCPROT_RUN MCPROT_RWX + +static void mcode_protect(jit_State *J, int prot) +{ + UNUSED(J); UNUSED(prot); +} + +#else + +/* This is the default behaviour and much safer: +** +** Most of the time the memory pages holding machine code are executable, +** but NONE of them is writable. +** +** The current memory area is marked read-write (but NOT executable) only +** during the short time window while the assembler generates machine code. +*/ +#define MCPROT_GEN MCPROT_RW +#define MCPROT_RUN MCPROT_RX + +/* Protection twiddling failed. Probably due to kernel security. */ +static LJ_NOINLINE void mcode_protfail(jit_State *J) +{ + lua_CFunction panic = J2G(J)->panic; + if (panic) { + lua_State *L = J->L; + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT)); + panic(L); + } +} + +/* Change protection of MCode area. */ +static void mcode_protect(jit_State *J, int prot) +{ + if (J->mcprot != prot) { + if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot))) + mcode_protfail(J); + J->mcprot = prot; + } +} + +#endif + +/* -- MCode area allocation ----------------------------------------------- */ + +#if LJ_TARGET_X64 +#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47) +#else +#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000) +#endif + +#ifdef LJ_TARGET_JUMPRANGE + +/* Get memory within relative jump distance of our code in 64 bit mode. */ +static void *mcode_alloc(jit_State *J, size_t sz) +{ + /* Target an address in the static assembler code (64K aligned). + ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB. + ** Use half the jump range so every address in the range can reach any other. + */ +#if LJ_TARGET_MIPS + /* Use the middle of the 256MB-aligned region. */ + uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) + + 0x08000000u; +#else + uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff; +#endif + const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21); + /* First try a contiguous area below the last one. */ + uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0; + int i; + for (i = 0; i < 32; i++) { /* 32 attempts ought to be enough ... */ + if (mcode_validptr(hint)) { + void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN); + + if (mcode_validptr(p) && + ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range)) + return p; + if (p) mcode_free(J, p, sz); /* Free badly placed area. */ + } + /* Next try probing pseudo-random addresses. */ + do { + hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16; /* 64K aligned. */ + } while (!(hint + sz < range)); + hint = target + hint - (range>>1); + } + lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */ + return NULL; +} + +#else + +/* All memory addresses are reachable by relative jumps. */ +static void *mcode_alloc(jit_State *J, size_t sz) +{ +#ifdef __OpenBSD__ + /* Allow better executable memory allocation for OpenBSD W^X mode. 
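The jump-range allocator keeps every hint within `range` of an anchor inside the static VM code, where range is half the architecture's relative-branch span minus 2MB of slack; that way any two machine code areas can still branch to each other. A rough worked example, assuming a relative branch span of 2GB in either direction (LJ_TARGET_JUMPRANGE == 31, the usual x64 value; that number is an assumption here, see lj_arch.h):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t jumprange = 31;                                /* Assumed x64 value. */
  uint32_t range = (1u << (jumprange - 1)) - (1u << 21);  /* Same formula as above. */
  printf("areas stay within %u MB of the anchor\n", range >> 20);            /* 1022 MB */
  printf("worst-case distance between two areas: %u MB\n", (2*range) >> 20); /* 2044 MB */
  return 0;
}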
*/ + void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN); + if (p && mcode_setprot(p, sz, MCPROT_GEN)) { + mcode_free(J, p, sz); + return NULL; + } + return p; +#else + return mcode_alloc_at(J, 0, sz, MCPROT_GEN); +#endif +} + +#endif + +/* -- MCode area management ----------------------------------------------- */ + +/* Linked list of MCode areas. */ +typedef struct MCLink { + MCode *next; /* Next area. */ + size_t size; /* Size of current area. */ +} MCLink; + +/* Allocate a new MCode area. */ +static void mcode_allocarea(jit_State *J) +{ + MCode *oldarea = J->mcarea; + size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10; + sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); + J->mcarea = (MCode *)mcode_alloc(J, sz); + J->szmcarea = sz; + J->mcprot = MCPROT_GEN; + J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea); + J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink)); + ((MCLink *)J->mcarea)->next = oldarea; + ((MCLink *)J->mcarea)->size = sz; + J->szallmcarea += sz; +} + +/* Free all MCode areas. */ +void lj_mcode_free(jit_State *J) +{ + MCode *mc = J->mcarea; + J->mcarea = NULL; + J->szallmcarea = 0; + while (mc) { + MCode *next = ((MCLink *)mc)->next; + mcode_free(J, mc, ((MCLink *)mc)->size); + mc = next; + } +} + +/* -- MCode transactions -------------------------------------------------- */ + +/* Reserve the remainder of the current MCode area. */ +MCode *lj_mcode_reserve(jit_State *J, MCode **lim) +{ + if (!J->mcarea) + mcode_allocarea(J); + else + mcode_protect(J, MCPROT_GEN); + *lim = J->mcbot; + return J->mctop; +} + +/* Commit the top part of the current MCode area. */ +void lj_mcode_commit(jit_State *J, MCode *top) +{ + J->mctop = top; + mcode_protect(J, MCPROT_RUN); +} + +/* Abort the reservation. */ +void lj_mcode_abort(jit_State *J) +{ + if (J->mcarea) + mcode_protect(J, MCPROT_RUN); +} + +/* Set/reset protection to allow patching of MCode areas. */ +MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish) +{ +#ifdef LUAJIT_UNPROTECT_MCODE + UNUSED(J); UNUSED(ptr); UNUSED(finish); + return NULL; +#else + if (finish) { + if (J->mcarea == ptr) + mcode_protect(J, MCPROT_RUN); + else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN))) + mcode_protfail(J); + return NULL; + } else { + MCode *mc = J->mcarea; + /* Try current area first to use the protection cache. */ + if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) { + mcode_protect(J, MCPROT_GEN); + return mc; + } + /* Otherwise search through the list of MCode areas. */ + for (;;) { + mc = ((MCLink *)mc)->next; + lua_assert(mc != NULL); + if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) { + if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN))) + mcode_protfail(J); + return mc; + } + } + } +#endif +} + +/* Limit of MCode reservation reached. */ +void lj_mcode_limiterr(jit_State *J, size_t need) +{ + size_t sizemcode, maxmcode; + lj_mcode_abort(J); + sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10; + sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); + maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10; + if ((size_t)need > sizemcode) + lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */ + if (J->szallmcarea + sizemcode > maxmcode) + lj_trace_err(J, LJ_TRERR_MCODEAL); + mcode_allocarea(J); + lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. 
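lj_mcode_reserve()/lj_mcode_commit() form a small transaction: reserve makes the area writable and returns the current top plus a lower limit, the assembler emits code downwards from the top, and commit records the new top and flips the pages back to read+execute. A toy model of just the downward bump allocation and the commit bookkeeping; the protection changes are left out and all names here are illustrative:

#include <assert.h>
#include <stddef.h>

/* Toy reserve/commit: code is emitted downwards from `top`, commit() records
** how far it got. Mirrors the bookkeeping, not the page protection handling. */
typedef struct MiniArea { unsigned char mem[256]; size_t top, bot; } MiniArea;

static unsigned char *reserve(MiniArea *a, unsigned char **lim)
{
  *lim = a->mem + a->bot;          /* Never write below this limit. */
  return a->mem + a->top;          /* Emit downwards starting below this. */
}

static void commit(MiniArea *a, unsigned char *top)
{
  a->top = (size_t)(top - a->mem); /* [top, old top) is now committed code. */
}

int main(void)
{
  MiniArea a = { {0}, sizeof(a.mem), 16 };
  unsigned char *lim, *p = reserve(&a, &lim);
  *--p = 0xc3;                     /* "Emit" one byte, growing downwards. */
  assert(p > lim);
  commit(&a, p);
  assert(a.top == sizeof(a.mem) - 1);
  return 0;
}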
*/ +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.h new file mode 100644 index 00000000000..ee604523496 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_mcode.h @@ -0,0 +1,30 @@ +/* +** Machine code management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_MCODE_H +#define _LJ_MCODE_H + +#include "lj_obj.h" + +#if LJ_HASJIT || LJ_HASFFI +LJ_FUNC void lj_mcode_sync(void *start, void *end); +#endif + +#if LJ_HASJIT + +#include "lj_jit.h" + +LJ_FUNC void lj_mcode_free(jit_State *J); +LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim); +LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m); +LJ_FUNC void lj_mcode_abort(jit_State *J); +LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish); +LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need); + +#define lj_mcode_commitbot(J, m) (J->mcbot = (m)) + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_meta.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_meta.c new file mode 100644 index 00000000000..104ecf07c3d --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_meta.c @@ -0,0 +1,477 @@ +/* +** Metamethod handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_meta_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_vm.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" +#include "lj_lib.h" + +/* -- Metamethod handling ------------------------------------------------- */ + +/* String interning of metamethod names for fast indexing. */ +void lj_meta_init(lua_State *L) +{ +#define MMNAME(name) "__" #name + const char *metanames = MMDEF(MMNAME); +#undef MMNAME + global_State *g = G(L); + const char *p, *q; + uint32_t mm; + for (mm = 0, p = metanames; *p; mm++, p = q) { + GCstr *s; + for (q = p+2; *q && *q != '_'; q++) ; + s = lj_str_new(L, p, (size_t)(q-p)); + /* NOBARRIER: g->gcroot[] is a GC root. */ + setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s)); + } +} + +/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */ +cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name) +{ + cTValue *mo = lj_tab_getstr(mt, name); + lua_assert(mm <= MM_FAST); + if (!mo || tvisnil(mo)) { /* No metamethod? */ + mt->nomm |= (uint8_t)(1u<metatable); + else if (tvisudata(o)) + mt = tabref(udataV(o)->metatable); + else + mt = tabref(basemt_obj(G(L), o)); + if (mt) { + cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm)); + if (mo) + return mo; + } + return niltv(L); +} + +#if LJ_HASFFI +/* Tailcall from C function. */ +int lj_meta_tailcall(lua_State *L, cTValue *tv) +{ + TValue *base = L->base; + TValue *top = L->top; + const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */ + copyTV(L, base-1-LJ_FR2, tv); /* Replace frame with new object. */ + if (LJ_FR2) + (top++)->u64 = LJ_CONT_TAILCALL; + else + top->u32.lo = LJ_CONT_TAILCALL; + setframe_pc(top++, pc); + if (LJ_FR2) top++; + setframe_gc(top, obj2gco(L), LJ_TTHREAD); /* Dummy frame object. */ + setframe_ftsz(top, ((char *)(top+1) - (char *)base) + FRAME_CONT); + L->base = L->top = top+1; + /* + ** before: [old_mo|PC] [... 
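lj_meta_cache() maintains a negative cache: when a fast metamethod is missing, bit `mm` of mt->nomm is set so later fast-path checks can skip the hash lookup, and lj_meta_tset() below clears the whole byte again whenever the table is modified. A tiny standalone model of that bitmask; the enum is only an illustrative stand-in for MMS:

#include <assert.h>
#include <stdint.h>

enum { MM_index, MM_newindex, MM_len };            /* Illustrative subset of MMS. */

typedef struct MiniMT { uint8_t nomm; } MiniMT;

/* Nonzero if metamethod mm is known to be absent (negative cache hit). */
static int nomm_cached(const MiniMT *mt, int mm) { return mt->nomm & (1u << mm); }

int main(void)
{
  MiniMT mt = { 0 };
  mt.nomm |= (uint8_t)(1u << MM_len);              /* Lookup missed: remember it. */
  assert(nomm_cached(&mt, MM_len));
  assert(!nomm_cached(&mt, MM_index));
  mt.nomm = 0;                                     /* Table changed: invalidate. */
  assert(!nomm_cached(&mt, MM_len));
  return 0;
}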
...] + ** ^base ^top + ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta] + ** ^base/top + ** tailcall: [new_mo|PC] [... ...] + ** ^base ^top + */ + return 0; +} +#endif + +/* Setup call to metamethod to be run by Assembler VM. */ +static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo, + cTValue *a, cTValue *b) +{ + /* + ** |-- framesize -> top top+1 top+2 top+3 + ** before: [func slots ...] + ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b] + ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b] + ** ^-- func base ^-- mm base + ** after mm: [func slots ...] [result] + ** ^-- copy to base[PC_RA] --/ for lj_cont_ra + ** istruecond + branch for lj_cont_cond* + ** ignore for lj_cont_nop + ** next PC: [func slots ...] + */ + TValue *top = L->top; + if (curr_funcisL(L)) top = curr_topL(L); + setcont(top++, cont); /* Assembler VM stores PC in upper word or FR2. */ + if (LJ_FR2) setnilV(top++); + copyTV(L, top++, mo); /* Store metamethod and two arguments. */ + if (LJ_FR2) setnilV(top++); + copyTV(L, top, a); + copyTV(L, top+1, b); + return top; /* Return new base. */ +} + +/* -- C helpers for some instructions, called from assembler VM ----------- */ + +/* Helper for TGET*. __index chain and metamethod. */ +cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k) +{ + int loop; + for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) { + cTValue *mo; + if (LJ_LIKELY(tvistab(o))) { + GCtab *t = tabV(o); + cTValue *tv = lj_tab_get(L, t, k); + if (!tvisnil(tv) || + !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index))) + return tv; + } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) { + lj_err_optype(L, o, LJ_ERR_OPINDEX); + return NULL; /* unreachable */ + } + if (tvisfunc(mo)) { + L->top = mmcall(L, lj_cont_ra, mo, o, k); + return NULL; /* Trigger metamethod call. */ + } + o = mo; + } + lj_err_msg(L, LJ_ERR_GETLOOP); + return NULL; /* unreachable */ +} + +/* Helper for TSET*. __newindex chain and metamethod. */ +TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k) +{ + TValue tmp; + int loop; + for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) { + cTValue *mo; + if (LJ_LIKELY(tvistab(o))) { + GCtab *t = tabV(o); + cTValue *tv = lj_tab_get(L, t, k); + if (LJ_LIKELY(!tvisnil(tv))) { + t->nomm = 0; /* Invalidate negative metamethod cache. */ + lj_gc_anybarriert(L, t); + return (TValue *)tv; + } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) { + t->nomm = 0; /* Invalidate negative metamethod cache. */ + lj_gc_anybarriert(L, t); + if (tv != niltv(L)) + return (TValue *)tv; + if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX); + else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; } + else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX); + return lj_tab_newkey(L, t, k); + } + } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) { + lj_err_optype(L, o, LJ_ERR_OPINDEX); + return NULL; /* unreachable */ + } + if (tvisfunc(mo)) { + L->top = mmcall(L, lj_cont_nop, mo, o, k); + /* L->top+2 = v filled in by caller. */ + return NULL; /* Trigger metamethod call. */ + } + copyTV(L, &tmp, mo); + o = &tmp; + } + lj_err_msg(L, LJ_ERR_SETLOOP); + return NULL; /* unreachable */ +} + +static cTValue *str2num(cTValue *o, TValue *n) +{ + if (tvisnum(o)) + return o; + else if (tvisint(o)) + return (setnumV(n, (lua_Number)intV(o)), n); + else if (tvisstr(o) && lj_strscan_num(strV(o), n)) + return n; + else + return NULL; +} + +/* Helper for arithmetic instructions. Coercion, metamethod. 
*/ +TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc, + BCReg op) +{ + MMS mm = bcmode_mm(op); + TValue tempb, tempc; + cTValue *b, *c; + if ((b = str2num(rb, &tempb)) != NULL && + (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */ + setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add)); + return NULL; + } else { + cTValue *mo = lj_meta_lookup(L, rb, mm); + if (tvisnil(mo)) { + mo = lj_meta_lookup(L, rc, mm); + if (tvisnil(mo)) { + if (str2num(rb, &tempb) == NULL) rc = rb; + lj_err_optype(L, rc, LJ_ERR_OPARITH); + return NULL; /* unreachable */ + } + } + return mmcall(L, lj_cont_ra, mo, rb, rc); + } +} + +/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */ +TValue *lj_meta_cat(lua_State *L, TValue *top, int left) +{ + int fromc = 0; + if (left < 0) { left = -left; fromc = 1; } + do { + if (!(tvisstr(top) || tvisnumber(top)) || + !(tvisstr(top-1) || tvisnumber(top-1))) { + cTValue *mo = lj_meta_lookup(L, top-1, MM_concat); + if (tvisnil(mo)) { + mo = lj_meta_lookup(L, top, MM_concat); + if (tvisnil(mo)) { + if (tvisstr(top-1) || tvisnumber(top-1)) top++; + lj_err_optype(L, top-1, LJ_ERR_OPCAT); + return NULL; /* unreachable */ + } + } + /* One of the top two elements is not a string, call __cat metamethod: + ** + ** before: [...][CAT stack .........................] + ** top-1 top top+1 top+2 + ** pick two: [...][CAT stack ...] [o1] [o2] + ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2] + ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2] + ** ^-- func base ^-- mm base + ** after mm: [...][CAT stack ...] <--push-- [result] + ** next step: [...][CAT stack .............] + */ + copyTV(L, top+2*LJ_FR2+2, top); /* Carefully ordered stack copies! */ + copyTV(L, top+2*LJ_FR2+1, top-1); + copyTV(L, top+LJ_FR2, mo); + setcont(top-1, lj_cont_cat); + if (LJ_FR2) { setnilV(top); setnilV(top+2); top += 2; } + return top+1; /* Trigger metamethod call. */ + } else { + /* Pick as many strings as possible from the top and concatenate them: + ** + ** before: [...][CAT stack ...........................] + ** pick str: [...][CAT stack ...] [...... strings ......] + ** concat: [...][CAT stack ...] [result] + ** next step: [...][CAT stack ............] + */ + TValue *e, *o = top; + uint64_t tlen = tvisstr(o) ? strV(o)->len : STRFMT_MAXBUF_NUM; + char *p, *buf; + do { + o--; tlen += tvisstr(o) ? strV(o)->len : STRFMT_MAXBUF_NUM; + } while (--left > 0 && (tvisstr(o-1) || tvisnumber(o-1))); + if (tlen >= LJ_MAX_STR) lj_err_msg(L, LJ_ERR_STROV); + p = buf = lj_buf_tmp(L, (MSize)tlen); + for (e = top, top = o; o <= e; o++) { + if (tvisstr(o)) { + GCstr *s = strV(o); + MSize len = s->len; + p = lj_buf_wmem(p, strdata(s), len); + } else if (tvisint(o)) { + p = lj_strfmt_wint(p, intV(o)); + } else { + lua_assert(tvisnum(o)); + p = lj_strfmt_wnum(p, o); + } + } + setstrV(L, top, lj_str_new(L, buf, (size_t)(p-buf))); + } + } while (left >= 1); + if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) { + if (!fromc) L->top = curr_topL(L); + lj_gc_step(L); + } + return NULL; +} + +/* Helper for LEN. __len metamethod. 
*/ +TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o) +{ + cTValue *mo = lj_meta_lookup(L, o, MM_len); + if (tvisnil(mo)) { + if (LJ_52 && tvistab(o)) + tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<gch.metatable), MM_eq); + if (mo) { + TValue *top; + uint32_t it; + if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) { + cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq); + if (mo2 == NULL || !lj_obj_equal(mo, mo2)) + return (TValue *)(intptr_t)ne; + } + top = curr_top(L); + setcont(top++, ne ? lj_cont_condf : lj_cont_condt); + if (LJ_FR2) setnilV(top++); + copyTV(L, top++, mo); + if (LJ_FR2) setnilV(top++); + it = ~(uint32_t)o1->gch.gct; + setgcV(L, top, o1, it); + setgcV(L, top+1, o2, it); + return top; /* Trigger metamethod call. */ + } + return (TValue *)(intptr_t)ne; +} + +#if LJ_HASFFI +TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins) +{ + ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt; + int op = (int)bc_op(ins) & ~1; + TValue tv; + cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)]; + cTValue *o1mm = o1; + if (op == BC_ISEQV) { + o2 = &L->base[bc_d(ins)]; + if (!tviscdata(o1mm)) o1mm = o2; + } else if (op == BC_ISEQS) { + setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins)))); + o2 = &tv; + } else if (op == BC_ISEQN) { + o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)]; + } else { + lua_assert(op == BC_ISEQP); + setpriV(&tv, ~bc_d(ins)); + o2 = &tv; + } + mo = lj_meta_lookup(L, o1mm, MM_eq); + if (LJ_LIKELY(!tvisnil(mo))) + return mmcall(L, cont, mo, o1, o2); + else + return (TValue *)(intptr_t)(bc_op(ins) & 1); +} +#endif + +/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */ +TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op) +{ + if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) { + ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt; + MMS mm = (op & 2) ? MM_le : MM_lt; + cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm); + if (LJ_UNLIKELY(tvisnil(mo))) goto err; + return mmcall(L, cont, mo, o1, o2); + } else if (LJ_52 || itype(o1) == itype(o2)) { + /* Never called with two numbers. */ + if (tvisstr(o1) && tvisstr(o2)) { + int32_t res = lj_str_cmp(strV(o1), strV(o2)); + return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1)); + } else { + trymt: + while (1) { + ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt; + MMS mm = (op & 2) ? MM_le : MM_lt; + cTValue *mo = lj_meta_lookup(L, o1, mm); +#if LJ_52 + if (tvisnil(mo) && tvisnil((mo = lj_meta_lookup(L, o2, mm)))) +#else + cTValue *mo2 = lj_meta_lookup(L, o2, mm); + if (tvisnil(mo) || !lj_obj_equal(mo, mo2)) +#endif + { + if (op & 2) { /* MM_le not found: retry with MM_lt. */ + cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */ + op ^= 3; /* Use LT and flip condition. */ + continue; + } + goto err; + } + return mmcall(L, cont, mo, o1, o2); + } + } + } else if (tvisbool(o1) && tvisbool(o2)) { + goto trymt; + } else { + err: + lj_err_comp(L, o1, o2); + return NULL; + } +} + +/* Helper for ISTYPE and ISNUM. Implicit coercion or error. */ +void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp) +{ + L->top = curr_topL(L); + ra++; tp--; + lua_assert(LJ_DUALNUM || tp != ~LJ_TNUMX); /* ISTYPE -> ISNUM broken. */ + if (LJ_DUALNUM && tp == ~LJ_TNUMX) lj_lib_checkint(L, ra); + else if (tp == ~LJ_TNUMX+1) lj_lib_checknum(L, ra); + else if (tp == ~LJ_TSTR) lj_lib_checkstr(L, ra); + else lj_err_argtype(L, ra, lj_obj_itypename[tp]); +} + +/* Helper for calls. 
__call metamethod. */ +void lj_meta_call(lua_State *L, TValue *func, TValue *top) +{ + cTValue *mo = lj_meta_lookup(L, func, MM_call); + TValue *p; + if (!tvisfunc(mo)) + lj_err_optype_call(L, func); + for (p = top; p > func+2*LJ_FR2; p--) copyTV(L, p, p-1); + if (LJ_FR2) copyTV(L, func+2, func); + copyTV(L, func, mo); +} + +/* Helper for FORI. Coercion. */ +void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o) +{ + if (!lj_strscan_numberobj(o)) lj_err_msg(L, LJ_ERR_FORINIT); + if (!lj_strscan_numberobj(o+1)) lj_err_msg(L, LJ_ERR_FORLIM); + if (!lj_strscan_numberobj(o+2)) lj_err_msg(L, LJ_ERR_FORSTEP); + if (LJ_DUALNUM) { + /* Ensure all slots are integers or all slots are numbers. */ + int32_t k[3]; + int nint = 0; + ptrdiff_t i; + for (i = 0; i <= 2; i++) { + if (tvisint(o+i)) { + k[i] = intV(o+i); nint++; + } else { + k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i)); + } + } + if (nint == 3) { /* Narrow to integers. */ + setintV(o, k[0]); + setintV(o+1, k[1]); + setintV(o+2, k[2]); + } else if (nint != 0) { /* Widen to numbers. */ + if (tvisint(o)) setnumV(o, (lua_Number)intV(o)); + if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1)); + if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2)); + } + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_meta.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_meta.h new file mode 100644 index 00000000000..7f71633362b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_meta.h @@ -0,0 +1,38 @@ +/* +** Metamethod handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_META_H +#define _LJ_META_H + +#include "lj_obj.h" + +/* Metamethod handling */ +LJ_FUNC void lj_meta_init(lua_State *L); +LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name); +LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm); +#if LJ_HASFFI +LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv); +#endif + +#define lj_meta_fastg(g, mt, mm) \ + ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \ + lj_meta_cache(mt, mm, mmname_str(g, mm))) +#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm) + +/* C helpers for some instructions, called from assembler VM. */ +LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k); +LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k); +LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, + cTValue *rc, BCReg op); +LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left); +LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o); +LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne); +LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins); +LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op); +LJ_FUNCA void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp); +LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top); +LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_obj.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_obj.c new file mode 100644 index 00000000000..b78d2c8d835 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_obj.c @@ -0,0 +1,50 @@ +/* +** Miscellaneous object handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_obj_c +#define LUA_CORE + +#include "lj_obj.h" + +/* Object type names. 
*/ +LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */ + "no value", "nil", "boolean", "userdata", "number", "string", + "table", "function", "userdata", "thread", "proto", "cdata" +}; + +LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */ + "nil", "boolean", "boolean", "userdata", "string", "upval", "thread", + "proto", "function", "trace", "cdata", "table", "userdata", "number" +}; + +/* Compare two objects without calling metamethods. */ +int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2) +{ + if (itype(o1) == itype(o2)) { + if (tvispri(o1)) + return 1; + if (!tvisnum(o1)) + return gcrefeq(o1->gcr, o2->gcr); + } else if (!tvisnumber(o1) || !tvisnumber(o2)) { + return 0; + } + return numberVnum(o1) == numberVnum(o2); +} + +/* Return pointer to object or its object data. */ +const void * LJ_FASTCALL lj_obj_ptr(cTValue *o) +{ + if (tvisudata(o)) + return uddata(udataV(o)); + else if (tvislightud(o)) + return lightudV(o); + else if (LJ_HASFFI && tviscdata(o)) + return cdataptr(cdataV(o)); + else if (tvisgcv(o)) + return gcV(o); + else + return NULL; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_obj.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_obj.h new file mode 100644 index 00000000000..74ed59bc726 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_obj.h @@ -0,0 +1,976 @@ +/* +** LuaJIT VM tags, values and objects. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#ifndef _LJ_OBJ_H +#define _LJ_OBJ_H + +#include "lua.h" +#include "lj_def.h" +#include "lj_arch.h" + +/* -- Memory references (32 bit address space) ---------------------------- */ + +/* Memory and GC object sizes. */ +typedef uint32_t MSize; +#if LJ_GC64 +typedef uint64_t GCSize; +#else +typedef uint32_t GCSize; +#endif + +/* Memory reference */ +typedef struct MRef { +#if LJ_GC64 + uint64_t ptr64; /* True 64 bit pointer. */ +#else + uint32_t ptr32; /* Pseudo 32 bit pointer. */ +#endif +} MRef; + +#if LJ_GC64 +#define mref(r, t) ((t *)(void *)(r).ptr64) + +#define setmref(r, p) ((r).ptr64 = (uint64_t)(void *)(p)) +#define setmrefr(r, v) ((r).ptr64 = (v).ptr64) +#else +#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32) + +#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p)) +#define setmrefr(r, v) ((r).ptr32 = (v).ptr32) +#endif + +/* -- GC object references (32 bit address space) ------------------------- */ + +/* GCobj reference */ +typedef struct GCRef { +#if LJ_GC64 + uint64_t gcptr64; /* True 64 bit pointer. */ +#else + uint32_t gcptr32; /* Pseudo 32 bit pointer. */ +#endif +} GCRef; + +/* Common GC header for all collectable objects. */ +#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct +/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. 
*/ + +#if LJ_GC64 +#define gcref(r) ((GCobj *)(r).gcptr64) +#define gcrefp(r, t) ((t *)(void *)(r).gcptr64) +#define gcrefu(r) ((r).gcptr64) +#define gcrefeq(r1, r2) ((r1).gcptr64 == (r2).gcptr64) + +#define setgcref(r, gc) ((r).gcptr64 = (uint64_t)&(gc)->gch) +#define setgcreft(r, gc, it) \ + (r).gcptr64 = (uint64_t)&(gc)->gch | (((uint64_t)(it)) << 47) +#define setgcrefp(r, p) ((r).gcptr64 = (uint64_t)(p)) +#define setgcrefnull(r) ((r).gcptr64 = 0) +#define setgcrefr(r, v) ((r).gcptr64 = (v).gcptr64) +#else +#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32) +#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32) +#define gcrefu(r) ((r).gcptr32) +#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32) + +#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch) +#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p)) +#define setgcrefnull(r) ((r).gcptr32 = 0) +#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32) +#endif + +#define gcnext(gc) (gcref((gc)->gch.nextgc)) + +/* IMPORTANT NOTE: +** +** All uses of the setgcref* macros MUST be accompanied with a write barrier. +** +** This is to ensure the integrity of the incremental GC. The invariant +** to preserve is that a black object never points to a white object. +** I.e. never store a white object into a field of a black object. +** +** It's ok to LEAVE OUT the write barrier ONLY in the following cases: +** - The source is not a GC object (NULL). +** - The target is a GC root. I.e. everything in global_State. +** - The target is a lua_State field (threads are never black). +** - The target is a stack slot, see setgcV et al. +** - The target is an open upvalue, i.e. pointing to a stack slot. +** - The target is a newly created object (i.e. marked white). But make +** sure nothing invokes the GC inbetween. +** - The target and the source are the same object (self-reference). +** - The target already contains the object (e.g. moving elements around). +** +** The most common case is a store to a stack slot. All other cases where +** a barrier has been omitted are annotated with a NOBARRIER comment. +** +** The same logic applies for stores to table slots (array part or hash +** part). ALL uses of lj_tab_set* require a barrier for the stored value +** *and* the stored key, based on the above rules. In practice this means +** a barrier is needed if *either* of the key or value are a GC object. +** +** It's ok to LEAVE OUT the write barrier in the following special cases: +** - The stored value is nil. The key doesn't matter because it's either +** not resurrected or lj_tab_newkey() will take care of the key barrier. +** - The key doesn't matter if the *previously* stored value is guaranteed +** to be non-nil (because the key is kept alive in the table). +** - The key doesn't matter if it's guaranteed not to be part of the table, +** since lj_tab_newkey() takes care of the key barrier. This applies +** trivially to new tables, but watch out for resurrected keys. Storing +** a nil value leaves the key in the table! +** +** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used +** by the interpreter for all table stores. +** +** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark +** dead keys in tables. The reference is left in, but it's guaranteed to +** be never dereferenced as long as the value is nil. It's ok if the key is +** freed or if any object subsequently gets the same address. +** +** Not destroying dead keys helps to keep key hash slots stable. 
This avoids +** specialization back-off for HREFK when a value flips between nil and +** non-nil and the GC gets in the way. It also allows safely hoisting +** HREF/HREFK across GC steps. Dead keys are only removed if a table is +** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize. +** +** The trade-off is that a write barrier for tables must take the key into +** account, too. Implicitly resurrecting the key by storing a non-nil value +** may invalidate the incremental GC invariant. +*/ + +/* -- Common type definitions --------------------------------------------- */ + +/* Types for handling bytecodes. Need this here, details in lj_bc.h. */ +typedef uint32_t BCIns; /* Bytecode instruction. */ +typedef uint32_t BCPos; /* Bytecode position. */ +typedef uint32_t BCReg; /* Bytecode register. */ +typedef int32_t BCLine; /* Bytecode line number. */ + +/* Internal assembler functions. Never call these directly from C. */ +typedef void (*ASMFunction)(void); + +/* Resizable string buffer. Need this here, details in lj_buf.h. */ +typedef struct SBuf { + MRef p; /* String buffer pointer. */ + MRef e; /* String buffer end pointer. */ + MRef b; /* String buffer base. */ + MRef L; /* lua_State, used for buffer resizing. */ +} SBuf; + +/* -- Tags and values ----------------------------------------------------- */ + +/* Frame link. */ +typedef union { + int32_t ftsz; /* Frame type and size of previous frame. */ + MRef pcr; /* Or PC for Lua frames. */ +} FrameLink; + +/* Tagged value. */ +typedef LJ_ALIGN(8) union TValue { + uint64_t u64; /* 64 bit pattern overlaps number. */ + lua_Number n; /* Number object overlaps split tag/value object. */ +#if LJ_GC64 + GCRef gcr; /* GCobj reference with tag. */ + int64_t it64; + struct { + LJ_ENDIAN_LOHI( + int32_t i; /* Integer value. */ + , uint32_t it; /* Internal object tag. Must overlap MSW of number. */ + ) + }; +#else + struct { + LJ_ENDIAN_LOHI( + union { + GCRef gcr; /* GCobj reference (if any). */ + int32_t i; /* Integer value. */ + }; + , uint32_t it; /* Internal object tag. Must overlap MSW of number. */ + ) + }; +#endif +#if LJ_FR2 + int64_t ftsz; /* Frame type and size of previous frame, or PC. */ +#else + struct { + LJ_ENDIAN_LOHI( + GCRef func; /* Function for next frame (or dummy L). */ + , FrameLink tp; /* Link to previous frame. */ + ) + } fr; +#endif + struct { + LJ_ENDIAN_LOHI( + uint32_t lo; /* Lower 32 bits of number. */ + , uint32_t hi; /* Upper 32 bits of number. */ + ) + } u32; +} TValue; + +typedef const TValue cTValue; + +#define tvref(r) (mref(r, TValue)) + +/* More external and GCobj tags for internal objects. */ +#define LAST_TT LUA_TTHREAD +#define LUA_TPROTO (LAST_TT+1) +#define LUA_TCDATA (LAST_TT+2) + +/* Internal object tags. +** +** Format for 32 bit GC references (!LJ_GC64): +** +** Internal tags overlap the MSW of a number object (must be a double). +** Interpreted as a double these are special NaNs. The FPU only generates +** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available +** for use as internal tags. Small negative numbers are used to shorten the +** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate). 
+** +** ---MSW---.---LSW--- +** primitive types | itype | | +** lightuserdata | itype | void * | (32 bit platforms) +** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers) +** GC objects | itype | GCRef | +** int (LJ_DUALNUM)| itype | int | +** number -------double------ +** +** Format for 64 bit GC references (LJ_GC64): +** +** The upper 13 bits must be 1 (0xfff8...) for a special NaN. The next +** 4 bits hold the internal tag. The lowest 47 bits either hold a pointer, +** a zero-extended 32 bit integer or all bits set to 1 for primitive types. +** +** ------MSW------.------LSW------ +** primitive types |1..1|itype|1..................1| +** GC objects/lightud |1..1|itype|-------GCRef--------| +** int (LJ_DUALNUM) |1..1|itype|0..0|-----int-------| +** number ------------double------------- +** +** ORDER LJ_T +** Primitive types nil/false/true must be first, lightuserdata next. +** GC objects are at the end, table/userdata must be lowest. +** Also check lj_ir.h for similar ordering constraints. +*/ +#define LJ_TNIL (~0u) +#define LJ_TFALSE (~1u) +#define LJ_TTRUE (~2u) +#define LJ_TLIGHTUD (~3u) +#define LJ_TSTR (~4u) +#define LJ_TUPVAL (~5u) +#define LJ_TTHREAD (~6u) +#define LJ_TPROTO (~7u) +#define LJ_TFUNC (~8u) +#define LJ_TTRACE (~9u) +#define LJ_TCDATA (~10u) +#define LJ_TTAB (~11u) +#define LJ_TUDATA (~12u) +/* This is just the canonical number type used in some places. */ +#define LJ_TNUMX (~13u) + +/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */ +#if LJ_64 && !LJ_GC64 +#define LJ_TISNUM 0xfffeffffu +#else +#define LJ_TISNUM LJ_TNUMX +#endif +#define LJ_TISTRUECOND LJ_TFALSE +#define LJ_TISPRI LJ_TTRUE +#define LJ_TISGCV (LJ_TSTR+1) +#define LJ_TISTABUD LJ_TTAB + +#if LJ_GC64 +#define LJ_GCVMASK (((uint64_t)1 << 47) - 1) +#endif + +/* -- String object ------------------------------------------------------- */ + +/* String object header. String payload follows. */ +typedef struct GCstr { + GCHeader; + uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */ + uint8_t unused; + MSize hash; /* Hash of string. */ + MSize len; /* Size of string. */ +} GCstr; + +#define strref(r) (&gcref((r))->str) +#define strdata(s) ((const char *)((s)+1)) +#define strdatawr(s) ((char *)((s)+1)) +#define strVdata(o) strdata(strV(o)) +#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1) + +/* -- Userdata object ----------------------------------------------------- */ + +/* Userdata object. Payload follows. */ +typedef struct GCudata { + GCHeader; + uint8_t udtype; /* Userdata type. */ + uint8_t unused2; + GCRef env; /* Should be at same offset in GCfunc. */ + MSize len; /* Size of payload. */ + GCRef metatable; /* Must be at same offset in GCtab. */ + uint32_t align1; /* To force 8 byte alignment of the payload. */ +} GCudata; + +/* Userdata types. */ +enum { + UDTYPE_USERDATA, /* Regular userdata. */ + UDTYPE_IO_FILE, /* I/O library FILE. */ + UDTYPE_FFI_CLIB, /* FFI C library namespace. */ + UDTYPE__MAX +}; + +#define uddata(u) ((void *)((u)+1)) +#define sizeudata(u) (sizeof(struct GCudata)+(u)->len) + +/* -- C data object ------------------------------------------------------- */ + +/* C data object. Payload follows. */ +typedef struct GCcdata { + GCHeader; + uint16_t ctypeid; /* C type ID. */ +} GCcdata; + +/* Prepended to variable-sized or realigned C data objects. */ +typedef struct GCcdataVar { + uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */ + uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). 
*/ + MSize len; /* Size of payload. */ +} GCcdataVar; + +#define cdataptr(cd) ((void *)((cd)+1)) +#define cdataisv(cd) ((cd)->marked & 0x80) +#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar))) +#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len) +#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra) +#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset)) + +/* -- Prototype object ---------------------------------------------------- */ + +#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef)) +#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1)) + +typedef struct GCproto { + GCHeader; + uint8_t numparams; /* Number of parameters. */ + uint8_t framesize; /* Fixed frame size. */ + MSize sizebc; /* Number of bytecode instructions. */ +#if LJ_GC64 + uint32_t unused_gc64; +#endif + GCRef gclist; + MRef k; /* Split constant array (points to the middle). */ + MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */ + MSize sizekgc; /* Number of collectable constants. */ + MSize sizekn; /* Number of lua_Number constants. */ + MSize sizept; /* Total size including colocated arrays. */ + uint8_t sizeuv; /* Number of upvalues. */ + uint8_t flags; /* Miscellaneous flags (see below). */ + uint16_t trace; /* Anchor for chain of root traces. */ + /* ------ The following fields are for debugging/tracebacks only ------ */ + GCRef chunkname; /* Name of the chunk this function was defined in. */ + BCLine firstline; /* First line of the function definition. */ + BCLine numline; /* Number of lines for the function definition. */ + MRef lineinfo; /* Compressed map from bytecode ins. to source line. */ + MRef uvinfo; /* Upvalue names. */ + MRef varinfo; /* Names and compressed extents of local variables. */ +} GCproto; + +/* Flags for prototype. */ +#define PROTO_CHILD 0x01 /* Has child prototypes. */ +#define PROTO_VARARG 0x02 /* Vararg function. */ +#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */ +#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */ +#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */ +/* Only used during parsing. */ +#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */ +#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */ +/* Top bits used for counting created closures. */ +#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */ +#define PROTO_CLC_BITS 3 +#define PROTO_CLC_POLY (3*PROTO_CLCOUNT) /* Polymorphic threshold. */ + +#define PROTO_UV_LOCAL 0x8000 /* Upvalue for local slot. */ +#define PROTO_UV_IMMUTABLE 0x4000 /* Immutable upvalue. */ + +#define proto_kgc(pt, idx) \ + check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \ + gcref(mref((pt)->k, GCRef)[(idx)])) +#define proto_knumtv(pt, idx) \ + check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)]) +#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto))) +#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt))) +#define proto_uv(pt) (mref((pt)->uv, uint16_t)) + +#define proto_chunkname(pt) (strref((pt)->chunkname)) +#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt)))) +#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void)) +#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t)) +#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t)) + +/* -- Upvalue object ------------------------------------------------------ */ + +typedef struct GCupval { + GCHeader; + uint8_t closed; /* Set if closed (i.e. 
uv->v == &uv->u.value). */ + uint8_t immutable; /* Immutable value. */ + union { + TValue tv; /* If closed: the value itself. */ + struct { /* If open: double linked list, anchored at thread. */ + GCRef prev; + GCRef next; + }; + }; + MRef v; /* Points to stack slot (open) or above (closed). */ + uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */ +} GCupval; + +#define uvprev(uv_) (&gcref((uv_)->prev)->uv) +#define uvnext(uv_) (&gcref((uv_)->next)->uv) +#define uvval(uv_) (mref((uv_)->v, TValue)) + +/* -- Function object (closures) ------------------------------------------ */ + +/* Common header for functions. env should be at same offset in GCudata. */ +#define GCfuncHeader \ + GCHeader; uint8_t ffid; uint8_t nupvalues; \ + GCRef env; GCRef gclist; MRef pc + +typedef struct GCfuncC { + GCfuncHeader; + lua_CFunction f; /* C function to be called. */ + TValue upvalue[1]; /* Array of upvalues (TValue). */ +} GCfuncC; + +typedef struct GCfuncL { + GCfuncHeader; + GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */ +} GCfuncL; + +typedef union GCfunc { + GCfuncC c; + GCfuncL l; +} GCfunc; + +#define FF_LUA 0 +#define FF_C 1 +#define isluafunc(fn) ((fn)->c.ffid == FF_LUA) +#define iscfunc(fn) ((fn)->c.ffid == FF_C) +#define isffunc(fn) ((fn)->c.ffid > FF_C) +#define funcproto(fn) \ + check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto))) +#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n)) +#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n)) + +/* -- Table object -------------------------------------------------------- */ + +/* Hash node. */ +typedef struct Node { + TValue val; /* Value object. Must be first field. */ + TValue key; /* Key object. */ + MRef next; /* Hash chain. */ +#if !LJ_GC64 + MRef freetop; /* Top of free elements (stored in t->node[0]). */ +#endif +} Node; + +LJ_STATIC_ASSERT(offsetof(Node, val) == 0); + +typedef struct GCtab { + GCHeader; + uint8_t nomm; /* Negative cache for fast metamethods. */ + int8_t colo; /* Array colocation. */ + MRef array; /* Array part. */ + GCRef gclist; + GCRef metatable; /* Must be at same offset in GCudata. */ + MRef node; /* Hash part. */ + uint32_t asize; /* Size of array part (keys [0, asize-1]). */ + uint32_t hmask; /* Hash part mask (size of hash part - 1). */ +#if LJ_GC64 + MRef freetop; /* Top of free elements. */ +#endif +} GCtab; + +#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab)) +#define tabref(r) (&gcref((r))->tab) +#define noderef(r) (mref((r), Node)) +#define nextnode(n) (mref((n)->next, Node)) +#if LJ_GC64 +#define getfreetop(t, n) (noderef((t)->freetop)) +#define setfreetop(t, n, v) (setmref((t)->freetop, (v))) +#else +#define getfreetop(t, n) (noderef((n)->freetop)) +#define setfreetop(t, n, v) (setmref((n)->freetop, (v))) +#endif + +/* -- State objects ------------------------------------------------------- */ + +/* VM states. */ +enum { + LJ_VMST_INTERP, /* Interpreter. */ + LJ_VMST_C, /* C function. */ + LJ_VMST_GC, /* Garbage collector. */ + LJ_VMST_EXIT, /* Trace exit handler. */ + LJ_VMST_RECORD, /* Trace recorder. */ + LJ_VMST_OPT, /* Optimizer. */ + LJ_VMST_ASM, /* Assembler. */ + LJ_VMST__MAX +}; + +#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st) + +/* Metamethods. 
ORDER MM */ +#ifdef LJ_HASFFI +#define MMDEF_FFI(_) _(new) +#else +#define MMDEF_FFI(_) +#endif + +#if LJ_52 || LJ_HASFFI +#define MMDEF_PAIRS(_) _(pairs) _(ipairs) +#else +#define MMDEF_PAIRS(_) +#define MM_pairs 255 +#define MM_ipairs 255 +#endif + +#define MMDEF(_) \ + _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \ + /* Only the above (fast) metamethods are negative cached (max. 8). */ \ + _(lt) _(le) _(concat) _(call) \ + /* The following must be in ORDER ARITH. */ \ + _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \ + /* The following are used in the standard libraries. */ \ + _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_) + +typedef enum { +#define MMENUM(name) MM_##name, +MMDEF(MMENUM) +#undef MMENUM + MM__MAX, + MM____ = MM__MAX, + MM_FAST = MM_len +} MMS; + +/* GC root IDs. */ +typedef enum { + GCROOT_MMNAME, /* Metamethod names. */ + GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1, + GCROOT_BASEMT, /* Metatables for base types. */ + GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX, + GCROOT_IO_INPUT, /* Userdata for default I/O input file. */ + GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */ + GCROOT_MAX +} GCRootID; + +#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)]) +#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)]) +#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)])) + +typedef struct GCState { + GCSize total; /* Memory currently allocated. */ + GCSize threshold; /* Memory threshold. */ + uint8_t currentwhite; /* Current white color. */ + uint8_t state; /* GC state. */ + uint8_t nocdatafin; /* No cdata finalizer called. */ + uint8_t unused2; + MSize sweepstr; /* Sweep position in string table. */ + GCRef root; /* List of all collectable objects. */ + MRef sweep; /* Sweep position in root list. */ + GCRef gray; /* List of gray objects. */ + GCRef grayagain; /* List of objects for atomic traversal. */ + GCRef weak; /* List of weak tables (to be cleared). */ + GCRef mmudata; /* List of userdata (to be finalized). */ + GCSize debt; /* Debt (how much GC is behind schedule). */ + GCSize estimate; /* Estimate of memory actually in use. */ + MSize stepmul; /* Incremental GC step granularity. */ + MSize pause; /* Pause between successive GC cycles. */ +} GCState; + +/* Global state, shared by all threads of a Lua universe. */ +typedef struct global_State { + GCRef *strhash; /* String hash table (hash chain anchors). */ + MSize strmask; /* String hash mask (size of hash table - 1). */ + MSize strnum; /* Number of strings in hash table. */ + lua_Alloc allocf; /* Memory allocator. */ + void *allocd; /* Memory allocator data. */ + GCState gc; /* Garbage collector. */ + volatile int32_t vmstate; /* VM state or current JIT code trace number. */ + SBuf tmpbuf; /* Temporary string buffer. */ + GCstr strempty; /* Empty string. */ + uint8_t stremptyz; /* Zero terminator of empty string. */ + uint8_t hookmask; /* Hook mask. */ + uint8_t dispatchmode; /* Dispatch mode. */ + uint8_t vmevmask; /* VM event mask. */ + GCRef mainthref; /* Link to main thread. */ + TValue registrytv; /* Anchor for registry. */ + TValue tmptv, tmptv2; /* Temporary TValues. */ + Node nilnode; /* Fallback 1-element hash part (nil key and value). */ + GCupval uvhead; /* Head of double-linked list of all open upvalues. */ + int32_t hookcount; /* Instruction hook countdown. */ + int32_t hookcstart; /* Start count for instruction hook counter. */ + lua_Hook hookf; /* Hook function. */ + lua_CFunction wrapf; /* Wrapper for C function calls. 
*/ + lua_CFunction panic; /* Called as a last resort for errors. */ + BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */ + BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */ + GCRef cur_L; /* Currently executing lua_State. */ + MRef jit_base; /* Current JIT code L->base or NULL. */ + MRef ctype_state; /* Pointer to C type state. */ + GCRef gcroot[GCROOT_MAX]; /* GC roots. */ +} global_State; + +#define mainthread(g) (&gcref(g->mainthref)->th) +#define niltv(L) \ + check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val) +#define niltvg(g) \ + check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val) + +/* Hook management. Hook event masks are defined in lua.h. */ +#define HOOK_EVENTMASK 0x0f +#define HOOK_ACTIVE 0x10 +#define HOOK_ACTIVE_SHIFT 4 +#define HOOK_VMEVENT 0x20 +#define HOOK_GC 0x40 +#define HOOK_PROFILE 0x80 +#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE) +#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE) +#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC)) +#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT)) +#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE) +#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK) +#define hook_restore(g, h) \ + ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h)) + +/* Per-thread state object. */ +struct lua_State { + GCHeader; + uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */ + uint8_t status; /* Thread status. */ + MRef glref; /* Link to global state. */ + GCRef gclist; /* GC chain. */ + TValue *base; /* Base of currently executing function. */ + TValue *top; /* First free slot in the stack. */ + MRef maxstack; /* Last free slot in the stack. */ + MRef stack; /* Stack base. */ + GCRef openupval; /* List of open upvalues in the stack. */ + GCRef env; /* Thread environment (table of globals). */ + void *cframe; /* End of C stack frame chain. */ + MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */ +}; + +#define G(L) (mref(L->glref, global_State)) +#define registry(L) (&G(L)->registrytv) + +/* Macros to access the currently executing (Lua) function. */ +#if LJ_GC64 +#define curr_func(L) (&gcval(L->base-2)->fn) +#elif LJ_FR2 +#define curr_func(L) (&gcref((L->base-2)->gcr)->fn) +#else +#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn) +#endif +#define curr_funcisL(L) (isluafunc(curr_func(L))) +#define curr_proto(L) (funcproto(curr_func(L))) +#define curr_topL(L) (L->base + curr_proto(L)->framesize) +#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top) + +/* -- GC object definition and conversions -------------------------------- */ + +/* GC header for generic access to common fields of GC objects. */ +typedef struct GChead { + GCHeader; + uint8_t unused1; + uint8_t unused2; + GCRef env; + GCRef gclist; + GCRef metatable; +} GChead; + +/* The env field SHOULD be at the same offset for all GC objects. */ +LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env)); +LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env)); + +/* The metatable field MUST be at the same offset for all GC objects. */ +LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable)); +LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable)); + +/* The gclist field MUST be at the same offset for all GC objects. 
*/ +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist)); +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist)); +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist)); +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist)); + +typedef union GCobj { + GChead gch; + GCstr str; + GCupval uv; + lua_State th; + GCproto pt; + GCfunc fn; + GCcdata cd; + GCtab tab; + GCudata ud; +} GCobj; + +/* Macros to convert a GCobj pointer into a specific value. */ +#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str) +#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv) +#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th) +#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt) +#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn) +#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd) +#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab) +#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud) + +/* Macro to convert any collectable object into a GCobj pointer. */ +#define obj2gco(v) ((GCobj *)(v)) + +/* -- TValue getters/setters ---------------------------------------------- */ + +#ifdef LUA_USE_ASSERT +#include "lj_gc.h" +#endif + +/* Macros to test types. */ +#if LJ_GC64 +#define itype(o) ((uint32_t)((o)->it64 >> 47)) +#define tvisnil(o) ((o)->it64 == -1) +#else +#define itype(o) ((o)->it) +#define tvisnil(o) (itype(o) == LJ_TNIL) +#endif +#define tvisfalse(o) (itype(o) == LJ_TFALSE) +#define tvistrue(o) (itype(o) == LJ_TTRUE) +#define tvisbool(o) (tvisfalse(o) || tvistrue(o)) +#if LJ_64 && !LJ_GC64 +#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2) +#else +#define tvislightud(o) (itype(o) == LJ_TLIGHTUD) +#endif +#define tvisstr(o) (itype(o) == LJ_TSTR) +#define tvisfunc(o) (itype(o) == LJ_TFUNC) +#define tvisthread(o) (itype(o) == LJ_TTHREAD) +#define tvisproto(o) (itype(o) == LJ_TPROTO) +#define tviscdata(o) (itype(o) == LJ_TCDATA) +#define tvistab(o) (itype(o) == LJ_TTAB) +#define tvisudata(o) (itype(o) == LJ_TUDATA) +#define tvisnumber(o) (itype(o) <= LJ_TISNUM) +#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM) +#define tvisnum(o) (itype(o) < LJ_TISNUM) + +#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND) +#define tvispri(o) (itype(o) >= LJ_TISPRI) +#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */ +#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV)) + +/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */ +#define tvisnan(o) ((o)->n != (o)->n) +#if LJ_64 +#define tviszero(o) (((o)->u64 << 1) == 0) +#else +#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0) +#endif +#define tvispzero(o) ((o)->u64 == 0) +#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000)) +#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000)) +#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64) + +/* Macros to convert type ids. */ +#if LJ_64 && !LJ_GC64 +#define itypemap(o) \ + (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o)) +#else +#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o)) +#endif + +/* Macros to get tagged values. 
*/ +#if LJ_GC64 +#define gcval(o) ((GCobj *)(gcrefu((o)->gcr) & LJ_GCVMASK)) +#else +#define gcval(o) (gcref((o)->gcr)) +#endif +#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - itype(o))) +#if LJ_64 +#define lightudV(o) \ + check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff))) +#else +#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void)) +#endif +#define gcV(o) check_exp(tvisgcv(o), gcval(o)) +#define strV(o) check_exp(tvisstr(o), &gcval(o)->str) +#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn) +#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th) +#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt) +#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd) +#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab) +#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud) +#define numV(o) check_exp(tvisnum(o), (o)->n) +#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i) + +/* Macros to set tagged values. */ +#if LJ_GC64 +#define setitype(o, i) ((o)->it = ((i) << 15)) +#define setnilV(o) ((o)->it64 = -1) +#define setpriV(o, x) ((o)->it64 = (int64_t)~((uint64_t)~(x)<<47)) +#define setboolV(o, x) ((o)->it64 = (int64_t)~((uint64_t)((x)+1)<<47)) +#else +#define setitype(o, i) ((o)->it = (i)) +#define setnilV(o) ((o)->it = LJ_TNIL) +#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x)) +#define setpriV(o, i) (setitype((o), (i))) +#endif + +static LJ_AINLINE void setlightudV(TValue *o, void *p) +{ +#if LJ_GC64 + o->u64 = (uint64_t)p | (((uint64_t)LJ_TLIGHTUD) << 47); +#elif LJ_64 + o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48); +#else + setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD); +#endif +} + +#if LJ_64 +#define checklightudptr(L, p) \ + (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p)) +#else +#define checklightudptr(L, p) (p) +#endif + +#if LJ_FR2 +#define setcont(o, f) ((o)->u64 = (uint64_t)(uintptr_t)(void *)(f)) +#elif LJ_64 +#define setcont(o, f) \ + ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin) +#else +#define setcont(o, f) setlightudV((o), (void *)(f)) +#endif + +#define tvchecklive(L, o) \ + UNUSED(L), lua_assert(!tvisgcv(o) || \ + ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o)))) + +static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype) +{ +#if LJ_GC64 + setgcreft(o->gcr, v, itype); +#else + setgcref(o->gcr, v); setitype(o, itype); +#endif +} + +static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t it) +{ + setgcVraw(o, v, it); tvchecklive(L, o); +} + +#define define_setV(name, type, tag) \ +static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \ +{ \ + setgcV(L, o, obj2gco(v), tag); \ +} +define_setV(setstrV, GCstr, LJ_TSTR) +define_setV(setthreadV, lua_State, LJ_TTHREAD) +define_setV(setprotoV, GCproto, LJ_TPROTO) +define_setV(setfuncV, GCfunc, LJ_TFUNC) +define_setV(setcdataV, GCcdata, LJ_TCDATA) +define_setV(settabV, GCtab, LJ_TTAB) +define_setV(setudataV, GCudata, LJ_TUDATA) + +#define setnumV(o, x) ((o)->n = (x)) +#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000)) +#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000)) +#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000)) + +static LJ_AINLINE void setintV(TValue *o, int32_t i) +{ +#if LJ_DUALNUM + o->i = (uint32_t)i; setitype(o, LJ_TISNUM); +#else + o->n = (lua_Number)i; +#endif +} + +static LJ_AINLINE void setint64V(TValue *o, int64_t i) +{ + if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i)) + setintV(o, (int32_t)i); + else + setnumV(o, (lua_Number)i); +} + 
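/*
** Illustrative sketch only -- not part of the imported LuaJIT sources or of
** this patch. It mirrors the narrowing rule of setintV/setint64V above under
** the assumption that LJ_DUALNUM is enabled: a 64 bit value is kept in
** integer form only if it round-trips through int32_t, otherwise the slot
** widens to a lua_Number (double). Slot, slot_set_int64 and the fields below
** are simplified stand-ins, not the real TValue/itype machinery.
*/
#include <stdint.h>
#include <stdio.h>

typedef struct Slot {       /* Simplified stand-in for a dual-number slot. */
  int is_int;               /* 1: integer payload, 0: double payload. */
  int32_t i;                /* Integer value (valid if is_int). */
  double n;                 /* Number value (valid if !is_int). */
} Slot;

/* Same decision as setint64V: narrow only if the value survives a
** round-trip through int32_t; otherwise store it as a double. */
static void slot_set_int64(Slot *s, int64_t v)
{
  if (v == (int64_t)(int32_t)v) {
    s->is_int = 1; s->i = (int32_t)v;
  } else {
    s->is_int = 0; s->n = (double)v;   /* Widen instead of truncating. */
  }
}

int main(void)
{
  Slot a, b;
  slot_set_int64(&a, 42);                 /* Fits int32: kept as integer. */
  slot_set_int64(&b, ((int64_t)1) << 40); /* Too large: stored as double. */
  printf("a: is_int=%d i=%d\n", a.is_int, (int)a.i);
  printf("b: is_int=%d n=%.1f\n", b.is_int, b.n);
  return 0;
}
/* The round-trip test keeps small integers exact while anything outside the
** int32_t range transparently falls back to the number representation,
** which is the point of the dual-number design sketched here. */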
+#if LJ_64 +#define setintptrV(o, i) setint64V((o), (i)) +#else +#define setintptrV(o, i) setintV((o), (i)) +#endif + +/* Copy tagged values. */ +static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2) +{ + *o1 = *o2; tvchecklive(L, o1); +} + +/* -- Number to integer conversion ---------------------------------------- */ + +#if LJ_SOFTFP +LJ_ASMF int32_t lj_vm_tobit(double x); +#endif + +static LJ_AINLINE int32_t lj_num2bit(lua_Number n) +{ +#if LJ_SOFTFP + return lj_vm_tobit(n); +#else + TValue o; + o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */ + return (int32_t)o.u32.lo; +#endif +} + +#define lj_num2int(n) ((int32_t)(n)) + +static LJ_AINLINE uint64_t lj_num2u64(lua_Number n) +{ +#ifdef _MSC_VER + if (n >= 9223372036854775808.0) /* They think it's a feature. */ + return (uint64_t)(int64_t)(n - 18446744073709551616.0); + else +#endif + return (uint64_t)n; +} + +static LJ_AINLINE int32_t numberVint(cTValue *o) +{ + if (LJ_LIKELY(tvisint(o))) + return intV(o); + else + return lj_num2int(numV(o)); +} + +static LJ_AINLINE lua_Number numberVnum(cTValue *o) +{ + if (LJ_UNLIKELY(tvisint(o))) + return (lua_Number)intV(o); + else + return numV(o); +} + +/* -- Miscellaneous object handling --------------------------------------- */ + +/* Names and maps for internal and external object tags. */ +LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1]; +LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1]; + +#define lj_typename(o) (lj_obj_itypename[itypemap(o)]) + +/* Compare two objects without calling metamethods. */ +LJ_FUNC int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2); +LJ_FUNC const void * LJ_FASTCALL lj_obj_ptr(cTValue *o); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_dce.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_dce.c new file mode 100644 index 00000000000..7f1faafe647 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_dce.c @@ -0,0 +1,78 @@ +/* +** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_dce_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Scan through all snapshots and mark all referenced instructions. */ +static void dce_marksnap(jit_State *J) +{ + SnapNo i, nsnap = J->cur.nsnap; + for (i = 0; i < nsnap; i++) { + SnapShot *snap = &J->cur.snap[i]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + IRRef ref = snap_ref(map[n]); + if (ref >= REF_FIRST) + irt_setmark(IR(ref)->t); + } + } +} + +/* Backwards propagate marks. Replace unused instructions with NOPs. */ +static void dce_propagate(jit_State *J) +{ + IRRef1 *pchain[IR__MAX]; + IRRef ins; + uint32_t i; + for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i]; + for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) { + IRIns *ir = IR(ins); + if (irt_ismarked(ir->t)) { + irt_clearmark(ir->t); + pchain[ir->o] = &ir->prev; + } else if (!ir_sideeff(ir)) { + *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */ + ir->t.irt = IRT_NIL; + ir->o = IR_NOP; /* Replace instruction with NOP. 
*/ + ir->op1 = ir->op2 = 0; + ir->prev = 0; + continue; + } + if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t); + if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t); + } +} + +/* Dead Code Elimination. +** +** First backpropagate marks for all used instructions. Then replace +** the unused ones with a NOP. Note that compressing the IR to eliminate +** the NOPs does not pay off. +*/ +void lj_opt_dce(jit_State *J) +{ + if ((J->flags & JIT_F_OPT_DCE)) { + dce_marksnap(J); + dce_propagate(J); + memset(J->bpropcache, 0, sizeof(J->bpropcache)); /* Invalidate cache. */ + } +} + +#undef IR + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_fold.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_fold.c new file mode 100644 index 00000000000..f809a991a73 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_fold.c @@ -0,0 +1,2488 @@ +/* +** FOLD: Constant Folding, Algebraic Simplifications and Reassociation. +** ABCelim: Array Bounds Check Elimination. +** CSE: Common-Subexpression Elimination. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_fold_c +#define LUA_CORE + +#include + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_carith.h" +#endif +#include "lj_vm.h" +#include "lj_strscan.h" +#include "lj_strfmt.h" + +/* Here's a short description how the FOLD engine processes instructions: +** +** The FOLD engine receives a single instruction stored in fins (J->fold.ins). +** The instruction and its operands are used to select matching fold rules. +** These are applied iteratively until a fixed point is reached. +** +** The 8 bit opcode of the instruction itself plus the opcodes of the +** two instructions referenced by its operands form a 24 bit key +** 'ins left right' (unused operands -> 0, literals -> lowest 8 bits). +** +** This key is used for partial matching against the fold rules. The +** left/right operand fields of the key are successively masked with +** the 'any' wildcard, from most specific to least specific: +** +** ins left right +** ins any right +** ins left any +** ins any any +** +** The masked key is used to lookup a matching fold rule in a semi-perfect +** hash table. If a matching rule is found, the related fold function is run. +** Multiple rules can share the same fold function. A fold rule may return +** one of several special values: +** +** - NEXTFOLD means no folding was applied, because an additional test +** inside the fold function failed. Matching continues against less +** specific fold rules. Finally the instruction is passed on to CSE. +** +** - RETRYFOLD means the instruction was modified in-place. Folding is +** retried as if this instruction had just been received. +** +** All other return values are terminal actions -- no further folding is +** applied: +** +** - INTFOLD(i) returns a reference to the integer constant i. +** +** - LEFTFOLD and RIGHTFOLD return the left/right operand reference +** without emitting an instruction. +** +** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit +** it without passing through any further optimizations. +** +** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have +** no result (e.g. guarded assertions): FAILFOLD means the guard would +** always fail, i.e. the current trace is pointless. 
DROPFOLD means +** the guard is always true and has been eliminated. CONDFOLD is a +** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail). +** +** - Any other return value is interpreted as an IRRef or TRef. This +** can be a reference to an existing or a newly created instruction. +** Only the least-significant 16 bits (IRRef1) are used to form a TRef +** which is finally returned to the caller. +** +** The FOLD engine receives instructions both from the trace recorder and +** substituted instructions from LOOP unrolling. This means all types +** of instructions may end up here, even though the recorder bypasses +** FOLD in some cases. Thus all loads, stores and allocations must have +** an any/any rule to avoid being passed on to CSE. +** +** Carefully read the following requirements before adding or modifying +** any fold rules: +** +** Requirement #1: All fold rules must preserve their destination type. +** +** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result). +** Never use lj_ir_knumint() which can have either a KINT or KNUM result. +** +** Requirement #2: Fold rules should not create *new* instructions which +** reference operands *across* PHIs. +** +** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the +** left operand is a PHI. Then fleft->op1 would point across the PHI +** frontier to an invariant instruction. Adding a PHI for this instruction +** would be counterproductive. The solution is to add a barrier which +** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case. +** The only exception is for recurrences with high latencies like +** repeated int->num->int conversions. +** +** One could relax this condition a bit if the referenced instruction is +** a PHI, too. But this often leads to worse code due to excessive +** register shuffling. +** +** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though. +** Even returning fleft->op1 would be ok, because a new PHI will added, +** if needed. But again, this leads to excessive register shuffling and +** should be avoided. +** +** Requirement #3: The set of all fold rules must be monotonic to guarantee +** termination. +** +** The goal is optimization, so one primarily wants to add strength-reducing +** rules. This means eliminating an instruction or replacing an instruction +** with one or more simpler instructions. Don't add fold rules which point +** into the other direction. +** +** Some rules (like commutativity) do not directly reduce the strength of +** an instruction, but enable other fold rules (e.g. by moving constants +** to the right operand). These rules must be made unidirectional to avoid +** cycles. +** +** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) +#define fleft (&J->fold.left) +#define fright (&J->fold.right) +#define knumleft (ir_knum(fleft)->n) +#define knumright (ir_knum(fright)->n) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Fold function type. Fastcall on x86 significantly reduces their size. */ +typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J); + +/* Macros for the fold specs, so buildvm can recognize them. 
*/ +#define LJFOLD(x) +#define LJFOLDX(x) +#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J) +/* Note: They must be at the start of a line or buildvm ignores them! */ + +/* Barrier to prevent using operands across PHIs. */ +#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD + +/* Barrier to prevent folding across a GC step. +** GC steps can only happen at the head of a trace and at LOOP. +** And the GC is only driven forward if there's at least one allocation. +*/ +#define gcstep_barrier(J, ref) \ + ((ref) < J->chain[IR_LOOP] && \ + (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \ + J->chain[IR_TNEW] || J->chain[IR_TDUP] || \ + J->chain[IR_CNEW] || J->chain[IR_CNEWI] || \ + J->chain[IR_BUFSTR] || J->chain[IR_TOSTR] || J->chain[IR_CALLA])) + +/* -- Constant folding for FP numbers ------------------------------------- */ + +LJFOLD(ADD KNUM KNUM) +LJFOLD(SUB KNUM KNUM) +LJFOLD(MUL KNUM KNUM) +LJFOLD(DIV KNUM KNUM) +LJFOLD(NEG KNUM KNUM) +LJFOLD(ABS KNUM KNUM) +LJFOLD(ATAN2 KNUM KNUM) +LJFOLD(LDEXP KNUM KNUM) +LJFOLD(MIN KNUM KNUM) +LJFOLD(MAX KNUM KNUM) +LJFOLDF(kfold_numarith) +{ + lua_Number a = knumleft; + lua_Number b = knumright; + lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD); + return lj_ir_knum(J, y); +} + +LJFOLD(LDEXP KNUM KINT) +LJFOLDF(kfold_ldexp) +{ +#if LJ_TARGET_X86ORX64 + UNUSED(J); + return NEXTFOLD; +#else + return lj_ir_knum(J, ldexp(knumleft, fright->i)); +#endif +} + +LJFOLD(FPMATH KNUM any) +LJFOLDF(kfold_fpmath) +{ + lua_Number a = knumleft; + lua_Number y = lj_vm_foldfpm(a, fins->op2); + return lj_ir_knum(J, y); +} + +LJFOLD(POW KNUM KINT) +LJFOLDF(kfold_numpow) +{ + lua_Number a = knumleft; + lua_Number b = (lua_Number)fright->i; + lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD); + return lj_ir_knum(J, y); +} + +/* Must not use kfold_kref for numbers (could be NaN). */ +LJFOLD(EQ KNUM KNUM) +LJFOLD(NE KNUM KNUM) +LJFOLD(LT KNUM KNUM) +LJFOLD(GE KNUM KNUM) +LJFOLD(LE KNUM KNUM) +LJFOLD(GT KNUM KNUM) +LJFOLD(ULT KNUM KNUM) +LJFOLD(UGE KNUM KNUM) +LJFOLD(ULE KNUM KNUM) +LJFOLD(UGT KNUM KNUM) +LJFOLDF(kfold_numcomp) +{ + return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o)); +} + +/* -- Constant folding for 32 bit integers -------------------------------- */ + +static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op) +{ + switch (op) { + case IR_ADD: k1 += k2; break; + case IR_SUB: k1 -= k2; break; + case IR_MUL: k1 *= k2; break; + case IR_MOD: k1 = lj_vm_modi(k1, k2); break; + case IR_NEG: k1 = -k1; break; + case IR_BAND: k1 &= k2; break; + case IR_BOR: k1 |= k2; break; + case IR_BXOR: k1 ^= k2; break; + case IR_BSHL: k1 <<= (k2 & 31); break; + case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break; + case IR_BSAR: k1 >>= (k2 & 31); break; + case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break; + case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break; + case IR_MIN: k1 = k1 < k2 ? k1 : k2; break; + case IR_MAX: k1 = k1 > k2 ? 
k1 : k2; break; + default: lua_assert(0); break; + } + return k1; +} + +LJFOLD(ADD KINT KINT) +LJFOLD(SUB KINT KINT) +LJFOLD(MUL KINT KINT) +LJFOLD(MOD KINT KINT) +LJFOLD(NEG KINT KINT) +LJFOLD(BAND KINT KINT) +LJFOLD(BOR KINT KINT) +LJFOLD(BXOR KINT KINT) +LJFOLD(BSHL KINT KINT) +LJFOLD(BSHR KINT KINT) +LJFOLD(BSAR KINT KINT) +LJFOLD(BROL KINT KINT) +LJFOLD(BROR KINT KINT) +LJFOLD(MIN KINT KINT) +LJFOLD(MAX KINT KINT) +LJFOLDF(kfold_intarith) +{ + return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o)); +} + +LJFOLD(ADDOV KINT KINT) +LJFOLD(SUBOV KINT KINT) +LJFOLD(MULOV KINT KINT) +LJFOLDF(kfold_intovarith) +{ + lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i, + fins->o - IR_ADDOV); + int32_t k = lj_num2int(n); + if (n != (lua_Number)k) + return FAILFOLD; + return INTFOLD(k); +} + +LJFOLD(BNOT KINT) +LJFOLDF(kfold_bnot) +{ + return INTFOLD(~fleft->i); +} + +LJFOLD(BSWAP KINT) +LJFOLDF(kfold_bswap) +{ + return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i)); +} + +LJFOLD(LT KINT KINT) +LJFOLD(GE KINT KINT) +LJFOLD(LE KINT KINT) +LJFOLD(GT KINT KINT) +LJFOLD(ULT KINT KINT) +LJFOLD(UGE KINT KINT) +LJFOLD(ULE KINT KINT) +LJFOLD(UGT KINT KINT) +LJFOLD(ABC KINT KINT) +LJFOLDF(kfold_intcomp) +{ + int32_t a = fleft->i, b = fright->i; + switch ((IROp)fins->o) { + case IR_LT: return CONDFOLD(a < b); + case IR_GE: return CONDFOLD(a >= b); + case IR_LE: return CONDFOLD(a <= b); + case IR_GT: return CONDFOLD(a > b); + case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b); + case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b); + case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b); + case IR_ABC: + case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b); + default: lua_assert(0); return FAILFOLD; + } +} + +LJFOLD(UGE any KINT) +LJFOLDF(kfold_intcomp0) +{ + if (fright->i == 0) + return DROPFOLD; + return NEXTFOLD; +} + +/* -- Constant folding for 64 bit integers -------------------------------- */ + +static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) +{ + switch (op) { +#if LJ_HASFFI + case IR_ADD: k1 += k2; break; + case IR_SUB: k1 -= k2; break; + case IR_MUL: k1 *= k2; break; + case IR_BAND: k1 &= k2; break; + case IR_BOR: k1 |= k2; break; + case IR_BXOR: k1 ^= k2; break; +#endif + default: UNUSED(k2); lua_assert(0); break; + } + return k1; +} + +LJFOLD(ADD KINT64 KINT64) +LJFOLD(SUB KINT64 KINT64) +LJFOLD(MUL KINT64 KINT64) +LJFOLD(BAND KINT64 KINT64) +LJFOLD(BOR KINT64 KINT64) +LJFOLD(BXOR KINT64 KINT64) +LJFOLDF(kfold_int64arith) +{ + return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64, + ir_k64(fright)->u64, (IROp)fins->o)); +} + +LJFOLD(DIV KINT64 KINT64) +LJFOLD(MOD KINT64 KINT64) +LJFOLD(POW KINT64 KINT64) +LJFOLDF(kfold_int64arith2) +{ +#if LJ_HASFFI + uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64; + if (irt_isi64(fins->t)) { + k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) : + fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) : + lj_carith_powi64((int64_t)k1, (int64_t)k2); + } else { + k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) : + fins->o == IR_MOD ? 
lj_carith_modu64(k1, k2) : + lj_carith_powu64(k1, k2); + } + return INT64FOLD(k1); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(BSHL KINT64 KINT) +LJFOLD(BSHR KINT64 KINT) +LJFOLD(BSAR KINT64 KINT) +LJFOLD(BROL KINT64 KINT) +LJFOLD(BROR KINT64 KINT) +LJFOLDF(kfold_int64shift) +{ +#if LJ_HASFFI + uint64_t k = ir_k64(fleft)->u64; + int32_t sh = (fright->i & 63); + return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL)); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(BNOT KINT64) +LJFOLDF(kfold_bnot64) +{ +#if LJ_HASFFI + return INT64FOLD(~ir_k64(fleft)->u64); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(BSWAP KINT64) +LJFOLDF(kfold_bswap64) +{ +#if LJ_HASFFI + return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64)); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(LT KINT64 KINT64) +LJFOLD(GE KINT64 KINT64) +LJFOLD(LE KINT64 KINT64) +LJFOLD(GT KINT64 KINT64) +LJFOLD(ULT KINT64 KINT64) +LJFOLD(UGE KINT64 KINT64) +LJFOLD(ULE KINT64 KINT64) +LJFOLD(UGT KINT64 KINT64) +LJFOLDF(kfold_int64comp) +{ +#if LJ_HASFFI + uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64; + switch ((IROp)fins->o) { + case IR_LT: return CONDFOLD(a < b); + case IR_GE: return CONDFOLD(a >= b); + case IR_LE: return CONDFOLD(a <= b); + case IR_GT: return CONDFOLD(a > b); + case IR_ULT: return CONDFOLD((uint64_t)a < (uint64_t)b); + case IR_UGE: return CONDFOLD((uint64_t)a >= (uint64_t)b); + case IR_ULE: return CONDFOLD((uint64_t)a <= (uint64_t)b); + case IR_UGT: return CONDFOLD((uint64_t)a > (uint64_t)b); + default: lua_assert(0); return FAILFOLD; + } +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(UGE any KINT64) +LJFOLDF(kfold_int64comp0) +{ +#if LJ_HASFFI + if (ir_k64(fright)->u64 == 0) + return DROPFOLD; + return NEXTFOLD; +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +/* -- Constant folding for strings ---------------------------------------- */ + +LJFOLD(SNEW KKPTR KINT) +LJFOLDF(kfold_snew_kptr) +{ + GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i); + return lj_ir_kstr(J, s); +} + +LJFOLD(SNEW any KINT) +LJFOLDF(kfold_snew_empty) +{ + if (fright->i == 0) + return lj_ir_kstr(J, &J2G(J)->strempty); + return NEXTFOLD; +} + +LJFOLD(STRREF KGC KINT) +LJFOLDF(kfold_strref) +{ + GCstr *str = ir_kstr(fleft); + lua_assert((MSize)fright->i <= str->len); + return lj_ir_kkptr(J, (char *)strdata(str) + fright->i); +} + +LJFOLD(STRREF SNEW any) +LJFOLDF(kfold_strref_snew) +{ + PHIBARRIER(fleft); + if (irref_isk(fins->op2) && fright->i == 0) { + return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */ + } else { + /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */ + IRIns *ir = IR(fleft->op1); + if (ir->o == IR_STRREF) { + IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */ + PHIBARRIER(ir); + fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */ + fins->op1 = str; + fins->ot = IRT(IR_STRREF, IRT_P32); + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(CALLN CARG IRCALL_lj_str_cmp) +LJFOLDF(kfold_strcmp) +{ + if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) { + GCstr *a = ir_kstr(IR(fleft->op1)); + GCstr *b = ir_kstr(IR(fleft->op2)); + return INTFOLD(lj_str_cmp(a, b)); + } + return NEXTFOLD; +} + +/* -- Constant folding and forwarding for buffers ------------------------- */ + +/* +** Buffer ops perform stores, but their effect is limited to the buffer +** itself. 
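+** (For instance, recording a two-part string concatenation typically
+** yields the chain BUFHDR -> BUFPUT -> BUFPUT -> BUFSTR.)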
Also, buffer ops are chained: a use of an op implies a use of +** all other ops up the chain. Conversely, if an op is unused, all ops +** up the chain can go unsed. This largely eliminates the need to treat +** them as stores. +** +** Alas, treating them as normal (IRM_N) ops doesn't work, because they +** cannot be CSEd in isolation. CSE for IRM_N is implicitly done in LOOP +** or if FOLD is disabled. +** +** The compromise is to declare them as loads, emit them like stores and +** CSE whole chains manually when the BUFSTR is to be emitted. Any chain +** fragments left over from CSE are eliminated by DCE. +*/ + +/* BUFHDR is emitted like a store, see below. */ + +LJFOLD(BUFPUT BUFHDR BUFSTR) +LJFOLDF(bufput_append) +{ + /* New buffer, no other buffer op inbetween and same buffer? */ + if ((J->flags & JIT_F_OPT_FWD) && + !(fleft->op2 & IRBUFHDR_APPEND) && + fleft->prev == fright->op2 && + fleft->op1 == IR(fright->op2)->op1) { + IRRef ref = fins->op1; + IR(ref)->op2 = (fleft->op2 | IRBUFHDR_APPEND); /* Modify BUFHDR. */ + IR(ref)->op1 = fright->op1; + return ref; + } + return EMITFOLD; /* Always emit, CSE later. */ +} + +LJFOLD(BUFPUT any any) +LJFOLDF(bufput_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fright->o == IR_KGC) { + GCstr *s2 = ir_kstr(fright); + if (s2->len == 0) { /* Empty string? */ + return LEFTFOLD; + } else { + if (fleft->o == IR_BUFPUT && irref_isk(fleft->op2) && + !irt_isphi(fleft->t)) { /* Join two constant string puts in a row. */ + GCstr *s1 = ir_kstr(IR(fleft->op2)); + IRRef kref = lj_ir_kstr(J, lj_buf_cat2str(J->L, s1, s2)); + /* lj_ir_kstr() may realloc the IR and invalidates any IRIns *. */ + IR(fins->op1)->op2 = kref; /* Modify previous BUFPUT. */ + return fins->op1; + } + } + } + return EMITFOLD; /* Always emit, CSE later. */ +} + +LJFOLD(BUFSTR any any) +LJFOLDF(bufstr_kfold_cse) +{ + lua_assert(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT || + fleft->o == IR_CALLL); + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { + if (fleft->o == IR_BUFHDR) { /* No put operations? */ + if (!(fleft->op2 & IRBUFHDR_APPEND)) /* Empty buffer? */ + return lj_ir_kstr(J, &J2G(J)->strempty); + fins->op1 = fleft->op1; + fins->op2 = fleft->prev; /* Relies on checks in bufput_append. */ + return CSEFOLD; + } else if (fleft->o == IR_BUFPUT) { + IRIns *irb = IR(fleft->op1); + if (irb->o == IR_BUFHDR && !(irb->op2 & IRBUFHDR_APPEND)) + return fleft->op2; /* Shortcut for a single put operation. */ + } + } + /* Try to CSE the whole chain. */ + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + IRRef ref = J->chain[IR_BUFSTR]; + while (ref) { + IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1); + while (ira->o == irb->o && ira->op2 == irb->op2) { + lua_assert(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT || + ira->o == IR_CALLL || ira->o == IR_CARG); + if (ira->o == IR_BUFHDR && !(ira->op2 & IRBUFHDR_APPEND)) + return ref; /* CSE succeeded. */ + if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab) + break; + ira = IR(ira->op1); + irb = IR(irb->op1); + } + ref = irs->prev; + } + } + return EMITFOLD; /* No CSE possible. 
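+  ** (Reached when no previously emitted BUFSTR chain matches this one
+  ** link for link, or when CSE is disabled.)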
*/ +} + +LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_reverse) +LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_upper) +LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_lower) +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putquoted) +LJFOLDF(bufput_kfold_op) +{ + if (irref_isk(fleft->op2)) { + const CCallInfo *ci = &lj_ir_callinfo[fins->op2]; + SBuf *sb = lj_buf_tmp_(J->L); + sb = ((SBuf * (LJ_FASTCALL *)(SBuf *, GCstr *))ci->func)(sb, + ir_kstr(IR(fleft->op2))); + fins->o = IR_BUFPUT; + fins->op1 = fleft->op1; + fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb)); + return RETRYFOLD; + } + return EMITFOLD; /* Always emit, CSE later. */ +} + +LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_rep) +LJFOLDF(bufput_kfold_rep) +{ + if (irref_isk(fleft->op2)) { + IRIns *irc = IR(fleft->op1); + if (irref_isk(irc->op2)) { + SBuf *sb = lj_buf_tmp_(J->L); + sb = lj_buf_putstr_rep(sb, ir_kstr(IR(irc->op2)), IR(fleft->op2)->i); + fins->o = IR_BUFPUT; + fins->op1 = irc->op1; + fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb)); + return RETRYFOLD; + } + } + return EMITFOLD; /* Always emit, CSE later. */ +} + +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfxint) +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_int) +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_uint) +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum) +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfstr) +LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfchar) +LJFOLDF(bufput_kfold_fmt) +{ + IRIns *irc = IR(fleft->op1); + lua_assert(irref_isk(irc->op2)); /* SFormat must be const. */ + if (irref_isk(fleft->op2)) { + SFormat sf = (SFormat)IR(irc->op2)->i; + IRIns *ira = IR(fleft->op2); + SBuf *sb = lj_buf_tmp_(J->L); + switch (fins->op2) { + case IRCALL_lj_strfmt_putfxint: + sb = lj_strfmt_putfxint(sb, sf, ir_k64(ira)->u64); + break; + case IRCALL_lj_strfmt_putfstr: + sb = lj_strfmt_putfstr(sb, sf, ir_kstr(ira)); + break; + case IRCALL_lj_strfmt_putfchar: + sb = lj_strfmt_putfchar(sb, sf, ira->i); + break; + case IRCALL_lj_strfmt_putfnum_int: + case IRCALL_lj_strfmt_putfnum_uint: + case IRCALL_lj_strfmt_putfnum: + default: { + const CCallInfo *ci = &lj_ir_callinfo[fins->op2]; + sb = ((SBuf * (*)(SBuf *, SFormat, lua_Number))ci->func)(sb, sf, + ir_knum(ira)->n); + break; + } + } + fins->o = IR_BUFPUT; + fins->op1 = irc->op1; + fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb)); + return RETRYFOLD; + } + return EMITFOLD; /* Always emit, CSE later. 
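+  ** (With a constant argument -- the format is always constant here, see
+  ** the assert above -- the branch above has already rewritten the call
+  ** into a plain BUFPUT of the preformatted string, so only non-constant
+  ** arguments fall through to this point.)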
*/ +} + +/* -- Constant folding of pointer arithmetic ------------------------------ */ + +LJFOLD(ADD KGC KINT) +LJFOLD(ADD KGC KINT64) +LJFOLDF(kfold_add_kgc) +{ + GCobj *o = ir_kgc(fleft); +#if LJ_64 + ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64; +#else + ptrdiff_t ofs = fright->i; +#endif +#if LJ_HASFFI + if (irt_iscdata(fleft->t)) { + CType *ct = ctype_raw(ctype_ctsG(J2G(J)), gco2cd(o)->ctypeid); + if (ctype_isnum(ct->info) || ctype_isenum(ct->info) || + ctype_isptr(ct->info) || ctype_isfunc(ct->info) || + ctype_iscomplex(ct->info) || ctype_isvector(ct->info)) + return lj_ir_kkptr(J, (char *)o + ofs); + } +#endif + return lj_ir_kptr(J, (char *)o + ofs); +} + +LJFOLD(ADD KPTR KINT) +LJFOLD(ADD KPTR KINT64) +LJFOLD(ADD KKPTR KINT) +LJFOLD(ADD KKPTR KINT64) +LJFOLDF(kfold_add_kptr) +{ + void *p = ir_kptr(fleft); +#if LJ_64 + ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64; +#else + ptrdiff_t ofs = fright->i; +#endif + return lj_ir_kptr_(J, fleft->o, (char *)p + ofs); +} + +LJFOLD(ADD any KGC) +LJFOLD(ADD any KPTR) +LJFOLD(ADD any KKPTR) +LJFOLDF(kfold_add_kright) +{ + if (fleft->o == IR_KINT || fleft->o == IR_KINT64) { + IRRef1 tmp = fins->op1; fins->op1 = fins->op2; fins->op2 = tmp; + return RETRYFOLD; + } + return NEXTFOLD; +} + +/* -- Constant folding of conversions ------------------------------------- */ + +LJFOLD(TOBIT KNUM KNUM) +LJFOLDF(kfold_tobit) +{ + return INTFOLD(lj_num2bit(knumleft)); +} + +LJFOLD(CONV KINT IRCONV_NUM_INT) +LJFOLDF(kfold_conv_kint_num) +{ + return lj_ir_knum(J, (lua_Number)fleft->i); +} + +LJFOLD(CONV KINT IRCONV_NUM_U32) +LJFOLDF(kfold_conv_kintu32_num) +{ + return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i); +} + +LJFOLD(CONV KINT IRCONV_INT_I8) +LJFOLD(CONV KINT IRCONV_INT_U8) +LJFOLD(CONV KINT IRCONV_INT_I16) +LJFOLD(CONV KINT IRCONV_INT_U16) +LJFOLDF(kfold_conv_kint_ext) +{ + int32_t k = fleft->i; + if ((fins->op2 & IRCONV_SRCMASK) == IRT_I8) k = (int8_t)k; + else if ((fins->op2 & IRCONV_SRCMASK) == IRT_U8) k = (uint8_t)k; + else if ((fins->op2 & IRCONV_SRCMASK) == IRT_I16) k = (int16_t)k; + else k = (uint16_t)k; + return INTFOLD(k); +} + +LJFOLD(CONV KINT IRCONV_I64_INT) +LJFOLD(CONV KINT IRCONV_U64_INT) +LJFOLD(CONV KINT IRCONV_I64_U32) +LJFOLD(CONV KINT IRCONV_U64_U32) +LJFOLDF(kfold_conv_kint_i64) +{ + if ((fins->op2 & IRCONV_SEXT)) + return INT64FOLD((uint64_t)(int64_t)fleft->i); + else + return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i); +} + +LJFOLD(CONV KINT64 IRCONV_NUM_I64) +LJFOLDF(kfold_conv_kint64_num_i64) +{ + return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64); +} + +LJFOLD(CONV KINT64 IRCONV_NUM_U64) +LJFOLDF(kfold_conv_kint64_num_u64) +{ + return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64); +} + +LJFOLD(CONV KINT64 IRCONV_INT_I64) +LJFOLD(CONV KINT64 IRCONV_U32_I64) +LJFOLDF(kfold_conv_kint64_int_i64) +{ + return INTFOLD((int32_t)ir_kint64(fleft)->u64); +} + +LJFOLD(CONV KNUM IRCONV_INT_NUM) +LJFOLDF(kfold_conv_knum_int_num) +{ + lua_Number n = knumleft; + int32_t k = lj_num2int(n); + if (irt_isguard(fins->t) && n != (lua_Number)k) { + /* We're about to create a guard which always fails, like CONV +1.5. + ** Some pathological loops cause this during LICM, e.g.: + ** local x,k,t = 0,1.5,{1,[1.5]=2} + ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end + ** assert(x == 300) + */ + return FAILFOLD; + } + return INTFOLD(k); +} + +LJFOLD(CONV KNUM IRCONV_U32_NUM) +LJFOLDF(kfold_conv_knum_u32_num) +{ +#ifdef _MSC_VER + { /* Workaround for MSVC bug. 
*/ + volatile uint32_t u = (uint32_t)knumleft; + return INTFOLD((int32_t)u); + } +#else + return INTFOLD((int32_t)(uint32_t)knumleft); +#endif +} + +LJFOLD(CONV KNUM IRCONV_I64_NUM) +LJFOLDF(kfold_conv_knum_i64_num) +{ + return INT64FOLD((uint64_t)(int64_t)knumleft); +} + +LJFOLD(CONV KNUM IRCONV_U64_NUM) +LJFOLDF(kfold_conv_knum_u64_num) +{ + return INT64FOLD(lj_num2u64(knumleft)); +} + +LJFOLD(TOSTR KNUM any) +LJFOLDF(kfold_tostr_knum) +{ + return lj_ir_kstr(J, lj_strfmt_num(J->L, ir_knum(fleft))); +} + +LJFOLD(TOSTR KINT any) +LJFOLDF(kfold_tostr_kint) +{ + return lj_ir_kstr(J, fins->op2 == IRTOSTR_INT ? + lj_strfmt_int(J->L, fleft->i) : + lj_strfmt_char(J->L, fleft->i)); +} + +LJFOLD(STRTO KGC) +LJFOLDF(kfold_strto) +{ + TValue n; + if (lj_strscan_num(ir_kstr(fleft), &n)) + return lj_ir_knum(J, numV(&n)); + return FAILFOLD; +} + +/* -- Constant folding of equality checks --------------------------------- */ + +/* Don't constant-fold away FLOAD checks against KNULL. */ +LJFOLD(EQ FLOAD KNULL) +LJFOLD(NE FLOAD KNULL) +LJFOLDX(lj_opt_cse) + +/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */ +LJFOLD(EQ any KNULL) +LJFOLD(NE any KNULL) +LJFOLD(EQ KNULL any) +LJFOLD(NE KNULL any) +LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */ +LJFOLD(NE KINT KINT) +LJFOLD(EQ KINT64 KINT64) +LJFOLD(NE KINT64 KINT64) +LJFOLD(EQ KGC KGC) +LJFOLD(NE KGC KGC) +LJFOLDF(kfold_kref) +{ + return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE)); +} + +/* -- Algebraic shortcuts ------------------------------------------------- */ + +LJFOLD(FPMATH FPMATH IRFPM_FLOOR) +LJFOLD(FPMATH FPMATH IRFPM_CEIL) +LJFOLD(FPMATH FPMATH IRFPM_TRUNC) +LJFOLDF(shortcut_round) +{ + IRFPMathOp op = (IRFPMathOp)fleft->op2; + if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC) + return LEFTFOLD; /* round(round_left(x)) = round_left(x) */ + return NEXTFOLD; +} + +LJFOLD(ABS ABS KNUM) +LJFOLDF(shortcut_left) +{ + return LEFTFOLD; /* f(g(x)) ==> g(x) */ +} + +LJFOLD(ABS NEG KNUM) +LJFOLDF(shortcut_dropleft) +{ + PHIBARRIER(fleft); + fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */ + return RETRYFOLD; +} + +/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */ +LJFOLD(NEG NEG any) +LJFOLD(BNOT BNOT) +LJFOLD(BSWAP BSWAP) +LJFOLDF(shortcut_leftleft) +{ + PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */ + return fleft->op1; /* f(g(x)) ==> x */ +} + +/* -- FP algebraic simplifications ---------------------------------------- */ + +/* FP arithmetic is tricky -- there's not much to simplify. 
+** Please note the following common pitfalls before sending "improvements": +** x+0 ==> x is INVALID for x=-0 +** 0-x ==> -x is INVALID for x=+0 +** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN +*/ + +LJFOLD(ADD NEG any) +LJFOLDF(simplify_numadd_negx) +{ + PHIBARRIER(fleft); + fins->o = IR_SUB; /* (-a) + b ==> b - a */ + fins->op1 = fins->op2; + fins->op2 = fleft->op1; + return RETRYFOLD; +} + +LJFOLD(ADD any NEG) +LJFOLDF(simplify_numadd_xneg) +{ + PHIBARRIER(fright); + fins->o = IR_SUB; /* a + (-b) ==> a - b */ + fins->op2 = fright->op1; + return RETRYFOLD; +} + +LJFOLD(SUB any KNUM) +LJFOLDF(simplify_numsub_k) +{ + lua_Number n = knumright; + if (n == 0.0) /* x - (+-0) ==> x */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(SUB NEG KNUM) +LJFOLDF(simplify_numsub_negk) +{ + PHIBARRIER(fleft); + fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */ + fins->op1 = (IRRef1)lj_ir_knum(J, -knumright); + return RETRYFOLD; +} + +LJFOLD(SUB any NEG) +LJFOLDF(simplify_numsub_xneg) +{ + PHIBARRIER(fright); + fins->o = IR_ADD; /* a - (-b) ==> a + b */ + fins->op2 = fright->op1; + return RETRYFOLD; +} + +LJFOLD(MUL any KNUM) +LJFOLD(DIV any KNUM) +LJFOLDF(simplify_nummuldiv_k) +{ + lua_Number n = knumright; + if (n == 1.0) { /* x o 1 ==> x */ + return LEFTFOLD; + } else if (n == -1.0) { /* x o -1 ==> -x */ + fins->o = IR_NEG; + fins->op2 = (IRRef1)lj_ir_knum_neg(J); + return RETRYFOLD; + } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */ + fins->o = IR_ADD; + fins->op2 = fins->op1; + return RETRYFOLD; + } else if (fins->o == IR_DIV) { /* x / 2^k ==> x * 2^-k */ + uint64_t u = ir_knum(fright)->u64; + uint32_t ex = ((uint32_t)(u >> 52) & 0x7ff); + if ((u & U64x(000fffff,ffffffff)) == 0 && ex - 1 < 0x7fd) { + u = (u & ((uint64_t)1 << 63)) | ((uint64_t)(0x7fe - ex) << 52); + fins->o = IR_MUL; /* Multiply by exact reciprocal. */ + fins->op2 = lj_ir_knum_u64(J, u); + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(MUL NEG KNUM) +LJFOLD(DIV NEG KNUM) +LJFOLDF(simplify_nummuldiv_negk) +{ + PHIBARRIER(fleft); + fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */ + fins->op2 = (IRRef1)lj_ir_knum(J, -knumright); + return RETRYFOLD; +} + +LJFOLD(MUL NEG NEG) +LJFOLD(DIV NEG NEG) +LJFOLDF(simplify_nummuldiv_negneg) +{ + PHIBARRIER(fleft); + PHIBARRIER(fright); + fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */ + fins->op2 = fright->op1; + return RETRYFOLD; +} + +LJFOLD(POW any KINT) +LJFOLDF(simplify_numpow_xk) +{ + int32_t k = fright->i; + TRef ref = fins->op1; + if (k == 0) /* x ^ 0 ==> 1 */ + return lj_ir_knum_one(J); /* Result must be a number, not an int. */ + if (k == 1) /* x ^ 1 ==> x */ + return LEFTFOLD; + if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */ + return NEXTFOLD; + if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */ + ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref); + k = -k; + } + /* Unroll x^k for 1 <= k <= 65536. */ + for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */ + ref = emitir(IRTN(IR_MUL), ref, ref); + if ((k >>= 1) != 0) { /* Handle trailing bits. 
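+  ** (Square-and-multiply, e.g. x^12: the loop above squared twice for the
+  ** two low zero bits of k (ref = x^4); the remaining bits are folded in
+  ** below, ending with x^4 * x^8 = x^12.)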
*/ + TRef tmp = emitir(IRTN(IR_MUL), ref, ref); + for (; k != 1; k >>= 1) { + if (k & 1) + ref = emitir(IRTN(IR_MUL), ref, tmp); + tmp = emitir(IRTN(IR_MUL), tmp, tmp); + } + ref = emitir(IRTN(IR_MUL), ref, tmp); + } + return ref; +} + +LJFOLD(POW KNUM any) +LJFOLDF(simplify_numpow_kx) +{ + lua_Number n = knumleft; + if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */ + fins->o = IR_CONV; +#if LJ_TARGET_X86ORX64 + fins->op1 = fins->op2; + fins->op2 = IRCONV_NUM_INT; + fins->op2 = (IRRef1)lj_opt_fold(J); +#endif + fins->op1 = (IRRef1)lj_ir_knum_one(J); + fins->o = IR_LDEXP; + return RETRYFOLD; + } + return NEXTFOLD; +} + +/* -- Simplify conversions ------------------------------------------------ */ + +LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */ +LJFOLDF(shortcut_conv_num_int) +{ + PHIBARRIER(fleft); + /* Only safe with a guarded conversion to int. */ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t)) + return fleft->op1; /* f(g(x)) ==> x */ + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */ +LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32*/ +LJFOLDF(simplify_conv_int_num) +{ + /* Fold even across PHI to avoid expensive num->int conversions in loop. */ + if ((fleft->op2 & IRCONV_SRCMASK) == + ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) + return fleft->op1; + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32 */ +LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32 */ +LJFOLDF(simplify_conv_i64_num) +{ + PHIBARRIER(fleft); + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { + /* Reduce to a sign-extension. */ + fins->op1 = fleft->op1; + fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT); + return RETRYFOLD; + } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { +#if LJ_TARGET_X64 + return fleft->op1; +#else + /* Reduce to a zero-extension. */ + fins->op1 = fleft->op1; + fins->op2 = (IRT_I64<<5)|IRT_U32; + return RETRYFOLD; +#endif + } + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT or _U32 */ +LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT or _U32 */ +LJFOLD(CONV CONV IRCONV_U32_I64) /* _INT or _U32 */ +LJFOLD(CONV CONV IRCONV_U32_U64) /* _INT or _U32 */ +LJFOLDF(simplify_conv_int_i64) +{ + int src; + PHIBARRIER(fleft); + src = (fleft->op2 & IRCONV_SRCMASK); + if (src == IRT_INT || src == IRT_U32) { + if (src == ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) { + return fleft->op1; + } else { + fins->op2 = ((fins->op2 & IRCONV_DSTMASK) | src); + fins->op1 = fleft->op1; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_FLOAT_NUM) /* _FLOAT */ +LJFOLDF(simplify_conv_flt_num) +{ + PHIBARRIER(fleft); + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_FLOAT) + return fleft->op1; + return NEXTFOLD; +} + +/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */ +LJFOLD(TOBIT CONV KNUM) +LJFOLDF(simplify_tobit_conv) +{ + /* Fold even across PHI to avoid expensive num->int conversions in loop. */ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { + lua_assert(irt_isnum(fleft->t)); + return fleft->op1; + } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { + lua_assert(irt_isnum(fleft->t)); + fins->o = IR_CONV; + fins->op1 = fleft->op1; + fins->op2 = (IRT_INT<<5)|IRT_U32; + return RETRYFOLD; + } + return NEXTFOLD; +} + +/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. 
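+** A number converted from an (unsigned) 32 bit integer is already
+** integral, so e.g. floor(CONV num <- int i) folds to the CONV itself.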
*/ +LJFOLD(FPMATH CONV IRFPM_FLOOR) +LJFOLD(FPMATH CONV IRFPM_CEIL) +LJFOLD(FPMATH CONV IRFPM_TRUNC) +LJFOLDF(simplify_floor_conv) +{ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT || + (fleft->op2 & IRCONV_SRCMASK) == IRT_U32) + return LEFTFOLD; + return NEXTFOLD; +} + +/* Strength reduction of widening. */ +LJFOLD(CONV any IRCONV_I64_INT) +LJFOLD(CONV any IRCONV_U64_INT) +LJFOLDF(simplify_conv_sext) +{ + IRRef ref = fins->op1; + int64_t ofs = 0; + if (!(fins->op2 & IRCONV_SEXT)) + return NEXTFOLD; + PHIBARRIER(fleft); + if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t))) + goto ok_reduce; + if (fleft->o == IR_ADD && irref_isk(fleft->op2)) { + ofs = (int64_t)IR(fleft->op2)->i; + ref = fleft->op1; + } + /* Use scalar evolution analysis results to strength-reduce sign-extension. */ + if (ref == J->scev.idx) { + IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop; + lua_assert(irt_isint(J->scev.t)); + if (lo && IR(lo)->i + ofs >= 0) { + ok_reduce: +#if LJ_TARGET_X64 + /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */ + return LEFTFOLD; +#else + /* Reduce to a (cheaper) zero-extension. */ + fins->op2 &= ~IRCONV_SEXT; + return RETRYFOLD; +#endif + } + } + return NEXTFOLD; +} + +/* Strength reduction of narrowing. */ +LJFOLD(CONV ADD IRCONV_INT_I64) +LJFOLD(CONV SUB IRCONV_INT_I64) +LJFOLD(CONV MUL IRCONV_INT_I64) +LJFOLD(CONV ADD IRCONV_INT_U64) +LJFOLD(CONV SUB IRCONV_INT_U64) +LJFOLD(CONV MUL IRCONV_INT_U64) +LJFOLD(CONV ADD IRCONV_U32_I64) +LJFOLD(CONV SUB IRCONV_U32_I64) +LJFOLD(CONV MUL IRCONV_U32_I64) +LJFOLD(CONV ADD IRCONV_U32_U64) +LJFOLD(CONV SUB IRCONV_U32_U64) +LJFOLD(CONV MUL IRCONV_U32_U64) +LJFOLDF(simplify_conv_narrow) +{ + IROp op = (IROp)fleft->o; + IRType t = irt_type(fins->t); + IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2; + PHIBARRIER(fleft); + op1 = emitir(IRTI(IR_CONV), op1, mode); + op2 = emitir(IRTI(IR_CONV), op2, mode); + fins->ot = IRT(op, t); + fins->op1 = op1; + fins->op2 = op2; + return RETRYFOLD; +} + +/* Special CSE rule for CONV. */ +LJFOLD(CONV any any) +LJFOLDF(cse_conv) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK); + uint8_t guard = irt_isguard(fins->t); + IRRef ref = J->chain[IR_CONV]; + while (ref > op1) { + IRIns *ir = IR(ref); + /* Commoning with stronger checks is ok. */ + if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 && + irt_isguard(ir->t) >= guard) + return ref; + ref = ir->prev; + } + } + return EMITFOLD; /* No fallthrough to regular CSE. */ +} + +/* FP conversion narrowing. */ +LJFOLD(TOBIT ADD KNUM) +LJFOLD(TOBIT SUB KNUM) +LJFOLD(CONV ADD IRCONV_INT_NUM) +LJFOLD(CONV SUB IRCONV_INT_NUM) +LJFOLD(CONV ADD IRCONV_I64_NUM) +LJFOLD(CONV SUB IRCONV_I64_NUM) +LJFOLDF(narrow_convert) +{ + PHIBARRIER(fleft); + /* Narrowing ignores PHIs and repeating it inside the loop is not useful. 
*/ + if (J->chain[IR_LOOP]) + return NEXTFOLD; + lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT); + return lj_opt_narrow_convert(J); +} + +/* -- Integer algebraic simplifications ----------------------------------- */ + +LJFOLD(ADD any KINT) +LJFOLD(ADDOV any KINT) +LJFOLD(SUBOV any KINT) +LJFOLDF(simplify_intadd_k) +{ + if (fright->i == 0) /* i o 0 ==> i */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(MULOV any KINT) +LJFOLDF(simplify_intmul_k) +{ + if (fright->i == 0) /* i * 0 ==> 0 */ + return RIGHTFOLD; + if (fright->i == 1) /* i * 1 ==> i */ + return LEFTFOLD; + if (fright->i == 2) { /* i * 2 ==> i + i */ + fins->o = IR_ADDOV; + fins->op2 = fins->op1; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(SUB any KINT) +LJFOLDF(simplify_intsub_k) +{ + if (fright->i == 0) /* i - 0 ==> i */ + return LEFTFOLD; + fins->o = IR_ADD; /* i - k ==> i + (-k) */ + fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */ + return RETRYFOLD; +} + +LJFOLD(SUB KINT any) +LJFOLD(SUB KINT64 any) +LJFOLDF(simplify_intsub_kleft) +{ + if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) { + fins->o = IR_NEG; /* 0 - i ==> -i */ + fins->op1 = fins->op2; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(ADD any KINT64) +LJFOLDF(simplify_intadd_k64) +{ + if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(SUB any KINT64) +LJFOLDF(simplify_intsub_k64) +{ + uint64_t k = ir_kint64(fright)->u64; + if (k == 0) /* i - 0 ==> i */ + return LEFTFOLD; + fins->o = IR_ADD; /* i - k ==> i + (-k) */ + fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k); + return RETRYFOLD; +} + +static TRef simplify_intmul_k(jit_State *J, int32_t k) +{ + /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2. + ** But this is mainly intended for simple address arithmetic. + ** Also it's easier for the backend to optimize the original multiplies. + */ + if (k == 0) { /* i * 0 ==> 0 */ + return RIGHTFOLD; + } else if (k == 1) { /* i * 1 ==> i */ + return LEFTFOLD; + } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */ + fins->o = IR_BSHL; + fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k)); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MUL any KINT) +LJFOLDF(simplify_intmul_k32) +{ + if (fright->i >= 0) + return simplify_intmul_k(J, fright->i); + return NEXTFOLD; +} + +LJFOLD(MUL any KINT64) +LJFOLDF(simplify_intmul_k64) +{ +#if LJ_HASFFI + if (ir_kint64(fright)->u64 < 0x80000000u) + return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); + return NEXTFOLD; +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(MOD any KINT) +LJFOLDF(simplify_intmod_k) +{ + int32_t k = fright->i; + lua_assert(k != 0); + if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */ + fins->o = IR_BAND; + fins->op2 = lj_ir_kint(J, k-1); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MOD KINT any) +LJFOLDF(simplify_intmod_kleft) +{ + if (fleft->i == 0) + return INTFOLD(0); + return NEXTFOLD; +} + +LJFOLD(SUB any any) +LJFOLD(SUBOV any any) +LJFOLDF(simplify_intsub) +{ + if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */ + return irt_is64(fins->t) ? 
INT64FOLD(0) : INTFOLD(0); + return NEXTFOLD; +} + +LJFOLD(SUB ADD any) +LJFOLDF(simplify_intsubadd_leftcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fleft); + if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */ + return fleft->op2; + if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */ + return fleft->op1; + } + return NEXTFOLD; +} + +LJFOLD(SUB SUB any) +LJFOLDF(simplify_intsubsub_leftcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fleft); + if (fins->op2 == fleft->op1) { /* (i - j) - i ==> 0 - j */ + fins->op1 = (IRRef1)lj_ir_kint(J, 0); + fins->op2 = fleft->op2; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(SUB any SUB) +LJFOLDF(simplify_intsubsub_rightcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fright); + if (fins->op1 == fright->op1) /* i - (i - j) ==> j */ + return fright->op2; + } + return NEXTFOLD; +} + +LJFOLD(SUB any ADD) +LJFOLDF(simplify_intsubadd_rightcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fright); + if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */ + fins->op2 = fright->op2; + fins->op1 = (IRRef1)lj_ir_kint(J, 0); + return RETRYFOLD; + } + if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */ + fins->op2 = fright->op1; + fins->op1 = (IRRef1)lj_ir_kint(J, 0); + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(SUB ADD ADD) +LJFOLDF(simplify_intsubaddadd_cancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fleft); + PHIBARRIER(fright); + if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */ + fins->op1 = fleft->op2; + fins->op2 = fright->op2; + return RETRYFOLD; + } + if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */ + fins->op1 = fleft->op2; + fins->op2 = fright->op1; + return RETRYFOLD; + } + if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */ + fins->op1 = fleft->op1; + fins->op2 = fright->op2; + return RETRYFOLD; + } + if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */ + fins->op1 = fleft->op1; + fins->op2 = fright->op1; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(BAND any KINT) +LJFOLD(BAND any KINT64) +LJFOLDF(simplify_band_k) +{ + int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : + (int64_t)ir_k64(fright)->u64; + if (k == 0) /* i & 0 ==> 0 */ + return RIGHTFOLD; + if (k == -1) /* i & -1 ==> i */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(BOR any KINT) +LJFOLD(BOR any KINT64) +LJFOLDF(simplify_bor_k) +{ + int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : + (int64_t)ir_k64(fright)->u64; + if (k == 0) /* i | 0 ==> i */ + return LEFTFOLD; + if (k == -1) /* i | -1 ==> -1 */ + return RIGHTFOLD; + return NEXTFOLD; +} + +LJFOLD(BXOR any KINT) +LJFOLD(BXOR any KINT64) +LJFOLDF(simplify_bxor_k) +{ + int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : + (int64_t)ir_k64(fright)->u64; + if (k == 0) /* i xor 0 ==> i */ + return LEFTFOLD; + if (k == -1) { /* i xor -1 ==> ~i */ + fins->o = IR_BNOT; + fins->op2 = 0; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(BSHL any KINT) +LJFOLD(BSHR any KINT) +LJFOLD(BSAR any KINT) +LJFOLD(BROL any KINT) +LJFOLD(BROR any KINT) +LJFOLDF(simplify_shift_ik) +{ + int32_t mask = irt_is64(fins->t) ? 
63 : 31; + int32_t k = (fright->i & mask); + if (k == 0) /* i o 0 ==> i */ + return LEFTFOLD; + if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */ + fins->o = IR_ADD; + fins->op2 = fins->op1; + return RETRYFOLD; + } + if (k != fright->i) { /* i o k ==> i o (k & mask) */ + fins->op2 = (IRRef1)lj_ir_kint(J, k); + return RETRYFOLD; + } +#ifndef LJ_TARGET_UNIFYROT + if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */ + fins->o = IR_BROL; + fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask); + return RETRYFOLD; + } +#endif + return NEXTFOLD; +} + +LJFOLD(BSHL any BAND) +LJFOLD(BSHR any BAND) +LJFOLD(BSAR any BAND) +LJFOLD(BROL any BAND) +LJFOLD(BROR any BAND) +LJFOLDF(simplify_shift_andk) +{ + IRIns *irk = IR(fright->op2); + PHIBARRIER(fright); + if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && + irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */ + int32_t mask = irt_is64(fins->t) ? 63 : 31; + int32_t k = irk->i & mask; + if (k == mask) { + fins->op2 = fright->op1; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(BSHL KINT any) +LJFOLD(BSHR KINT any) +LJFOLD(BSHL KINT64 any) +LJFOLD(BSHR KINT64 any) +LJFOLDF(simplify_shift1_ki) +{ + int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i : + (int64_t)ir_k64(fleft)->u64; + if (k == 0) /* 0 o i ==> 0 */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(BSAR KINT any) +LJFOLD(BROL KINT any) +LJFOLD(BROR KINT any) +LJFOLD(BSAR KINT64 any) +LJFOLD(BROL KINT64 any) +LJFOLD(BROR KINT64 any) +LJFOLDF(simplify_shift2_ki) +{ + int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i : + (int64_t)ir_k64(fleft)->u64; + if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(BSHL BAND KINT) +LJFOLD(BSHR BAND KINT) +LJFOLD(BROL BAND KINT) +LJFOLD(BROR BAND KINT) +LJFOLDF(simplify_shiftk_andk) +{ + IRIns *irk = IR(fleft->op2); + PHIBARRIER(fleft); + if (irk->o == IR_KINT) { /* (i & k1) o k2 ==> (i o k2) & (k1 o k2) */ + int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o); + fins->op1 = fleft->op1; + fins->op1 = (IRRef1)lj_opt_fold(J); + fins->op2 = (IRRef1)lj_ir_kint(J, k); + fins->ot = IRTI(IR_BAND); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(BAND BSHL KINT) +LJFOLD(BAND BSHR KINT) +LJFOLDF(simplify_andk_shiftk) +{ + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KINT && + kfold_intop(-1, irk->i, (IROp)fleft->o) == fright->i) + return LEFTFOLD; /* (i o k1) & k2 ==> i, if (-1 o k1) == k2 */ + return NEXTFOLD; +} + +/* -- Reassociation ------------------------------------------------------- */ + +LJFOLD(ADD ADD KINT) +LJFOLD(MUL MUL KINT) +LJFOLD(BAND BAND KINT) +LJFOLD(BOR BOR KINT) +LJFOLD(BXOR BXOR KINT) +LJFOLDF(reassoc_intarith_k) +{ + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KINT) { + int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o); + if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. 
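+    ** (E.g. (i BOR 0xff) BOR 0x0f: 0xff BOR 0x0f == 0xff, so the outer
+    ** BOR is redundant and the inner instruction is returned unchanged.)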
*/ + return LEFTFOLD; + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint(J, k); + return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */ + } + return NEXTFOLD; +} + +LJFOLD(ADD ADD KINT64) +LJFOLD(MUL MUL KINT64) +LJFOLD(BAND BAND KINT64) +LJFOLD(BOR BOR KINT64) +LJFOLD(BXOR BXOR KINT64) +LJFOLDF(reassoc_intarith_k64) +{ +#if LJ_HASFFI + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KINT64) { + uint64_t k = kfold_int64arith(ir_k64(irk)->u64, + ir_k64(fright)->u64, (IROp)fins->o); + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint64(J, k); + return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */ + } + return NEXTFOLD; +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(MIN MIN any) +LJFOLD(MAX MAX any) +LJFOLD(BAND BAND any) +LJFOLD(BOR BOR any) +LJFOLDF(reassoc_dup) +{ + if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2) + return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */ + return NEXTFOLD; +} + +LJFOLD(BXOR BXOR any) +LJFOLDF(reassoc_bxor) +{ + PHIBARRIER(fleft); + if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */ + return fleft->op2; + if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */ + return fleft->op1; + return NEXTFOLD; +} + +LJFOLD(BSHL BSHL KINT) +LJFOLD(BSHR BSHR KINT) +LJFOLD(BSAR BSAR KINT) +LJFOLD(BROL BROL KINT) +LJFOLD(BROR BROR KINT) +LJFOLDF(reassoc_shift) +{ + IRIns *irk = IR(fleft->op2); + PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */ + if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */ + int32_t mask = irt_is64(fins->t) ? 63 : 31; + int32_t k = (irk->i & mask) + (fright->i & mask); + if (k > mask) { /* Combined shift too wide? */ + if (fins->o == IR_BSHL || fins->o == IR_BSHR) + return mask == 31 ? INTFOLD(0) : INT64FOLD(0); + else if (fins->o == IR_BSAR) + k = mask; + else + k &= mask; + } + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint(J, k); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MIN MIN KNUM) +LJFOLD(MAX MAX KNUM) +LJFOLD(MIN MIN KINT) +LJFOLD(MAX MAX KINT) +LJFOLDF(reassoc_minmax_k) +{ + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KNUM) { + lua_Number a = ir_knum(irk)->n; + lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD); + if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ + return LEFTFOLD; + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_knum(J, y); + return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */ + } else if (irk->o == IR_KINT) { + int32_t a = irk->i; + int32_t y = kfold_intop(a, fright->i, fins->o); + if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ + return LEFTFOLD; + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint(J, y); + return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */ + } + return NEXTFOLD; +} + +LJFOLD(MIN MAX any) +LJFOLD(MAX MIN any) +LJFOLDF(reassoc_minmax_left) +{ + if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2) + return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */ + return NEXTFOLD; +} + +LJFOLD(MIN any MAX) +LJFOLD(MAX any MIN) +LJFOLDF(reassoc_minmax_right) +{ + if (fins->op1 == fright->op1 || fins->op1 == fright->op2) + return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */ + return NEXTFOLD; +} + +/* -- Array bounds check elimination -------------------------------------- */ + +/* Eliminate ABC across PHIs to handle t[i-1] forwarding case. 
+** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists. +** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but needs better disambig. +*/ +LJFOLD(ABC any ADD) +LJFOLDF(abc_fwd) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) { + if (irref_isk(fright->op2)) { + IRIns *add2 = IR(fright->op1); + if (add2->o == IR_ADD && irref_isk(add2->op2) && + IR(fright->op2)->i == -IR(add2->op2)->i) { + IRRef ref = J->chain[IR_ABC]; + IRRef lim = add2->op1; + if (fins->op1 > lim) lim = fins->op1; + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == fins->op1 && ir->op2 == add2->op1) + return DROPFOLD; + ref = ir->prev; + } + } + } + } + return NEXTFOLD; +} + +/* Eliminate ABC for constants. +** ABC(asize, k1), ABC(asize k2) ==> ABC(asize, max(k1, k2)) +** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2. +*/ +LJFOLD(ABC any KINT) +LJFOLDF(abc_k) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) { + IRRef ref = J->chain[IR_ABC]; + IRRef asize = fins->op1; + while (ref > asize) { + IRIns *ir = IR(ref); + if (ir->op1 == asize && irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (fright->i > k) + ir->op2 = fins->op2; + return DROPFOLD; + } + ref = ir->prev; + } + return EMITFOLD; /* Already performed CSE. */ + } + return NEXTFOLD; +} + +/* Eliminate invariant ABC inside loop. */ +LJFOLD(ABC any any) +LJFOLDF(abc_invar) +{ + /* Invariant ABC marked as PTR. Drop if op1 is invariant, too. */ + if (!irt_isint(fins->t) && fins->op1 < J->chain[IR_LOOP] && + !irt_isphi(IR(fins->op1)->t)) + return DROPFOLD; + return NEXTFOLD; +} + +/* -- Commutativity ------------------------------------------------------- */ + +/* The refs of commutative ops are canonicalized. Lower refs go to the right. +** Rationale behind this: +** - It (also) moves constants to the right. +** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices). +** - It helps CSE to find more matches. +** - The assembler generates better code with constants at the right. +*/ + +LJFOLD(ADD any any) +LJFOLD(MUL any any) +LJFOLD(ADDOV any any) +LJFOLD(MULOV any any) +LJFOLDF(comm_swap) +{ + if (fins->op1 < fins->op2) { /* Move lower ref to the right. */ + IRRef1 tmp = fins->op1; + fins->op1 = fins->op2; + fins->op2 = tmp; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(EQ any any) +LJFOLD(NE any any) +LJFOLDF(comm_equal) +{ + /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */ + if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) + return CONDFOLD(fins->o == IR_EQ); + return fold_comm_swap(J); +} + +LJFOLD(LT any any) +LJFOLD(GE any any) +LJFOLD(LE any any) +LJFOLD(GT any any) +LJFOLD(ULT any any) +LJFOLD(UGE any any) +LJFOLD(ULE any any) +LJFOLD(UGT any any) +LJFOLDF(comm_comp) +{ + /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail */ + if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) + return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1); + if (fins->op1 < fins->op2) { /* Move lower ref to the right. */ + IRRef1 tmp = fins->op1; + fins->op1 = fins->op2; + fins->op2 = tmp; + fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */ + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(BAND any any) +LJFOLD(BOR any any) +LJFOLD(MIN any any) +LJFOLD(MAX any any) +LJFOLDF(comm_dup) +{ + if (fins->op1 == fins->op2) /* x o x ==> x */ + return LEFTFOLD; + return fold_comm_swap(J); +} + +LJFOLD(BXOR any any) +LJFOLDF(comm_bxor) +{ + if (fins->op1 == fins->op2) /* i xor i ==> 0 */ + return irt_is64(fins->t) ? 
INT64FOLD(0) : INTFOLD(0); + return fold_comm_swap(J); +} + +/* -- Simplification of compound expressions ------------------------------ */ + +static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p) +{ + int32_t k; + switch (irt_type(ir->t)) { + case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p); + case IRT_I8: k = (int32_t)*(int8_t *)p; break; + case IRT_U8: k = (int32_t)*(uint8_t *)p; break; + case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break; + case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break; + case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break; + case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p); + default: return 0; + } + return lj_ir_kint(J, k); +} + +/* Turn: string.sub(str, a, b) == kstr +** into: string.byte(str, a) == string.byte(kstr, 1) etc. +** Note: this creates unaligned XLOADs on x86/x64. +*/ +LJFOLD(EQ SNEW KGC) +LJFOLD(NE SNEW KGC) +LJFOLDF(merge_eqne_snew_kgc) +{ + GCstr *kstr = ir_kstr(fright); + int32_t len = (int32_t)kstr->len; + lua_assert(irt_isstr(fins->t)); + +#if LJ_TARGET_UNALIGNED +#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */ +#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */ +#else +#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */ +#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */ +#endif + + PHIBARRIER(fleft); + if (len <= FOLD_SNEW_MAX_LEN) { + IROp op = (IROp)fins->o; + IRRef strref = fleft->op1; + if (IR(strref)->o != IR_STRREF) + return NEXTFOLD; + if (op == IR_EQ) { + emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len)); + /* Caveat: fins/fleft/fright is no longer valid after emitir. */ + } else { + /* NE is not expanded since this would need an OR of two conds. */ + if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */ + return NEXTFOLD; + if (IR(fleft->op2)->i != len) + return DROPFOLD; + } + if (len > 0) { + /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */ + uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) : + len == 2 ? IRT(IR_XLOAD, IRT_U16) : + IRTI(IR_XLOAD)); + TRef tmp = emitir(ot, strref, + IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0)); + TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr)); + if (len == 3) + tmp = emitir(IRTI(IR_BAND), tmp, + lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00))); + fins->op1 = (IRRef1)tmp; + fins->op2 = (IRRef1)val; + fins->ot = (IROpT)IRTGI(op); + return RETRYFOLD; + } else { + return DROPFOLD; + } + } + return NEXTFOLD; +} + +/* -- Loads --------------------------------------------------------------- */ + +/* Loads cannot be folded or passed on to CSE in general. +** Alias analysis is needed to check for forwarding opportunities. +** +** Caveat: *all* loads must be listed here or they end up at CSE! +*/ + +LJFOLD(ALOAD any) +LJFOLDX(lj_opt_fwd_aload) + +/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */ +LJFOLD(HLOAD KKPTR) +LJFOLDF(kfold_hload_kkptr) +{ + UNUSED(J); + lua_assert(ir_kptr(fleft) == niltvg(J2G(J))); + return TREF_NIL; +} + +LJFOLD(HLOAD any) +LJFOLDX(lj_opt_fwd_hload) + +LJFOLD(ULOAD any) +LJFOLDX(lj_opt_fwd_uload) + +LJFOLD(CALLL any IRCALL_lj_tab_len) +LJFOLDX(lj_opt_fwd_tab_len) + +/* Upvalue refs are really loads, but there are no corresponding stores. +** So CSE is ok for them, except for UREFO across a GC step (see below). +** If the referenced function is const, its upvalue addresses are const, too. 
+** This can be used to improve CSE by looking for the same address, +** even if the upvalues originate from a different function. +*/ +LJFOLD(UREFO KGC any) +LJFOLD(UREFC KGC any) +LJFOLDF(cse_uref) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + IRRef ref = J->chain[fins->o]; + GCfunc *fn = ir_kfunc(fleft); + GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)])); + while (ref > 0) { + IRIns *ir = IR(ref); + if (irref_isk(ir->op1)) { + GCfunc *fn2 = ir_kfunc(IR(ir->op1)); + if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) { + if (fins->o == IR_UREFO && gcstep_barrier(J, ref)) + break; + return ref; + } + } + ref = ir->prev; + } + } + return EMITFOLD; +} + +LJFOLD(HREFK any any) +LJFOLDX(lj_opt_fwd_hrefk) + +LJFOLD(HREF TNEW any) +LJFOLDF(fwd_href_tnew) +{ + if (lj_opt_fwd_href_nokey(J)) + return lj_ir_kkptr(J, niltvg(J2G(J))); + return NEXTFOLD; +} + +LJFOLD(HREF TDUP KPRI) +LJFOLD(HREF TDUP KGC) +LJFOLD(HREF TDUP KNUM) +LJFOLDF(fwd_href_tdup) +{ + TValue keyv; + lj_ir_kvalue(J->L, &keyv, fright); + if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) && + lj_opt_fwd_href_nokey(J)) + return lj_ir_kkptr(J, niltvg(J2G(J))); + return NEXTFOLD; +} + +/* We can safely FOLD/CSE array/hash refs and field loads, since there +** are no corresponding stores. But we need to check for any NEWREF with +** an aliased table, as it may invalidate all of the pointers and fields. +** Only HREF needs the NEWREF check -- AREF and HREFK already depend on +** FLOADs. And NEWREF itself is treated like a store (see below). +** LREF is constant (per trace) since coroutine switches are not inlined. +*/ +LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE) +LJFOLDF(fload_tab_tnew_asize) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD(fleft->op1); + return NEXTFOLD; +} + +LJFOLD(FLOAD TNEW IRFL_TAB_HMASK) +LJFOLDF(fload_tab_tnew_hmask) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD((1 << fleft->op2)-1); + return NEXTFOLD; +} + +LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE) +LJFOLDF(fload_tab_tdup_asize) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize); + return NEXTFOLD; +} + +LJFOLD(FLOAD TDUP IRFL_TAB_HMASK) +LJFOLDF(fload_tab_tdup_hmask) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask); + return NEXTFOLD; +} + +LJFOLD(HREF any any) +LJFOLD(FLOAD any IRFL_TAB_ARRAY) +LJFOLD(FLOAD any IRFL_TAB_NODE) +LJFOLD(FLOAD any IRFL_TAB_ASIZE) +LJFOLD(FLOAD any IRFL_TAB_HMASK) +LJFOLDF(fload_tab_ah) +{ + TRef tr = lj_opt_cse(J); + return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD; +} + +/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */ +LJFOLD(FLOAD KGC IRFL_STR_LEN) +LJFOLDF(fload_str_len_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return INTFOLD((int32_t)ir_kstr(fleft)->len); + return NEXTFOLD; +} + +LJFOLD(FLOAD SNEW IRFL_STR_LEN) +LJFOLDF(fload_str_len_snew) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { + PHIBARRIER(fleft); + return fleft->op2; + } + return NEXTFOLD; +} + +LJFOLD(FLOAD TOSTR IRFL_STR_LEN) +LJFOLDF(fload_str_len_tostr) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fleft->op2 == IRTOSTR_CHAR) + return INTFOLD(1); + return NEXTFOLD; +} + +/* The C type ID of cdata objects is immutable. 
*/ +LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID) +LJFOLDF(fload_cdata_typeid_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return INTFOLD((int32_t)ir_kcdata(fleft)->ctypeid); + return NEXTFOLD; +} + +/* Get the contents of immutable cdata objects. */ +LJFOLD(FLOAD KGC IRFL_CDATA_PTR) +LJFOLD(FLOAD KGC IRFL_CDATA_INT) +LJFOLD(FLOAD KGC IRFL_CDATA_INT64) +LJFOLDF(fload_cdata_int64_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { + void *p = cdataptr(ir_kcdata(fleft)); + if (irt_is64(fins->t)) + return INT64FOLD(*(uint64_t *)p); + else + return INTFOLD(*(int32_t *)p); + } + return NEXTFOLD; +} + +LJFOLD(FLOAD CNEW IRFL_CDATA_CTYPEID) +LJFOLD(FLOAD CNEWI IRFL_CDATA_CTYPEID) +LJFOLDF(fload_cdata_typeid_cnew) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */ + return NEXTFOLD; +} + +/* Pointer, int and int64 cdata objects are immutable. */ +LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR) +LJFOLD(FLOAD CNEWI IRFL_CDATA_INT) +LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64) +LJFOLDF(fload_cdata_ptr_int64_cnew) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return fleft->op2; /* Fold even across PHI to avoid allocations. */ + return NEXTFOLD; +} + +LJFOLD(FLOAD any IRFL_STR_LEN) +LJFOLD(FLOAD any IRFL_FUNC_ENV) +LJFOLD(FLOAD any IRFL_THREAD_ENV) +LJFOLD(FLOAD any IRFL_CDATA_CTYPEID) +LJFOLD(FLOAD any IRFL_CDATA_PTR) +LJFOLD(FLOAD any IRFL_CDATA_INT) +LJFOLD(FLOAD any IRFL_CDATA_INT64) +LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */ +LJFOLDX(lj_opt_cse) + +/* All other field loads need alias analysis. */ +LJFOLD(FLOAD any any) +LJFOLDX(lj_opt_fwd_fload) + +/* This is for LOOP only. Recording handles SLOADs internally. */ +LJFOLD(SLOAD any any) +LJFOLDF(fwd_sload) +{ + if ((fins->op2 & IRSLOAD_FRAME)) { + TRef tr = lj_opt_cse(J); + return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr; + } else { + lua_assert(J->slot[fins->op1] != 0); + return J->slot[fins->op1]; + } +} + +/* Only fold for KKPTR. The pointer _and_ the contents must be const. */ +LJFOLD(XLOAD KKPTR any) +LJFOLDF(xload_kptr) +{ + TRef tr = kfold_xload(J, fins, ir_kptr(fleft)); + return tr ? tr : NEXTFOLD; +} + +LJFOLD(XLOAD any any) +LJFOLDX(lj_opt_fwd_xload) + +/* -- Write barriers ------------------------------------------------------ */ + +/* Write barriers are amenable to CSE, but not across any incremental +** GC steps. +** +** The same logic applies to open upvalue references, because a stack +** may be resized during a GC step (not the current stack, but maybe that +** of a coroutine). +*/ +LJFOLD(TBAR any) +LJFOLD(OBAR any any) +LJFOLD(UREFO any any) +LJFOLDF(barrier_tab) +{ + TRef tr = lj_opt_cse(J); + if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */ + return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */ + return tr; +} + +LJFOLD(TBAR TNEW) +LJFOLD(TBAR TDUP) +LJFOLDF(barrier_tnew_tdup) +{ + /* New tables are always white and never need a barrier. */ + if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */ + return NEXTFOLD; + return DROPFOLD; +} + +/* -- Profiling ----------------------------------------------------------- */ + +LJFOLD(PROF any any) +LJFOLDF(prof) +{ + IRRef ref = J->chain[IR_PROF]; + if (ref+1 == J->cur.nins) /* Drop neighbouring IR_PROF. */ + return ref; + return EMITFOLD; +} + +/* -- Stores and allocations ---------------------------------------------- */ + +/* Stores and allocations cannot be folded or passed on to CSE in general. 
+** But some stores can be eliminated with dead-store elimination (DSE). +** +** Caveat: *all* stores and allocs must be listed here or they end up at CSE! +*/ + +LJFOLD(ASTORE any any) +LJFOLD(HSTORE any any) +LJFOLDX(lj_opt_dse_ahstore) + +LJFOLD(USTORE any any) +LJFOLDX(lj_opt_dse_ustore) + +LJFOLD(FSTORE any any) +LJFOLDX(lj_opt_dse_fstore) + +LJFOLD(XSTORE any any) +LJFOLDX(lj_opt_dse_xstore) + +LJFOLD(NEWREF any any) /* Treated like a store. */ +LJFOLD(CALLA any any) +LJFOLD(CALLL any any) /* Safeguard fallback. */ +LJFOLD(CALLS any any) +LJFOLD(CALLXS any any) +LJFOLD(XBAR) +LJFOLD(RETF any any) /* Modifies BASE. */ +LJFOLD(TNEW any any) +LJFOLD(TDUP any) +LJFOLD(CNEW any any) +LJFOLD(XSNEW any any) +LJFOLD(BUFHDR any any) +LJFOLDX(lj_ir_emit) + +/* ------------------------------------------------------------------------ */ + +/* Every entry in the generated hash table is a 32 bit pattern: +** +** xxxxxxxx iiiiiii lllllll rrrrrrrrrr +** +** xxxxxxxx = 8 bit index into fold function table +** iiiiiii = 7 bit folded instruction opcode +** lllllll = 7 bit left instruction opcode +** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field +*/ + +#include "lj_folddef.h" + +/* ------------------------------------------------------------------------ */ + +/* Fold IR instruction. */ +TRef LJ_FASTCALL lj_opt_fold(jit_State *J) +{ + uint32_t key, any; + IRRef ref; + + if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) { + lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) | + JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT); + /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */ + if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N) + return lj_opt_cse(J); + + /* No FOLD, forwarding or CSE? Emit raw IR for loads, except for SLOAD. */ + if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE)) != + (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE) && + irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD) + return lj_ir_emit(J); + + /* No FOLD or DSE? Emit raw IR for stores. */ + if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_DSE)) != + (JIT_F_OPT_FOLD|JIT_F_OPT_DSE) && + irm_kind(lj_ir_mode[fins->o]) == IRM_S) + return lj_ir_emit(J); + } + + /* Fold engine start/retry point. */ +retry: + /* Construct key from opcode and operand opcodes (unless literal/none). */ + key = ((uint32_t)fins->o << 17); + if (fins->op1 >= J->cur.nk) { + key += (uint32_t)IR(fins->op1)->o << 10; + *fleft = *IR(fins->op1); + } + if (fins->op2 >= J->cur.nk) { + key += (uint32_t)IR(fins->op2)->o; + *fright = *IR(fins->op2); + } else { + key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */ + } + + /* Check for a match in order from most specific to least specific. */ + any = 0; + for (;;) { + uint32_t k = key | (any & 0x1ffff); + uint32_t h = fold_hashkey(k); + uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */ + if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) { + ref = (IRRef)tref_ref(fold_func[fh >> 24](J)); + if (ref != NEXTFOLD) + break; + } + if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */ + return lj_opt_cse(J); + any = (any | (any >> 10)) ^ 0xffc00; + } + + /* Return value processing, ordered by frequency. 
*/ + if (LJ_LIKELY(ref >= MAX_FOLD)) + return TREF(ref, irt_t(IR(ref)->t)); + if (ref == RETRYFOLD) + goto retry; + if (ref == KINTFOLD) + return lj_ir_kint(J, fins->i); + if (ref == FAILFOLD) + lj_trace_err(J, LJ_TRERR_GFAIL); + lua_assert(ref == DROPFOLD); + return REF_DROP; +} + +/* -- Common-Subexpression Elimination ------------------------------------ */ + +/* CSE an IR instruction. This is very fast due to the skip-list chains. */ +TRef LJ_FASTCALL lj_opt_cse(jit_State *J) +{ + /* Avoid narrow to wide store-to-load forwarding stall */ + IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16); + IROp op = fins->o; + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + /* Limited search for same operands in per-opcode chain. */ + IRRef ref = J->chain[op]; + IRRef lim = fins->op1; + if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. */ + while (ref > lim) { + if (IR(ref)->op12 == op12) + return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */ + ref = IR(ref)->prev; + } + } + /* Otherwise emit IR (inlined for speed). */ + { + IRRef ref = lj_ir_nextins(J); + IRIns *ir = IR(ref); + ir->prev = J->chain[op]; + ir->op12 = op12; + J->chain[op] = (IRRef1)ref; + ir->o = fins->o; + J->guardemit.irt |= fins->t.irt; + return TREF(ref, irt_t((ir->t = fins->t))); + } +} + +/* CSE with explicit search limit. */ +TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim) +{ + IRRef ref = J->chain[fins->o]; + IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16); + while (ref > lim) { + if (IR(ref)->op12 == op12) + return ref; + ref = IR(ref)->prev; + } + return lj_ir_emit(J); +} + +/* ------------------------------------------------------------------------ */ + +#undef IR +#undef fins +#undef fleft +#undef fright +#undef knumleft +#undef knumright +#undef emitir + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_loop.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_loop.c new file mode 100644 index 00000000000..4b4ab7dc3a3 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_loop.c @@ -0,0 +1,449 @@ +/* +** LOOP: Loop Optimizations. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_loop_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_vm.h" + +/* Loop optimization: +** +** Traditional Loop-Invariant Code Motion (LICM) splits the instructions +** of a loop into invariant and variant instructions. The invariant +** instructions are hoisted out of the loop and only the variant +** instructions remain inside the loop body. +** +** Unfortunately LICM is mostly useless for compiling dynamic languages. +** The IR has many guards and most of the subsequent instructions are +** control-dependent on them. The first non-hoistable guard would +** effectively prevent hoisting of all subsequent instructions. +** +** That's why we use a special form of unrolling using copy-substitution, +** combined with redundancy elimination: +** +** The recorded instruction stream is re-emitted to the compiler pipeline +** with substituted operands. The substitution table is filled with the +** refs returned by re-emitting each instruction. This can be done +** on-the-fly, because the IR is in strict SSA form, where every ref is +** defined before its use. 
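+**
+** (Illustration with made-up refs: if instruction 0013 is ADD(0011, 0012)
+** and the substitution table already maps 0011 -> 0021 and 0012 -> 0022,
+** then ADD(0021, 0022) is re-emitted through the pipeline and whatever
+** ref that returns becomes subst[0013]. A regular (IRM_N) instruction
+** whose substituted operands equal the originals is invariant and simply
+** maps to itself.)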
+** +** This aproach generates two code sections, separated by the LOOP +** instruction: +** +** 1. The recorded instructions form a kind of pre-roll for the loop. It +** contains a mix of invariant and variant instructions and performs +** exactly one loop iteration (but not necessarily the 1st iteration). +** +** 2. The loop body contains only the variant instructions and performs +** all remaining loop iterations. +** +** On first sight that looks like a waste of space, because the variant +** instructions are present twice. But the key insight is that the +** pre-roll honors the control-dependencies for *both* the pre-roll itself +** *and* the loop body! +** +** It also means one doesn't have to explicitly model control-dependencies +** (which, BTW, wouldn't help LICM much). And it's much easier to +** integrate sparse snapshotting with this approach. +** +** One of the nicest aspects of this approach is that all of the +** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be +** reused with only minor restrictions (e.g. one should not fold +** instructions across loop-carried dependencies). +** +** But in general all optimizations can be applied which only need to look +** backwards into the generated instruction stream. At any point in time +** during the copy-substitution process this contains both a static loop +** iteration (the pre-roll) and a dynamic one (from the to-be-copied +** instruction up to the end of the partial loop body). +** +** Since control-dependencies are implicitly kept, CSE also applies to all +** kinds of guards. The major advantage is that all invariant guards can +** be hoisted, too. +** +** Load/store forwarding works across loop iterations, too. This is +** important if loop-carried dependencies are kept in upvalues or tables. +** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may +** become a forwarded loop-recurrence after inlining. +** +** Since the IR is in SSA form, loop-carried dependencies have to be +** modeled with PHI instructions. The potential candidates for PHIs are +** collected on-the-fly during copy-substitution. After eliminating the +** redundant ones, PHI instructions are emitted *below* the loop body. +** +** Note that this departure from traditional SSA form doesn't change the +** semantics of the PHI instructions themselves. But it greatly simplifies +** on-the-fly generation of the IR and the machine code. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Emit raw IR without passing through optimizations. */ +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- PHI elimination ----------------------------------------------------- */ + +/* Emit or eliminate collected PHIs. */ +static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi, + SnapNo onsnap) +{ + int passx = 0; + IRRef i, j, nslots; + IRRef invar = J->chain[IR_LOOP]; + /* Pass #1: mark redundant and potentially redundant PHIs. */ + for (i = 0, j = 0; i < nphi; i++) { + IRRef lref = phi[i]; + IRRef rref = subst[lref]; + if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */ + irt_clearphi(IR(lref)->t); + } else { + phi[j++] = (IRRef1)lref; + if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) { + /* Quick check for simple recurrences failed, need pass2. 
*/ + irt_setmark(IR(lref)->t); + passx = 1; + } + } + } + nphi = j; + /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */ + if (passx) { + SnapNo s; + for (i = J->cur.nins-1; i > invar; i--) { + IRIns *ir = IR(i); + if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t); + if (!irref_isk(ir->op1)) { + irt_clearmark(IR(ir->op1)->t); + if (ir->op1 < invar && + ir->o >= IR_CALLN && ir->o <= IR_CARG) { /* ORDER IR */ + ir = IR(ir->op1); + while (ir->o == IR_CARG) { + if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t); + if (irref_isk(ir->op1)) break; + ir = IR(ir->op1); + irt_clearmark(ir->t); + } + } + } + } + for (s = J->cur.nsnap-1; s >= onsnap; s--) { + SnapShot *snap = &J->cur.snap[s]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + IRRef ref = snap_ref(map[n]); + if (!irref_isk(ref)) irt_clearmark(IR(ref)->t); + } + } + } + /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */ + nslots = J->baseslot+J->maxslot; + for (i = 1; i < nslots; i++) { + IRRef ref = tref_ref(J->slot[i]); + while (!irref_isk(ref) && ref != subst[ref]) { + IRIns *ir = IR(ref); + irt_clearmark(ir->t); /* Unmark potential uses, too. */ + if (irt_isphi(ir->t) || irt_ispri(ir->t)) + break; + irt_setphi(ir->t); + if (nphi >= LJ_MAX_PHI) + lj_trace_err(J, LJ_TRERR_PHIOV); + phi[nphi++] = (IRRef1)ref; + ref = subst[ref]; + if (ref > invar) + break; + } + } + /* Pass #4: propagate non-redundant PHIs. */ + while (passx) { + passx = 0; + for (i = 0; i < nphi; i++) { + IRRef lref = phi[i]; + IRIns *ir = IR(lref); + if (!irt_ismarked(ir->t)) { /* Propagate only from unmarked PHIs. */ + IRIns *irr = IR(subst[lref]); + if (irt_ismarked(irr->t)) { /* Right ref points to other PHI? */ + irt_clearmark(irr->t); /* Mark that PHI as non-redundant. */ + passx = 1; /* Retry. */ + } + } + } + } + /* Pass #5: emit PHI instructions or eliminate PHIs. */ + for (i = 0; i < nphi; i++) { + IRRef lref = phi[i]; + IRIns *ir = IR(lref); + if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */ + IRRef rref = subst[lref]; + if (rref > invar) + irt_setphi(IR(rref)->t); + emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref); + } else { /* Otherwise eliminate PHI. */ + irt_clearmark(ir->t); + irt_clearphi(ir->t); + } + } +} + +/* -- Loop unrolling using copy-substitution ------------------------------ */ + +/* Copy-substitute snapshot. */ +static void loop_subst_snap(jit_State *J, SnapShot *osnap, + SnapEntry *loopmap, IRRef1 *subst) +{ + SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs]; + SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)]; + MSize nmapofs; + MSize on, ln, nn, onent = osnap->nent; + BCReg nslots = osnap->nslots; + SnapShot *snap = &J->cur.snap[J->cur.nsnap]; + if (irt_isguard(J->guardemit)) { /* Guard inbetween? */ + nmapofs = J->cur.nsnapmap; + J->cur.nsnap++; /* Add new snapshot. */ + } else { /* Otherwise overwrite previous snapshot. */ + snap--; + nmapofs = snap->mapofs; + } + J->guardemit.irt = 0; + /* Setup new snapshot. */ + snap->mapofs = (uint16_t)nmapofs; + snap->ref = (IRRef1)J->cur.nins; + snap->nslots = nslots; + snap->topslot = osnap->topslot; + snap->count = 0; + nmap = &J->cur.snapmap[nmapofs]; + /* Substitute snapshot slots. */ + on = ln = nn = 0; + while (on < onent) { + SnapEntry osn = omap[on], lsn = loopmap[ln]; + if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */ + nmap[nn++] = lsn; + ln++; + } else { /* Copy substituted slot from snapshot map. 
*/ + if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */ + if (!irref_isk(snap_ref(osn))) + osn = snap_setref(osn, subst[snap_ref(osn)]); + nmap[nn++] = osn; + on++; + } + } + while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */ + nmap[nn++] = loopmap[ln++]; + snap->nent = (uint8_t)nn; + omap += onent; + nmap += nn; + while (omap < nextmap) /* Copy PC + frame links. */ + *nmap++ = *omap++; + J->cur.nsnapmap = (uint16_t)(nmap - J->cur.snapmap); +} + +typedef struct LoopState { + jit_State *J; + IRRef1 *subst; + MSize sizesubst; +} LoopState; + +/* Unroll loop. */ +static void loop_unroll(LoopState *lps) +{ + jit_State *J = lps->J; + IRRef1 phi[LJ_MAX_PHI]; + uint32_t nphi = 0; + IRRef1 *subst; + SnapNo onsnap; + SnapShot *osnap, *loopsnap; + SnapEntry *loopmap, *psentinel; + IRRef ins, invar; + + /* Allocate substitution table. + ** Only non-constant refs in [REF_BIAS,invar) are valid indexes. + */ + invar = J->cur.nins; + lps->sizesubst = invar - REF_BIAS; + lps->subst = lj_mem_newvec(J->L, lps->sizesubst, IRRef1); + subst = lps->subst - REF_BIAS; + subst[REF_BASE] = REF_BASE; + + /* LOOP separates the pre-roll from the loop body. */ + emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0); + + /* Grow snapshot buffer and map for copy-substituted snapshots. + ** Need up to twice the number of snapshots minus #0 and loop snapshot. + ** Need up to twice the number of entries plus fallback substitutions + ** from the loop snapshot entries for each new snapshot. + ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap! + */ + onsnap = J->cur.nsnap; + lj_snap_grow_buf(J, 2*onsnap-2); + lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent); + + /* The loop snapshot is used for fallback substitutions. */ + loopsnap = &J->cur.snap[onsnap-1]; + loopmap = &J->cur.snapmap[loopsnap->mapofs]; + /* The PC of snapshot #0 and the loop snapshot must match. */ + psentinel = &loopmap[loopsnap->nent]; + lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]); + *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */ + + /* Start substitution with snapshot #1 (#0 is empty for root traces). */ + osnap = &J->cur.snap[1]; + + /* Copy and substitute all recorded instructions and snapshots. */ + for (ins = REF_FIRST; ins < invar; ins++) { + IRIns *ir; + IRRef op1, op2; + + if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */ + loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */ + + /* Substitute instruction operands. */ + ir = IR(ins); + op1 = ir->op1; + if (!irref_isk(op1)) op1 = subst[op1]; + op2 = ir->op2; + if (!irref_isk(op2)) op2 = subst[op2]; + if (irm_kind(lj_ir_mode[ir->o]) == IRM_N && + op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */ + subst[ins] = (IRRef1)ins; /* Shortcut. */ + } else { + /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */ + IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */ + IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2)); + subst[ins] = (IRRef1)ref; + if (ref != ins) { + IRIns *irr = IR(ref); + if (ref < invar) { /* Loop-carried dependency? */ + /* Potential PHI? */ + if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) { + irt_setphi(irr->t); + if (nphi >= LJ_MAX_PHI) + lj_trace_err(J, LJ_TRERR_PHIOV); + phi[nphi++] = (IRRef1)ref; + } + /* Check all loop-carried dependencies for type instability. 
*/ + if (!irt_sametype(t, irr->t)) { + if (irt_isinteger(t) && irt_isinteger(irr->t)) + continue; + else if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */ + ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT)); + else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */ + ref = tref_ref(emitir(IRTGI(IR_CONV), ref, + IRCONV_INT_NUM|IRCONV_CHECK)); + else + lj_trace_err(J, LJ_TRERR_TYPEINS); + subst[ins] = (IRRef1)ref; + irr = IR(ref); + goto phiconv; + } + } else if (ref != REF_DROP && irr->o == IR_CONV && + ref > invar && irr->op1 < invar) { + /* May need an extra PHI for a CONV. */ + ref = irr->op1; + irr = IR(ref); + phiconv: + if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) { + irt_setphi(irr->t); + if (nphi >= LJ_MAX_PHI) + lj_trace_err(J, LJ_TRERR_PHIOV); + phi[nphi++] = (IRRef1)ref; + } + } + } + } + } + if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */ + J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs; + lua_assert(J->cur.nsnapmap <= J->sizesnapmap); + *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */ + + loop_emit_phi(J, subst, phi, nphi, onsnap); +} + +/* Undo any partial changes made by the loop optimization. */ +static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap) +{ + ptrdiff_t i; + SnapShot *snap = &J->cur.snap[nsnap-1]; + SnapEntry *map = J->cur.snapmap; + map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */ + J->cur.nsnapmap = (uint16_t)nsnapmap; + J->cur.nsnap = nsnap; + J->guardemit.irt = 0; + lj_ir_rollback(J, ins); + for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */ + BPropEntry *bp = &J->bpropcache[i]; + if (bp->val >= ins) + bp->key = 0; + } + for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */ + IRIns *ir = IR(ins); + irt_clearphi(ir->t); + irt_clearmark(ir->t); + } +} + +/* Protected callback for loop optimization. */ +static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud) +{ + UNUSED(L); UNUSED(dummy); + loop_unroll((LoopState *)ud); + return NULL; +} + +/* Loop optimization. */ +int lj_opt_loop(jit_State *J) +{ + IRRef nins = J->cur.nins; + SnapNo nsnap = J->cur.nsnap; + MSize nsnapmap = J->cur.nsnapmap; + LoopState lps; + int errcode; + lps.J = J; + lps.subst = NULL; + lps.sizesubst = 0; + errcode = lj_vm_cpcall(J->L, NULL, &lps, cploop_opt); + lj_mem_freevec(J2G(J), lps.subst, lps.sizesubst, IRRef1); + if (LJ_UNLIKELY(errcode)) { + lua_State *L = J->L; + if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */ + int32_t e = numberVint(L->top-1); + switch ((TraceError)e) { + case LJ_TRERR_TYPEINS: /* Type instability. */ + case LJ_TRERR_GFAIL: /* Guard would always fail. */ + /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */ + if (--J->instunroll < 0) /* But do not unroll forever. */ + break; + L->top--; /* Remove error object. */ + loop_undo(J, nins, nsnap, nsnapmap); + return 1; /* Loop optimization failed, continue recording. */ + default: + break; + } + } + lj_err_throw(L, errcode); /* Propagate all other errors. */ + } + return 0; /* Loop optimization is ok. */ +} + +#undef IR +#undef emitir +#undef emitir_raw + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_mem.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_mem.c new file mode 100644 index 00000000000..e04a6228f4b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_mem.c @@ -0,0 +1,935 @@ +/* +** Memory access optimizations. 
+** AA: Alias Analysis using high-level semantic disambiguation. +** FWD: Load Forwarding (L2L) + Store Forwarding (S2L). +** DSE: Dead-Store Elimination. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_mem_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_tab.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_ircall.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) +#define fleft (&J->fold.left) +#define fright (&J->fold.right) + +/* +** Caveat #1: return value is not always a TRef -- only use with tref_ref(). +** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold(). +*/ + +/* Return values from alias analysis. */ +typedef enum { + ALIAS_NO, /* The two refs CANNOT alias (exact). */ + ALIAS_MAY, /* The two refs MAY alias (inexact). */ + ALIAS_MUST /* The two refs MUST alias (exact). */ +} AliasRet; + +/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */ + +/* Simplified escape analysis: check for intervening stores. */ +static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop) +{ + IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */ + for (ir++; ir < stop; ir++) + if (ir->op2 == ref && + (ir->o == IR_ASTORE || ir->o == IR_HSTORE || + ir->o == IR_USTORE || ir->o == IR_FSTORE)) + return ALIAS_MAY; /* Reference was stored and might alias. */ + return ALIAS_NO; /* Reference was not stored. */ +} + +/* Alias analysis for two different table references. */ +static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb) +{ + IRIns *taba = IR(ta), *tabb = IR(tb); + int newa, newb; + lua_assert(ta != tb); + lua_assert(irt_istab(taba->t) && irt_istab(tabb->t)); + /* Disambiguate new allocations. */ + newa = (taba->o == IR_TNEW || taba->o == IR_TDUP); + newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP); + if (newa && newb) + return ALIAS_NO; /* Two different allocations never alias. */ + if (newb) { /* At least one allocation? */ + IRIns *tmp = taba; taba = tabb; tabb = tmp; + } else if (!newa) { + return ALIAS_MAY; /* Anything else: we just don't know. */ + } + return aa_escape(J, taba, tabb); +} + +/* Alias analysis for array and hash access using key-based disambiguation. */ +static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb) +{ + IRRef ka = refa->op2; + IRRef kb = refb->op2; + IRIns *keya, *keyb; + IRRef ta, tb; + if (refa == refb) + return ALIAS_MUST; /* Shortcut for same refs. */ + keya = IR(ka); + if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); } + keyb = IR(kb); + if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); } + ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1; + tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1; + if (ka == kb) { + /* Same key. Check for same table with different ref (NEWREF vs. HREF). */ + if (ta == tb) + return ALIAS_MUST; /* Same key, same table. */ + else + return aa_table(J, ta, tb); /* Same key, possibly different table. */ + } + if (irref_isk(ka) && irref_isk(kb)) + return ALIAS_NO; /* Different constant keys. */ + if (refa->o == IR_AREF) { + /* Disambiguate array references based on index arithmetic. */ + int32_t ofsa = 0, ofsb = 0; + IRRef basea = ka, baseb = kb; + lua_assert(refb->o == IR_AREF); + /* Gather base and offset from t[base] or t[base+-ofs]. 
*/ + if (keya->o == IR_ADD && irref_isk(keya->op2)) { + basea = keya->op1; + ofsa = IR(keya->op2)->i; + if (basea == kb && ofsa != 0) + return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */ + } + if (keyb->o == IR_ADD && irref_isk(keyb->op2)) { + baseb = keyb->op1; + ofsb = IR(keyb->op2)->i; + if (ka == baseb && ofsb != 0) + return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */ + } + if (basea == baseb && ofsa != ofsb) + return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */ + } else { + /* Disambiguate hash references based on the type of their keys. */ + lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) && + (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF)); + if (!irt_sametype(keya->t, keyb->t)) + return ALIAS_NO; /* Different key types. */ + } + if (ta == tb) + return ALIAS_MAY; /* Same table, cannot disambiguate keys. */ + else + return aa_table(J, ta, tb); /* Try to disambiguate tables. */ +} + +/* Array and hash load forwarding. */ +static TRef fwd_ahload(jit_State *J, IRRef xref) +{ + IRIns *xr = IR(xref); + IRRef lim = xref; /* Search limit. */ + IRRef ref; + + /* Search for conflicting stores. */ + ref = J->chain[fins->o+IRDELTA_L2S]; + while (ref > xref) { + IRIns *store = IR(ref); + switch (aa_ahref(J, xr, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + + /* No conflicting store (yet): const-fold loads from allocations. */ + { + IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr; + IRRef tab = ir->op1; + ir = IR(tab); + if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) { + /* A NEWREF with a number key may end up pointing to the array part. + ** But it's referenced from HSTORE and not found in the ASTORE chain. + ** For now simply consider this a conflict without forwarding anything. + */ + if (xr->o == IR_AREF) { + IRRef ref2 = J->chain[IR_NEWREF]; + while (ref2 > tab) { + IRIns *newref = IR(ref2); + if (irt_isnum(IR(newref->op2)->t)) + goto cselim; + ref2 = newref->prev; + } + } + /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF. + ** But the above search for conflicting stores was limited by xref. + ** So continue searching, limited by the TNEW/TDUP. Store forwarding + ** is ok, too. A conflict does NOT limit the search for a matching load. + */ + while (ref > tab) { + IRIns *store = IR(ref); + switch (aa_ahref(J, xr, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: goto cselim; /* Conflicting store. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t)); + if (irt_ispri(fins->t)) { + return TREF_PRI(irt_type(fins->t)); + } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) || + irt_isstr(fins->t)) { + TValue keyv; + cTValue *tv; + IRIns *key = IR(xr->op2); + if (key->o == IR_KSLOT) key = IR(key->op1); + lj_ir_kvalue(J->L, &keyv, key); + tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv); + lua_assert(itype2irt(tv) == irt_type(fins->t)); + if (irt_isnum(fins->t)) + return lj_ir_knum_u64(J, tv->u64); + else if (LJ_DUALNUM && irt_isint(fins->t)) + return lj_ir_kint(J, intV(tv)); + else + return lj_ir_kstr(J, strV(tv)); + } + /* Othwerwise: don't intern as a constant. */ + } + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. 
*/ + ref = J->chain[fins->o]; + while (ref > lim) { + IRIns *load = IR(ref); + if (load->op1 == xref) + return ref; /* Load forwarding. */ + ref = load->prev; + } + return 0; /* Conflict or no match. */ +} + +/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */ +static TRef fwd_aload_reassoc(jit_State *J) +{ + IRIns *irx = IR(fins->op1); + IRIns *key = IR(irx->op2); + if (key->o == IR_ADD && irref_isk(key->op2)) { + IRIns *add2 = IR(key->op1); + if (add2->o == IR_ADD && irref_isk(add2->op2) && + IR(key->op2)->i == -IR(add2->op2)->i) { + IRRef ref = J->chain[IR_AREF]; + IRRef lim = add2->op1; + if (irx->op1 > lim) lim = irx->op1; + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == irx->op1 && ir->op2 == add2->op1) + return fwd_ahload(J, ref); + ref = ir->prev; + } + } + } + return 0; +} + +/* ALOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J) +{ + IRRef ref; + if ((ref = fwd_ahload(J, fins->op1)) || + (ref = fwd_aload_reassoc(J))) + return ref; + return EMITFOLD; +} + +/* HLOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J) +{ + IRRef ref = fwd_ahload(J, fins->op1); + if (ref) + return ref; + return EMITFOLD; +} + +/* HREFK forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J) +{ + IRRef tab = fleft->op1; + IRRef ref = J->chain[IR_NEWREF]; + while (ref > tab) { + IRIns *newref = IR(ref); + if (tab == newref->op1) { + if (fright->op1 == newref->op2) + return ref; /* Forward from NEWREF. */ + else + goto docse; + } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) { + goto docse; + } + ref = newref->prev; + } + /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */ + if (IR(tab)->o == IR_TDUP) + fins->t.irt &= ~IRT_GUARD; /* Drop HREFK guard. */ +docse: + return CSEFOLD; +} + +/* Check whether HREF of TNEW/TDUP can be folded to niltv. */ +int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J) +{ + IRRef lim = fins->op1; /* Search limit. */ + IRRef ref; + + /* The key for an ASTORE may end up in the hash part after a NEWREF. */ + if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) { + ref = J->chain[IR_ASTORE]; + while (ref > lim) { + if (ref < J->chain[IR_NEWREF]) + return 0; /* Conflict. */ + ref = IR(ref)->prev; + } + } + + /* Search for conflicting stores. */ + ref = J->chain[IR_HSTORE]; + while (ref > lim) { + IRIns *store = IR(ref); + if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO) + return 0; /* Conflict. */ + ref = store->prev; + } + + return 1; /* No conflict. Can fold to niltv. */ +} + +/* Check whether there's no aliasing table.clear. */ +static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta) +{ + IRRef ref = J->chain[IR_CALLS]; + while (ref > lim) { + IRIns *calls = IR(ref); + if (calls->op2 == IRCALL_lj_tab_clear && + (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO)) + return 0; /* Conflict. */ + ref = calls->prev; + } + return 1; /* No conflict. Can safely FOLD/CSE. */ +} + +/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */ +int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim) +{ + IRRef ta = fins->op1; + IRRef ref = J->chain[IR_NEWREF]; + while (ref > lim) { + IRIns *newref = IR(ref); + if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO) + return 0; /* Conflict. */ + ref = newref->prev; + } + return fwd_aa_tab_clear(J, lim, ta); +} + +/* ASTORE/HSTORE elimination. */ +TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J) +{ + IRRef xref = fins->op1; /* xREF reference. 
*/ + IRRef val = fins->op2; /* Stored value reference. */ + IRIns *xr = IR(xref); + IRRef1 *refp = &J->chain[fins->o]; + IRRef ref = *refp; + while (ref > xref) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_ahref(J, xr, IR(store->op1))) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: /* Store to MAYBE the same location. */ + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: /* Store to the same location. */ + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards (includes conflicting loads). */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t) || ir->o == IR_CALLL) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- ULOAD forwarding ---------------------------------------------------- */ + +/* The current alias analysis for upvalues is very simplistic. It only +** disambiguates between the unique upvalues of the same function. +** This is good enough for now, since most upvalues are read-only. +** +** A more precise analysis would be feasible with the help of the parser: +** generate a unique key for every upvalue, even across all prototypes. +** Lacking a realistic use-case, it's unclear whether this is beneficial. +*/ +static AliasRet aa_uref(IRIns *refa, IRIns *refb) +{ + if (refa->o != refb->o) + return ALIAS_NO; /* Different UREFx type. */ + if (refa->op1 == refb->op1) { /* Same function. */ + if (refa->op2 == refb->op2) + return ALIAS_MUST; /* Same function, same upvalue idx. */ + else + return ALIAS_NO; /* Same function, different upvalue idx. */ + } else { /* Different functions, check disambiguation hash values. */ + if (((refa->op2 ^ refb->op2) & 0xff)) + return ALIAS_NO; /* Upvalues with different hash values cannot alias. */ + else + return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */ + } +} + +/* ULOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J) +{ + IRRef uref = fins->op1; + IRRef lim = REF_BASE; /* Search limit. */ + IRIns *xr = IR(uref); + IRRef ref; + + /* Search for conflicting stores. */ + ref = J->chain[IR_USTORE]; + while (ref > lim) { + IRIns *store = IR(ref); + switch (aa_uref(xr, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. */ + + ref = J->chain[IR_ULOAD]; + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == uref || + (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o)) + return ref; /* Match for identical or equal UREFx (non-CSEable UREFO). */ + ref = ir->prev; + } + return lj_ir_emit(J); +} + +/* USTORE elimination. 
*/ +TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J) +{ + IRRef xref = fins->op1; /* xREF reference. */ + IRRef val = fins->op2; /* Stored value reference. */ + IRIns *xr = IR(xref); + IRRef1 *refp = &J->chain[IR_USTORE]; + IRRef ref = *refp; + while (ref > xref) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_uref(xr, IR(store->op1))) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: /* Store to MAYBE the same location. */ + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: /* Store to the same location. */ + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards (includes conflicting loads). */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t)) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + if (ref+1 < J->cur.nins && + store[1].o == IR_OBAR && store[1].op1 == xref) { + IRRef1 *bp = &J->chain[IR_OBAR]; + IRIns *obar; + for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp)) + bp = &obar->prev; + /* Remove OBAR, too. */ + *bp = obar->prev; + obar->o = IR_NOP; + obar->t.irt = IRT_NIL; + obar->op1 = obar->op2 = 0; + obar->prev = 0; + } + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */ + +/* Alias analysis for field access. +** Field loads are cheap and field stores are rare. +** Simple disambiguation based on field types is good enough. +*/ +static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb) +{ + if (refa->op2 != refb->op2) + return ALIAS_NO; /* Different fields. */ + if (refa->op1 == refb->op1) + return ALIAS_MUST; /* Same field, same object. */ + else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM) + return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */ + else + return ALIAS_MAY; /* Same field, possibly different object. */ +} + +/* Only the loads for mutable fields end up here (see FOLD). */ +TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J) +{ + IRRef oref = fins->op1; /* Object reference. */ + IRRef fid = fins->op2; /* Field ID. */ + IRRef lim = oref; /* Search limit. */ + IRRef ref; + + /* Search for conflicting stores. */ + ref = J->chain[IR_FSTORE]; + while (ref > oref) { + IRIns *store = IR(ref); + switch (aa_fref(J, fins, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + + /* No conflicting store: const-fold field loads from allocations. */ + if (fid == IRFL_TAB_META) { + IRIns *ir = IR(oref); + if (ir->o == IR_TNEW || ir->o == IR_TDUP) + return lj_ir_knull(J, IRT_TAB); + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. */ + return lj_opt_cselim(J, lim); +} + +/* FSTORE elimination. 
*/ +TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J) +{ + IRRef fref = fins->op1; /* FREF reference. */ + IRRef val = fins->op2; /* Stored value reference. */ + IRIns *xr = IR(fref); + IRRef1 *refp = &J->chain[IR_FSTORE]; + IRRef ref = *refp; + while (ref > fref) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_fref(J, xr, IR(store->op1))) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards or conflicting loads. */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2)) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */ + +/* Find cdata allocation for a reference (if any). */ +static IRIns *aa_findcnew(jit_State *J, IRIns *ir) +{ + while (ir->o == IR_ADD) { + if (!irref_isk(ir->op1)) { + IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */ + if (ir1) return ir1; + } + if (irref_isk(ir->op2)) return NULL; + ir = IR(ir->op2); /* Flatten right-recursion. */ + } + return ir->o == IR_CNEW ? ir : NULL; +} + +/* Alias analysis for two cdata allocations. */ +static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb) +{ + IRIns *cnewa = aa_findcnew(J, refa); + IRIns *cnewb = aa_findcnew(J, refb); + if (cnewa == cnewb) + return ALIAS_MAY; /* Same allocation or neither is an allocation. */ + if (cnewa && cnewb) + return ALIAS_NO; /* Two different allocations never alias. */ + if (cnewb) { cnewa = cnewb; refb = refa; } + return aa_escape(J, cnewa, refb); +} + +/* Alias analysis for XLOAD/XSTORE. */ +static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb) +{ + ptrdiff_t ofsa = 0, ofsb = 0; + IRIns *refb = IR(xb->op1); + IRIns *basea = refa, *baseb = refb; + if (refa == refb && irt_sametype(xa->t, xb->t)) + return ALIAS_MUST; /* Shortcut for same refs with identical type. */ + /* Offset-based disambiguation. */ + if (refa->o == IR_ADD && irref_isk(refa->op2)) { + IRIns *irk = IR(refa->op2); + basea = IR(refa->op1); + ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : + (ptrdiff_t)irk->i; + } + if (refb->o == IR_ADD && irref_isk(refb->op2)) { + IRIns *irk = IR(refb->op2); + baseb = IR(refb->op1); + ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : + (ptrdiff_t)irk->i; + } + /* Treat constified pointers like base vs. base+offset. */ + if (basea->o == IR_KPTR && baseb->o == IR_KPTR) { + ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea); + baseb = basea; + } + /* This implements (very) strict aliasing rules. + ** Different types do NOT alias, except for differences in signedness. + ** Type punning through unions is allowed (but forces a reload). 
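+  ** E.g. with the same base, an int32_t access at offset 0 vs. an int32_t
+  ** or uint32_t access at offset 0 gives ALIAS_MUST, an int32_t at 0..3
+  ** vs. a double at 8..15 gives ALIAS_NO, and an int32_t vs. a double at
+  ** the same offset falls through to ALIAS_MAY and forces a reload.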
+ */ + if (basea == baseb) { + ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t); + if (ofsa == ofsb) { + if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t)) + return ALIAS_MUST; /* Same-sized, same-kind. May need to convert. */ + } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) { + return ALIAS_NO; /* Non-overlapping base+-o1 vs. base+-o2. */ + } + /* NYI: extract, extend or reinterpret bits (int <-> fp). */ + return ALIAS_MAY; /* Overlapping or type punning: force reload. */ + } + if (!irt_sametype(xa->t, xb->t) && + !(irt_typerange(xa->t, IRT_I8, IRT_U64) && + ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1)) + return ALIAS_NO; + /* NYI: structural disambiguation. */ + return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. */ +} + +/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */ +static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2) +{ + IRRef ref = J->chain[op]; + IRRef lim = op1; + if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; } + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == op1 && ir->op2 == op2) + return ref; + ref = ir->prev; + } + return 0; +} + +/* Reassociate index references. */ +static IRRef reassoc_xref(jit_State *J, IRIns *ir) +{ + ptrdiff_t ofs = 0; + if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */ + IRIns *irk = IR(ir->op2); + ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : + (ptrdiff_t)irk->i; + ir = IR(ir->op1); + } + if (ir->o == IR_ADD) { /* Add of base + index. */ + /* Index ref > base ref for loop-carried dependences. Only check op1. */ + IRIns *ir2, *ir1 = IR(ir->op1); + int32_t shift = 0; + IRRef idxref; + /* Determine index shifts. Don't bother with IR_MUL here. */ + if (ir1->o == IR_BSHL && irref_isk(ir1->op2)) + shift = IR(ir1->op2)->i; + else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2) + shift = 1; + else + ir1 = ir; + ir2 = IR(ir1->op1); + /* A non-reassociated add. Must be a loop-carried dependence. */ + if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2)) + ofs += (ptrdiff_t)IR(ir2->op2)->i << shift; + else + return 0; + idxref = ir2->op1; + /* Try to CSE the reassociated chain. Give up if not found. */ + if (ir1 != ir && + !(idxref = reassoc_trycse(J, ir1->o, idxref, + ir1->o == IR_BSHL ? ir1->op2 : idxref))) + return 0; + if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2))) + return 0; + if (ofs != 0) { + IRRef refk = tref_ref(lj_ir_kintp(J, ofs)); + if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk))) + return 0; + } + return idxref; /* Success, found a reassociated index reference. Phew. */ + } + return 0; /* Failure. */ +} + +/* XLOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J) +{ + IRRef xref = fins->op1; + IRIns *xr = IR(xref); + IRRef lim = xref; /* Search limit. */ + IRRef ref; + + if ((fins->op2 & IRXLOAD_READONLY)) + goto cselim; + if ((fins->op2 & IRXLOAD_VOLATILE)) + goto doemit; + + /* Search for conflicting stores. */ + ref = J->chain[IR_XSTORE]; +retry: + if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS]; + if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR]; + while (ref > lim) { + IRIns *store = IR(ref); + switch (aa_xref(J, xr, fins, store)) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: + /* Emit conversion if the loaded type doesn't match the forwarded type. 
*/ + if (!irt_sametype(fins->t, IR(store->op2)->t)) { + IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t); + if (dt == IRT_I8 || dt == IRT_I16) { /* Trunc + sign-extend. */ + st = dt | IRCONV_SEXT; + dt = IRT_INT; + } else if (dt == IRT_U8 || dt == IRT_U16) { /* Trunc + zero-extend. */ + st = dt; + dt = IRT_INT; + } + fins->ot = IRT(IR_CONV, dt); + fins->op1 = store->op2; + fins->op2 = (dt<<5)|st; + return RETRYFOLD; + } + return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. */ + ref = J->chain[IR_XLOAD]; + while (ref > lim) { + /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */ + if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t)) + return ref; + ref = IR(ref)->prev; + } + + /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */ + if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] && + xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) { + ref = J->chain[IR_XSTORE]; + while (ref > lim) /* Skip stores that have already been checked. */ + ref = IR(ref)->prev; + lim = xref; + xr = IR(xref); + goto retry; /* Retry with the reassociated reference. */ + } +doemit: + return EMITFOLD; +} + +/* XSTORE elimination. */ +TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J) +{ + IRRef xref = fins->op1; + IRIns *xr = IR(xref); + IRRef lim = xref; /* Search limit. */ + IRRef val = fins->op2; /* Stored value reference. */ + IRRef1 *refp = &J->chain[IR_XSTORE]; + IRRef ref = *refp; + if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS]; + if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR]; + if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW]; + while (ref > lim) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_xref(J, xr, fins, store)) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards or any XLOADs (no AA performed). */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t) || ir->o == IR_XLOAD) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- Forwarding of lj_tab_len -------------------------------------------- */ + +/* This is rather simplistic right now, but better than nothing. */ +TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J) +{ + IRRef tab = fins->op1; /* Table reference. */ + IRRef lim = tab; /* Search limit. */ + IRRef ref; + + /* Any ASTORE is a conflict and limits the search. */ + if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE]; + + /* Search for conflicting HSTORE with numeric key. 
*/ + ref = J->chain[IR_HSTORE]; + while (ref > lim) { + IRIns *store = IR(ref); + IRIns *href = IR(store->op1); + IRIns *key = IR(href->op2); + if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) { + lim = ref; /* Conflicting store found, limits search for TLEN. */ + break; + } + ref = store->prev; + } + + /* Search for aliasing table.clear. */ + if (!fwd_aa_tab_clear(J, lim, tab)) + return lj_ir_emit(J); + + /* Try to find a matching load. Below the conflicting store, if any. */ + return lj_opt_cselim(J, lim); +} + +/* -- ASTORE/HSTORE previous type analysis -------------------------------- */ + +/* Check whether the previous value for a table store is non-nil. +** This can be derived either from a previous store or from a previous +** load (because all loads from tables perform a type check). +** +** The result of the analysis can be used to avoid the metatable check +** and the guard against HREF returning niltv. Both of these are cheap, +** so let's not spend too much effort on the analysis. +** +** A result of 1 is exact: previous value CANNOT be nil. +** A result of 0 is inexact: previous value MAY be nil. +*/ +int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref) +{ + /* First check stores. */ + IRRef ref = J->chain[loadop+IRDELTA_L2S]; + while (ref > xref) { + IRIns *store = IR(ref); + if (store->op1 == xref) { /* Same xREF. */ + /* A nil store MAY alias, but a non-nil store MUST alias. */ + return !irt_isnil(store->t); + } else if (irt_isnil(store->t)) { /* Must check any nil store. */ + IRRef skref = IR(store->op1)->op2; + IRRef xkref = IR(xref)->op2; + /* Same key type MAY alias. Need ALOAD check due to multiple int types. */ + if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) { + if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref)) + return 0; /* A nil store with same const key or var key MAY alias. */ + /* Different const keys CANNOT alias. */ + } /* Different key types CANNOT alias. */ + } /* Other non-nil stores MAY alias. */ + ref = store->prev; + } + + /* Check loads since nothing could be derived from stores. */ + ref = J->chain[loadop]; + while (ref > xref) { + IRIns *load = IR(ref); + if (load->op1 == xref) { /* Same xREF. */ + /* A nil load MAY alias, but a non-nil load MUST alias. */ + return !irt_isnil(load->t); + } /* Other non-nil loads MAY alias. */ + ref = load->prev; + } + return 0; /* Nothing derived at all, previous value MAY be nil. */ +} + +/* ------------------------------------------------------------------------ */ + +#undef IR +#undef fins +#undef fleft +#undef fright + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_narrow.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_narrow.c new file mode 100644 index 00000000000..d1993452a6a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_narrow.c @@ -0,0 +1,652 @@ +/* +** NARROW: Narrowing of numbers to integers (double to int32_t). +** STRIPOV: Stripping of overflow checks. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_narrow_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_vm.h" +#include "lj_strscan.h" + +/* Rationale for narrowing optimizations: +** +** Lua has only a single number type and this is a FP double by default. +** Narrowing doubles to integers does not pay off for the interpreter on a +** current-generation x86/x64 machine. 
Most FP operations need the same +** amount of execution resources as their integer counterparts, except +** with slightly longer latencies. Longer latencies are a non-issue for +** the interpreter, since they are usually hidden by other overhead. +** +** The total CPU execution bandwidth is the sum of the bandwidth of the FP +** and the integer units, because they execute in parallel. The FP units +** have an equal or higher bandwidth than the integer units. Not using +** them means losing execution bandwidth. Moving work away from them to +** the already quite busy integer units is a losing proposition. +** +** The situation for JIT-compiled code is a bit different: the higher code +** density makes the extra latencies much more visible. Tight loops expose +** the latencies for updating the induction variables. Array indexing +** requires narrowing conversions with high latencies and additional +** guards (to check that the index is really an integer). And many common +** optimizations only work on integers. +** +** One solution would be speculative, eager narrowing of all number loads. +** This causes many problems, like losing -0 or the need to resolve type +** mismatches between traces. It also effectively forces the integer type +** to have overflow-checking semantics. This impedes many basic +** optimizations and requires adding overflow checks to all integer +** arithmetic operations (whereas FP arithmetics can do without). +** +** Always replacing an FP op with an integer op plus an overflow check is +** counter-productive on a current-generation super-scalar CPU. Although +** the overflow check branches are highly predictable, they will clog the +** execution port for the branch unit and tie up reorder buffers. This is +** turning a pure data-flow dependency into a different data-flow +** dependency (with slightly lower latency) *plus* a control dependency. +** In general, you don't want to do this since latencies due to data-flow +** dependencies can be well hidden by out-of-order execution. +** +** A better solution is to keep all numbers as FP values and only narrow +** when it's beneficial to do so. LuaJIT uses predictive narrowing for +** induction variables and demand-driven narrowing for index expressions, +** integer arguments and bit operations. Additionally it can eliminate or +** hoist most of the resulting overflow checks. Regular arithmetic +** computations are never narrowed to integers. +** +** The integer type in the IR has convenient wrap-around semantics and +** ignores overflow. Extra operations have been added for +** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type. +** Apart from reducing overall complexity of the compiler, this also +** nicely solves the problem where you want to apply algebraic +** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can +** use lea instead of an add for integer ADD, but not for ADDOV (lea does +** not affect the flags, but it helps to avoid register moves). +** +** +** All of the above has to be reconsidered for architectures with slow FP +** operations or without a hardware FPU. The dual-number mode of LuaJIT +** addresses this issue. Arithmetic operations are performed on integers +** as far as possible and overflow checks are added as needed. +** +** This implies that narrowing for integer arguments and bit operations +** should also strip overflow checks, e.g. replace ADDOV with ADD. The +** original overflow guards are weak and can be eliminated by DCE, if +** there's no other use. 
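+**
+** (Illustration: if the result of 'a+b' is only consumed by a bit
+** operation, the recorded ADDOV(a, b) can be replaced by a plain
+** ADD(a, b), since wrap-around is harmless there; the original ADDOV,
+** now without uses, is then removed by DCE. See narrow_stripov_backprop()
+** below.)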
+** +** A slight twist is that it's usually beneficial to use overflow-checked +** integer arithmetics if all inputs are already integers. This is the only +** change that affects the single-number mode, too. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- Elimination of narrowing type conversions --------------------------- */ + +/* Narrowing of index expressions and bit operations is demand-driven. The +** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT) +** in all of these cases (e.g. array indexing or string indexing). FOLD +** already takes care of eliminating simple redundant conversions like +** CONV.int.num(CONV.num.int(x)) ==> x. +** +** But the surrounding code is FP-heavy and arithmetic operations are +** performed on FP numbers (for the single-number mode). Consider a common +** example such as 'x=t[i+1]', with 'i' already an integer (due to induction +** variable narrowing). The index expression would be recorded as +** CONV.int.num(ADD(CONV.num.int(i), 1)) +** which is clearly suboptimal. +** +** One can do better by recursively backpropagating the narrowing type +** conversion across FP arithmetic operations. This turns FP ops into +** their corresponding integer counterparts. Depending on the semantics of +** the conversion they also need to check for overflow. Currently only ADD +** and SUB are supported. +** +** The above example can be rewritten as +** ADDOV(CONV.int.num(CONV.num.int(i)), 1) +** and then into ADDOV(i, 1) after folding of the conversions. The original +** FP ops remain in the IR and are eliminated by DCE since all references to +** them are gone. +** +** [In dual-number mode the trace recorder already emits ADDOV etc., but +** this can be further reduced. See below.] +** +** Special care has to be taken to avoid narrowing across an operation +** which is potentially operating on non-integral operands. One obvious +** case is when an expression contains a non-integral constant, but ends +** up as an integer index at runtime (like t[x+1.5] with x=0.5). +** +** Operations with two non-constant operands illustrate a similar problem +** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there, +** unless it can be proven that either operand is integral (e.g. by CSEing +** a previous conversion). As a not-so-obvious corollary this logic also +** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]). +** +** Correctness of the transformation is guaranteed by avoiding to expand +** the tree by adding more conversions than the one we would need to emit +** if not backpropagating. TOBIT employs a more optimistic rule, because +** the conversion has special semantics, designed to make the life of the +** compiler writer easier. ;-) +** +** Using on-the-fly backpropagation of an expression tree doesn't work +** because it's unknown whether the transform is correct until the end. +** This either requires IR rollback and cache invalidation for every +** subtree or a two-pass algorithm. The former didn't work out too well, +** so the code now combines a recursive collector with a stack-based +** emitter. 
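+**
+** (Illustration for the index expression above: the collector pushes
+** NARROW_REF for 'i' (undoing its CONV.num.int), then NARROW_INT followed
+** by the constant 1, and finally the ADD opcode itself; replaying this
+** little stack program in the emitter yields the ADDOV(i, 1) mentioned
+** above, whose remaining check the ABC delegation described below may
+** take over.)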
+** +** [A recursive backpropagation algorithm with backtracking, employing +** skip-list lookup and round-robin caching, emitting stack operations +** on-the-fly for a stack-based interpreter -- and all of that in a meager +** kilobyte? Yep, compilers are a great treasure chest. Throw away your +** textbooks and read the codebase of a compiler today!] +** +** There's another optimization opportunity for array indexing: it's +** always accompanied by an array bounds-check. The outermost overflow +** check may be delegated to the ABC operation. This works because ABC is +** an unsigned comparison and wrap-around due to overflow creates negative +** numbers. +** +** But this optimization is only valid for constants that cannot overflow +** an int32_t into the range of valid array indexes [0..2^27+1). A check +** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30 +** wraps to -2^30-1. +** +** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are +** quite common. So the above example finally ends up as ADD(i, 1)! +** +** Later on, the assembler is able to fuse the whole array reference and +** the ADD into the memory operands of loads and other instructions. This +** is why LuaJIT is able to generate very pretty (and fast) machine code +** for array indexing. And that, my dear, concludes another story about +** one of the hidden secrets of LuaJIT ... +*/ + +/* Maximum backpropagation depth and maximum stack size. */ +#define NARROW_MAX_BACKPROP 100 +#define NARROW_MAX_STACK 256 + +/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1] +** The lower 16 bits hold a reference (or 0). The upper 16 bits hold +** the IR opcode + type or one of the following special opcodes: +*/ +enum { + NARROW_REF, /* Push ref. */ + NARROW_CONV, /* Push conversion of ref. */ + NARROW_SEXT, /* Push sign-extension of ref. */ + NARROW_INT /* Push KINT ref. The next code holds an int32_t. */ +}; + +typedef uint32_t NarrowIns; + +#define NARROWINS(op, ref) (((op) << 16) + (ref)) +#define narrow_op(ins) ((IROpT)((ins) >> 16)) +#define narrow_ref(ins) ((IRRef1)(ins)) + +/* Context used for narrowing of type conversions. */ +typedef struct NarrowConv { + jit_State *J; /* JIT compiler state. */ + NarrowIns *sp; /* Current stack pointer. */ + NarrowIns *maxsp; /* Maximum stack pointer minus redzone. */ + IRRef mode; /* Conversion mode (IRCONV_*). */ + IRType t; /* Destination type: IRT_INT or IRT_I64. */ + NarrowIns stack[NARROW_MAX_STACK]; /* Stack holding stack-machine code. */ +} NarrowConv; + +/* Lookup a reference in the backpropagation cache. */ +static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode) +{ + ptrdiff_t i; + for (i = 0; i < BPROP_SLOTS; i++) { + BPropEntry *bp = &J->bpropcache[i]; + /* Stronger checks are ok, too. */ + if (bp->key == key && bp->mode >= mode && + ((bp->mode ^ mode) & IRCONV_MODEMASK) == 0) + return bp; + } + return NULL; +} + +/* Add an entry to the backpropagation cache. */ +static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode) +{ + uint32_t slot = J->bpropslot; + BPropEntry *bp = &J->bpropcache[slot]; + J->bpropslot = (slot + 1) & (BPROP_SLOTS-1); + bp->key = key; + bp->val = val; + bp->mode = mode; +} + +/* Backpropagate overflow stripping. 
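[Editorial aside, not part of the patch, referring back to the array-indexing discussion above: the +-2^30 bound for delegating the outermost overflow check to ABC can be verified with plain uint32_t arithmetic, and the 'k + 0x40000000u < 0x80000000u' idiom used in the code below is just a branchless range test for -2^30 <= k < 2^30. A standalone sketch with illustrative names:]

#include <stdint.h>
#include <stdio.h>

/* Branchless test for -2^30 <= k < 2^30 (same idiom as in the code below). */
static int small_index_offset(int32_t k)
{
  return (uint32_t)k + 0x40000000u < 0x80000000u;
}

int main(void)
{
  /* Worst cases quoted above: the wrapped sums are either negative or far
  ** above any valid array index, so the unsigned ABC comparison still
  ** fails and the separate overflow guard can be omitted.
  */
  uint32_t lo = (uint32_t)INT32_MIN - 0x40000000u;  /* wraps to  2^30    */
  uint32_t hi = (uint32_t)INT32_MAX + 0x40000000u;  /* wraps to -2^30-1  */
  printf("%d %d %d %d\n", (int32_t)lo, (int32_t)hi,
         small_index_offset(1), small_index_offset(0x40000000));
  return 0;
}

[End of aside.]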
*/ +static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth) +{ + jit_State *J = nc->J; + IRIns *ir = IR(ref); + if (ir->o == IR_ADDOV || ir->o == IR_SUBOV || + (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) { + BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT); + if (bp) { + ref = bp->val; + } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) { + NarrowIns *savesp = nc->sp; + narrow_stripov_backprop(nc, ir->op1, depth); + if (nc->sp < nc->maxsp) { + narrow_stripov_backprop(nc, ir->op2, depth); + if (nc->sp < nc->maxsp) { + *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref); + return; + } + } + nc->sp = savesp; /* Path too deep, need to backtrack. */ + } + } + *nc->sp++ = NARROWINS(NARROW_REF, ref); +} + +/* Backpropagate narrowing conversion. Return number of needed conversions. */ +static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth) +{ + jit_State *J = nc->J; + IRIns *ir = IR(ref); + IRRef cref; + + if (nc->sp >= nc->maxsp) return 10; /* Path too deep. */ + + /* Check the easy cases first. */ + if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) { + if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY) + narrow_stripov_backprop(nc, ir->op1, depth+1); + else + *nc->sp++ = NARROWINS(NARROW_REF, ir->op1); /* Undo conversion. */ + if (nc->t == IRT_I64) + *nc->sp++ = NARROWINS(NARROW_SEXT, 0); /* Sign-extend integer. */ + return 0; + } else if (ir->o == IR_KNUM) { /* Narrow FP constant. */ + lua_Number n = ir_knum(ir)->n; + if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) { + /* Allows a wider range of constants. */ + int64_t k64 = (int64_t)n; + if (n == (lua_Number)k64) { /* Only if const doesn't lose precision. */ + *nc->sp++ = NARROWINS(NARROW_INT, 0); + *nc->sp++ = (NarrowIns)k64; /* But always truncate to 32 bits. */ + return 0; + } + } else { + int32_t k = lj_num2int(n); + /* Only if constant is a small integer. */ + if (checki16(k) && n == (lua_Number)k) { + *nc->sp++ = NARROWINS(NARROW_INT, 0); + *nc->sp++ = (NarrowIns)k; + return 0; + } + } + return 10; /* Never narrow other FP constants (this is rare). */ + } + + /* Try to CSE the conversion. Stronger checks are ok, too. */ + cref = J->chain[fins->o]; + while (cref > ref) { + IRIns *cr = IR(cref); + if (cr->op1 == ref && + (fins->o == IR_TOBIT || + ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) && + irt_isguard(cr->t) >= irt_isguard(fins->t)))) { + *nc->sp++ = NARROWINS(NARROW_REF, cref); + return 0; /* Already there, no additional conversion needed. */ + } + cref = cr->prev; + } + + /* Backpropagate across ADD/SUB. */ + if (ir->o == IR_ADD || ir->o == IR_SUB) { + /* Try cache lookup first. */ + IRRef mode = nc->mode; + BPropEntry *bp; + /* Inner conversions need a stronger check. */ + if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0) + mode += IRCONV_CHECK-IRCONV_INDEX; + bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode); + if (bp) { + *nc->sp++ = NARROWINS(NARROW_REF, bp->val); + return 0; + } else if (nc->t == IRT_I64) { + /* Try sign-extending from an existing (checked) conversion to int. 
*/ + mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX; + bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode); + if (bp) { + *nc->sp++ = NARROWINS(NARROW_REF, bp->val); + *nc->sp++ = NARROWINS(NARROW_SEXT, 0); + return 0; + } + } + if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) { + NarrowIns *savesp = nc->sp; + int count = narrow_conv_backprop(nc, ir->op1, depth); + count += narrow_conv_backprop(nc, ir->op2, depth); + if (count <= 1) { /* Limit total number of conversions. */ + *nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref); + return count; + } + nc->sp = savesp; /* Too many conversions, need to backtrack. */ + } + } + + /* Otherwise add a conversion. */ + *nc->sp++ = NARROWINS(NARROW_CONV, ref); + return 1; +} + +/* Emit the conversions collected during backpropagation. */ +static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc) +{ + /* The fins fields must be saved now -- emitir() overwrites them. */ + IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0; + IROpT convot = fins->ot; + IRRef1 convop2 = fins->op2; + NarrowIns *next = nc->stack; /* List of instructions from backpropagation. */ + NarrowIns *last = nc->sp; + NarrowIns *sp = nc->stack; /* Recycle the stack to store operands. */ + while (next < last) { /* Simple stack machine to process the ins. list. */ + NarrowIns ref = *next++; + IROpT op = narrow_op(ref); + if (op == NARROW_REF) { + *sp++ = ref; + } else if (op == NARROW_CONV) { + *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */ + } else if (op == NARROW_SEXT) { + lua_assert(sp >= nc->stack+1); + sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1], + (IRT_I64<<5)|IRT_INT|IRCONV_SEXT); + } else if (op == NARROW_INT) { + lua_assert(next < last); + *sp++ = nc->t == IRT_I64 ? + lj_ir_kint64(J, (int64_t)(int32_t)*next++) : + lj_ir_kint(J, *next++); + } else { /* Regular IROpT. Pops two operands and pushes one result. */ + IRRef mode = nc->mode; + lua_assert(sp >= nc->stack+2); + sp--; + /* Omit some overflow checks for array indexing. See comments above. */ + if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) { + if (next == last && irref_isk(narrow_ref(sp[0])) && + (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u) + guardot = 0; + else /* Otherwise cache a stronger check. */ + mode += IRCONV_CHECK-IRCONV_INDEX; + } + sp[-1] = emitir(op+guardot, sp[-1], sp[0]); + /* Add to cache. */ + if (narrow_ref(ref)) + narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode); + } + } + lua_assert(sp == nc->stack+1); + return nc->stack[0]; +} + +/* Narrow a type conversion of an arithmetic operation. */ +TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J) +{ + if ((J->flags & JIT_F_OPT_NARROW)) { + NarrowConv nc; + nc.J = J; + nc.sp = nc.stack; + nc.maxsp = &nc.stack[NARROW_MAX_STACK-4]; + nc.t = irt_type(fins->t); + if (fins->o == IR_TOBIT) { + nc.mode = IRCONV_TOBIT; /* Used only in the backpropagation cache. */ + } else { + nc.mode = fins->op2; + } + if (narrow_conv_backprop(&nc, fins->op1, 0) <= 1) + return narrow_conv_emit(J, &nc); + } + return NEXTFOLD; +} + +/* -- Narrowing of implicit conversions ----------------------------------- */ + +/* Recursively strip overflow checks. 
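[Editorial aside, not part of the patch: the reason ADDOV/SUBOV can lose their guards here is that the consumers (bit operations and 'overflow undefined' integer arguments) only ever look at the low 32 bits, and two's-complement wrap-around already produces the correct low 32 bits. A minimal standalone illustration with assumed helper names, not LuaJIT APIs:]

#include <stdint.h>

/* Low 32 bits of the exact (64 bit) sum ... */
static uint32_t sum_lo32_exact(int32_t a, int32_t b)
{
  return (uint32_t)((int64_t)a + (int64_t)b);
}

/* ... are identical to a wrapping 32 bit add, even when a+b overflows, so
** a consumer that only uses the low 32 bits cannot tell the difference and
** the overflow-checked ADDOV can be relaxed to a plain ADD.
*/
static uint32_t sum_lo32_wrap(int32_t a, int32_t b)
{
  return (uint32_t)a + (uint32_t)b;
}

[For example, both helpers return 0x80000000 for the inputs (INT32_MAX, 1). End of aside.]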
*/ +static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode) +{ + IRRef ref = tref_ref(tr); + IRIns *ir = IR(ref); + int op = ir->o; + if (op >= IR_ADDOV && op <= lastop) { + BPropEntry *bp = narrow_bpc_get(J, ref, mode); + if (bp) { + return TREF(bp->val, irt_t(IR(bp->val)->t)); + } else { + IRRef op1 = ir->op1, op2 = ir->op2; /* The IR may be reallocated. */ + op1 = narrow_stripov(J, op1, lastop, mode); + op2 = narrow_stripov(J, op2, lastop, mode); + tr = emitir(IRT(op - IR_ADDOV + IR_ADD, + ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2); + narrow_bpc_set(J, ref, tref_ref(tr), mode); + } + } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) { + tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode); + } + return tr; +} + +/* Narrow array index. */ +TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr) +{ + IRIns *ir; + lua_assert(tref_isnumber(tr)); + if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ + return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX); + /* Omit some overflow checks for array indexing. See comments above. */ + ir = IR(tref_ref(tr)); + if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) && + (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u) + return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2); + return tr; +} + +/* Narrow conversion to integer operand (overflow undefined). */ +TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr) +{ + if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ + return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY); + if (!tref_isinteger(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* + ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. + ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same. + */ + return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT); +} + +/* Narrow conversion to bitop operand (overflow wrapped). */ +TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr) +{ + if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ + return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J)); + if (!tref_isinteger(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* + ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV. + ** MULOV cannot be stripped due to precision widening. + */ + return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT); +} + +#if LJ_HASFFI +/* Narrow C array index (overflow undefined). */ +TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr) +{ + lua_assert(tref_isnumber(tr)); + if (tref_isnum(tr)) + return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY); + /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */ + return narrow_stripov(J, tr, IR_MULOV, + LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) : + ((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT)); +} +#endif + +/* -- Narrowing of arithmetic operators ----------------------------------- */ + +/* Check whether a number fits into an int32_t (-0 is ok, too). */ +static int numisint(lua_Number n) +{ + return (n == (lua_Number)lj_num2int(n)); +} + +/* Narrowing of arithmetic operations. 
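[Editorial aside, not part of the patch, on the numisint() helper defined just above: the round-trip through int32_t rejects non-integral, NaN and out-of-range doubles, while -0 is deliberately accepted because it compares equal to (double)0, which is exactly the '(-0 is ok, too)' remark. A hedged, portable stand-in that avoids the lj_num2int() round-trip but accepts the same set of values:]

#include <math.h>

/* True iff n is an integral double that fits into an int32_t.
** Like numisint(), -0.0 is accepted; 3.5, NaN, infinities and values
** outside [-2^31, 2^31) are rejected.
*/
static int fits_int32(double n)
{
  return n == floor(n) && n >= -2147483648.0 && n < 2147483648.0;
}

[So fits_int32(3.0) and fits_int32(-0.0) hold, while fits_int32(3.5) and fits_int32(1e10) do not. End of aside.]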
*/ +TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc, + TValue *vb, TValue *vc, IROp op) +{ + if (tref_isstr(rb)) { + rb = emitir(IRTG(IR_STRTO, IRT_NUM), rb, 0); + lj_strscan_num(strV(vb), vb); + } + if (tref_isstr(rc)) { + rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); + lj_strscan_num(strV(vc), vc); + } + /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */ + if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) && + tref_isinteger(rb) && tref_isinteger(rc) && + numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc), + (int)op - (int)IR_ADD))) + return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc); + if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT); + if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); + return emitir(IRTN(op), rb, rc); +} + +/* Narrowing of unary minus operator. */ +TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc) +{ + if (tref_isstr(rc)) { + rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); + lj_strscan_num(strV(vc), vc); + } + if (tref_isinteger(rc)) { + if ((uint32_t)numberVint(vc) != 0x80000000u) + return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc); + rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); + } + return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J)); +} + +/* Narrowing of modulo operator. */ +TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc) +{ + TRef tmp; + if (tvisstr(vc) && !lj_strscan_num(strV(vc), vc)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) && + tref_isinteger(rb) && tref_isinteger(rc) && + (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) { + emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0)); + return emitir(IRTI(IR_MOD), rb, rc); + } + /* b % c ==> b - floor(b/c)*c */ + rb = lj_ir_tonum(J, rb); + rc = lj_ir_tonum(J, rc); + tmp = emitir(IRTN(IR_DIV), rb, rc); + tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR); + tmp = emitir(IRTN(IR_MUL), tmp, rc); + return emitir(IRTN(IR_SUB), rb, tmp); +} + +/* Narrowing of power operator or math.pow. */ +TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc) +{ + if (tvisstr(vc) && !lj_strscan_num(strV(vc), vc)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* Narrowing must be unconditional to preserve (-x)^i semantics. */ + if (tvisint(vc) || numisint(numV(vc))) { + int checkrange = 0; + /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */ + if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) { + int32_t k = numberVint(vc); + if (!(k >= -65536 && k <= 65536)) goto split_pow; + checkrange = 1; + } + if (!tref_isinteger(rc)) { + if (tref_isstr(rc)) + rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); + /* Guarded conversion to integer! */ + rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK); + } + if (checkrange && !tref_isk(rc)) { /* Range guard: -65536 <= i <= 65536 */ + TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536)); + emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536)); + } + return emitir(IRTN(IR_POW), rb, rc); + } +split_pow: + /* FOLD covers most cases, but some are easier to do here. */ + if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb))))) + return rb; /* 1 ^ x ==> 1 */ + rc = lj_ir_tonum(J, rc); + if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5) + return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT); /* x ^ 0.5 ==> sqrt(x) */ + /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. 
*/ + rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2); + rc = emitir(IRTN(IR_MUL), rb, rc); + return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2); +} + +/* -- Predictive narrowing of induction variables ------------------------- */ + +/* Narrow a single runtime value. */ +static int narrow_forl(jit_State *J, cTValue *o) +{ + if (tvisint(o)) return 1; + if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o)); + return 0; +} + +/* Narrow the FORL index type by looking at the runtime values. */ +IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv) +{ + lua_assert(tvisnumber(&tv[FORL_IDX]) && + tvisnumber(&tv[FORL_STOP]) && + tvisnumber(&tv[FORL_STEP])); + /* Narrow only if the runtime values of start/stop/step are all integers. */ + if (narrow_forl(J, &tv[FORL_IDX]) && + narrow_forl(J, &tv[FORL_STOP]) && + narrow_forl(J, &tv[FORL_STEP])) { + /* And if the loop index can't possibly overflow. */ + lua_Number step = numberVnum(&tv[FORL_STEP]); + lua_Number sum = numberVnum(&tv[FORL_STOP]) + step; + if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0)) + return IRT_INT; + } + return IRT_NUM; +} + +#undef IR +#undef fins +#undef emitir +#undef emitir_raw + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_sink.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_sink.c new file mode 100644 index 00000000000..a98e9dfe3b4 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_sink.c @@ -0,0 +1,245 @@ +/* +** SINK: Allocation Sinking and Store Sinking. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_sink_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_target.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Check whether the store ref points to an eligible allocation. */ +static IRIns *sink_checkalloc(jit_State *J, IRIns *irs) +{ + IRIns *ir = IR(irs->op1); + if (!irref_isk(ir->op2)) + return NULL; /* Non-constant key. */ + if (ir->o == IR_HREFK || ir->o == IR_AREF) + ir = IR(ir->op1); + else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF || + ir->o == IR_FREF || ir->o == IR_ADD)) + return NULL; /* Unhandled reference type (for XSTORE). */ + ir = IR(ir->op1); + if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW)) + return NULL; /* Not an allocation. */ + return ir; /* Return allocation. */ +} + +/* Recursively check whether a value depends on a PHI. */ +static int sink_phidep(jit_State *J, IRRef ref) +{ + IRIns *ir = IR(ref); + if (irt_isphi(ir->t)) return 1; + if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1)) return 1; + if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2)) return 1; + return 0; +} + +/* Check whether a value is a sinkable PHI or loop-invariant. */ +static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref) +{ + if (ref >= REF_FIRST) { + IRIns *ir = IR(ref); + if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && + irt_isphi(IR(ir->op1)->t))) { + ira->prev++; + return 1; /* Sinkable PHI. */ + } + /* Otherwise the value must be loop-invariant. */ + return ref < J->loopref && !sink_phidep(J, ref); + } + return 1; /* Constant (non-PHI). */ +} + +/* Mark non-sinkable allocations using single-pass backward propagation. +** +** Roots for the marking process are: +** - Some PHIs or snapshots (see below). +** - Non-PHI, non-constant values stored to PHI allocations. +** - All guards. 
+** - Any remaining loads not eliminated by store-to-load forwarding. +** - Stores with non-constant keys. +** - All stored values. +*/ +static void sink_mark_ins(jit_State *J) +{ + IRIns *ir, *irlast = IR(J->cur.nins-1); + for (ir = irlast ; ; ir--) { + switch (ir->o) { + case IR_BASE: + return; /* Finished. */ + case IR_CALLL: /* IRCALL_lj_tab_len */ + case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR: + irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */ + break; + case IR_FLOAD: + if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META) + irt_setmark(IR(ir->op1)->t); /* Mark table for remaining loads. */ + break; + case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: { + IRIns *ira = sink_checkalloc(J, ir); + if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2))) + irt_setmark(IR(ir->op1)->t); /* Mark ineligible ref. */ + irt_setmark(IR(ir->op2)->t); /* Mark stored value. */ + break; + } +#if LJ_HASFFI + case IR_CNEWI: + if (irt_isphi(ir->t) && + (!sink_checkphi(J, ir, ir->op2) || + (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP && + !sink_checkphi(J, ir, (ir+1)->op2)))) + irt_setmark(ir->t); /* Mark ineligible allocation. */ + /* fallthrough */ +#endif + case IR_USTORE: + irt_setmark(IR(ir->op2)->t); /* Mark stored value. */ + break; +#if LJ_HASFFI + case IR_CALLXS: +#endif + case IR_CALLS: + irt_setmark(IR(ir->op1)->t); /* Mark (potentially) stored values. */ + break; + case IR_PHI: { + IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); + irl->prev = irr->prev = 0; /* Clear PHI value counts. */ + if (irl->o == irr->o && + (irl->o == IR_TNEW || irl->o == IR_TDUP || + (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI)))) + break; + irt_setmark(irl->t); + irt_setmark(irr->t); + break; + } + default: + if (irt_ismarked(ir->t) || irt_isguard(ir->t)) { /* Propagate mark. */ + if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t); + if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t); + } + break; + } + } +} + +/* Mark all instructions referenced by a snapshot. */ +static void sink_mark_snap(jit_State *J, SnapShot *snap) +{ + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + IRRef ref = snap_ref(map[n]); + if (!irref_isk(ref)) + irt_setmark(IR(ref)->t); + } +} + +/* Iteratively remark PHI refs with differing marks or PHI value counts. */ +static void sink_remark_phi(jit_State *J) +{ + IRIns *ir; + int remark; + do { + remark = 0; + for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) { + IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); + if (((irl->t.irt ^ irr->t.irt) & IRT_MARK)) + remark = 1; + else if (irl->prev == irr->prev) + continue; + irt_setmark(IR(ir->op1)->t); + irt_setmark(IR(ir->op2)->t); + } + } while (remark); +} + +/* Sweep instructions and tag sunken allocations and stores. */ +static void sink_sweep_ins(jit_State *J) +{ + IRIns *ir, *irfirst = IR(J->cur.nk); + for (ir = IR(J->cur.nins-1) ; ir >= irfirst; ir--) { + switch (ir->o) { + case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: { + IRIns *ira = sink_checkalloc(J, ir); + if (ira && !irt_ismarked(ira->t)) { + int delta = (int)(ir - ira); + ir->prev = REGSP(RID_SINK, delta > 255 ? 
255 : delta); + } else { + ir->prev = REGSP_INIT; + } + break; + } + case IR_NEWREF: + if (!irt_ismarked(IR(ir->op1)->t)) { + ir->prev = REGSP(RID_SINK, 0); + } else { + irt_clearmark(ir->t); + ir->prev = REGSP_INIT; + } + break; +#if LJ_HASFFI + case IR_CNEW: case IR_CNEWI: +#endif + case IR_TNEW: case IR_TDUP: + if (!irt_ismarked(ir->t)) { + ir->t.irt &= ~IRT_GUARD; + ir->prev = REGSP(RID_SINK, 0); + J->cur.sinktags = 1; /* Signal present SINK tags to assembler. */ + } else { + irt_clearmark(ir->t); + ir->prev = REGSP_INIT; + } + break; + case IR_PHI: { + IRIns *ira = IR(ir->op2); + if (!irt_ismarked(ira->t) && + (ira->o == IR_TNEW || ira->o == IR_TDUP || + (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) { + ir->prev = REGSP(RID_SINK, 0); + } else { + ir->prev = REGSP_INIT; + } + break; + } + default: + irt_clearmark(ir->t); + ir->prev = REGSP_INIT; + break; + } + } +} + +/* Allocation sinking and store sinking. +** +** 1. Mark all non-sinkable allocations. +** 2. Then sink all remaining allocations and the related stores. +*/ +void lj_opt_sink(jit_State *J) +{ + const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD| + JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD); + if ((J->flags & need) == need && + (J->chain[IR_TNEW] || J->chain[IR_TDUP] || + (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) { + if (!J->loopref) + sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]); + sink_mark_ins(J); + if (J->loopref) + sink_remark_phi(J); + sink_sweep_ins(J); + } +} + +#undef IR + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_split.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_split.c new file mode 100644 index 00000000000..81ded6c0a01 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_opt_split.c @@ -0,0 +1,856 @@ +/* +** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_split_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) + +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_vm.h" + +/* SPLIT pass: +** +** This pass splits up 64 bit IR instructions into multiple 32 bit IR +** instructions. It's only active for soft-float targets or for 32 bit CPUs +** which lack native 64 bit integer operations (the FFI is currently the +** only emitter for 64 bit integer instructions). +** +** Splitting the IR in a separate pass keeps each 32 bit IR assembler +** backend simple. Only a small amount of extra functionality needs to be +** implemented. This is much easier than adding support for allocating +** register pairs to each backend (believe me, I tried). A few simple, but +** important optimizations can be performed by the SPLIT pass, which would +** be tedious to do in the backend. +** +** The basic idea is to replace each 64 bit IR instruction with its 32 bit +** equivalent plus an extra HIOP instruction. The splitted IR is not passed +** through FOLD or any other optimizations, so each HIOP is guaranteed to +** immediately follow it's counterpart. The actual functionality of HIOP is +** inferred from the previous instruction. +** +** The operands of HIOP hold the hiword input references. The output of HIOP +** is the hiword output reference, which is also used to hold the hiword +** register or spill slot information. 
The register allocator treats this +** instruction independently of any other instruction, which improves code +** quality compared to using fixed register pairs. +** +** It's easier to split up some instructions into two regular 32 bit +** instructions. E.g. XLOAD is split up into two XLOADs with two different +** addresses. Obviously 64 bit constants need to be split up into two 32 bit +** constants, too. Some hiword instructions can be entirely omitted, e.g. +** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls +** are split up into two 32 bit arguments each. +** +** On soft-float targets, floating-point instructions are directly converted +** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX). +** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump). +** +** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with +** two int64_t fields: +** +** 0100 p32 ADD base +8 +** 0101 i64 XLOAD 0100 +** 0102 i64 ADD 0101 +1 +** 0103 p32 ADD base +16 +** 0104 i64 XSTORE 0103 0102 +** +** mov rax, [esi+0x8] +** add rax, +0x01 +** mov [esi+0x10], rax +** +** Here's the transformed IR and the x86 machine code after the SPLIT pass: +** +** 0100 p32 ADD base +8 +** 0101 int XLOAD 0100 +** 0102 p32 ADD base +12 +** 0103 int XLOAD 0102 +** 0104 int ADD 0101 +1 +** 0105 int HIOP 0103 +0 +** 0106 p32 ADD base +16 +** 0107 int XSTORE 0106 0104 +** 0108 int HIOP 0106 0105 +** +** mov eax, [esi+0x8] +** mov ecx, [esi+0xc] +** add eax, +0x01 +** adc ecx, +0x00 +** mov [esi+0x10], eax +** mov [esi+0x14], ecx +** +** You may notice the reassociated hiword address computation, which is +** later fused into the mov operands by the assembler. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Directly emit the transformed IR without updating chains etc. */ +static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2) +{ + IRRef nref = lj_ir_nextins(J); + IRIns *ir = IR(nref); + ir->ot = ot; + ir->op1 = op1; + ir->op2 = op2; + return nref; +} + +#if LJ_SOFTFP +/* Emit a (checked) number to integer conversion. */ +static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check) +{ + IRRef tmp, res; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo); +#endif + res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i); + if (check) { + tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d); + split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); + split_emit(J, IRTGI(IR_EQ), tmp, lo); + split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi); + } + return res; +} + +/* Emit a CALLN with one split 64 bit argument. */ +static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir, + IRIns *ir, IRCallID id) +{ + IRRef tmp, op1 = ir->op1; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); +#endif + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); + return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); +} +#endif + +/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. 
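[Editorial aside, not part of the patch: the add/adc sequence in the worked example above is simply 64 bit addition carried out in two 32 bit halves, which is what the ADD + HIOP pair expresses at the IR level. A minimal standalone C version of the same split, with illustrative names only:]

#include <stdint.h>

/* 64 bit add split into loword/hiword 32 bit operations: the loword add may
** wrap, and its carry is folded into the hiword add (the 'adc' above).
*/
static void add64_split(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi,
                        uint32_t *rlo, uint32_t *rhi)
{
  uint32_t lo = alo + blo;       /* ADD  (loword) */
  uint32_t carry = (lo < alo);   /* Carry out of the loword add. */
  *rlo = lo;
  *rhi = ahi + bhi + carry;      /* HIOP (hiword add-with-carry) */
}

[Adding the constant 1 as in the example corresponds to add64_split(xlo, xhi, 1, 0, ...). End of aside.]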
*/ +static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir, + IRIns *ir, IRCallID id) +{ + IRRef tmp, op1 = ir->op1, op2 = ir->op2; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); +#endif + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); + return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); +} + +/* Emit a CALLN with two split 64 bit arguments. */ +static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir, + IRIns *ir, IRCallID id) +{ + IRRef tmp, op1 = ir->op1, op2 = ir->op2; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); +#endif + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); + return split_emit(J, + IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT), + tmp, tmp); +} + +/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */ +static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref) +{ + IRRef nref = oir[ref].prev; + IRIns *ir = IR(nref); + int32_t ofs = 4; + if (ir->o == IR_KPTR) + return lj_ir_kptr(J, (char *)ir_kptr(ir) + ofs); + if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) { + /* Reassociate address. */ + ofs += IR(ir->op2)->i; + nref = ir->op1; + if (ofs == 0) return nref; + } + return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs)); +} + +#if LJ_HASFFI +static IRRef split_bitshift(jit_State *J, IRRef1 *hisubst, + IRIns *oir, IRIns *nir, IRIns *ir) +{ + IROp op = ir->o; + IRRef kref = nir->op2; + if (irref_isk(kref)) { /* Optimize constant shifts. 
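[Editorial aside, not part of the patch: the constant-shift expansion that follows can be read as ordinary 64 bit rotate/shift identities over two 32 bit halves. For the BROL case, a rotate by k >= 32 swaps the halves first, and the remaining rotate becomes four 32 bit shifts plus two ORs. A hedged standalone C equivalent:]

#include <stdint.h>

/* Rotate a 64 bit value, given as two 32 bit halves, left by k bits. */
static void rotl64_split(uint32_t lo, uint32_t hi, unsigned k,
                         uint32_t *rlo, uint32_t *rhi)
{
  k &= 63;
  if (k >= 32) { uint32_t t = lo; lo = hi; hi = t; k -= 32; }
  if (k == 0) { *rlo = lo; *rhi = hi; return; }   /* Pass through. */
  *rlo = (lo << k) | (hi >> (32 - k));   /* BOR(BSHL(lo,k), BSHR(hi,32-k)) */
  *rhi = (hi << k) | (lo >> (32 - k));   /* BOR(BSHL(hi,k), BSHR(lo,32-k)) */
}

[End of aside.]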
*/ + int32_t k = (IR(kref)->i & 63); + IRRef lo = nir->op1, hi = hisubst[ir->op1]; + if (op == IR_BROL || op == IR_BROR) { + if (op == IR_BROR) k = (-k & 63); + if (k >= 32) { IRRef t = lo; lo = hi; hi = t; k -= 32; } + if (k == 0) { + passthrough: + J->cur.nins--; + ir->prev = lo; + return hi; + } else { + TRef k1, k2; + IRRef t1, t2, t3, t4; + J->cur.nins--; + k1 = lj_ir_kint(J, k); + k2 = lj_ir_kint(J, (-k & 31)); + t1 = split_emit(J, IRTI(IR_BSHL), lo, k1); + t2 = split_emit(J, IRTI(IR_BSHL), hi, k1); + t3 = split_emit(J, IRTI(IR_BSHR), lo, k2); + t4 = split_emit(J, IRTI(IR_BSHR), hi, k2); + ir->prev = split_emit(J, IRTI(IR_BOR), t1, t4); + return split_emit(J, IRTI(IR_BOR), t2, t3); + } + } else if (k == 0) { + goto passthrough; + } else if (k < 32) { + if (op == IR_BSHL) { + IRRef t1 = split_emit(J, IRTI(IR_BSHL), hi, kref); + IRRef t2 = split_emit(J, IRTI(IR_BSHR), lo, lj_ir_kint(J, (-k&31))); + return split_emit(J, IRTI(IR_BOR), t1, t2); + } else { + IRRef t1 = ir->prev, t2; + lua_assert(op == IR_BSHR || op == IR_BSAR); + nir->o = IR_BSHR; + t2 = split_emit(J, IRTI(IR_BSHL), hi, lj_ir_kint(J, (-k&31))); + ir->prev = split_emit(J, IRTI(IR_BOR), t1, t2); + return split_emit(J, IRTI(op), hi, kref); + } + } else { + if (op == IR_BSHL) { + if (k == 32) + J->cur.nins--; + else + lo = ir->prev; + ir->prev = lj_ir_kint(J, 0); + return lo; + } else { + lua_assert(op == IR_BSHR || op == IR_BSAR); + if (k == 32) { + J->cur.nins--; + ir->prev = hi; + } else { + nir->op1 = hi; + } + if (op == IR_BSHR) + return lj_ir_kint(J, 0); + else + return split_emit(J, IRTI(IR_BSAR), hi, lj_ir_kint(J, 31)); + } + } + } + return split_call_li(J, hisubst, oir, ir, + op - IR_BSHL + IRCALL_lj_carith_shl64); +} + +static IRRef split_bitop(jit_State *J, IRRef1 *hisubst, + IRIns *nir, IRIns *ir) +{ + IROp op = ir->o; + IRRef hi, kref = nir->op2; + if (irref_isk(kref)) { /* Optimize bit operations with lo constant. */ + int32_t k = IR(kref)->i; + if (k == 0 || k == -1) { + if (op == IR_BAND) k = ~k; + if (k == 0) { + J->cur.nins--; + ir->prev = nir->op1; + } else if (op == IR_BXOR) { + nir->o = IR_BNOT; + nir->op2 = 0; + } else { + J->cur.nins--; + ir->prev = kref; + } + } + } + hi = hisubst[ir->op1]; + kref = hisubst[ir->op2]; + if (irref_isk(kref)) { /* Optimize bit operations with hi constant. */ + int32_t k = IR(kref)->i; + if (k == 0 || k == -1) { + if (op == IR_BAND) k = ~k; + if (k == 0) { + return hi; + } else if (op == IR_BXOR) { + return split_emit(J, IRTI(IR_BNOT), hi, 0); + } else { + return kref; + } + } + } + return split_emit(J, IRTI(op), hi, kref); +} +#endif + +/* Substitute references of a snapshot. */ +static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir) +{ + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRIns *ir = &oir[snap_ref(sn)]; + if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn)))) + map[n] = ((sn & 0xffff0000) | ir->prev); + } +} + +/* Transform the old IR to the new IR. */ +static void split_ir(jit_State *J) +{ + IRRef nins = J->cur.nins, nk = J->cur.nk; + MSize irlen = nins - nk; + MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1)); + IRIns *oir = (IRIns *)lj_buf_tmp(J->L, need); + IRRef1 *hisubst; + IRRef ref, snref; + SnapShot *snap; + + /* Copy old IR to buffer. */ + memcpy(oir, IR(nk), irlen*sizeof(IRIns)); + /* Bias hiword substitution table and old IR. Loword kept in field prev. 
*/ + hisubst = (IRRef1 *)&oir[irlen] - nk; + oir -= nk; + + /* Remove all IR instructions, but retain IR constants. */ + J->cur.nins = REF_FIRST; + J->loopref = 0; + + /* Process constants and fixed references. */ + for (ref = nk; ref <= REF_BASE; ref++) { + IRIns *ir = &oir[ref]; + if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) { + /* Split up 64 bit constant. */ + TValue tv = *ir_k64(ir); + ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo); + hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi); + } else { + ir->prev = ref; /* Identity substitution for loword. */ + hisubst[ref] = 0; + } + } + + /* Process old IR instructions. */ + snap = J->cur.snap; + snref = snap->ref; + for (ref = REF_FIRST; ref < nins; ref++) { + IRIns *ir = &oir[ref]; + IRRef nref = lj_ir_nextins(J); + IRIns *nir = IR(nref); + IRRef hi = 0; + + if (ref >= snref) { + snap->ref = nref; + split_subst_snap(J, snap++, oir); + snref = snap < &J->cur.snap[J->cur.nsnap] ? snap->ref : ~(IRRef)0; + } + + /* Copy-substitute old instruction to new instruction. */ + nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev; + nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev; + ir->prev = nref; /* Loword substitution. */ + nir->o = ir->o; + nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI); + hisubst[ref] = 0; + + /* Split 64 bit instructions. */ +#if LJ_SOFTFP + if (irt_isnum(ir->t)) { + nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */ + /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */ + switch (ir->o) { + case IR_ADD: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add); + break; + case IR_SUB: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub); + break; + case IR_MUL: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul); + break; + case IR_DIV: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div); + break; + case IR_POW: + hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi); + break; + case IR_FPMATH: + /* Try to rejoin pow from EXP2, MUL and LOG2. */ + if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) { + IRIns *irp = IR(nir->op1); + if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) { + IRIns *irm4 = IR(irp->op1); + IRIns *irm3 = IR(irm4->op1); + IRIns *irm12 = IR(irm3->op1); + IRIns *irl1 = IR(irm12->op1); + if (irm12->op1 > J->loopref && irl1->o == IR_CALLN && + irl1->op2 == IRCALL_lj_vm_log2) { + IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */ + IRRef arg3 = irm3->op2, arg4 = irm4->op2; + J->cur.nins--; + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4); + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow); + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); + break; + } + } + } + hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2); + break; + case IR_ATAN2: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2); + break; + case IR_LDEXP: + hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp); + break; + case IR_NEG: case IR_ABS: + nir->o = IR_CONV; /* Pass through loword. */ + nir->op2 = (IRT_INT << 5) | IRT_INT; + hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP), + hisubst[ir->op1], hisubst[ir->op2]); + break; + case IR_SLOAD: + if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. 
*/ + nir->op2 &= ~IRSLOAD_CONVERT; + ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref, + IRCALL_softfp_i2d); + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + break; + } + /* fallthrough */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + case IR_STRTO: + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + break; + case IR_XLOAD: { + IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */ + J->cur.nins--; + hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */ + nref = lj_ir_nextins(J); + nir = IR(nref); + *nir = inslo; /* Re-emit lo XLOAD immediately before hi XLOAD. */ + hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2); +#if LJ_LE + ir->prev = nref; +#else + ir->prev = hi; hi = nref; +#endif + break; + } + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_XSTORE: + split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]); + break; + case IR_CONV: { /* Conversion to number. Others handled below. */ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); + UNUSED(st); +#if LJ_32 && LJ_HASFFI + if (st == IRT_I64 || st == IRT_U64) { + hi = split_call_l(J, hisubst, oir, ir, + st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d); + break; + } +#endif + lua_assert(st == IRT_INT || + (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT))); + nir->o = IR_CALLN; +#if LJ_32 && LJ_HASFFI + nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d : + st == IRT_FLOAT ? IRCALL_softfp_f2d : + IRCALL_softfp_ui2d; +#else + nir->op2 = IRCALL_softfp_i2d; +#endif + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + break; + } + case IR_CALLN: + case IR_CALLL: + case IR_CALLS: + case IR_CALLXS: + goto split_call; + case IR_PHI: + if (nir->op1 == nir->op2) + J->cur.nins--; /* Drop useless PHIs. */ + if (hisubst[ir->op1] != hisubst[ir->op2]) + split_emit(J, IRT(IR_PHI, IRT_SOFTFP), + hisubst[ir->op1], hisubst[ir->op2]); + break; + case IR_HIOP: + J->cur.nins--; /* Drop joining HIOP. */ + ir->prev = nir->op1; + hi = nir->op2; + break; + default: + lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX); + hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), + hisubst[ir->op1], hisubst[ir->op2]); + break; + } + } else +#endif +#if LJ_32 && LJ_HASFFI + if (irt_isint64(ir->t)) { + IRRef hiref = hisubst[ir->op1]; + nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */ + switch (ir->o) { + case IR_ADD: + case IR_SUB: + /* Use plain op for hiword if loword cannot produce a carry/borrow. */ + if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) { + ir->prev = nir->op1; /* Pass through loword. */ + nir->op1 = hiref; nir->op2 = hisubst[ir->op2]; + hi = nref; + break; + } + /* fallthrough */ + case IR_NEG: + hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]); + break; + case IR_MUL: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64); + break; + case IR_DIV: + hi = split_call_ll(J, hisubst, oir, ir, + irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : + IRCALL_lj_carith_divu64); + break; + case IR_MOD: + hi = split_call_ll(J, hisubst, oir, ir, + irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : + IRCALL_lj_carith_modu64); + break; + case IR_POW: + hi = split_call_ll(J, hisubst, oir, ir, + irt_isi64(ir->t) ? 
IRCALL_lj_carith_powi64 : + IRCALL_lj_carith_powu64); + break; + case IR_BNOT: + hi = split_emit(J, IRTI(IR_BNOT), hiref, 0); + break; + case IR_BSWAP: + ir->prev = split_emit(J, IRTI(IR_BSWAP), hiref, 0); + hi = nref; + break; + case IR_BAND: case IR_BOR: case IR_BXOR: + hi = split_bitop(J, hisubst, nir, ir); + break; + case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR: + hi = split_bitshift(J, hisubst, oir, nir, ir); + break; + case IR_FLOAD: + lua_assert(ir->op2 == IRFL_CDATA_INT64); + hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4); +#if LJ_BE + ir->prev = hi; hi = nref; +#endif + break; + case IR_XLOAD: + hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2); +#if LJ_BE + ir->prev = hi; hi = nref; +#endif + break; + case IR_XSTORE: + split_emit(J, IRTI(IR_HIOP), nir->op1, hisubst[ir->op2]); + break; + case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); +#if LJ_SOFTFP + if (st == IRT_NUM) { /* NUM to 64 bit int conv. */ + hi = split_call_l(J, hisubst, oir, ir, + irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul); + } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */ + nir->o = IR_CALLN; + nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul; + hi = split_emit(J, IRTI(IR_HIOP), nref, nref); + } +#else + if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */ + hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref); + } +#endif + else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */ + /* Drop cast, since assembler doesn't care. */ + goto fwdlo; + } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */ + IRRef k31 = lj_ir_kint(J, 31); + nir = IR(nref); /* May have been reallocated. */ + ir->prev = nir->op1; /* Pass through loword. */ + nir->o = IR_BSAR; /* hi = bsar(lo, 31). */ + nir->op2 = k31; + hi = nref; + } else { /* Zero-extend to 64 bit. */ + hi = lj_ir_kint(J, 0); + goto fwdlo; + } + break; + } + case IR_CALLXS: + goto split_call; + case IR_PHI: { + IRRef hiref2; + if ((irref_isk(nir->op1) && irref_isk(nir->op2)) || + nir->op1 == nir->op2) + J->cur.nins--; /* Drop useless PHIs. */ + hiref2 = hisubst[ir->op2]; + if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2)) + split_emit(J, IRTI(IR_PHI), hiref, hiref2); + break; + } + case IR_HIOP: + J->cur.nins--; /* Drop joining HIOP. */ + ir->prev = nir->op1; + hi = nir->op2; + break; + default: + lua_assert(ir->o <= IR_NE); /* Comparisons. */ + split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]); + break; + } + } else +#endif +#if LJ_SOFTFP + if (ir->o == IR_SLOAD) { + if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */ + nir->op2 &= ~IRSLOAD_CONVERT; + if (!(nir->op2 & IRSLOAD_TYPECHECK)) + nir->t.irt = IRT_INT; /* Drop guard. 
*/ + split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t)); + } + } else if (ir->o == IR_TOBIT) { + IRRef tmp, op1 = ir->op1; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); +#endif + ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit); + } else if (ir->o == IR_TOSTR) { + if (hisubst[ir->op1]) { + if (irref_isk(ir->op1)) + nir->op1 = ir->op1; + else + split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref); + } + } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) { + if (irref_isk(ir->op2) && hisubst[ir->op2]) + nir->op2 = ir->op2; + } else +#endif + if (ir->o == IR_CONV) { /* See above, too. */ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); +#if LJ_32 && LJ_HASFFI + if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */ +#if LJ_SOFTFP + if (irt_isfloat(ir->t)) { + split_call_l(J, hisubst, oir, ir, + st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f); + J->cur.nins--; /* Drop unused HIOP. */ + } +#else + if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */ + ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)), + hisubst[ir->op1], nref); + } +#endif + else { /* Truncate to lower 32 bits. */ + fwdlo: + ir->prev = nir->op1; /* Forward loword. */ + /* Replace with NOP to avoid messing up the snapshot logic. */ + nir->ot = IRT(IR_NOP, IRT_NIL); + nir->op1 = nir->op2 = 0; + } + } +#endif +#if LJ_SOFTFP && LJ_32 && LJ_HASFFI + else if (irt_isfloat(ir->t)) { + if (st == IRT_NUM) { + split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f); + J->cur.nins--; /* Drop unused HIOP. */ + } else { + nir->o = IR_CALLN; + nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f; + } + } else if (st == IRT_FLOAT) { + nir->o = IR_CALLN; + nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui; + } else +#endif +#if LJ_SOFTFP + if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) { + if (irt_isguard(ir->t)) { + lua_assert(st == IRT_NUM && irt_isint(ir->t)); + J->cur.nins--; + ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1); + } else { + split_call_l(J, hisubst, oir, ir, +#if LJ_32 && LJ_HASFFI + st == IRT_NUM ? + (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) : + (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui) +#else + IRCALL_softfp_d2i +#endif + ); + J->cur.nins--; /* Drop unused HIOP. */ + } + } +#endif + } else if (ir->o == IR_CALLXS) { + IRRef hiref; + split_call: + hiref = hisubst[ir->op1]; + if (hiref) { + IROpT ot = nir->ot; + IRRef op2 = nir->op2; + nir->ot = IRT(IR_CARG, IRT_NIL); +#if LJ_LE + nir->op2 = hiref; +#else + nir->op2 = nir->op1; nir->op1 = hiref; +#endif + ir->prev = nref = split_emit(J, ot, nref, op2); + } + if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t)) + hi = split_emit(J, + IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT), + nref, nref); + } else if (ir->o == IR_CARG) { + IRRef hiref = hisubst[ir->op1]; + if (hiref) { + IRRef op2 = nir->op2; +#if LJ_LE + nir->op2 = hiref; +#else + nir->op2 = nir->op1; nir->op1 = hiref; +#endif + ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2); + nir = IR(nref); + } + hiref = hisubst[ir->op2]; + if (hiref) { +#if !LJ_TARGET_X86 + int carg = 0; + IRIns *cir; + for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1)) + carg++; + if ((carg & 1) == 0) { /* Align 64 bit arguments. 
*/ + IRRef op2 = nir->op2; + nir->op2 = REF_NIL; + nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2); + nir = IR(nref); + } +#endif +#if LJ_BE + { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; } +#endif + ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref); + } + } else if (ir->o == IR_CNEWI) { + if (hisubst[ir->op2]) + split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]); + } else if (ir->o == IR_LOOP) { + J->loopref = nref; /* Needed by assembler. */ + } + hisubst[ref] = hi; /* Store hiword substitution. */ + } + if (snref == nins) { /* Substitution for last snapshot. */ + snap->ref = J->cur.nins; + split_subst_snap(J, snap, oir); + } + + /* Add PHI marks. */ + for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) { + IRIns *ir = IR(ref); + if (ir->o != IR_PHI) break; + if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t); + if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t); + } +} + +/* Protected callback for split pass. */ +static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + split_ir(J); + UNUSED(L); UNUSED(dummy); + return NULL; +} + +#if defined(LUA_USE_ASSERT) || LJ_SOFTFP +/* Slow, but sure way to check whether a SPLIT pass is needed. */ +static int split_needsplit(jit_State *J) +{ + IRIns *ir, *irend; + IRRef ref; + for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++) + if (LJ_SOFTFP ? irt_is64orfp(ir->t) : irt_isint64(ir->t)) + return 1; + if (LJ_SOFTFP) { + for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev) + if ((IR(ref)->op2 & IRSLOAD_CONVERT)) + return 1; + if (J->chain[IR_TOBIT]) + return 1; + } + for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) { + IRType st = (IR(ref)->op2 & IRCONV_SRCMASK); + if ((LJ_SOFTFP && (st == IRT_NUM || st == IRT_FLOAT)) || + st == IRT_I64 || st == IRT_U64) + return 1; + } + return 0; /* Nope. */ +} +#endif + +/* SPLIT pass. */ +void lj_opt_split(jit_State *J) +{ +#if LJ_SOFTFP + if (!J->needsplit) + J->needsplit = split_needsplit(J); +#else + lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */ +#endif + if (J->needsplit) { + int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit); + if (errcode) { + /* Completely reset the trace to avoid inconsistent dump on abort. */ + J->cur.nins = J->cur.nk = REF_BASE; + J->cur.nsnap = 0; + lj_err_throw(J->L, errcode); /* Propagate errors. */ + } + } +} + +#undef IR + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_parse.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_parse.c new file mode 100644 index 00000000000..9891897ed6f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_parse.c @@ -0,0 +1,2725 @@ +/* +** Lua parser (source code -> bytecode). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_parse_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_state.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_strfmt.h" +#include "lj_lex.h" +#include "lj_parse.h" +#include "lj_vm.h" +#include "lj_vmevent.h" + +/* -- Parser structures and definitions ----------------------------------- */ + +/* Expression kinds. 
*/ +typedef enum { + /* Constant expressions must be first and in this order: */ + VKNIL, + VKFALSE, + VKTRUE, + VKSTR, /* sval = string value */ + VKNUM, /* nval = number value */ + VKLAST = VKNUM, + VKCDATA, /* nval = cdata value, not treated as a constant expression */ + /* Non-constant expressions follow: */ + VLOCAL, /* info = local register, aux = vstack index */ + VUPVAL, /* info = upvalue index, aux = vstack index */ + VGLOBAL, /* sval = string value */ + VINDEXED, /* info = table register, aux = index reg/byte/string const */ + VJMP, /* info = instruction PC */ + VRELOCABLE, /* info = instruction PC */ + VNONRELOC, /* info = result register */ + VCALL, /* info = instruction PC, aux = base */ + VVOID +} ExpKind; + +/* Expression descriptor. */ +typedef struct ExpDesc { + union { + struct { + uint32_t info; /* Primary info. */ + uint32_t aux; /* Secondary info. */ + } s; + TValue nval; /* Number value. */ + GCstr *sval; /* String value. */ + } u; + ExpKind k; + BCPos t; /* True condition jump list. */ + BCPos f; /* False condition jump list. */ +} ExpDesc; + +/* Macros for expressions. */ +#define expr_hasjump(e) ((e)->t != (e)->f) + +#define expr_isk(e) ((e)->k <= VKLAST) +#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e)) +#define expr_isnumk(e) ((e)->k == VKNUM) +#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e)) +#define expr_isstrk(e) ((e)->k == VKSTR) + +#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval) +#define expr_numberV(e) numberVnum(expr_numtv((e))) + +/* Initialize expression. */ +static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info) +{ + e->k = k; + e->u.s.info = info; + e->f = e->t = NO_JMP; +} + +/* Check number constant for +-0. */ +static int expr_numiszero(ExpDesc *e) +{ + TValue *o = expr_numtv(e); + return tvisint(o) ? (intV(o) == 0) : tviszero(o); +} + +/* Per-function linked list of scope blocks. */ +typedef struct FuncScope { + struct FuncScope *prev; /* Link to outer scope. */ + MSize vstart; /* Start of block-local variables. */ + uint8_t nactvar; /* Number of active vars outside the scope. */ + uint8_t flags; /* Scope flags. */ +} FuncScope; + +#define FSCOPE_LOOP 0x01 /* Scope is a (breakable) loop. */ +#define FSCOPE_BREAK 0x02 /* Break used in scope. */ +#define FSCOPE_GOLA 0x04 /* Goto or label used in scope. */ +#define FSCOPE_UPVAL 0x08 /* Upvalue in scope. */ +#define FSCOPE_NOCLOSE 0x10 /* Do not close upvalues. */ + +#define NAME_BREAK ((GCstr *)(uintptr_t)1) + +/* Index into variable stack. */ +typedef uint16_t VarIndex; +#define LJ_MAX_VSTACK (65536 - LJ_MAX_UPVAL) + +/* Variable/goto/label info. */ +#define VSTACK_VAR_RW 0x01 /* R/W variable. */ +#define VSTACK_GOTO 0x02 /* Pending goto. */ +#define VSTACK_LABEL 0x04 /* Label. */ + +/* Per-function state. */ +typedef struct FuncState { + GCtab *kt; /* Hash table for constants. */ + LexState *ls; /* Lexer state. */ + lua_State *L; /* Lua state. */ + FuncScope *bl; /* Current scope. */ + struct FuncState *prev; /* Enclosing function. */ + BCPos pc; /* Next bytecode position. */ + BCPos lasttarget; /* Bytecode position of last jump target. */ + BCPos jpc; /* Pending jump list to next bytecode. */ + BCReg freereg; /* First free register. */ + BCReg nactvar; /* Number of active local variables. */ + BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants */ + BCLine linedefined; /* First line of the function definition. */ + BCInsLine *bcbase; /* Base of bytecode stack. */ + BCPos bclim; /* Limit of bytecode stack. 
*/ + MSize vbase; /* Base of variable stack for this function. */ + uint8_t flags; /* Prototype flags. */ + uint8_t numparams; /* Number of parameters. */ + uint8_t framesize; /* Fixed frame size. */ + uint8_t nuv; /* Number of upvalues */ + VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */ + VarIndex uvmap[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx. */ + VarIndex uvtmp[LJ_MAX_UPVAL]; /* Temporary upvalue map. */ +} FuncState; + +/* Binary and unary operators. ORDER OPR */ +typedef enum BinOpr { + OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */ + OPR_CONCAT, + OPR_NE, OPR_EQ, + OPR_LT, OPR_GE, OPR_LE, OPR_GT, + OPR_AND, OPR_OR, + OPR_NOBINOPR +} BinOpr; + +LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT); +LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT); +LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT); +LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD); +LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD); +LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); +LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); + +/* -- Error handling ------------------------------------------------------ */ + +LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) +{ + lj_lex_error(ls, ls->tok, em); +} + +LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken tok) +{ + lj_lex_error(ls, ls->tok, LJ_ERR_XTOKEN, lj_lex_token2str(ls, tok)); +} + +LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what) +{ + if (fs->linedefined == 0) + lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what); + else + lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what); +} + +#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m) +#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m) +#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); } + +/* -- Management of constants --------------------------------------------- */ + +/* Return bytecode encoding for primitive constant. */ +#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k) + +#define tvhaskslot(o) ((o)->u32.hi == 0) +#define tvkslot(o) ((o)->u32.lo) + +/* Add a number constant. */ +static BCReg const_num(FuncState *fs, ExpDesc *e) +{ + lua_State *L = fs->L; + TValue *o; + lua_assert(expr_isnumk(e)); + o = lj_tab_set(L, fs->kt, &e->u.nval); + if (tvhaskslot(o)) + return tvkslot(o); + o->u64 = fs->nkn; + return fs->nkn++; +} + +/* Add a GC object constant. */ +static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype) +{ + lua_State *L = fs->L; + TValue key, *o; + setgcV(L, &key, gc, itype); + /* NOBARRIER: the key is new or kept alive. */ + o = lj_tab_set(L, fs->kt, &key); + if (tvhaskslot(o)) + return tvkslot(o); + o->u64 = fs->nkgc; + return fs->nkgc++; +} + +/* Add a string constant. */ +static BCReg const_str(FuncState *fs, ExpDesc *e) +{ + lua_assert(expr_isstrk(e) || e->k == VGLOBAL); + return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR); +} + +/* Anchor string constant to avoid GC. */ +GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len) +{ + /* NOBARRIER: the key is new or kept alive. */ + lua_State *L = ls->L; + GCstr *s = lj_str_new(L, str, len); + TValue *tv = lj_tab_setstr(L, ls->fs->kt, s); + if (tvisnil(tv)) setboolV(tv, 1); + lj_gc_check(L); + return s; +} + +#if LJ_HASFFI +/* Anchor cdata to avoid GC. 
*/ +void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd) +{ + /* NOBARRIER: the key is new or kept alive. */ + lua_State *L = ls->L; + setcdataV(L, tv, cd); + setboolV(lj_tab_set(L, ls->fs->kt, tv), 1); +} +#endif + +/* -- Jump list handling -------------------------------------------------- */ + +/* Get next element in jump list. */ +static BCPos jmp_next(FuncState *fs, BCPos pc) +{ + ptrdiff_t delta = bc_j(fs->bcbase[pc].ins); + if ((BCPos)delta == NO_JMP) + return NO_JMP; + else + return (BCPos)(((ptrdiff_t)pc+1)+delta); +} + +/* Check if any of the instructions on the jump list produce no value. */ +static int jmp_novalue(FuncState *fs, BCPos list) +{ + for (; list != NO_JMP; list = jmp_next(fs, list)) { + BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins; + if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG)) + return 1; + } + return 0; +} + +/* Patch register of test instructions. */ +static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg) +{ + BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc]; + BCOp op = bc_op(ilp->ins); + if (op == BC_ISTC || op == BC_ISFC) { + if (reg != NO_REG && reg != bc_d(ilp->ins)) { + setbc_a(&ilp->ins, reg); + } else { /* Nothing to store or already in the right register. */ + setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC)); + setbc_a(&ilp->ins, 0); + } + } else if (bc_a(ilp->ins) == NO_REG) { + if (reg == NO_REG) { + ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0); + } else { + setbc_a(&ilp->ins, reg); + if (reg >= bc_a(ilp[1].ins)) + setbc_a(&ilp[1].ins, reg+1); + } + } else { + return 0; /* Cannot patch other instructions. */ + } + return 1; +} + +/* Drop values for all instructions on jump list. */ +static void jmp_dropval(FuncState *fs, BCPos list) +{ + for (; list != NO_JMP; list = jmp_next(fs, list)) + jmp_patchtestreg(fs, list, NO_REG); +} + +/* Patch jump instruction to target. */ +static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest) +{ + BCIns *jmp = &fs->bcbase[pc].ins; + BCPos offset = dest-(pc+1)+BCBIAS_J; + lua_assert(dest != NO_JMP); + if (offset > BCMAX_D) + err_syntax(fs->ls, LJ_ERR_XJUMP); + setbc_d(jmp, offset); +} + +/* Append to jump list. */ +static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2) +{ + if (l2 == NO_JMP) { + return; + } else if (*l1 == NO_JMP) { + *l1 = l2; + } else { + BCPos list = *l1; + BCPos next; + while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */ + list = next; + jmp_patchins(fs, list, l2); + } +} + +/* Patch jump list and preserve produced values. */ +static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget, + BCReg reg, BCPos dtarget) +{ + while (list != NO_JMP) { + BCPos next = jmp_next(fs, list); + if (jmp_patchtestreg(fs, list, reg)) + jmp_patchins(fs, list, vtarget); /* Jump to target with value. */ + else + jmp_patchins(fs, list, dtarget); /* Jump to default target. */ + list = next; + } +} + +/* Jump to following instruction. Append to list of pending jumps. */ +static void jmp_tohere(FuncState *fs, BCPos list) +{ + fs->lasttarget = fs->pc; + jmp_append(fs, &fs->jpc, list); +} + +/* Patch jump list to target. */ +static void jmp_patch(FuncState *fs, BCPos list, BCPos target) +{ + if (target == fs->pc) { + jmp_tohere(fs, list); + } else { + lua_assert(target < fs->pc); + jmp_patchval(fs, list, target, NO_REG, target); + } +} + +/* -- Bytecode register allocator ----------------------------------------- */ + +/* Bump frame size. 
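+** (Only raises the frame size; use bcreg_reserve() below to also advance
+** fs->freereg.)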
*/ +static void bcreg_bump(FuncState *fs, BCReg n) +{ + BCReg sz = fs->freereg + n; + if (sz > fs->framesize) { + if (sz >= LJ_MAX_SLOTS) + err_syntax(fs->ls, LJ_ERR_XSLOTS); + fs->framesize = (uint8_t)sz; + } +} + +/* Reserve registers. */ +static void bcreg_reserve(FuncState *fs, BCReg n) +{ + bcreg_bump(fs, n); + fs->freereg += n; +} + +/* Free register. */ +static void bcreg_free(FuncState *fs, BCReg reg) +{ + if (reg >= fs->nactvar) { + fs->freereg--; + lua_assert(reg == fs->freereg); + } +} + +/* Free register for expression. */ +static void expr_free(FuncState *fs, ExpDesc *e) +{ + if (e->k == VNONRELOC) + bcreg_free(fs, e->u.s.info); +} + +/* -- Bytecode emitter ---------------------------------------------------- */ + +/* Emit bytecode instruction. */ +static BCPos bcemit_INS(FuncState *fs, BCIns ins) +{ + BCPos pc = fs->pc; + LexState *ls = fs->ls; + jmp_patchval(fs, fs->jpc, pc, NO_REG, pc); + fs->jpc = NO_JMP; + if (LJ_UNLIKELY(pc >= fs->bclim)) { + ptrdiff_t base = fs->bcbase - ls->bcstack; + checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions"); + lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine); + fs->bclim = (BCPos)(ls->sizebcstack - base); + fs->bcbase = ls->bcstack + base; + } + fs->bcbase[pc].ins = ins; + fs->bcbase[pc].line = ls->lastline; + fs->pc = pc+1; + return pc; +} + +#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c)) +#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d)) +#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j)) + +#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins) + +/* -- Bytecode emitter for expressions ------------------------------------ */ + +/* Discharge non-constant expression to any register. */ +static void expr_discharge(FuncState *fs, ExpDesc *e) +{ + BCIns ins; + if (e->k == VUPVAL) { + ins = BCINS_AD(BC_UGET, 0, e->u.s.info); + } else if (e->k == VGLOBAL) { + ins = BCINS_AD(BC_GGET, 0, const_str(fs, e)); + } else if (e->k == VINDEXED) { + BCReg rc = e->u.s.aux; + if ((int32_t)rc < 0) { + ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc); + } else if (rc > BCMAX_C) { + ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1)); + } else { + bcreg_free(fs, rc); + ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc); + } + bcreg_free(fs, e->u.s.info); + } else if (e->k == VCALL) { + e->u.s.info = e->u.s.aux; + e->k = VNONRELOC; + return; + } else if (e->k == VLOCAL) { + e->k = VNONRELOC; + return; + } else { + return; + } + e->u.s.info = bcemit_INS(fs, ins); + e->k = VRELOCABLE; +} + +/* Emit bytecode to set a range of registers to nil. */ +static void bcemit_nil(FuncState *fs, BCReg from, BCReg n) +{ + if (fs->pc > fs->lasttarget) { /* No jumps to current position? */ + BCIns *ip = &fs->bcbase[fs->pc-1].ins; + BCReg pto, pfrom = bc_a(*ip); + switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */ + case BC_KPRI: + if (bc_d(*ip) != ~LJ_TNIL) break; + if (from == pfrom) { + if (n == 1) return; + } else if (from == pfrom+1) { + from = pfrom; + n++; + } else { + break; + } + *ip = BCINS_AD(BC_KNIL, from, from+n-1); /* Replace KPRI. */ + return; + case BC_KNIL: + pto = bc_d(*ip); + if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */ + if (from+n-1 > pto) + setbc_d(ip, from+n-1); /* Patch previous instruction range. */ + return; + } + break; + default: + break; + } + } + /* Emit new instruction or replace old instruction. */ + bcemit_INS(fs, n == 1 ? 
BCINS_AD(BC_KPRI, from, VKNIL) : + BCINS_AD(BC_KNIL, from, from+n-1)); +} + +/* Discharge an expression to a specific register. Ignore branches. */ +static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg) +{ + BCIns ins; + expr_discharge(fs, e); + if (e->k == VKSTR) { + ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e)); + } else if (e->k == VKNUM) { +#if LJ_DUALNUM + cTValue *tv = expr_numtv(e); + if (tvisint(tv) && checki16(intV(tv))) + ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv)); + else +#else + lua_Number n = expr_numberV(e); + int32_t k = lj_num2int(n); + if (checki16(k) && n == (lua_Number)k) + ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k); + else +#endif + ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e)); +#if LJ_HASFFI + } else if (e->k == VKCDATA) { + fs->flags |= PROTO_FFI; + ins = BCINS_AD(BC_KCDATA, reg, + const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA)); +#endif + } else if (e->k == VRELOCABLE) { + setbc_a(bcptr(fs, e), reg); + goto noins; + } else if (e->k == VNONRELOC) { + if (reg == e->u.s.info) + goto noins; + ins = BCINS_AD(BC_MOV, reg, e->u.s.info); + } else if (e->k == VKNIL) { + bcemit_nil(fs, reg, 1); + goto noins; + } else if (e->k <= VKTRUE) { + ins = BCINS_AD(BC_KPRI, reg, const_pri(e)); + } else { + lua_assert(e->k == VVOID || e->k == VJMP); + return; + } + bcemit_INS(fs, ins); +noins: + e->u.s.info = reg; + e->k = VNONRELOC; +} + +/* Forward declaration. */ +static BCPos bcemit_jmp(FuncState *fs); + +/* Discharge an expression to a specific register. */ +static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg) +{ + expr_toreg_nobranch(fs, e, reg); + if (e->k == VJMP) + jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */ + if (expr_hasjump(e)) { /* Discharge expression with branches. */ + BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP; + if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) { + BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs); + jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE); + bcemit_AJ(fs, BC_JMP, fs->freereg, 1); + jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE); + jmp_tohere(fs, jval); + } + jend = fs->pc; + fs->lasttarget = jend; + jmp_patchval(fs, e->f, jend, reg, jfalse); + jmp_patchval(fs, e->t, jend, reg, jtrue); + } + e->f = e->t = NO_JMP; + e->u.s.info = reg; + e->k = VNONRELOC; +} + +/* Discharge an expression to the next free register. */ +static void expr_tonextreg(FuncState *fs, ExpDesc *e) +{ + expr_discharge(fs, e); + expr_free(fs, e); + bcreg_reserve(fs, 1); + expr_toreg(fs, e, fs->freereg - 1); +} + +/* Discharge an expression to any register. */ +static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e) +{ + expr_discharge(fs, e); + if (e->k == VNONRELOC) { + if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */ + if (e->u.s.info >= fs->nactvar) { + expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */ + return e->u.s.info; + } + } + expr_tonextreg(fs, e); /* Discharge to next register. */ + return e->u.s.info; +} + +/* Partially discharge expression to a value. */ +static void expr_toval(FuncState *fs, ExpDesc *e) +{ + if (expr_hasjump(e)) + expr_toanyreg(fs, e); + else + expr_discharge(fs, e); +} + +/* Emit store for LHS expression. 
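+** A rough illustration: for "t.x = v" the VINDEXED case selects BC_TSETS
+** (constant string key), "t[1] = v" uses BC_TSETB (constant byte key) and
+** "t[k] = v" falls back to BC_TSETV with the key in a register.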
*/ +static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e) +{ + BCIns ins; + if (var->k == VLOCAL) { + fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW; + expr_free(fs, e); + expr_toreg(fs, e, var->u.s.info); + return; + } else if (var->k == VUPVAL) { + fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW; + expr_toval(fs, e); + if (e->k <= VKTRUE) + ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e)); + else if (e->k == VKSTR) + ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e)); + else if (e->k == VKNUM) + ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e)); + else + ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e)); + } else if (var->k == VGLOBAL) { + BCReg ra = expr_toanyreg(fs, e); + ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); + } else { + BCReg ra, rc; + lua_assert(var->k == VINDEXED); + ra = expr_toanyreg(fs, e); + rc = var->u.s.aux; + if ((int32_t)rc < 0) { + ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc); + } else if (rc > BCMAX_C) { + ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); + } else { + /* Free late alloced key reg to avoid assert on free of value reg. */ + /* This can only happen when called from expr_table(). */ + lua_assert(e->k != VNONRELOC || ra < fs->nactvar || + rc < ra || (bcreg_free(fs, rc),1)); + ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); + } + } + bcemit_INS(fs, ins); + expr_free(fs, e); +} + +/* Emit method lookup expression. */ +static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key) +{ + BCReg idx, func, obj = expr_toanyreg(fs, e); + expr_free(fs, e); + func = fs->freereg; + bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. */ + lua_assert(expr_isstrk(key)); + idx = const_str(fs, key); + if (idx <= BCMAX_C) { + bcreg_reserve(fs, 2+LJ_FR2); + bcemit_ABC(fs, BC_TGETS, func, obj, idx); + } else { + bcreg_reserve(fs, 3+LJ_FR2); + bcemit_AD(fs, BC_KSTR, func+2+LJ_FR2, idx); + bcemit_ABC(fs, BC_TGETV, func, obj, func+2+LJ_FR2); + fs->freereg--; + } + e->u.s.info = func; + e->k = VNONRELOC; +} + +/* -- Bytecode emitter for branches --------------------------------------- */ + +/* Emit unconditional branch. */ +static BCPos bcemit_jmp(FuncState *fs) +{ + BCPos jpc = fs->jpc; + BCPos j = fs->pc - 1; + BCIns *ip = &fs->bcbase[j].ins; + fs->jpc = NO_JMP; + if ((int32_t)j >= (int32_t)fs->lasttarget && bc_op(*ip) == BC_UCLO) { + setbc_j(ip, NO_JMP); + fs->lasttarget = j+1; + } else { + j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP); + } + jmp_append(fs, &j, jpc); + return j; +} + +/* Invert branch condition of bytecode instruction. */ +static void invertcond(FuncState *fs, ExpDesc *e) +{ + BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins; + setbc_op(ip, bc_op(*ip)^1); +} + +/* Emit conditional branch. */ +static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond) +{ + BCPos pc; + if (e->k == VRELOCABLE) { + BCIns *ip = bcptr(fs, e); + if (bc_op(*ip) == BC_NOT) { + *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip)); + return bcemit_jmp(fs); + } + } + if (e->k != VNONRELOC) { + bcreg_reserve(fs, 1); + expr_toreg_nobranch(fs, e, fs->freereg-1); + } + bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info); + pc = bcemit_jmp(fs); + expr_free(fs, e); + return pc; +} + +/* Emit branch on true condition. */ +static void bcemit_branch_t(FuncState *fs, ExpDesc *e) +{ + BCPos pc; + expr_discharge(fs, e); + if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) + pc = NO_JMP; /* Never jump. 
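+  ** (String, number and true constants can never be falsy, so the false
+  ** branch is unreachable and no jump needs to be emitted.)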
*/ + else if (e->k == VJMP) + invertcond(fs, e), pc = e->u.s.info; + else if (e->k == VKFALSE || e->k == VKNIL) + expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); + else + pc = bcemit_branch(fs, e, 0); + jmp_append(fs, &e->f, pc); + jmp_tohere(fs, e->t); + e->t = NO_JMP; +} + +/* Emit branch on false condition. */ +static void bcemit_branch_f(FuncState *fs, ExpDesc *e) +{ + BCPos pc; + expr_discharge(fs, e); + if (e->k == VKNIL || e->k == VKFALSE) + pc = NO_JMP; /* Never jump. */ + else if (e->k == VJMP) + pc = e->u.s.info; + else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) + expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); + else + pc = bcemit_branch(fs, e, 1); + jmp_append(fs, &e->t, pc); + jmp_tohere(fs, e->f); + e->f = NO_JMP; +} + +/* -- Bytecode emitter for operators -------------------------------------- */ + +/* Try constant-folding of arithmetic operators. */ +static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2) +{ + TValue o; + lua_Number n; + if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0; + n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD); + setnumV(&o, n); + if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */ + if (LJ_DUALNUM) { + int32_t k = lj_num2int(n); + if ((lua_Number)k == n) { + setintV(&e1->u.nval, k); + return 1; + } + } + setnumV(&e1->u.nval, n); + return 1; +} + +/* Emit arithmetic operator. */ +static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) +{ + BCReg rb, rc, t; + uint32_t op; + if (foldarith(opr, e1, e2)) + return; + if (opr == OPR_POW) { + op = BC_POW; + rc = expr_toanyreg(fs, e2); + rb = expr_toanyreg(fs, e1); + } else { + op = opr-OPR_ADD+BC_ADDVV; + /* Must discharge 2nd operand first since VINDEXED might free regs. */ + expr_toval(fs, e2); + if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C) + op -= BC_ADDVV-BC_ADDVN; + else + rc = expr_toanyreg(fs, e2); + /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ + lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC); + expr_toval(fs, e1); + /* Avoid two consts to satisfy bytecode constraints. */ + if (expr_isnumk(e1) && !expr_isnumk(e2) && + (t = const_num(fs, e1)) <= BCMAX_B) { + rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV; + } else { + rb = expr_toanyreg(fs, e1); + } + } + /* Using expr_free might cause asserts if the order is wrong. */ + if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; + if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; + e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc); + e1->k = VRELOCABLE; +} + +/* Emit comparison operator. */ +static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) +{ + ExpDesc *eret = e1; + BCIns ins; + expr_toval(fs, e1); + if (opr == OPR_EQ || opr == OPR_NE) { + BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV; + BCReg ra; + if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */ + ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. 
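+  ** (The specialized ISEQ/ISNE forms below encode the constant operand in
+  ** the D field, so only one of the two operands needs a register.)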
*/ + expr_toval(fs, e2); + switch (e2->k) { + case VKNIL: case VKFALSE: case VKTRUE: + ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2)); + break; + case VKSTR: + ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2)); + break; + case VKNUM: + ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2)); + break; + default: + ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2)); + break; + } + } else { + uint32_t op = opr-OPR_LT+BC_ISLT; + BCReg ra, rd; + if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */ + e1 = e2; e2 = eret; /* Swap operands. */ + op = ((op-BC_ISLT)^3)+BC_ISLT; + expr_toval(fs, e1); + } + rd = expr_toanyreg(fs, e2); + ra = expr_toanyreg(fs, e1); + ins = BCINS_AD(op, ra, rd); + } + /* Using expr_free might cause asserts if the order is wrong. */ + if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; + if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; + bcemit_INS(fs, ins); + eret->u.s.info = bcemit_jmp(fs); + eret->k = VJMP; +} + +/* Fixup left side of binary operator. */ +static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e) +{ + if (op == OPR_AND) { + bcemit_branch_t(fs, e); + } else if (op == OPR_OR) { + bcemit_branch_f(fs, e); + } else if (op == OPR_CONCAT) { + expr_tonextreg(fs, e); + } else if (op == OPR_EQ || op == OPR_NE) { + if (!expr_isk_nojump(e)) expr_toanyreg(fs, e); + } else { + if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e); + } +} + +/* Emit binary operator. */ +static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2) +{ + if (op <= OPR_POW) { + bcemit_arith(fs, op, e1, e2); + } else if (op == OPR_AND) { + lua_assert(e1->t == NO_JMP); /* List must be closed. */ + expr_discharge(fs, e2); + jmp_append(fs, &e2->f, e1->f); + *e1 = *e2; + } else if (op == OPR_OR) { + lua_assert(e1->f == NO_JMP); /* List must be closed. */ + expr_discharge(fs, e2); + jmp_append(fs, &e2->t, e1->t); + *e1 = *e2; + } else if (op == OPR_CONCAT) { + expr_toval(fs, e2); + if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { + lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1); + expr_free(fs, e1); + setbc_b(bcptr(fs, e2), e1->u.s.info); + e1->u.s.info = e2->u.s.info; + } else { + expr_tonextreg(fs, e2); + expr_free(fs, e2); + expr_free(fs, e1); + e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info); + } + e1->k = VRELOCABLE; + } else { + lua_assert(op == OPR_NE || op == OPR_EQ || + op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT); + bcemit_comp(fs, op, e1, e2); + } +} + +/* Emit unary operator. */ +static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e) +{ + if (op == BC_NOT) { + /* Swap true and false lists. */ + { BCPos temp = e->f; e->f = e->t; e->t = temp; } + jmp_dropval(fs, e->f); + jmp_dropval(fs, e->t); + expr_discharge(fs, e); + if (e->k == VKNIL || e->k == VKFALSE) { + e->k = VKTRUE; + return; + } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) { + e->k = VKFALSE; + return; + } else if (e->k == VJMP) { + invertcond(fs, e); + return; + } else if (e->k == VRELOCABLE) { + bcreg_reserve(fs, 1); + setbc_a(bcptr(fs, e), fs->freereg-1); + e->u.s.info = fs->freereg-1; + e->k = VNONRELOC; + } else { + lua_assert(e->k == VNONRELOC); + } + } else { + lua_assert(op == BC_UNM || op == BC_LEN); + if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ +#if LJ_HASFFI + if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. 
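+  ** (E.g. "-(1LL)" negates the boxed 64 bit integer in place; for an
+  ** imaginary literal such as "-12.5i" only the sign of the imaginary
+  ** part is flipped.)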
*/ + GCcdata *cd = cdataV(&e->u.nval); + int64_t *p = (int64_t *)cdataptr(cd); + if (cd->ctypeid == CTID_COMPLEX_DOUBLE) + p[1] ^= (int64_t)U64x(80000000,00000000); + else + *p = -*p; + return; + } else +#endif + if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */ + TValue *o = expr_numtv(e); + if (tvisint(o)) { + int32_t k = intV(o); + if (k == -k) + setnumV(o, -(lua_Number)k); + else + setintV(o, -k); + return; + } else { + o->u64 ^= U64x(80000000,00000000); + return; + } + } + } + expr_toanyreg(fs, e); + } + expr_free(fs, e); + e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info); + e->k = VRELOCABLE; +} + +/* -- Lexer support ------------------------------------------------------- */ + +/* Check and consume optional token. */ +static int lex_opt(LexState *ls, LexToken tok) +{ + if (ls->tok == tok) { + lj_lex_next(ls); + return 1; + } + return 0; +} + +/* Check and consume token. */ +static void lex_check(LexState *ls, LexToken tok) +{ + if (ls->tok != tok) + err_token(ls, tok); + lj_lex_next(ls); +} + +/* Check for matching token. */ +static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line) +{ + if (!lex_opt(ls, what)) { + if (line == ls->linenumber) { + err_token(ls, what); + } else { + const char *swhat = lj_lex_token2str(ls, what); + const char *swho = lj_lex_token2str(ls, who); + lj_lex_error(ls, ls->tok, LJ_ERR_XMATCH, swhat, swho, line); + } + } +} + +/* Check for string token. */ +static GCstr *lex_str(LexState *ls) +{ + GCstr *s; + if (ls->tok != TK_name && (LJ_52 || ls->tok != TK_goto)) + err_token(ls, TK_name); + s = strV(&ls->tokval); + lj_lex_next(ls); + return s; +} + +/* -- Variable handling --------------------------------------------------- */ + +#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]]) + +/* Define a new local variable. */ +static void var_new(LexState *ls, BCReg n, GCstr *name) +{ + FuncState *fs = ls->fs; + MSize vtop = ls->vtop; + checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables"); + if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { + if (ls->sizevstack >= LJ_MAX_VSTACK) + lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); + lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); + } + lua_assert((uintptr_t)name < VARNAME__MAX || + lj_tab_getstr(fs->kt, name) != NULL); + /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ + setgcref(ls->vstack[vtop].name, obj2gco(name)); + fs->varmap[fs->nactvar+n] = (uint16_t)vtop; + ls->vtop = vtop+1; +} + +#define var_new_lit(ls, n, v) \ + var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1)) + +#define var_new_fixed(ls, n, vn) \ + var_new(ls, (n), (GCstr *)(uintptr_t)(vn)) + +/* Add local variables. */ +static void var_add(LexState *ls, BCReg nvars) +{ + FuncState *fs = ls->fs; + BCReg nactvar = fs->nactvar; + while (nvars--) { + VarInfo *v = &var_get(ls, fs, nactvar); + v->startpc = fs->pc; + v->slot = nactvar++; + v->info = 0; + } + fs->nactvar = nactvar; +} + +/* Remove local variables. */ +static void var_remove(LexState *ls, BCReg tolevel) +{ + FuncState *fs = ls->fs; + while (fs->nactvar > tolevel) + var_get(ls, fs, --fs->nactvar).endpc = fs->pc; +} + +/* Lookup local variable name. */ +static BCReg var_lookup_local(FuncState *fs, GCstr *n) +{ + int i; + for (i = fs->nactvar-1; i >= 0; i--) { + if (n == strref(var_get(fs->ls, fs, i).name)) + return (BCReg)i; + } + return (BCReg)-1; /* Not found. */ +} + +/* Lookup or add upvalue index. 
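+** (uvtmp records whether the upvalue refers to a local of the enclosing
+** function or is forwarded from an outer upvalue; the LJ_MAX_VSTACK offset
+** marks the latter and is resolved in fs_fixup_uv2().)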
*/ +static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e) +{ + MSize i, n = fs->nuv; + for (i = 0; i < n; i++) + if (fs->uvmap[i] == vidx) + return i; /* Already exists. */ + /* Otherwise create a new one. */ + checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); + lua_assert(e->k == VLOCAL || e->k == VUPVAL); + fs->uvmap[n] = (uint16_t)vidx; + fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info); + fs->nuv = n+1; + return n; +} + +/* Forward declaration. */ +static void fscope_uvmark(FuncState *fs, BCReg level); + +/* Recursively lookup variables in enclosing functions. */ +static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first) +{ + if (fs) { + BCReg reg = var_lookup_local(fs, name); + if ((int32_t)reg >= 0) { /* Local in this function? */ + expr_init(e, VLOCAL, reg); + if (!first) + fscope_uvmark(fs, reg); /* Scope now has an upvalue. */ + return (MSize)(e->u.s.aux = (uint32_t)fs->varmap[reg]); + } else { + MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */ + if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */ + e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e); + e->k = VUPVAL; + return vidx; + } + } + } else { /* Not found in any function, must be a global. */ + expr_init(e, VGLOBAL, 0); + e->u.sval = name; + } + return (MSize)-1; /* Global. */ +} + +/* Lookup variable name. */ +#define var_lookup(ls, e) \ + var_lookup_((ls)->fs, lex_str(ls), (e), 1) + +/* -- Goto an label handling ---------------------------------------------- */ + +/* Add a new goto or label. */ +static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc) +{ + FuncState *fs = ls->fs; + MSize vtop = ls->vtop; + if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { + if (ls->sizevstack >= LJ_MAX_VSTACK) + lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); + lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); + } + lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL); + /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ + setgcref(ls->vstack[vtop].name, obj2gco(name)); + ls->vstack[vtop].startpc = pc; + ls->vstack[vtop].slot = (uint8_t)fs->nactvar; + ls->vstack[vtop].info = info; + ls->vtop = vtop+1; + return vtop; +} + +#define gola_isgoto(v) ((v)->info & VSTACK_GOTO) +#define gola_islabel(v) ((v)->info & VSTACK_LABEL) +#define gola_isgotolabel(v) ((v)->info & (VSTACK_GOTO|VSTACK_LABEL)) + +/* Patch goto to jump to label. */ +static void gola_patch(LexState *ls, VarInfo *vg, VarInfo *vl) +{ + FuncState *fs = ls->fs; + BCPos pc = vg->startpc; + setgcrefnull(vg->name); /* Invalidate pending goto. */ + setbc_a(&fs->bcbase[pc].ins, vl->slot); + jmp_patch(fs, pc, vl->startpc); +} + +/* Patch goto to close upvalues. */ +static void gola_close(LexState *ls, VarInfo *vg) +{ + FuncState *fs = ls->fs; + BCPos pc = vg->startpc; + BCIns *ip = &fs->bcbase[pc].ins; + lua_assert(gola_isgoto(vg)); + lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO); + setbc_a(ip, vg->slot); + if (bc_op(*ip) == BC_JMP) { + BCPos next = jmp_next(fs, pc); + if (next != NO_JMP) jmp_patch(fs, next, pc); /* Jump to UCLO. */ + setbc_op(ip, BC_UCLO); /* Turn into UCLO. */ + setbc_j(ip, NO_JMP); + } +} + +/* Resolve pending forward gotos for label. 
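+** (A goto must not jump into the scope of a local variable; the check
+** below reports the offending variable name via LJ_ERR_XGSCOPE.)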
*/ +static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx) +{ + VarInfo *vg = ls->vstack + bl->vstart; + VarInfo *vl = ls->vstack + idx; + for (; vg < vl; vg++) + if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) { + if (vg->slot < vl->slot) { + GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name); + lua_assert((uintptr_t)name >= VARNAME__MAX); + ls->linenumber = ls->fs->bcbase[vg->startpc].line; + lua_assert(strref(vg->name) != NAME_BREAK); + lj_lex_error(ls, 0, LJ_ERR_XGSCOPE, + strdata(strref(vg->name)), strdata(name)); + } + gola_patch(ls, vg, vl); + } +} + +/* Fixup remaining gotos and labels for scope. */ +static void gola_fixup(LexState *ls, FuncScope *bl) +{ + VarInfo *v = ls->vstack + bl->vstart; + VarInfo *ve = ls->vstack + ls->vtop; + for (; v < ve; v++) { + GCstr *name = strref(v->name); + if (name != NULL) { /* Only consider remaining valid gotos/labels. */ + if (gola_islabel(v)) { + VarInfo *vg; + setgcrefnull(v->name); /* Invalidate label that goes out of scope. */ + for (vg = v+1; vg < ve; vg++) /* Resolve pending backward gotos. */ + if (strref(vg->name) == name && gola_isgoto(vg)) { + if ((bl->flags&FSCOPE_UPVAL) && vg->slot > v->slot) + gola_close(ls, vg); + gola_patch(ls, vg, v); + } + } else if (gola_isgoto(v)) { + if (bl->prev) { /* Propagate goto or break to outer scope. */ + bl->prev->flags |= name == NAME_BREAK ? FSCOPE_BREAK : FSCOPE_GOLA; + v->slot = bl->nactvar; + if ((bl->flags & FSCOPE_UPVAL)) + gola_close(ls, v); + } else { /* No outer scope: undefined goto label or no loop. */ + ls->linenumber = ls->fs->bcbase[v->startpc].line; + if (name == NAME_BREAK) + lj_lex_error(ls, 0, LJ_ERR_XBREAK); + else + lj_lex_error(ls, 0, LJ_ERR_XLUNDEF, strdata(name)); + } + } + } + } +} + +/* Find existing label. */ +static VarInfo *gola_findlabel(LexState *ls, GCstr *name) +{ + VarInfo *v = ls->vstack + ls->fs->bl->vstart; + VarInfo *ve = ls->vstack + ls->vtop; + for (; v < ve; v++) + if (strref(v->name) == name && gola_islabel(v)) + return v; + return NULL; +} + +/* -- Scope handling ------------------------------------------------------ */ + +/* Begin a scope. */ +static void fscope_begin(FuncState *fs, FuncScope *bl, int flags) +{ + bl->nactvar = (uint8_t)fs->nactvar; + bl->flags = flags; + bl->vstart = fs->ls->vtop; + bl->prev = fs->bl; + fs->bl = bl; + lua_assert(fs->freereg == fs->nactvar); +} + +/* End a scope. */ +static void fscope_end(FuncState *fs) +{ + FuncScope *bl = fs->bl; + LexState *ls = fs->ls; + fs->bl = bl->prev; + var_remove(ls, bl->nactvar); + fs->freereg = fs->nactvar; + lua_assert(bl->nactvar == fs->nactvar); + if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL) + bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); + if ((bl->flags & FSCOPE_BREAK)) { + if ((bl->flags & FSCOPE_LOOP)) { + MSize idx = gola_new(ls, NAME_BREAK, VSTACK_LABEL, fs->pc); + ls->vtop = idx; /* Drop break label immediately. */ + gola_resolve(ls, bl, idx); + return; + } /* else: need the fixup step to propagate the breaks. */ + } else if (!(bl->flags & FSCOPE_GOLA)) { + return; + } + gola_fixup(ls, bl); +} + +/* Mark scope as having an upvalue. */ +static void fscope_uvmark(FuncState *fs, BCReg level) +{ + FuncScope *bl; + for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev) + ; + if (bl) + bl->flags |= FSCOPE_UPVAL; +} + +/* -- Function state management ------------------------------------------- */ + +/* Fixup bytecode for prototype. 
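+** (Slot 0 is rewritten to the BC_FUNCF or BC_FUNCV header carrying the
+** final frame size; the remaining instructions are copied verbatim.)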
*/ +static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n) +{ + BCInsLine *base = fs->bcbase; + MSize i; + pt->sizebc = n; + bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF, + fs->framesize, 0); + for (i = 1; i < n; i++) + bc[i] = base[i].ins; +} + +/* Fixup upvalues for child prototype, step #2. */ +static void fs_fixup_uv2(FuncState *fs, GCproto *pt) +{ + VarInfo *vstack = fs->ls->vstack; + uint16_t *uv = proto_uv(pt); + MSize i, n = pt->sizeuv; + for (i = 0; i < n; i++) { + VarIndex vidx = uv[i]; + if (vidx >= LJ_MAX_VSTACK) + uv[i] = vidx - LJ_MAX_VSTACK; + else if ((vstack[vidx].info & VSTACK_VAR_RW)) + uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL; + else + uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL | PROTO_UV_IMMUTABLE; + } +} + +/* Fixup constants for prototype. */ +static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr) +{ + GCtab *kt; + TValue *array; + Node *node; + MSize i, hmask; + checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants"); + checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants"); + setmref(pt->k, kptr); + pt->sizekn = fs->nkn; + pt->sizekgc = fs->nkgc; + kt = fs->kt; + array = tvref(kt->array); + for (i = 0; i < kt->asize; i++) + if (tvhaskslot(&array[i])) { + TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])]; + if (LJ_DUALNUM) + setintV(tv, (int32_t)i); + else + setnumV(tv, (lua_Number)i); + } + node = noderef(kt->node); + hmask = kt->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (tvhaskslot(&n->val)) { + ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val); + lua_assert(!tvisint(&n->key)); + if (tvisnum(&n->key)) { + TValue *tv = &((TValue *)kptr)[kidx]; + if (LJ_DUALNUM) { + lua_Number nn = numV(&n->key); + int32_t k = lj_num2int(nn); + lua_assert(!tvismzero(&n->key)); + if ((lua_Number)k == nn) + setintV(tv, k); + else + *tv = n->key; + } else { + *tv = n->key; + } + } else { + GCobj *o = gcV(&n->key); + setgcref(((GCRef *)kptr)[~kidx], o); + lj_gc_objbarrier(fs->L, pt, o); + if (tvisproto(&n->key)) + fs_fixup_uv2(fs, gco2pt(o)); + } + } + } +} + +/* Fixup upvalues for prototype, step #1. */ +static void fs_fixup_uv1(FuncState *fs, GCproto *pt, uint16_t *uv) +{ + setmref(pt->uv, uv); + pt->sizeuv = fs->nuv; + memcpy(uv, fs->uvtmp, fs->nuv*sizeof(VarIndex)); +} + +#ifndef LUAJIT_DISABLE_DEBUGINFO +/* Prepare lineinfo for prototype. */ +static size_t fs_prep_line(FuncState *fs, BCLine numline) +{ + return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2); +} + +/* Fixup lineinfo for prototype. */ +static void fs_fixup_line(FuncState *fs, GCproto *pt, + void *lineinfo, BCLine numline) +{ + BCInsLine *base = fs->bcbase + 1; + BCLine first = fs->linedefined; + MSize i = 0, n = fs->pc-1; + pt->firstline = fs->linedefined; + pt->numline = numline; + setmref(pt->lineinfo, lineinfo); + if (LJ_LIKELY(numline < 256)) { + uint8_t *li = (uint8_t *)lineinfo; + do { + BCLine delta = base[i].line - first; + lua_assert(delta >= 0 && delta < 256); + li[i] = (uint8_t)delta; + } while (++i < n); + } else if (LJ_LIKELY(numline < 65536)) { + uint16_t *li = (uint16_t *)lineinfo; + do { + BCLine delta = base[i].line - first; + lua_assert(delta >= 0 && delta < 65536); + li[i] = (uint16_t)delta; + } while (++i < n); + } else { + uint32_t *li = (uint32_t *)lineinfo; + do { + BCLine delta = base[i].line - first; + lua_assert(delta >= 0); + li[i] = (uint32_t)delta; + } while (++i < n); + } +} + +/* Prepare variable info for prototype. 
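+** (Writes upvalue names followed by local variable names with
+** ULEB128-compressed live ranges into the temporary buffer and returns
+** its length; fs_fixup_var() below copies the result into the prototype.)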
*/ +static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar) +{ + VarInfo *vs =ls->vstack, *ve; + MSize i, n; + BCPos lastpc; + lj_buf_reset(&ls->sb); /* Copy to temp. string buffer. */ + /* Store upvalue names. */ + for (i = 0, n = fs->nuv; i < n; i++) { + GCstr *s = strref(vs[fs->uvmap[i]].name); + MSize len = s->len+1; + char *p = lj_buf_more(&ls->sb, len); + p = lj_buf_wmem(p, strdata(s), len); + setsbufP(&ls->sb, p); + } + *ofsvar = sbuflen(&ls->sb); + lastpc = 0; + /* Store local variable names and compressed ranges. */ + for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) { + if (!gola_isgotolabel(vs)) { + GCstr *s = strref(vs->name); + BCPos startpc; + char *p; + if ((uintptr_t)s < VARNAME__MAX) { + p = lj_buf_more(&ls->sb, 1 + 2*5); + *p++ = (char)(uintptr_t)s; + } else { + MSize len = s->len+1; + p = lj_buf_more(&ls->sb, len + 2*5); + p = lj_buf_wmem(p, strdata(s), len); + } + startpc = vs->startpc; + p = lj_strfmt_wuleb128(p, startpc-lastpc); + p = lj_strfmt_wuleb128(p, vs->endpc-startpc); + setsbufP(&ls->sb, p); + lastpc = startpc; + } + } + lj_buf_putb(&ls->sb, '\0'); /* Terminator for varinfo. */ + return sbuflen(&ls->sb); +} + +/* Fixup variable info for prototype. */ +static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar) +{ + setmref(pt->uvinfo, p); + setmref(pt->varinfo, (char *)p + ofsvar); + memcpy(p, sbufB(&ls->sb), sbuflen(&ls->sb)); /* Copy from temp. buffer. */ +} +#else + +/* Initialize with empty debug info, if disabled. */ +#define fs_prep_line(fs, numline) (UNUSED(numline), 0) +#define fs_fixup_line(fs, pt, li, numline) \ + pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL) +#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0) +#define fs_fixup_var(ls, pt, p, ofsvar) \ + setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL) + +#endif + +/* Check if bytecode op returns. */ +static int bcopisret(BCOp op) +{ + switch (op) { + case BC_CALLMT: case BC_CALLT: + case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1: + return 1; + default: + return 0; + } +} + +/* Fixup return instruction for prototype. */ +static void fs_fixup_ret(FuncState *fs) +{ + BCPos lastpc = fs->pc; + if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) { + if ((fs->bl->flags & FSCOPE_UPVAL)) + bcemit_AJ(fs, BC_UCLO, 0, 0); + bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */ + } + fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */ + fscope_end(fs); + lua_assert(fs->bl == NULL); + /* May need to fixup returns encoded before first function was created. */ + if (fs->flags & PROTO_FIXUP_RETURN) { + BCPos pc; + for (pc = 1; pc < lastpc; pc++) { + BCIns ins = fs->bcbase[pc].ins; + BCPos offset; + switch (bc_op(ins)) { + case BC_CALLMT: case BC_CALLT: + case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1: + offset = bcemit_INS(fs, ins); /* Copy original instruction. */ + fs->bcbase[offset].line = fs->bcbase[pc].line; + offset = offset-(pc+1)+BCBIAS_J; + if (offset > BCMAX_D) + err_syntax(fs->ls, LJ_ERR_XFIXUP); + /* Replace with UCLO plus branch. */ + fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset); + break; + case BC_UCLO: + return; /* We're done. */ + default: + break; + } + } + } +} + +/* Finish a FuncState and return the new prototype. */ +static GCproto *fs_finish(LexState *ls, BCLine line) +{ + lua_State *L = ls->L; + FuncState *fs = ls->fs; + BCLine numline = line - fs->linedefined; + size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar; + GCproto *pt; + + /* Apply final fixups. 
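+  ** (The prototype is colocated in one allocation: header, bytecode,
+  ** GC object constants (addressed at negative indices from pt->k),
+  ** number constants, upvalue map, line info and variable info.)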
*/ + fs_fixup_ret(fs); + + /* Calculate total size of prototype including all colocated arrays. */ + sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef); + sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1); + ofsk = sizept; sizept += fs->nkn*sizeof(TValue); + ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2; + ofsli = sizept; sizept += fs_prep_line(fs, numline); + ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar); + + /* Allocate prototype and initialize its fields. */ + pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept); + pt->gct = ~LJ_TPROTO; + pt->sizept = (MSize)sizept; + pt->trace = 0; + pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN)); + pt->numparams = fs->numparams; + pt->framesize = fs->framesize; + setgcref(pt->chunkname, obj2gco(ls->chunkname)); + + /* Close potentially uninitialized gap between bc and kgc. */ + *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0; + fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc); + fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk)); + fs_fixup_uv1(fs, pt, (uint16_t *)((char *)pt + ofsuv)); + fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline); + fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar); + + lj_vmevent_send(L, BC, + setprotoV(L, L->top++, pt); + ); + + L->top--; /* Pop table of constants. */ + ls->vtop = fs->vbase; /* Reset variable stack. */ + ls->fs = fs->prev; + lua_assert(ls->fs != NULL || ls->tok == TK_eof); + return pt; +} + +/* Initialize a new FuncState. */ +static void fs_init(LexState *ls, FuncState *fs) +{ + lua_State *L = ls->L; + fs->prev = ls->fs; ls->fs = fs; /* Append to list. */ + fs->ls = ls; + fs->vbase = ls->vtop; + fs->L = L; + fs->pc = 0; + fs->lasttarget = 0; + fs->jpc = NO_JMP; + fs->freereg = 0; + fs->nkgc = 0; + fs->nkn = 0; + fs->nactvar = 0; + fs->nuv = 0; + fs->bl = NULL; + fs->flags = 0; + fs->framesize = 1; /* Minimum frame size. */ + fs->kt = lj_tab_new(L, 0, 0); + /* Anchor table of constants in stack to avoid being collected. */ + settabV(L, L->top, fs->kt); + incr_top(L); +} + +/* -- Expressions --------------------------------------------------------- */ + +/* Forward declaration. */ +static void expr(LexState *ls, ExpDesc *v); + +/* Return string expression. */ +static void expr_str(LexState *ls, ExpDesc *e) +{ + expr_init(e, VKSTR, 0); + e->u.sval = lex_str(ls); +} + +/* Return index expression. */ +static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e) +{ + /* Already called: expr_toval(fs, e). */ + t->k = VINDEXED; + if (expr_isnumk(e)) { +#if LJ_DUALNUM + if (tvisint(expr_numtv(e))) { + int32_t k = intV(expr_numtv(e)); + if (checku8(k)) { + t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */ + return; + } + } +#else + lua_Number n = expr_numberV(e); + int32_t k = lj_num2int(n); + if (checku8(k) && n == (lua_Number)k) { + t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */ + return; + } +#endif + } else if (expr_isstrk(e)) { + BCReg idx = const_str(fs, e); + if (idx <= BCMAX_C) { + t->u.s.aux = ~idx; /* -256..-1: const string key */ + return; + } + } + t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */ +} + +/* Parse index expression with named field. */ +static void expr_field(LexState *ls, ExpDesc *v) +{ + FuncState *fs = ls->fs; + ExpDesc key; + expr_toanyreg(fs, v); + lj_lex_next(ls); /* Skip dot or colon. */ + expr_str(ls, &key); + expr_index(fs, v, &key); +} + +/* Parse index expression with brackets. 
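+** (Handles the "t[expr]" form; expr_index() then decides between constant
+** byte, constant string and register keys.)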
*/ +static void expr_bracket(LexState *ls, ExpDesc *v) +{ + lj_lex_next(ls); /* Skip '['. */ + expr(ls, v); + expr_toval(ls->fs, v); + lex_check(ls, ']'); +} + +/* Get value of constant expression. */ +static void expr_kvalue(TValue *v, ExpDesc *e) +{ + if (e->k <= VKTRUE) { + setpriV(v, ~(uint32_t)e->k); + } else if (e->k == VKSTR) { + setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR); + } else { + lua_assert(tvisnumber(expr_numtv(e))); + *v = *expr_numtv(e); + } +} + +/* Parse table constructor expression. */ +static void expr_table(LexState *ls, ExpDesc *e) +{ + FuncState *fs = ls->fs; + BCLine line = ls->linenumber; + GCtab *t = NULL; + int vcall = 0, needarr = 0, fixt = 0; + uint32_t narr = 1; /* First array index. */ + uint32_t nhash = 0; /* Number of hash entries. */ + BCReg freg = fs->freereg; + BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0); + expr_init(e, VNONRELOC, freg); + bcreg_reserve(fs, 1); + freg++; + lex_check(ls, '{'); + while (ls->tok != '}') { + ExpDesc key, val; + vcall = 0; + if (ls->tok == '[') { + expr_bracket(ls, &key); /* Already calls expr_toval. */ + if (!expr_isk(&key)) expr_index(fs, e, &key); + if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++; + lex_check(ls, '='); + } else if ((ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) && + lj_lex_lookahead(ls) == '=') { + expr_str(ls, &key); + lex_check(ls, '='); + nhash++; + } else { + expr_init(&key, VKNUM, 0); + setintV(&key.u.nval, (int)narr); + narr++; + needarr = vcall = 1; + } + expr(ls, &val); + if (expr_isk(&key) && key.k != VKNIL && + (key.k == VKSTR || expr_isk_nojump(&val))) { + TValue k, *v; + if (!t) { /* Create template table on demand. */ + BCReg kidx; + t = lj_tab_new(fs->L, needarr ? narr : 0, hsize2hbits(nhash)); + kidx = const_gc(fs, obj2gco(t), LJ_TTAB); + fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx); + } + vcall = 0; + expr_kvalue(&k, &key); + v = lj_tab_set(fs->L, t, &k); + lj_gc_anybarriert(fs->L, t); + if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */ + expr_kvalue(v, &val); + } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */ + settabV(fs->L, v, t); /* Preserve key with table itself as value. */ + fixt = 1; /* Fix this later, after all resizes. */ + goto nonconst; + } + } else { + nonconst: + if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; } + if (expr_isk(&key)) expr_index(fs, e, &key); + bcemit_store(fs, e, &val); + } + fs->freereg = freg; + if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break; + } + lex_match(ls, '}', '{', line); + if (vcall) { + BCInsLine *ilp = &fs->bcbase[fs->pc-1]; + ExpDesc en; + lua_assert(bc_a(ilp->ins) == freg && + bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB)); + expr_init(&en, VKNUM, 0); + en.u.nval.u32.lo = narr-1; + en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */ + if (narr > 256) { fs->pc--; ilp--; } + ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en)); + setbc_b(&ilp[-1].ins, 0); + } + if (pc == fs->pc-1) { /* Make expr relocable if possible. */ + e->u.s.info = pc; + fs->freereg--; + e->k = VRELOCABLE; + } else { + e->k = VNONRELOC; /* May have been changed by expr_index. */ + } + if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. 
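+  ** (Low 11 bits hold the array size hint, clamped to 0x7ff; the upper
+  ** 5 bits hold the hash size hint from hsize2hbits().)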
*/ + BCIns *ip = &fs->bcbase[pc].ins; + if (!needarr) narr = 0; + else if (narr < 3) narr = 3; + else if (narr > 0x7ff) narr = 0x7ff; + setbc_d(ip, narr|(hsize2hbits(nhash)<<11)); + } else { + if (needarr && t->asize < narr) + lj_tab_reasize(fs->L, t, narr-1); + if (fixt) { /* Fix value for dummy keys in template table. */ + Node *node = noderef(t->node); + uint32_t i, hmask = t->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (tvistab(&n->val)) { + lua_assert(tabV(&n->val) == t); + setnilV(&n->val); /* Turn value into nil. */ + } + } + } + lj_gc_check(fs->L); + } +} + +/* Parse function parameters. */ +static BCReg parse_params(LexState *ls, int needself) +{ + FuncState *fs = ls->fs; + BCReg nparams = 0; + lex_check(ls, '('); + if (needself) + var_new_lit(ls, nparams++, "self"); + if (ls->tok != ')') { + do { + if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) { + var_new(ls, nparams++, lex_str(ls)); + } else if (ls->tok == TK_dots) { + lj_lex_next(ls); + fs->flags |= PROTO_VARARG; + break; + } else { + err_syntax(ls, LJ_ERR_XPARAM); + } + } while (lex_opt(ls, ',')); + } + var_add(ls, nparams); + lua_assert(fs->nactvar == nparams); + bcreg_reserve(fs, nparams); + lex_check(ls, ')'); + return nparams; +} + +/* Forward declaration. */ +static void parse_chunk(LexState *ls); + +/* Parse body of a function. */ +static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line) +{ + FuncState fs, *pfs = ls->fs; + FuncScope bl; + GCproto *pt; + ptrdiff_t oldbase = pfs->bcbase - ls->bcstack; + fs_init(ls, &fs); + fscope_begin(&fs, &bl, 0); + fs.linedefined = line; + fs.numparams = (uint8_t)parse_params(ls, needself); + fs.bcbase = pfs->bcbase + pfs->pc; + fs.bclim = pfs->bclim - pfs->pc; + bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */ + parse_chunk(ls); + if (ls->tok != TK_end) lex_match(ls, TK_end, TK_function, line); + pt = fs_finish(ls, (ls->lastline = ls->linenumber)); + pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */ + pfs->bclim = (BCPos)(ls->sizebcstack - oldbase); + /* Store new prototype in the constant array of the parent. */ + expr_init(e, VRELOCABLE, + bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO))); +#if LJ_HASFFI + pfs->flags |= (fs.flags & PROTO_FFI); +#endif + if (!(pfs->flags & PROTO_CHILD)) { + if (pfs->flags & PROTO_HAS_RETURN) + pfs->flags |= PROTO_FIXUP_RETURN; + pfs->flags |= PROTO_CHILD; + } + lj_lex_next(ls); +} + +/* Parse expression list. Last expression is left open. */ +static BCReg expr_list(LexState *ls, ExpDesc *v) +{ + BCReg n = 1; + expr(ls, v); + while (lex_opt(ls, ',')) { + expr_tonextreg(ls->fs, v); + expr(ls, v); + n++; + } + return n; +} + +/* Parse function argument list. */ +static void parse_args(LexState *ls, ExpDesc *e) +{ + FuncState *fs = ls->fs; + ExpDesc args; + BCIns ins; + BCReg base; + BCLine line = ls->linenumber; + if (ls->tok == '(') { +#if !LJ_52 + if (line != ls->lastline) + err_syntax(ls, LJ_ERR_XAMBIG); +#endif + lj_lex_next(ls); + if (ls->tok == ')') { /* f(). */ + args.k = VVOID; + } else { + expr_list(ls, &args); + if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */ + setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */ + } + lex_match(ls, ')', '(', line); + } else if (ls->tok == '{') { + expr_table(ls, &args); + } else if (ls->tok == TK_string) { + expr_init(&args, VKSTR, 0); + args.u.sval = strV(&ls->tokval); + lj_lex_next(ls); + } else { + err_syntax(ls, LJ_ERR_XFUNARG); + return; /* Silence compiler. 
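+  ** (err_syntax() is declared LJ_NORET, but not every compiler can deduce
+  ** that control does not continue past it.)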
*/ + } + lua_assert(e->k == VNONRELOC); + base = e->u.s.info; /* Base register for call. */ + if (args.k == VCALL) { + ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2); + } else { + if (args.k != VVOID) + expr_tonextreg(fs, &args); + ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base - LJ_FR2); + } + expr_init(e, VCALL, bcemit_INS(fs, ins)); + e->u.s.aux = base; + fs->bcbase[fs->pc - 1].line = line; + fs->freereg = base+1; /* Leave one result by default. */ +} + +/* Parse primary expression. */ +static void expr_primary(LexState *ls, ExpDesc *v) +{ + FuncState *fs = ls->fs; + /* Parse prefix expression. */ + if (ls->tok == '(') { + BCLine line = ls->linenumber; + lj_lex_next(ls); + expr(ls, v); + lex_match(ls, ')', '(', line); + expr_discharge(ls->fs, v); + } else if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) { + var_lookup(ls, v); + } else { + err_syntax(ls, LJ_ERR_XSYMBOL); + } + for (;;) { /* Parse multiple expression suffixes. */ + if (ls->tok == '.') { + expr_field(ls, v); + } else if (ls->tok == '[') { + ExpDesc key; + expr_toanyreg(fs, v); + expr_bracket(ls, &key); + expr_index(fs, v, &key); + } else if (ls->tok == ':') { + ExpDesc key; + lj_lex_next(ls); + expr_str(ls, &key); + bcemit_method(fs, v, &key); + parse_args(ls, v); + } else if (ls->tok == '(' || ls->tok == TK_string || ls->tok == '{') { + expr_tonextreg(fs, v); + if (LJ_FR2) bcreg_reserve(fs, 1); + parse_args(ls, v); + } else { + break; + } + } +} + +/* Parse simple expression. */ +static void expr_simple(LexState *ls, ExpDesc *v) +{ + switch (ls->tok) { + case TK_number: + expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokval)) ? VKCDATA : VKNUM, 0); + copyTV(ls->L, &v->u.nval, &ls->tokval); + break; + case TK_string: + expr_init(v, VKSTR, 0); + v->u.sval = strV(&ls->tokval); + break; + case TK_nil: + expr_init(v, VKNIL, 0); + break; + case TK_true: + expr_init(v, VKTRUE, 0); + break; + case TK_false: + expr_init(v, VKFALSE, 0); + break; + case TK_dots: { /* Vararg. */ + FuncState *fs = ls->fs; + BCReg base; + checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS); + bcreg_reserve(fs, 1); + base = fs->freereg-1; + expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams)); + v->u.s.aux = base; + break; + } + case '{': /* Table constructor. */ + expr_table(ls, v); + return; + case TK_function: + lj_lex_next(ls); + parse_body(ls, v, 0, ls->linenumber); + return; + default: + expr_primary(ls, v); + return; + } + lj_lex_next(ls); +} + +/* Manage syntactic levels to avoid blowing up the stack. */ +static void synlevel_begin(LexState *ls) +{ + if (++ls->level >= LJ_MAX_XLEVEL) + lj_lex_error(ls, 0, LJ_ERR_XLEVELS); +} + +#define synlevel_end(ls) ((ls)->level--) + +/* Convert token to binary operator. */ +static BinOpr token2binop(LexToken tok) +{ + switch (tok) { + case '+': return OPR_ADD; + case '-': return OPR_SUB; + case '*': return OPR_MUL; + case '/': return OPR_DIV; + case '%': return OPR_MOD; + case '^': return OPR_POW; + case TK_concat: return OPR_CONCAT; + case TK_ne: return OPR_NE; + case TK_eq: return OPR_EQ; + case '<': return OPR_LT; + case TK_le: return OPR_LE; + case '>': return OPR_GT; + case TK_ge: return OPR_GE; + case TK_and: return OPR_AND; + case TK_or: return OPR_OR; + default: return OPR_NOBINOPR; + } +} + +/* Priorities for each binary operator. ORDER OPR. */ +static const struct { + uint8_t left; /* Left priority. */ + uint8_t right; /* Right priority. 
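+  ** (A right priority lower than the left priority makes the operator
+  ** right-associative, as for POW and CONCAT below.)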
*/ +} priority[] = { + {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */ + {10,9}, {5,4}, /* POW CONCAT (right associative) */ + {3,3}, {3,3}, /* EQ NE */ + {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE GT LE */ + {2,2}, {1,1} /* AND OR */ +}; + +#define UNARY_PRIORITY 8 /* Priority for unary operators. */ + +/* Forward declaration. */ +static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit); + +/* Parse unary expression. */ +static void expr_unop(LexState *ls, ExpDesc *v) +{ + BCOp op; + if (ls->tok == TK_not) { + op = BC_NOT; + } else if (ls->tok == '-') { + op = BC_UNM; + } else if (ls->tok == '#') { + op = BC_LEN; + } else { + expr_simple(ls, v); + return; + } + lj_lex_next(ls); + expr_binop(ls, v, UNARY_PRIORITY); + bcemit_unop(ls->fs, op, v); +} + +/* Parse binary expressions with priority higher than the limit. */ +static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit) +{ + BinOpr op; + synlevel_begin(ls); + expr_unop(ls, v); + op = token2binop(ls->tok); + while (op != OPR_NOBINOPR && priority[op].left > limit) { + ExpDesc v2; + BinOpr nextop; + lj_lex_next(ls); + bcemit_binop_left(ls->fs, op, v); + /* Parse binary expression with higher priority. */ + nextop = expr_binop(ls, &v2, priority[op].right); + bcemit_binop(ls->fs, op, v, &v2); + op = nextop; + } + synlevel_end(ls); + return op; /* Return unconsumed binary operator (if any). */ +} + +/* Parse expression. */ +static void expr(LexState *ls, ExpDesc *v) +{ + expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */ +} + +/* Assign expression to the next register. */ +static void expr_next(LexState *ls) +{ + ExpDesc e; + expr(ls, &e); + expr_tonextreg(ls->fs, &e); +} + +/* Parse conditional expression. */ +static BCPos expr_cond(LexState *ls) +{ + ExpDesc v; + expr(ls, &v); + if (v.k == VKNIL) v.k = VKFALSE; + bcemit_branch_t(ls->fs, &v); + return v.f; +} + +/* -- Assignments --------------------------------------------------------- */ + +/* List of LHS variables. */ +typedef struct LHSVarList { + ExpDesc v; /* LHS variable. */ + struct LHSVarList *prev; /* Link to previous LHS variable. */ +} LHSVarList; + +/* Eliminate write-after-read hazards for local variable assignment. */ +static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v) +{ + FuncState *fs = ls->fs; + BCReg reg = v->u.s.info; /* Check against this variable. */ + BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */ + int hazard = 0; + for (; lh; lh = lh->prev) { + if (lh->v.k == VINDEXED) { + if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */ + hazard = 1; + lh->v.u.s.info = tmp; + } + if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */ + hazard = 1; + lh->v.u.s.aux = tmp; + } + } + } + if (hazard) { + bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */ + bcreg_reserve(fs, 1); + } +} + +/* Adjust LHS/RHS of an assignment. */ +static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e) +{ + FuncState *fs = ls->fs; + int32_t extra = (int32_t)nvars - (int32_t)nexps; + if (e->k == VCALL) { + extra++; /* Compensate for the VCALL itself. */ + if (extra < 0) extra = 0; + setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */ + if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1); + } else { + if (e->k != VVOID) + expr_tonextreg(fs, e); /* Close last expression. */ + if (extra > 0) { /* Leftover LHS are set to nil. */ + BCReg reg = fs->freereg; + bcreg_reserve(fs, (BCReg)extra); + bcemit_nil(fs, reg, (BCReg)extra); + } + } +} + +/* Recursively parse assignment statement. 
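+** A rough illustration: "a, b.x, c[i] = f()" first collects the LHS list
+** by recursing on each comma, then assign_adjust() patches the call to
+** yield three results, and the stores are emitted from right to left while
+** the recursion unwinds.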
*/ +static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars) +{ + ExpDesc e; + checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX); + if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */ + LHSVarList vl; + vl.prev = lh; + expr_primary(ls, &vl.v); + if (vl.v.k == VLOCAL) + assign_hazard(ls, lh, &vl.v); + checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names"); + parse_assignment(ls, &vl, nvars+1); + } else { /* Parse RHS. */ + BCReg nexps; + lex_check(ls, '='); + nexps = expr_list(ls, &e); + if (nexps == nvars) { + if (e.k == VCALL) { + if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */ + ls->fs->freereg--; + e.k = VRELOCABLE; + } else { /* Multiple call results. */ + e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */ + e.k = VNONRELOC; + } + } + bcemit_store(ls->fs, &lh->v, &e); + return; + } + assign_adjust(ls, nvars, nexps, &e); + if (nexps > nvars) + ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */ + } + /* Assign RHS to LHS and recurse downwards. */ + expr_init(&e, VNONRELOC, ls->fs->freereg-1); + bcemit_store(ls->fs, &lh->v, &e); +} + +/* Parse call statement or assignment. */ +static void parse_call_assign(LexState *ls) +{ + FuncState *fs = ls->fs; + LHSVarList vl; + expr_primary(ls, &vl.v); + if (vl.v.k == VCALL) { /* Function call statement. */ + setbc_b(bcptr(fs, &vl.v), 1); /* No results. */ + } else { /* Start of an assignment. */ + vl.prev = NULL; + parse_assignment(ls, &vl, 1); + } +} + +/* Parse 'local' statement. */ +static void parse_local(LexState *ls) +{ + if (lex_opt(ls, TK_function)) { /* Local function declaration. */ + ExpDesc v, b; + FuncState *fs = ls->fs; + var_new(ls, 0, lex_str(ls)); + expr_init(&v, VLOCAL, fs->freereg); + v.u.s.aux = fs->varmap[fs->freereg]; + bcreg_reserve(fs, 1); + var_add(ls, 1); + parse_body(ls, &b, 0, ls->linenumber); + /* bcemit_store(fs, &v, &b) without setting VSTACK_VAR_RW. */ + expr_free(fs, &b); + expr_toreg(fs, &b, v.u.s.info); + /* The upvalue is in scope, but the local is only valid after the store. */ + var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc; + } else { /* Local variable declaration. */ + ExpDesc e; + BCReg nexps, nvars = 0; + do { /* Collect LHS. */ + var_new(ls, nvars++, lex_str(ls)); + } while (lex_opt(ls, ',')); + if (lex_opt(ls, '=')) { /* Optional RHS. */ + nexps = expr_list(ls, &e); + } else { /* Or implicitly set to nil. */ + e.k = VVOID; + nexps = 0; + } + assign_adjust(ls, nvars, nexps, &e); + var_add(ls, nvars); + } +} + +/* Parse 'function' statement. */ +static void parse_func(LexState *ls, BCLine line) +{ + FuncState *fs; + ExpDesc v, b; + int needself = 0; + lj_lex_next(ls); /* Skip 'function'. */ + /* Parse function name. */ + var_lookup(ls, &v); + while (ls->tok == '.') /* Multiple dot-separated fields. */ + expr_field(ls, &v); + if (ls->tok == ':') { /* Optional colon to signify method call. */ + needself = 1; + expr_field(ls, &v); + } + parse_body(ls, &b, needself, line); + fs = ls->fs; + bcemit_store(fs, &v, &b); + fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */ +} + +/* -- Control transfer statements ----------------------------------------- */ + +/* Check for end of block. */ +static int parse_isend(LexToken tok) +{ + switch (tok) { + case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof: + return 1; + default: + return 0; + } +} + +/* Parse 'return' statement. 
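+** A rough illustration: "return f(x)" is turned into a tail call
+** (BC_CALLT), "return a, b" emits BC_RET over contiguous registers and a
+** bare "return" emits BC_RET0.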
*/ +static void parse_return(LexState *ls) +{ + BCIns ins; + FuncState *fs = ls->fs; + lj_lex_next(ls); /* Skip 'return'. */ + fs->flags |= PROTO_HAS_RETURN; + if (parse_isend(ls->tok) || ls->tok == ';') { /* Bare return. */ + ins = BCINS_AD(BC_RET0, 0, 1); + } else { /* Return with one or more values. */ + ExpDesc e; /* Receives the _last_ expression in the list. */ + BCReg nret = expr_list(ls, &e); + if (nret == 1) { /* Return one result. */ + if (e.k == VCALL) { /* Check for tail call. */ + BCIns *ip = bcptr(fs, &e); + /* It doesn't pay off to add BC_VARGT just for 'return ...'. */ + if (bc_op(*ip) == BC_VARG) goto notailcall; + fs->pc--; + ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip)); + } else { /* Can return the result from any register. */ + ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2); + } + } else { + if (e.k == VCALL) { /* Append all results from a call. */ + notailcall: + setbc_b(bcptr(fs, &e), 0); + ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar); + } else { + expr_tonextreg(fs, &e); /* Force contiguous registers. */ + ins = BCINS_AD(BC_RET, fs->nactvar, nret+1); + } + } + } + if (fs->flags & PROTO_CHILD) + bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */ + bcemit_INS(fs, ins); +} + +/* Parse 'break' statement. */ +static void parse_break(LexState *ls) +{ + ls->fs->bl->flags |= FSCOPE_BREAK; + gola_new(ls, NAME_BREAK, VSTACK_GOTO, bcemit_jmp(ls->fs)); +} + +/* Parse 'goto' statement. */ +static void parse_goto(LexState *ls) +{ + FuncState *fs = ls->fs; + GCstr *name = lex_str(ls); + VarInfo *vl = gola_findlabel(ls, name); + if (vl) /* Treat backwards goto within same scope like a loop. */ + bcemit_AJ(fs, BC_LOOP, vl->slot, -1); /* No BC range check. */ + fs->bl->flags |= FSCOPE_GOLA; + gola_new(ls, name, VSTACK_GOTO, bcemit_jmp(fs)); +} + +/* Parse label. */ +static void parse_label(LexState *ls) +{ + FuncState *fs = ls->fs; + GCstr *name; + MSize idx; + fs->lasttarget = fs->pc; + fs->bl->flags |= FSCOPE_GOLA; + lj_lex_next(ls); /* Skip '::'. */ + name = lex_str(ls); + if (gola_findlabel(ls, name)) + lj_lex_error(ls, 0, LJ_ERR_XLDUP, strdata(name)); + idx = gola_new(ls, name, VSTACK_LABEL, fs->pc); + lex_check(ls, TK_label); + /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */ + for (;;) { + if (ls->tok == TK_label) { + synlevel_begin(ls); + parse_label(ls); + synlevel_end(ls); + } else if (LJ_52 && ls->tok == ';') { + lj_lex_next(ls); + } else { + break; + } + } + /* Trailing label is considered to be outside of scope. */ + if (parse_isend(ls->tok) && ls->tok != TK_until) + ls->vstack[idx].slot = fs->bl->nactvar; + gola_resolve(ls, fs->bl, idx); +} + +/* -- Blocks, loops and conditional statements ---------------------------- */ + +/* Parse a block. */ +static void parse_block(LexState *ls) +{ + FuncState *fs = ls->fs; + FuncScope bl; + fscope_begin(fs, &bl, 0); + parse_chunk(ls); + fscope_end(fs); +} + +/* Parse 'while' statement. */ +static void parse_while(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + BCPos start, loop, condexit; + FuncScope bl; + lj_lex_next(ls); /* Skip 'while'. */ + start = fs->lasttarget = fs->pc; + condexit = expr_cond(ls); + fscope_begin(fs, &bl, FSCOPE_LOOP); + lex_check(ls, TK_do); + loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0); + parse_block(ls); + jmp_patch(fs, bcemit_jmp(fs), start); + lex_match(ls, TK_end, TK_while, line); + fscope_end(fs); + jmp_tohere(fs, condexit); + jmp_patchins(fs, loop, fs->pc); +} + +/* Parse 'repeat' statement. 
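+** The 'until' condition is parsed inside the inner scope, so it may refer to +** locals of the loop body; open upvalues force an extra UCLO before looping.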
*/ +static void parse_repeat(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + BCPos loop = fs->lasttarget = fs->pc; + BCPos condexit; + FuncScope bl1, bl2; + fscope_begin(fs, &bl1, FSCOPE_LOOP); /* Breakable loop scope. */ + fscope_begin(fs, &bl2, 0); /* Inner scope. */ + lj_lex_next(ls); /* Skip 'repeat'. */ + bcemit_AD(fs, BC_LOOP, fs->nactvar, 0); + parse_chunk(ls); + lex_match(ls, TK_until, TK_repeat, line); + condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */ + if (!(bl2.flags & FSCOPE_UPVAL)) { /* No upvalues? Just end inner scope. */ + fscope_end(fs); + } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */ + parse_break(ls); /* Break from loop and close upvalues. */ + jmp_tohere(fs, condexit); + fscope_end(fs); /* End inner scope and close upvalues. */ + condexit = bcemit_jmp(fs); + } + jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */ + jmp_patchins(fs, loop, fs->pc); + fscope_end(fs); /* End loop scope. */ +} + +/* Parse numeric 'for'. */ +static void parse_for_num(LexState *ls, GCstr *varname, BCLine line) +{ + FuncState *fs = ls->fs; + BCReg base = fs->freereg; + FuncScope bl; + BCPos loop, loopend; + /* Hidden control variables. */ + var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX); + var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP); + var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP); + /* Visible copy of index variable. */ + var_new(ls, FORL_EXT, varname); + lex_check(ls, '='); + expr_next(ls); + lex_check(ls, ','); + expr_next(ls); + if (lex_opt(ls, ',')) { + expr_next(ls); + } else { + bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */ + bcreg_reserve(fs, 1); + } + var_add(ls, 3); /* Hidden control variables. */ + lex_check(ls, TK_do); + loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP); + fscope_begin(fs, &bl, 0); /* Scope for visible variables. */ + var_add(ls, 1); + bcreg_reserve(fs, 1); + parse_block(ls); + fscope_end(fs); + /* Perform loop inversion. Loop control instructions are at the end. */ + loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP); + fs->bcbase[loopend].line = line; /* Fix line for control ins. */ + jmp_patchins(fs, loopend, loop+1); + jmp_patchins(fs, loop, fs->pc); +} + +/* Try to predict whether the iterator is next() and specialize the bytecode. +** Detecting next() and pairs() by name is simplistic, but quite effective. +** The interpreter backs off if the check for the closure fails at runtime. +*/ +static int predict_next(LexState *ls, FuncState *fs, BCPos pc) +{ + BCIns ins = fs->bcbase[pc].ins; + GCstr *name; + cTValue *o; + switch (bc_op(ins)) { + case BC_MOV: + name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name)); + break; + case BC_UGET: + name = gco2str(gcref(ls->vstack[fs->uvmap[bc_d(ins)]].name)); + break; + case BC_GGET: + /* There's no inverse index (yet), so lookup the strings. */ + o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs")); + if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) + return 1; + o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next")); + if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) + return 1; + return 0; + default: + return 0; + } + return (name->len == 5 && !strcmp(strdata(name), "pairs")) || + (name->len == 4 && !strcmp(strdata(name), "next")); +} + +/* Parse 'for' iterator. 
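+** Three hidden slots hold the generator, state and control variable; the loop +** is specialized to BC_ISNEXT/BC_ITERN when the iterator is predicted to be +** next() or pairs().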
*/ +static void parse_for_iter(LexState *ls, GCstr *indexname) +{ + FuncState *fs = ls->fs; + ExpDesc e; + BCReg nvars = 0; + BCLine line; + BCReg base = fs->freereg + 3; + BCPos loop, loopend, exprpc = fs->pc; + FuncScope bl; + int isnext; + /* Hidden control variables. */ + var_new_fixed(ls, nvars++, VARNAME_FOR_GEN); + var_new_fixed(ls, nvars++, VARNAME_FOR_STATE); + var_new_fixed(ls, nvars++, VARNAME_FOR_CTL); + /* Visible variables returned from iterator. */ + var_new(ls, nvars++, indexname); + while (lex_opt(ls, ',')) + var_new(ls, nvars++, lex_str(ls)); + lex_check(ls, TK_in); + line = ls->linenumber; + assign_adjust(ls, 3, expr_list(ls, &e), &e); + /* The iterator needs another 3 [4] slots (func [pc] | state ctl). */ + bcreg_bump(fs, 3+LJ_FR2); + isnext = (nvars <= 5 && predict_next(ls, fs, exprpc)); + var_add(ls, 3); /* Hidden control variables. */ + lex_check(ls, TK_do); + loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP); + fscope_begin(fs, &bl, 0); /* Scope for visible variables. */ + var_add(ls, nvars-3); + bcreg_reserve(fs, nvars-3); + parse_block(ls); + fscope_end(fs); + /* Perform loop inversion. Loop control instructions are at the end. */ + jmp_patchins(fs, loop, fs->pc); + bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1); + loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP); + fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */ + fs->bcbase[loopend].line = line; + jmp_patchins(fs, loopend, loop+1); +} + +/* Parse 'for' statement. */ +static void parse_for(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + GCstr *varname; + FuncScope bl; + fscope_begin(fs, &bl, FSCOPE_LOOP); + lj_lex_next(ls); /* Skip 'for'. */ + varname = lex_str(ls); /* Get first variable name. */ + if (ls->tok == '=') + parse_for_num(ls, varname, line); + else if (ls->tok == ',' || ls->tok == TK_in) + parse_for_iter(ls, varname); + else + err_syntax(ls, LJ_ERR_XFOR); + lex_match(ls, TK_end, TK_for, line); + fscope_end(fs); /* Resolve break list. */ +} + +/* Parse condition and 'then' block. */ +static BCPos parse_then(LexState *ls) +{ + BCPos condexit; + lj_lex_next(ls); /* Skip 'if' or 'elseif'. */ + condexit = expr_cond(ls); + lex_check(ls, TK_then); + parse_block(ls); + return condexit; +} + +/* Parse 'if' statement. */ +static void parse_if(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + BCPos flist; + BCPos escapelist = NO_JMP; + flist = parse_then(ls); + while (ls->tok == TK_elseif) { /* Parse multiple 'elseif' blocks. */ + jmp_append(fs, &escapelist, bcemit_jmp(fs)); + jmp_tohere(fs, flist); + flist = parse_then(ls); + } + if (ls->tok == TK_else) { /* Parse optional 'else' block. */ + jmp_append(fs, &escapelist, bcemit_jmp(fs)); + jmp_tohere(fs, flist); + lj_lex_next(ls); /* Skip 'else'. */ + parse_block(ls); + } else { + jmp_append(fs, &escapelist, flist); + } + jmp_tohere(fs, escapelist); + lex_match(ls, TK_end, TK_if, line); +} + +/* -- Parse statements ---------------------------------------------------- */ + +/* Parse a statement. Returns 1 if it must be the last one in a chunk. 
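+** Only 'return' ends a chunk unconditionally; 'break' does so for Lua 5.1.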
*/ +static int parse_stmt(LexState *ls) +{ + BCLine line = ls->linenumber; + switch (ls->tok) { + case TK_if: + parse_if(ls, line); + break; + case TK_while: + parse_while(ls, line); + break; + case TK_do: + lj_lex_next(ls); + parse_block(ls); + lex_match(ls, TK_end, TK_do, line); + break; + case TK_for: + parse_for(ls, line); + break; + case TK_repeat: + parse_repeat(ls, line); + break; + case TK_function: + parse_func(ls, line); + break; + case TK_local: + lj_lex_next(ls); + parse_local(ls); + break; + case TK_return: + parse_return(ls); + return 1; /* Must be last. */ + case TK_break: + lj_lex_next(ls); + parse_break(ls); + return !LJ_52; /* Must be last in Lua 5.1. */ +#if LJ_52 + case ';': + lj_lex_next(ls); + break; +#endif + case TK_label: + parse_label(ls); + break; + case TK_goto: + if (LJ_52 || lj_lex_lookahead(ls) == TK_name) { + lj_lex_next(ls); + parse_goto(ls); + break; + } /* else: fallthrough */ + default: + parse_call_assign(ls); + break; + } + return 0; +} + +/* A chunk is a list of statements optionally separated by semicolons. */ +static void parse_chunk(LexState *ls) +{ + int islast = 0; + synlevel_begin(ls); + while (!islast && !parse_isend(ls->tok)) { + islast = parse_stmt(ls); + lex_opt(ls, ';'); + lua_assert(ls->fs->framesize >= ls->fs->freereg && + ls->fs->freereg >= ls->fs->nactvar); + ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ + } + synlevel_end(ls); +} + +/* Entry point of bytecode parser. */ +GCproto *lj_parse(LexState *ls) +{ + FuncState fs; + FuncScope bl; + GCproto *pt; + lua_State *L = ls->L; +#ifdef LUAJIT_DISABLE_DEBUGINFO + ls->chunkname = lj_str_newlit(L, "="); +#else + ls->chunkname = lj_str_newz(L, ls->chunkarg); +#endif + setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */ + incr_top(L); + ls->level = 0; + fs_init(ls, &fs); + fs.linedefined = 0; + fs.numparams = 0; + fs.bcbase = NULL; + fs.bclim = 0; + fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */ + fscope_begin(&fs, &bl, 0); + bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */ + lj_lex_next(ls); /* Read-ahead first token. */ + parse_chunk(ls); + if (ls->tok != TK_eof) + err_token(ls, TK_eof); + pt = fs_finish(ls, ls->linenumber); + L->top--; /* Drop chunkname. */ + lua_assert(fs.prev == NULL); + lua_assert(ls->fs == NULL); + lua_assert(pt->sizeuv == 0); + return pt; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_parse.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_parse.h new file mode 100644 index 00000000000..dc4fd405d18 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_parse.h @@ -0,0 +1,18 @@ +/* +** Lua parser (source code -> bytecode). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_PARSE_H +#define _LJ_PARSE_H + +#include "lj_obj.h" +#include "lj_lex.h" + +LJ_FUNC GCproto *lj_parse(LexState *ls); +LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l); +#if LJ_HASFFI +LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_profile.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_profile.c new file mode 100644 index 00000000000..c7e53963b5b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_profile.c @@ -0,0 +1,368 @@ +/* +** Low-overhead profiling. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h */ + +#define lj_profile_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASPROFILE + +#include "lj_buf.h" +#include "lj_frame.h" +#include "lj_debug.h" +#include "lj_dispatch.h" +#if LJ_HASJIT +#include "lj_jit.h" +#include "lj_trace.h" +#endif +#include "lj_profile.h" + +#include "luajit.h" + +#if LJ_PROFILE_SIGPROF + +#include <sys/time.h> +#include <signal.h> +#define profile_lock(ps) UNUSED(ps) +#define profile_unlock(ps) UNUSED(ps) + +#elif LJ_PROFILE_PTHREAD + +#include <pthread.h> +#include <time.h> +#if LJ_TARGET_PS3 +#include <sys/timer.h> +#endif +#define profile_lock(ps) pthread_mutex_lock(&ps->lock) +#define profile_unlock(ps) pthread_mutex_unlock(&ps->lock) + +#elif LJ_PROFILE_WTHREAD + +#define WIN32_LEAN_AND_MEAN +#if LJ_TARGET_XBOX360 +#include <xtl.h> +#include <xbox.h> +#else +#include <windows.h> +#endif +typedef unsigned int (WINAPI *WMM_TPFUNC)(unsigned int); +#define profile_lock(ps) EnterCriticalSection(&ps->lock) +#define profile_unlock(ps) LeaveCriticalSection(&ps->lock) + +#endif + +/* Profiler state. */ +typedef struct ProfileState { + global_State *g; /* VM state that started the profiler. */ + luaJIT_profile_callback cb; /* Profiler callback. */ + void *data; /* Profiler callback data. */ + SBuf sb; /* String buffer for stack dumps. */ + int interval; /* Sample interval in milliseconds. */ + int samples; /* Number of samples for next callback. */ + int vmstate; /* VM state when profile timer triggered. */ +#if LJ_PROFILE_SIGPROF + struct sigaction oldsa; /* Previous SIGPROF state. */ +#elif LJ_PROFILE_PTHREAD + pthread_mutex_t lock; /* g->hookmask update lock. */ + pthread_t thread; /* Timer thread. */ + int abort; /* Abort timer thread. */ +#elif LJ_PROFILE_WTHREAD +#if LJ_TARGET_WINDOWS + HINSTANCE wmm; /* WinMM library handle. */ + WMM_TPFUNC wmm_tbp; /* WinMM timeBeginPeriod function. */ + WMM_TPFUNC wmm_tep; /* WinMM timeEndPeriod function. */ +#endif + CRITICAL_SECTION lock; /* g->hookmask update lock. */ + HANDLE thread; /* Timer thread. */ + int abort; /* Abort timer thread. */ +#endif +} ProfileState; + +/* Sadly, we have to use a static profiler state. +** +** The SIGPROF variant needs a static pointer to the global state, anyway. +** And it would be hard to extend for multiple threads. You can still use +** multiple VMs in multiple threads, but only profile one at a time. +*/ +static ProfileState profile_state; + +/* Default sample interval in milliseconds. */ +#define LJ_PROFILE_INTERVAL_DEFAULT 10 + +/* -- Profiler/hook interaction ------------------------------------------- */ + +#if !LJ_PROFILE_SIGPROF +void LJ_FASTCALL lj_profile_hook_enter(global_State *g) +{ + ProfileState *ps = &profile_state; + if (ps->g) { + profile_lock(ps); + hook_enter(g); + profile_unlock(ps); + } else { + hook_enter(g); + } +} + +void LJ_FASTCALL lj_profile_hook_leave(global_State *g) +{ + ProfileState *ps = &profile_state; + if (ps->g) { + profile_lock(ps); + hook_leave(g); + profile_unlock(ps); + } else { + hook_leave(g); + } +} +#endif + +/* -- Profile callbacks --------------------------------------------------- */ + +/* Callback from profile hook (HOOK_PROFILE already cleared). */ +void LJ_FASTCALL lj_profile_interpreter(lua_State *L) +{ + ProfileState *ps = &profile_state; + global_State *g = G(L); + uint8_t mask; + profile_lock(ps); + mask = (g->hookmask & ~HOOK_PROFILE); + if (!(mask & HOOK_VMEVENT)) { + int samples = ps->samples; + ps->samples = 0; + g->hookmask = HOOK_VMEVENT; + lj_dispatch_update(g); + profile_unlock(ps); + ps->cb(ps->data, L, samples, ps->vmstate); /* Invoke user callback. 
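+** The hooks are disabled (HOOK_VMEVENT) while the callback runs.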
*/ + profile_lock(ps); + mask |= (g->hookmask & HOOK_PROFILE); + } + g->hookmask = mask; + lj_dispatch_update(g); + profile_unlock(ps); +} + +/* Trigger profile hook. Asynchronous call from OS-specific profile timer. */ +static void profile_trigger(ProfileState *ps) +{ + global_State *g = ps->g; + uint8_t mask; + profile_lock(ps); + ps->samples++; /* Always increment number of samples. */ + mask = g->hookmask; + if (!(mask & (HOOK_PROFILE|HOOK_VMEVENT))) { /* Set profile hook. */ + int st = g->vmstate; + ps->vmstate = st >= 0 ? 'N' : + st == ~LJ_VMST_INTERP ? 'I' : + st == ~LJ_VMST_C ? 'C' : + st == ~LJ_VMST_GC ? 'G' : 'J'; + g->hookmask = (mask | HOOK_PROFILE); + lj_dispatch_update(g); + } + profile_unlock(ps); +} + +/* -- OS-specific profile timer handling ---------------------------------- */ + +#if LJ_PROFILE_SIGPROF + +/* SIGPROF handler. */ +static void profile_signal(int sig) +{ + UNUSED(sig); + profile_trigger(&profile_state); +} + +/* Start profiling timer. */ +static void profile_timer_start(ProfileState *ps) +{ + int interval = ps->interval; + struct itimerval tm; + struct sigaction sa; + tm.it_value.tv_sec = tm.it_interval.tv_sec = interval / 1000; + tm.it_value.tv_usec = tm.it_interval.tv_usec = (interval % 1000) * 1000; + setitimer(ITIMER_PROF, &tm, NULL); + sa.sa_flags = SA_RESTART; + sa.sa_handler = profile_signal; + sigemptyset(&sa.sa_mask); + sigaction(SIGPROF, &sa, &ps->oldsa); +} + +/* Stop profiling timer. */ +static void profile_timer_stop(ProfileState *ps) +{ + struct itimerval tm; + tm.it_value.tv_sec = tm.it_interval.tv_sec = 0; + tm.it_value.tv_usec = tm.it_interval.tv_usec = 0; + setitimer(ITIMER_PROF, &tm, NULL); + sigaction(SIGPROF, &ps->oldsa, NULL); +} + +#elif LJ_PROFILE_PTHREAD + +/* POSIX timer thread. */ +static void *profile_thread(ProfileState *ps) +{ + int interval = ps->interval; +#if !LJ_TARGET_PS3 + struct timespec ts; + ts.tv_sec = interval / 1000; + ts.tv_nsec = (interval % 1000) * 1000000; +#endif + while (1) { +#if LJ_TARGET_PS3 + sys_timer_usleep(interval * 1000); +#else + nanosleep(&ts, NULL); +#endif + if (ps->abort) break; + profile_trigger(ps); + } + return NULL; +} + +/* Start profiling timer thread. */ +static void profile_timer_start(ProfileState *ps) +{ + pthread_mutex_init(&ps->lock, 0); + ps->abort = 0; + pthread_create(&ps->thread, NULL, (void *(*)(void *))profile_thread, ps); +} + +/* Stop profiling timer thread. */ +static void profile_timer_stop(ProfileState *ps) +{ + ps->abort = 1; + pthread_join(ps->thread, NULL); + pthread_mutex_destroy(&ps->lock); +} + +#elif LJ_PROFILE_WTHREAD + +/* Windows timer thread. */ +static DWORD WINAPI profile_thread(void *psx) +{ + ProfileState *ps = (ProfileState *)psx; + int interval = ps->interval; +#if LJ_TARGET_WINDOWS + ps->wmm_tbp(interval); +#endif + while (1) { + Sleep(interval); + if (ps->abort) break; + profile_trigger(ps); + } +#if LJ_TARGET_WINDOWS + ps->wmm_tep(interval); +#endif + return 0; +} + +/* Start profiling timer thread. */ +static void profile_timer_start(ProfileState *ps) +{ +#if LJ_TARGET_WINDOWS + if (!ps->wmm) { /* Load WinMM library on-demand. 
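+** The timer thread calls timeBeginPeriod(interval) so that Sleep(interval) +** gets a usable resolution.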
*/ + ps->wmm = LoadLibraryExA("winmm.dll", NULL, 0); + if (ps->wmm) { + ps->wmm_tbp = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeBeginPeriod"); + ps->wmm_tep = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeEndPeriod"); + if (!ps->wmm_tbp || !ps->wmm_tep) { + ps->wmm = NULL; + return; + } + } + } +#endif + InitializeCriticalSection(&ps->lock); + ps->abort = 0; + ps->thread = CreateThread(NULL, 0, profile_thread, ps, 0, NULL); +} + +/* Stop profiling timer thread. */ +static void profile_timer_stop(ProfileState *ps) +{ + ps->abort = 1; + WaitForSingleObject(ps->thread, INFINITE); + DeleteCriticalSection(&ps->lock); +} + +#endif + +/* -- Public profiling API ------------------------------------------------ */ + +/* Start profiling. */ +LUA_API void luaJIT_profile_start(lua_State *L, const char *mode, + luaJIT_profile_callback cb, void *data) +{ + ProfileState *ps = &profile_state; + int interval = LJ_PROFILE_INTERVAL_DEFAULT; + while (*mode) { + int m = *mode++; + switch (m) { + case 'i': + interval = 0; + while (*mode >= '0' && *mode <= '9') + interval = interval * 10 + (*mode++ - '0'); + if (interval <= 0) interval = 1; + break; +#if LJ_HASJIT + case 'l': case 'f': + L2J(L)->prof_mode = m; + lj_trace_flushall(L); + break; +#endif + default: /* Ignore unknown mode chars. */ + break; + } + } + if (ps->g) { + luaJIT_profile_stop(L); + if (ps->g) return; /* Profiler in use by another VM. */ + } + ps->g = G(L); + ps->interval = interval; + ps->cb = cb; + ps->data = data; + ps->samples = 0; + lj_buf_init(L, &ps->sb); + profile_timer_start(ps); +} + +/* Stop profiling. */ +LUA_API void luaJIT_profile_stop(lua_State *L) +{ + ProfileState *ps = &profile_state; + global_State *g = ps->g; + if (G(L) == g) { /* Only stop profiler if started by this VM. */ + profile_timer_stop(ps); + g->hookmask &= ~HOOK_PROFILE; + lj_dispatch_update(g); +#if LJ_HASJIT + G2J(g)->prof_mode = 0; + lj_trace_flushall(L); +#endif + lj_buf_free(g, &ps->sb); + setmref(ps->sb.b, NULL); + setmref(ps->sb.e, NULL); + ps->g = NULL; + } +} + +/* Return a compact stack dump. */ +LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt, + int depth, size_t *len) +{ + ProfileState *ps = &profile_state; + SBuf *sb = &ps->sb; + setsbufL(sb, L); + lj_buf_reset(sb); + lj_debug_dumpstack(L, sb, fmt, depth); + *len = (size_t)sbuflen(sb); + return sbufB(sb); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_profile.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_profile.h new file mode 100644 index 00000000000..26cb9db380c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_profile.h @@ -0,0 +1,21 @@ +/* +** Low-overhead profiling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_PROFILE_H +#define _LJ_PROFILE_H + +#include "lj_obj.h" + +#if LJ_HASPROFILE + +LJ_FUNC void LJ_FASTCALL lj_profile_interpreter(lua_State *L); +#if !LJ_PROFILE_SIGPROF +LJ_FUNC void LJ_FASTCALL lj_profile_hook_enter(global_State *g); +LJ_FUNC void LJ_FASTCALL lj_profile_hook_leave(global_State *g); +#endif + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_record.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_record.c new file mode 100644 index 00000000000..dc5f2d547ae --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_record.c @@ -0,0 +1,2554 @@ +/* +** Trace recorder (bytecode -> SSA IR). +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_record_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_bc.h" +#include "lj_ff.h" +#if LJ_HASPROFILE +#include "lj_debug.h" +#endif +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_record.h" +#include "lj_ffrecord.h" +#include "lj_snap.h" +#include "lj_dispatch.h" +#include "lj_vm.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Emit raw IR without passing through optimizations. */ +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- Sanity checks ------------------------------------------------------- */ + +#ifdef LUA_USE_ASSERT +/* Sanity check the whole IR -- sloooow. */ +static void rec_check_ir(jit_State *J) +{ + IRRef i, nins = J->cur.nins, nk = J->cur.nk; + lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536); + for (i = nins-1; i >= nk; i--) { + IRIns *ir = IR(i); + uint32_t mode = lj_ir_mode[ir->o]; + IRRef op1 = ir->op1; + IRRef op2 = ir->op2; + switch (irm_op1(mode)) { + case IRMnone: lua_assert(op1 == 0); break; + case IRMref: lua_assert(op1 >= nk); + lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break; + case IRMlit: break; + case IRMcst: lua_assert(i < REF_BIAS); continue; + } + switch (irm_op2(mode)) { + case IRMnone: lua_assert(op2 == 0); break; + case IRMref: lua_assert(op2 >= nk); + lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break; + case IRMlit: break; + case IRMcst: lua_assert(0); break; + } + if (ir->prev) { + lua_assert(ir->prev >= nk); + lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i); + lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o); + } + } +} + +/* Compare stack slots and frames of the recorder and the VM. */ +static void rec_check_slots(jit_State *J) +{ + BCReg s, nslots = J->baseslot + J->maxslot; + int32_t depth = 0; + cTValue *base = J->L->base - J->baseslot; + lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS); + lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME)); + lua_assert(nslots < LJ_MAX_JSLOTS); + for (s = 0; s < nslots; s++) { + TRef tr = J->slot[s]; + if (tr) { + cTValue *tv = &base[s]; + IRRef ref = tref_ref(tr); + IRIns *ir; + lua_assert(ref >= J->cur.nk && ref < J->cur.nins); + ir = IR(ref); + lua_assert(irt_t(ir->t) == tref_t(tr)); + if (s == 0) { + lua_assert(tref_isfunc(tr)); + } else if ((tr & TREF_FRAME)) { + GCfunc *fn = gco2func(frame_gc(tv)); + BCReg delta = (BCReg)(tv - frame_prev(tv)); + lua_assert(tref_isfunc(tr)); + if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir)); + lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta)); + depth++; + } else if ((tr & TREF_CONT)) { + lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void)); + lua_assert((J->slot[s+1] & TREF_FRAME)); + depth++; + } else { + if (tvisnumber(tv)) + lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */ + else + lua_assert(itype2irt(tv) == tref_type(tr)); + if (tref_isk(tr)) { /* Compare constants. 
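+** NaN constants never compare equal to themselves, so they are checked +** separately below.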
*/ + TValue tvk; + lj_ir_kvalue(J->L, &tvk, ir); + if (!(tvisnum(&tvk) && tvisnan(&tvk))) + lua_assert(lj_obj_equal(tv, &tvk)); + else + lua_assert(tvisnum(tv) && tvisnan(tv)); + } + } + } + } + lua_assert(J->framedepth == depth); +} +#endif + +/* -- Type handling and specialization ------------------------------------ */ + +/* Note: these functions return tagged references (TRef). */ + +/* Specialize a slot to a specific type. Note: slot can be negative! */ +static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode) +{ + /* Caller may set IRT_GUARD in t. */ + TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode); + J->base[slot] = ref; + return ref; +} + +/* Specialize a slot to the runtime type. Note: slot can be negative! */ +static TRef sload(jit_State *J, int32_t slot) +{ + IRType t = itype2irt(&J->L->base[slot]); + TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot, + IRSLOAD_TYPECHECK); + if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */ + J->base[slot] = ref; + return ref; +} + +/* Get TRef from slot. Load slot and specialize if not done already. */ +#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s))) + +/* Get TRef for current function. */ +static TRef getcurrf(jit_State *J) +{ + if (J->base[-1]) + return J->base[-1]; + lua_assert(J->baseslot == 1); + return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY); +} + +/* Compare for raw object equality. +** Returns 0 if the objects are the same. +** Returns 1 if they are different, but the same type. +** Returns 2 for two different types. +** Comparisons between primitives always return 1 -- no caller cares about it. +*/ +int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv) +{ + int diff = !lj_obj_equal(av, bv); + if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */ + IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a); + IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b); + if (ta != tb) { + /* Widen mixed number/int comparisons to number/number comparison. */ + if (ta == IRT_INT && tb == IRT_NUM) { + a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT); + ta = IRT_NUM; + } else if (ta == IRT_NUM && tb == IRT_INT) { + b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT); + } else { + return 2; /* Two different types are never equal. */ + } + } + emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b); + } + return diff; +} + +/* Constify a value. Returns 0 for non-representable object types. */ +TRef lj_record_constify(jit_State *J, cTValue *o) +{ + if (tvisgcv(o)) + return lj_ir_kgc(J, gcV(o), itype2irt(o)); + else if (tvisint(o)) + return lj_ir_kint(J, intV(o)); + else if (tvisnum(o)) + return lj_ir_knumint(J, numV(o)); + else if (tvisbool(o)) + return TREF_PRI(itype2irt(o)); + else + return 0; /* Can't represent lightuserdata (pointless). */ +} + +/* -- Record loop ops ----------------------------------------------------- */ + +/* Loop event. */ +typedef enum { + LOOPEV_LEAVE, /* Loop is left or not entered. */ + LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */ + LOOPEV_ENTER /* Loop is entered. */ +} LoopEvent; + +/* Canonicalize slots: convert integers to numbers. 
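+** A no-op for DUALNUM builds; read-only SLOAD refs are left untouched.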
*/ +static void canonicalize_slots(jit_State *J) +{ + BCReg s; + if (LJ_DUALNUM) return; + for (s = J->baseslot+J->maxslot-1; s >= 1; s--) { + TRef tr = J->slot[s]; + if (tref_isinteger(tr)) { + IRIns *ir = IR(tref_ref(tr)); + if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY))) + J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); + } + } +} + +/* Stop recording. */ +void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk) +{ +#ifdef LUAJIT_ENABLE_TABLE_BUMP + if (J->retryrec) + lj_trace_err(J, LJ_TRERR_RETRY); +#endif + lj_trace_end(J); + J->cur.linktype = (uint8_t)linktype; + J->cur.link = (uint16_t)lnk; + /* Looping back at the same stack level? */ + if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) { + if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */ + goto nocanon; /* Do not canonicalize or we lose the narrowing. */ + if (J->cur.root) /* Otherwise ensure we always link to the root trace. */ + J->cur.link = J->cur.root; + } + canonicalize_slots(J); +nocanon: + /* Note: all loop ops must set J->pc to the following instruction! */ + lj_snap_add(J); /* Add loop snapshot. */ + J->needsnap = 0; + J->mergesnap = 1; /* In case recording continues. */ +} + +/* Search bytecode backwards for a int/num constant slot initializer. */ +static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t) +{ + /* This algorithm is rather simplistic and assumes quite a bit about + ** how the bytecode is generated. It works fine for FORI initializers, + ** but it won't necessarily work in other cases (e.g. iterator arguments). + ** It doesn't do anything fancy, either (like backpropagating MOVs). + */ + const BCIns *pc, *startpc = proto_bc(J->pt); + for (pc = endpc-1; pc > startpc; pc--) { + BCIns ins = *pc; + BCOp op = bc_op(ins); + /* First try to find the last instruction that stores to this slot. */ + if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) { + return 0; /* Multiple results, e.g. from a CALL or KNIL. */ + } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) { + if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */ + /* Now try to verify there's no forward jump across it. */ + const BCIns *kpc = pc; + for (; pc > startpc; pc--) + if (bc_op(*pc) == BC_JMP) { + const BCIns *target = pc+bc_j(*pc)+1; + if (target > kpc && target <= endpc) + return 0; /* Conditional assignment. */ + } + if (op == BC_KSHORT) { + int32_t k = (int32_t)(int16_t)bc_d(ins); + return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k); + } else { + cTValue *tv = proto_knumtv(J->pt, bc_d(ins)); + if (t == IRT_INT) { + int32_t k = numberVint(tv); + if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */ + return lj_ir_kint(J, k); + return 0; /* Type mismatch. */ + } else { + return lj_ir_knum(J, numberVnum(tv)); + } + } + } + return 0; /* Non-constant initializer. */ + } + } + return 0; /* No assignment to this slot found? */ +} + +/* Load and optionally convert a FORI argument from a slot. */ +static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode) +{ + int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0; + return sloadt(J, (int32_t)slot, + t + (((mode & IRSLOAD_TYPECHECK) || + (conv && t == IRT_INT && !(mode >> 16))) ? + IRT_GUARD : 0), + mode + conv); +} + +/* Peek before FORI to find a const initializer. Otherwise load from slot. 
*/ +static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot, + IRType t, int mode) +{ + TRef tr = J->base[slot]; + if (!tr) { + tr = find_kinit(J, fori, slot, t); + if (!tr) + tr = fori_load(J, slot, t, mode); + } + return tr; +} + +/* Return the direction of the FOR loop iterator. +** It's important to exactly reproduce the semantics of the interpreter. +*/ +static int rec_for_direction(cTValue *o) +{ + return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0; +} + +/* Simulate the runtime behavior of the FOR loop iterator. */ +static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl) +{ + lua_Number stopv = numberVnum(&o[FORL_STOP]); + lua_Number idxv = numberVnum(&o[FORL_IDX]); + lua_Number stepv = numberVnum(&o[FORL_STEP]); + if (isforl) + idxv += stepv; + if (rec_for_direction(&o[FORL_STEP])) { + if (idxv <= stopv) { + *op = IR_LE; + return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER; + } + *op = IR_GT; return LOOPEV_LEAVE; + } else { + if (stopv <= idxv) { + *op = IR_GE; + return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER; + } + *op = IR_LT; return LOOPEV_LEAVE; + } +} + +/* Record checks for FOR loop overflow and step direction. */ +static void rec_for_check(jit_State *J, IRType t, int dir, + TRef stop, TRef step, int init) +{ + if (!tref_isk(step)) { + /* Non-constant step: need a guard for the direction. */ + TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J); + emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero); + /* Add hoistable overflow checks for a narrowed FORL index. */ + if (init && t == IRT_INT) { + if (tref_isk(stop)) { + /* Constant stop: optimize check away or to a range check for step. */ + int32_t k = IR(tref_ref(stop))->i; + if (dir) { + if (k > 0) + emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k)); + } else { + if (k < 0) + emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k)); + } + } else { + /* Stop+step variable: need full overflow check. */ + TRef tr = emitir(IRTGI(IR_ADDOV), step, stop); + emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */ + } + } + } else if (init && t == IRT_INT && !tref_isk(stop)) { + /* Constant step: optimize overflow check to a range check for stop. */ + int32_t k = IR(tref_ref(step))->i; + k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k; + emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k)); + } +} + +/* Record a FORL instruction. */ +static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev, + int init) +{ + BCReg ra = bc_a(*fori); + cTValue *tv = &J->L->base[ra]; + TRef idx = J->base[ra+FORL_IDX]; + IRType t = idx ? tref_type(idx) : + (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM; + int mode = IRSLOAD_INHERIT + + ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0); + TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode); + TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode); + int tc, dir = rec_for_direction(&tv[FORL_STEP]); + lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI); + scev->t.irt = t; + scev->dir = dir; + scev->stop = tref_ref(stop); + scev->step = tref_ref(step); + rec_for_check(J, t, dir, stop, step, init); + scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT)); + tc = (LJ_DUALNUM && + !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) && + tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ? 
+ IRSLOAD_TYPECHECK : 0; + if (tc) { + J->base[ra+FORL_STOP] = stop; + J->base[ra+FORL_STEP] = step; + } + if (!idx) + idx = fori_load(J, ra+FORL_IDX, t, + IRSLOAD_INHERIT + tc + (J->scev.start << 16)); + if (!init) + J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step); + J->base[ra+FORL_EXT] = idx; + scev->idx = tref_ref(idx); + setmref(scev->pc, fori); + J->maxslot = ra+FORL_EXT+1; +} + +/* Record FORL/JFORL or FORI/JFORI. */ +static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl) +{ + BCReg ra = bc_a(*fori); + TValue *tv = &J->L->base[ra]; + TRef *tr = &J->base[ra]; + IROp op; + LoopEvent ev; + TRef stop; + IRType t; + if (isforl) { /* Handle FORL/JFORL opcodes. */ + TRef idx = tr[FORL_IDX]; + if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) { + t = J->scev.t.irt; + stop = J->scev.stop; + idx = emitir(IRT(IR_ADD, t), idx, J->scev.step); + tr[FORL_EXT] = tr[FORL_IDX] = idx; + } else { + ScEvEntry scev; + rec_for_loop(J, fori, &scev, 0); + t = scev.t.irt; + stop = scev.stop; + } + } else { /* Handle FORI/JFORI opcodes. */ + BCReg i; + lj_meta_for(J->L, tv); + t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) : + IRT_NUM; + for (i = FORL_IDX; i <= FORL_STEP; i++) { + if (!tr[i]) sload(J, ra+i); + lua_assert(tref_isnumber_str(tr[i])); + if (tref_isstr(tr[i])) + tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0); + if (t == IRT_INT) { + if (!tref_isinteger(tr[i])) + tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK); + } else { + if (!tref_isnum(tr[i])) + tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT); + } + } + tr[FORL_EXT] = tr[FORL_IDX]; + stop = tr[FORL_STOP]; + rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]), + stop, tr[FORL_STEP], 1); + } + + ev = rec_for_iter(&op, tv, isforl); + if (ev == LOOPEV_LEAVE) { + J->maxslot = ra+FORL_EXT+1; + J->pc = fori+1; + } else { + J->maxslot = ra; + J->pc = fori+bc_j(*fori)+1; + } + lj_snap_add(J); + + emitir(IRTG(op, t), tr[FORL_IDX], stop); + + if (ev == LOOPEV_LEAVE) { + J->maxslot = ra; + J->pc = fori+bc_j(*fori)+1; + } else { + J->maxslot = ra+FORL_EXT+1; + J->pc = fori+1; + } + J->needsnap = 1; + return ev; +} + +/* Record ITERL/JITERL. */ +static LoopEvent rec_iterl(jit_State *J, const BCIns iterins) +{ + BCReg ra = bc_a(iterins); + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + if (!tref_isnil(getslot(J, ra))) { /* Looping back? */ + J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */ + J->maxslot = ra-1+bc_b(J->pc[-1]); + J->pc += bc_j(iterins)+1; + return LOOPEV_ENTER; + } else { + J->maxslot = ra-3; + J->pc++; + return LOOPEV_LEAVE; + } +} + +/* Record LOOP/JLOOP. Now, that was easy. */ +static LoopEvent rec_loop(jit_State *J, BCReg ra) +{ + if (ra < J->maxslot) J->maxslot = ra; + J->pc++; + return LOOPEV_ENTER; +} + +/* Check if a loop repeatedly failed to trace because it didn't loop back. */ +static int innerloopleft(jit_State *J, const BCIns *pc) +{ + ptrdiff_t i; + for (i = 0; i < PENALTY_SLOTS; i++) + if (mref(J->penalty[i].pc, const BCIns) == pc) { + if ((J->penalty[i].reason == LJ_TRERR_LLEAVE || + J->penalty[i].reason == LJ_TRERR_LINNER) && + J->penalty[i].val >= 2*PENALTY_MIN) + return 1; + break; + } + return 0; +} + +/* Handle the case when an interpreted loop op is hit. */ +static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev) +{ + if (J->parent == 0 && J->exitno == 0) { + if (pc == J->startpc && J->framedepth + J->retdepth == 0) { + /* Same loop? 
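+** I.e. back at the start of the trace with the same net frame depth.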
*/ + if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */ + lj_trace_err(J, LJ_TRERR_LLEAVE); + lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping trace. */ + } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */ + /* It's usually better to abort here and wait until the inner loop + ** is traced. But if the inner loop repeatedly didn't loop back, + ** this indicates a low trip count. In this case try unrolling + ** an inner loop even in a root trace. But it's better to be a bit + ** more conservative here and only do it for very short loops. + */ + if (bc_j(*pc) != -1 && !innerloopleft(J, pc)) + lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */ + if ((ev != LOOPEV_ENTERLO && + J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0) + lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */ + J->loopref = J->cur.nins; + } + } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */ + J->loopref = J->cur.nins; + if (--J->loopunroll < 0) + lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */ + } /* Side trace continues across a loop that's left or not entered. */ +} + +/* Handle the case when an already compiled loop op is hit. */ +static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev) +{ + if (J->parent == 0 && J->exitno == 0) { /* Root trace hit an inner loop. */ + /* Better let the inner loop spawn a side trace back here. */ + lj_trace_err(J, LJ_TRERR_LINNER); + } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */ + J->instunroll = 0; /* Cannot continue across a compiled loop op. */ + if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) + lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form extra loop. */ + else + lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */ + } /* Side trace continues across a loop that's left or not entered. */ +} + +/* -- Record profiler hook checks ----------------------------------------- */ + +#if LJ_HASPROFILE + +/* Need to insert profiler hook check? */ +static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc) +{ + GCproto *ppt; + lua_assert(J->prof_mode == 'f' || J->prof_mode == 'l'); + if (!pt) + return 0; + ppt = J->prev_pt; + J->prev_pt = pt; + if (pt != ppt && ppt) { + J->prev_line = -1; + return 1; + } + if (J->prof_mode == 'l') { + BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc)); + BCLine pline = J->prev_line; + J->prev_line = line; + if (pline != line) + return 1; + } + return 0; +} + +static void rec_profile_ins(jit_State *J, const BCIns *pc) +{ + if (J->prof_mode && rec_profile_need(J, J->pt, pc)) { + emitir(IRTG(IR_PROF, IRT_NIL), 0, 0); + lj_snap_add(J); + } +} + +static void rec_profile_ret(jit_State *J) +{ + if (J->prof_mode == 'f') { + emitir(IRTG(IR_PROF, IRT_NIL), 0, 0); + J->prev_pt = NULL; + lj_snap_add(J); + } +} + +#endif + +/* -- Record calls and returns -------------------------------------------- */ + +/* Specialize to the runtime value of the called function or its prototype. */ +static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr) +{ + TRef kfunc; + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + /* Too many closures created? Probably not a monomorphic function. */ + if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */ + TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC); + emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt))); + (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. 
*/ + return tr; + } + } else { + /* Don't specialize to non-monomorphic builtins. */ + switch (fn->c.ffid) { + case FF_coroutine_wrap_aux: + case FF_string_gmatch_aux: + /* NYI: io_file_iter doesn't have an ffid, yet. */ + { /* Specialize to the ffid. */ + TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID); + emitir(IRTG(IR_EQ, IRT_INT), trid, lj_ir_kint(J, fn->c.ffid)); + } + return tr; + default: + /* NYI: don't specialize to non-monomorphic C functions. */ + break; + } + } + /* Otherwise specialize to the function (closure) value itself. */ + kfunc = lj_ir_kfunc(J, fn); + emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc); + return kfunc; +} + +/* Record call setup. */ +static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs) +{ + RecordIndex ix; + TValue *functv = &J->L->base[func]; + TRef *fbase = &J->base[func]; + ptrdiff_t i; + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + for (i = 0; i <= nargs; i++) + (void)getslot(J, func+i); /* Ensure func and all args have a reference. */ + if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */ + ix.tab = fbase[0]; + copyTV(J->L, &ix.tabv, functv); + if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj)) + lj_trace_err(J, LJ_TRERR_NOMM); + for (i = ++nargs; i > 0; i--) /* Shift arguments up. */ + fbase[i] = fbase[i-1]; + fbase[0] = ix.mobj; /* Replace function. */ + functv = &ix.mobjv; + } + fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]); + J->maxslot = (BCReg)nargs; +} + +/* Record call. */ +void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs) +{ + rec_call_setup(J, func, nargs); + /* Bump frame. */ + J->framedepth++; + J->base += func+1; + J->baseslot += func+1; +} + +/* Record tail call. */ +void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs) +{ + rec_call_setup(J, func, nargs); + if (frame_isvarg(J->L->base - 1)) { + BCReg cbase = (BCReg)frame_delta(J->L->base - 1); + if (--J->framedepth < 0) + lj_trace_err(J, LJ_TRERR_NYIRETL); + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + func += cbase; + } + /* Move func + args down. */ + memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1)); + /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */ + /* Tailcalls can form a loop, so count towards the loop unroll limit. */ + if (++J->tailcalled > J->loopunroll) + lj_trace_err(J, LJ_TRERR_LUNROLL); +} + +/* Check unroll limits for down-recursion. */ +static int check_downrec_unroll(jit_State *J, GCproto *pt) +{ + IRRef ptref; + for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev) + if (ir_kgc(IR(ptref)) == obj2gco(pt)) { + int count = 0; + IRRef ref; + for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev) + if (IR(ref)->op1 == ptref) + count++; + if (count) { + if (J->pc == J->startpc) { + if (count + J->tailcalled > J->param[JIT_P_recunroll]) + return 1; + } else { + lj_trace_err(J, LJ_TRERR_DOWNREC); + } + } + } + return 0; +} + +static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot); + +/* Record return. */ +void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) +{ + TValue *frame = J->L->base - 1; + ptrdiff_t i; + for (i = 0; i < gotresults; i++) + (void)getslot(J, rbase+i); /* Ensure all results have a reference. */ + while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. 
*/ + BCReg cbase = (BCReg)frame_delta(frame); + if (--J->framedepth < 0) + lj_trace_err(J, LJ_TRERR_NYIRETL); + lua_assert(J->baseslot > 1); + gotresults++; + rbase += cbase; + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */ + frame = frame_prevd(frame); + } + /* Return to lower frame via interpreter for unhandled cases. */ + if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) && + (!frame_islua(frame) || + (J->parent == 0 && J->exitno == 0 && + !bc_isret(bc_op(J->cur.startins))))) { + /* NYI: specialize to frame type and return directly, not via RET*. */ + for (i = 0; i < (ptrdiff_t)rbase; i++) + J->base[i] = 0; /* Purge dead slots. */ + J->maxslot = rbase + (BCReg)gotresults; + lj_record_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */ + return; + } + if (frame_isvarg(frame)) { + BCReg cbase = (BCReg)frame_delta(frame); + if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */ + lj_trace_err(J, LJ_TRERR_NYIRETL); + lua_assert(J->baseslot > 1); + rbase += cbase; + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + frame = frame_prevd(frame); + } + if (frame_islua(frame)) { /* Return to Lua frame. */ + BCIns callins = *(frame_pc(frame)-1); + ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults; + BCReg cbase = bc_a(callins); + GCproto *pt = funcproto(frame_func(frame - (cbase+1-LJ_FR2))); + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame teardown. */ + if ((pt->flags & PROTO_NOJIT)) + lj_trace_err(J, LJ_TRERR_CJITOFF); + if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) { + if (check_downrec_unroll(J, pt)) { + J->maxslot = (BCReg)(rbase + gotresults); + lj_snap_purge(J); + lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-rec. */ + return; + } + lj_snap_add(J); + } + for (i = 0; i < nresults; i++) /* Adjust results. */ + J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL; + J->maxslot = cbase+(BCReg)nresults; + if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */ + J->framedepth--; + lua_assert(J->baseslot > cbase+1); + J->baseslot -= cbase+1; + J->base -= cbase+1; + } else if (J->parent == 0 && J->exitno == 0 && + !bc_isret(bc_op(J->cur.startins))) { + /* Return to lower frame would leave the loop in a root trace. */ + lj_trace_err(J, LJ_TRERR_LLEAVE); + } else if (J->needsnap) { /* Tailcalled to ff with side-effects. */ + lj_trace_err(J, LJ_TRERR_NYIRETL); /* No way to insert snapshot here. */ + } else { /* Return to lower frame. Guard for the target we return to. */ + TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); + TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame)); + emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc); + J->retdepth++; + J->needsnap = 1; + lua_assert(J->baseslot == 1); + /* Shift result slots up and clear the slots of the new frame below. */ + memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults); + memset(J->base-1, 0, sizeof(TRef)*(cbase+1)); + } + } else if (frame_iscont(frame)) { /* Return to continuation frame. */ + ASMFunction cont = frame_contf(frame); + BCReg cbase = (BCReg)frame_delta(frame); + if ((J->framedepth -= 2) < 0) + lj_trace_err(J, LJ_TRERR_NYIRETL); + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + J->maxslot = cbase-2; + if (cont == lj_cont_ra) { + /* Copy result to destination slot. */ + BCReg dst = bc_a(*(frame_contpc(frame)-1)); + J->base[dst] = gotresults ? 
J->base[cbase+rbase] : TREF_NIL; + if (dst >= J->maxslot) J->maxslot = dst+1; + } else if (cont == lj_cont_nop) { + /* Nothing to do here. */ + } else if (cont == lj_cont_cat) { + BCReg bslot = bc_b(*(frame_contpc(frame)-1)); + TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL; + if (bslot != cbase-2) { /* Concatenate the remainder. */ + TValue *b = J->L->base, save; /* Simulate lower frame and result. */ + J->base[cbase-2] = tr; + copyTV(J->L, &save, b-2); + if (gotresults) copyTV(J->L, b-2, b+rbase); else setnilV(b-2); + J->L->base = b - cbase; + tr = rec_cat(J, bslot, cbase-2); + b = J->L->base + cbase; /* Undo. */ + J->L->base = b; + copyTV(J->L, b-2, &save); + } + if (tr) { /* Store final result. */ + BCReg dst = bc_a(*(frame_contpc(frame)-1)); + J->base[dst] = tr; + if (dst >= J->maxslot) J->maxslot = dst+1; + } /* Otherwise continue with another __concat call. */ + } else { + /* Result type already specialized. */ + lua_assert(cont == lj_cont_condf || cont == lj_cont_condt); + } + } else { + lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */ + } + lua_assert(J->baseslot >= 1); +} + +/* -- Metamethod handling ------------------------------------------------- */ + +/* Prepare to record call to metamethod. */ +static BCReg rec_mm_prep(jit_State *J, ASMFunction cont) +{ + BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize; +#if LJ_64 + TRef trcont = lj_ir_kptr(J, (void *)((int64_t)cont-(int64_t)lj_vm_asm_begin)); +#else + TRef trcont = lj_ir_kptr(J, (void *)cont); +#endif + J->base[top] = trcont | TREF_CONT; + J->framedepth++; + for (s = J->maxslot; s < top; s++) + J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */ + return top+1; +} + +/* Record metamethod lookup. */ +int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm) +{ + RecordIndex mix; + GCtab *mt; + if (tref_istab(ix->tab)) { + mt = tabref(tabV(&ix->tabv)->metatable); + mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META); + } else if (tref_isudata(ix->tab)) { + int udtype = udataV(&ix->tabv)->udtype; + mt = tabref(udataV(&ix->tabv)->metatable); + /* The metatables of special userdata objects are treated as immutable. */ + if (udtype != UDTYPE_USERDATA) { + cTValue *mo; + if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) { + /* Specialize to the C library namespace object. */ + emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv))); + } else { + /* Specialize to the type of userdata. */ + TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype)); + } + immutable_mt: + mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm)); + if (!mo || tvisnil(mo)) + return 0; /* No metamethod. */ + /* Treat metamethod or index table as immutable, too. */ + if (!(tvisfunc(mo) || tvistab(mo))) + lj_trace_err(J, LJ_TRERR_BADTYPE); + copyTV(J->L, &ix->mobjv, mo); + ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB); + ix->mtv = mt; + ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */ + return 1; /* Got metamethod or index table. */ + } + mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META); + } else { + /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */ + mt = tabref(basemt_obj(J2G(J), &ix->tabv)); + if (mt == NULL) { + ix->mt = TREF_NIL; + return 0; /* No metamethod. */ + } + /* The cdata metatable is treated as immutable. 
*/ + if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt; + ix->mt = mix.tab = lj_ir_ktab(J, mt); + goto nocheck; + } + ix->mt = mt ? mix.tab : TREF_NIL; + emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB)); +nocheck: + if (mt) { + GCstr *mmstr = mmname_str(J2G(J), mm); + cTValue *mo = lj_tab_getstr(mt, mmstr); + if (mo && !tvisnil(mo)) + copyTV(J->L, &ix->mobjv, mo); + ix->mtv = mt; + settabV(J->L, &mix.tabv, mt); + setstrV(J->L, &mix.keyv, mmstr); + mix.key = lj_ir_kstr(J, mmstr); + mix.val = 0; + mix.idxchain = 0; + ix->mobj = lj_record_idx(J, &mix); + return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */ + } + return 0; /* No metamethod. */ +} + +/* Record call to arithmetic metamethod. */ +static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm) +{ + /* Set up metamethod call first to save ix->tab and ix->tabv. */ + BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra); + TRef *base = J->base + func; + TValue *basev = J->L->base + func; + base[1] = ix->tab; base[2] = ix->key; + copyTV(J->L, basev+1, &ix->tabv); + copyTV(J->L, basev+2, &ix->keyv); + if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ + if (mm != MM_unm) { + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, &ix->keyv); + if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */ + goto ok; + } + lj_trace_err(J, LJ_TRERR_NOMM); + } +ok: + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + base[0] = ix->mobj; + copyTV(J->L, basev+0, &ix->mobjv); + lj_record_call(J, func, 2); + return 0; /* No result yet. */ +} + +/* Record call to __len metamethod. */ +static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv) +{ + RecordIndex ix; + ix.tab = tr; + copyTV(J->L, &ix.tabv, tv); + if (lj_record_mm_lookup(J, &ix, MM_len)) { + BCReg func = rec_mm_prep(J, lj_cont_ra); + TRef *base = J->base + func; + TValue *basev = J->L->base + func; + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv); + base[1] = tr; copyTV(J->L, basev+1, tv); +#if LJ_52 + base[2] = tr; copyTV(J->L, basev+2, tv); +#else + base[2] = TREF_NIL; setnilV(basev+2); +#endif + lj_record_call(J, func, 2); + } else { + if (LJ_52 && tref_istab(tr)) + return lj_ir_call(J, IRCALL_lj_tab_len, tr); + lj_trace_err(J, LJ_TRERR_NOMM); + } + return 0; /* No result yet. */ +} + +/* Call a comparison metamethod. */ +static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op) +{ + BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt); + TRef *base = J->base + func; + TValue *tv = J->L->base + func; + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key; + copyTV(J->L, tv+0, &ix->mobjv); + copyTV(J->L, tv+1, &ix->valv); + copyTV(J->L, tv+2, &ix->keyv); + lj_record_call(J, func, 2); +} + +/* Record call to equality comparison metamethod (for tab and udata only). */ +static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op) +{ + ix->tab = ix->val; + copyTV(J->L, &ix->tabv, &ix->valv); + if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */ + cTValue *bv; + TRef mo1 = ix->mobj; + TValue mo1v; + copyTV(J->L, &mo1v, &ix->mobjv); + /* Avoid the 2nd lookup and the objcmp if the metatables are equal. 
*/ + bv = &ix->keyv; + if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else { /* Lookup metamethod on 2nd operand and compare both. */ + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, bv); + if (!lj_record_mm_lookup(J, ix, MM_eq) || + lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv)) + return; + } + rec_mm_callcomp(J, ix, op); + } +} + +/* Record call to ordered comparison metamethods (for arbitrary objects). */ +static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op) +{ + ix->tab = ix->val; + copyTV(J->L, &ix->tabv, &ix->valv); + while (1) { + MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */ +#if LJ_52 + if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, &ix->keyv); + if (!lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */ + goto nomatch; + } + rec_mm_callcomp(J, ix, op); + return; +#else + if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ + cTValue *bv; + TRef mo1 = ix->mobj; + TValue mo1v; + copyTV(J->L, &mo1v, &ix->mobjv); + /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */ + bv = &ix->keyv; + if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else { /* Lookup metamethod on 2nd operand and compare both. */ + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, bv); + if (!lj_record_mm_lookup(J, ix, mm) || + lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv)) + goto nomatch; + } + rec_mm_callcomp(J, ix, op); + return; + } +#endif + nomatch: + /* Lookup failed. Retry with __lt and swapped operands. */ + if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */ + ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab; + copyTV(J->L, &ix->tabv, &ix->keyv); + copyTV(J->L, &ix->keyv, &ix->valv); + copyTV(J->L, &ix->valv, &ix->tabv); + op ^= 3; + } +} + +#if LJ_HASFFI +/* Setup call to cdata comparison metamethod. */ +static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm) +{ + lj_snap_add(J); + if (tref_iscdata(ix->val)) { + ix->tab = ix->val; + copyTV(J->L, &ix->tabv, &ix->valv); + } else { + lua_assert(tref_iscdata(ix->key)); + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, &ix->keyv); + } + lj_record_mm_lookup(J, ix, mm); + rec_mm_callcomp(J, ix, op); +} +#endif + +/* -- Indexed access ------------------------------------------------------ */ + +#ifdef LUAJIT_ENABLE_TABLE_BUMP +/* Bump table allocations in bytecode when they grow during recording. */ +static void rec_idx_bump(jit_State *J, RecordIndex *ix) +{ + RBCHashEntry *rbc = &J->rbchash[(ix->tab & (RBCHASH_SLOTS-1))]; + if (tref_ref(ix->tab) == rbc->ref) { + const BCIns *pc = mref(rbc->pc, const BCIns); + GCtab *tb = tabV(&ix->tabv); + uint32_t nhbits; + IRIns *ir; + if (!tvisnil(&ix->keyv)) + (void)lj_tab_set(J->L, tb, &ix->keyv); /* Grow table right now. */ + nhbits = tb->hmask > 0 ? 
lj_fls(tb->hmask)+1 : 0; + ir = IR(tref_ref(ix->tab)); + if (ir->o == IR_TNEW) { + uint32_t ah = bc_d(*pc); + uint32_t asize = ah & 0x7ff, hbits = ah >> 11; + if (nhbits > hbits) hbits = nhbits; + if (tb->asize > asize) { + asize = tb->asize <= 0x7ff ? tb->asize : 0x7ff; + } + if ((asize | (hbits<<11)) != ah) { /* Has the size changed? */ + /* Patch bytecode, but continue recording (for more patching). */ + setbc_d(pc, (asize | (hbits<<11))); + /* Patching TNEW operands is only safe if the trace is aborted. */ + ir->op1 = asize; ir->op2 = hbits; + J->retryrec = 1; /* Abort the trace at the end of recording. */ + } + } else if (ir->o == IR_TDUP) { + GCtab *tpl = gco2tab(proto_kgc(&gcref(rbc->pt)->pt, ~(ptrdiff_t)bc_d(*pc))); + /* Grow template table, but preserve keys with nil values. */ + if ((tb->asize > tpl->asize && (1u << nhbits)-1 == tpl->hmask) || + (tb->asize == tpl->asize && (1u << nhbits)-1 > tpl->hmask)) { + Node *node = noderef(tpl->node); + uint32_t i, hmask = tpl->hmask, asize; + TValue *array; + for (i = 0; i <= hmask; i++) { + if (!tvisnil(&node[i].key) && tvisnil(&node[i].val)) + settabV(J->L, &node[i].val, tpl); + } + if (!tvisnil(&ix->keyv) && tref_isk(ix->key)) { + TValue *o = lj_tab_set(J->L, tpl, &ix->keyv); + if (tvisnil(o)) settabV(J->L, o, tpl); + } + lj_tab_resize(J->L, tpl, tb->asize, nhbits); + node = noderef(tpl->node); + hmask = tpl->hmask; + for (i = 0; i <= hmask; i++) { + /* This is safe, since template tables only hold immutable values. */ + if (tvistab(&node[i].val)) + setnilV(&node[i].val); + } + /* The shape of the table may have changed. Clean up array part, too. */ + asize = tpl->asize; + array = tvref(tpl->array); + for (i = 0; i < asize; i++) { + if (tvistab(&array[i])) + setnilV(&array[i]); + } + J->retryrec = 1; /* Abort the trace at the end of recording. */ + } + } + } +} +#endif + +/* Record bounds-check. */ +static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize) +{ + /* Try to emit invariant bounds checks. */ + if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) == + (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) { + IRRef ref = tref_ref(ikey); + IRIns *ir = IR(ref); + int32_t ofs = 0; + IRRef ofsref = 0; + /* Handle constant offsets. */ + if (ir->o == IR_ADD && irref_isk(ir->op2)) { + ofsref = ir->op2; + ofs = IR(ofsref)->i; + ref = ir->op1; + ir = IR(ref); + } + /* Got scalar evolution analysis results for this reference? */ + if (ref == J->scev.idx) { + int32_t stop; + lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD); + stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]); + /* Runtime value for stop of loop is within bounds? */ + if ((uint64_t)stop + ofs < (uint64_t)asize) { + /* Emit invariant bounds check for stop. */ + emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop : + emitir(IRTI(IR_ADD), J->scev.stop, ofsref)); + /* Emit invariant bounds check for start, if not const or negative. */ + if (!(J->scev.dir && J->scev.start && + (int64_t)IR(J->scev.start)->i + ofs >= 0)) + emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey); + return; + } + } + } + emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */ +} + +/* Record indexed key lookup. */ +static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref) +{ + TRef key; + GCtab *t = tabV(&ix->tabv); + ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */ + *rbref = 0; + + /* Integer keys are looked up in the array part first. 
*/ + key = ix->key; + if (tref_isnumber(key)) { + int32_t k = numberVint(&ix->keyv); + if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k) + k = LJ_MAX_ASIZE; + if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */ + TRef ikey = lj_opt_narrow_index(J, key); + TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE); + if ((MSize)k < t->asize) { /* Currently an array key? */ + TRef arrayref; + rec_idx_abc(J, asizeref, ikey, t->asize); + arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY); + return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey); + } else { /* Currently not in array (may be an array extension)? */ + emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */ + if (k == 0 && tref_isk(key)) + key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */ + /* And continue with the hash lookup. */ + } + } else if (!tref_isk(key)) { + /* We can rule out const numbers which failed the integerness test + ** above. But all other numbers are potential array keys. + */ + if (t->asize == 0) { /* True sparse tables have an empty array part. */ + /* Guard that the array part stays empty. */ + TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE); + emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0)); + } else { + lj_trace_err(J, LJ_TRERR_NYITMIX); + } + } + } + + /* Otherwise the key is located in the hash part. */ + if (t->hmask == 0) { /* Shortcut for empty hash part. */ + /* Guard that the hash part stays empty. */ + TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); + emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0)); + return lj_ir_kkptr(J, niltvg(J2G(J))); + } + if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */ + key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); + if (tref_isk(key)) { + /* Optimize lookup of constant hash keys. */ + MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val); + if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) && + hslot <= 65535*(MSize)sizeof(Node)) { + TRef node, kslot, hm; + *rbref = J->cur.nins; /* Mark possible rollback point. */ + hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); + emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask)); + node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE); + kslot = lj_ir_kslot(J, key, hslot / sizeof(Node)); + return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot); + } + } + /* Fall back to a regular hash lookup. */ + return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key); +} + +/* Determine whether a key is NOT one of the fast metamethod names. */ +static int nommstr(jit_State *J, TRef key) +{ + if (tref_isstr(key)) { + if (tref_isk(key)) { + GCstr *str = ir_kstr(IR(tref_ref(key))); + uint32_t mm; + for (mm = 0; mm <= MM_FAST; mm++) + if (mmname_str(J2G(J), mm) == str) + return 0; /* MUST be one the fast metamethod names. */ + } else { + return 0; /* Variable string key MAY be a metamethod name. */ + } + } + return 1; /* CANNOT be a metamethod name. */ +} + +/* Record indexed load/store. */ +TRef lj_record_idx(jit_State *J, RecordIndex *ix) +{ + TRef xref; + IROp xrefop, loadop; + IRRef rbref; + cTValue *oldv; + + while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */ + /* Never call raw lj_record_idx() on non-table. */ + lua_assert(ix->idxchain != 0); + if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index)) + lj_trace_err(J, LJ_TRERR_NOMM); + handlemm: + if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */ + BCReg func = rec_mm_prep(J, ix->val ? 
lj_cont_nop : lj_cont_ra); + TRef *base = J->base + func; + TValue *tv = J->L->base + func; + lua_assert(!LJ_FR2); /* TODO_FR2: handle different frame setup. */ + base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key; + setfuncV(J->L, tv+0, funcV(&ix->mobjv)); + copyTV(J->L, tv+1, &ix->tabv); + copyTV(J->L, tv+2, &ix->keyv); + if (ix->val) { + base[3] = ix->val; + copyTV(J->L, tv+3, &ix->valv); + lj_record_call(J, func, 3); /* mobj(tab, key, val) */ + return 0; + } else { + lj_record_call(J, func, 2); /* res = mobj(tab, key) */ + return 0; /* No result yet. */ + } + } + /* Otherwise retry lookup with metaobject. */ + ix->tab = ix->mobj; + copyTV(J->L, &ix->tabv, &ix->mobjv); + if (--ix->idxchain == 0) + lj_trace_err(J, LJ_TRERR_IDXLOOP); + } + + /* First catch nil and NaN keys for tables. */ + if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) { + if (ix->val) /* Better fail early. */ + lj_trace_err(J, LJ_TRERR_STORENN); + if (tref_isk(ix->key)) { + if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) + goto handlemm; + return TREF_NIL; + } + } + + /* Record the key lookup. */ + xref = rec_idx_key(J, ix, &rbref); + xrefop = IR(tref_ref(xref))->o; + loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD; + /* The lj_meta_tset() inconsistency is gone, but better play safe. */ + oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv; + + if (ix->val == 0) { /* Indexed load */ + IRType t = itype2irt(oldv); + TRef res; + if (oldv == niltvg(J2G(J))) { + emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); + res = TREF_NIL; + } else { + res = emitir(IRTG(loadop, t), xref, 0); + } + if (tref_ref(res) < rbref) /* HREFK + load forwarded? */ + lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */ + if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) + goto handlemm; + if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */ + return res; + } else { /* Indexed store. */ + GCtab *mt = tabref(tabV(&ix->tabv)->metatable); + int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val); + if (tref_ref(xref) < rbref) /* HREFK forwarded? */ + lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */ + if (tvisnil(oldv)) { /* Previous value was nil? */ + /* Need to duplicate the hasmm check for the early guards. */ + int hasmm = 0; + if (ix->idxchain && mt) { + cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex)); + hasmm = mo && !tvisnil(mo); + } + if (hasmm) + emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */ + else if (xrefop == IR_HREF) + emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32), + xref, lj_ir_kkptr(J, niltvg(J2G(J)))); + if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) { + lua_assert(hasmm); + goto handlemm; + } + lua_assert(!hasmm); + if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */ + TRef key = ix->key; + if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */ + key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); + xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key); + keybarrier = 0; /* NEWREF already takes care of the key barrier. */ +#ifdef LUAJIT_ENABLE_TABLE_BUMP + if ((J->flags & JIT_F_OPT_SINK)) /* Avoid a separate flag. */ + rec_idx_bump(J, ix); +#endif + } + } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) { + /* Cannot derive that the previous value was non-nil, must do checks. */ + if (xrefop == IR_HREF) /* Guard against store to niltv. 
*/ + emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); + if (ix->idxchain) { /* Metamethod lookup required? */ + /* A check for NULL metatable is cheaper (hoistable) than a load. */ + if (!mt) { + TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META); + emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB)); + } else { + IRType t = itype2irt(oldv); + emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */ + } + } + } else { + keybarrier = 0; /* Previous non-nil value kept the key alive. */ + } + /* Convert int to number before storing. */ + if (!LJ_DUALNUM && tref_isinteger(ix->val)) + ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT); + emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val); + if (keybarrier || tref_isgcv(ix->val)) + emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0); + /* Invalidate neg. metamethod cache for stores with certain string keys. */ + if (!nommstr(J, ix->key)) { + TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM); + emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0)); + } + J->needsnap = 1; + return 0; + } +} + +static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i) +{ + RecordIndex ix; + cTValue *basev = J->L->base; + GCtab *t = tabV(&basev[ra-1]); + settabV(J->L, &ix.tabv, t); + ix.tab = getslot(J, ra-1); + ix.idxchain = 0; +#ifdef LUAJIT_ENABLE_TABLE_BUMP + if ((J->flags & JIT_F_OPT_SINK)) { + if (t->asize < i+rn-ra) + lj_tab_reasize(J->L, t, i+rn-ra); + setnilV(&ix.keyv); + rec_idx_bump(J, &ix); + } +#endif + for (; ra < rn; i++, ra++) { + setintV(&ix.keyv, i); + ix.key = lj_ir_kint(J, i); + copyTV(J->L, &ix.valv, &basev[ra]); + ix.val = getslot(J, ra); + lj_record_idx(J, &ix); + } +} + +/* -- Upvalue access ------------------------------------------------------ */ + +/* Check whether upvalue is immutable and ok to constify. */ +static int rec_upvalue_constify(jit_State *J, GCupval *uvp) +{ + if (uvp->immutable) { + cTValue *o = uvval(uvp); + /* Don't constify objects that may retain large amounts of memory. */ +#if LJ_HASFFI + if (tviscdata(o)) { + GCcdata *cd = cdataV(o); + if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) { + CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid); + if (!ctype_hassize(ct->info) || ct->size <= 16) + return 1; + } + return 0; + } +#else + UNUSED(J); +#endif + if (!(tvistab(o) || tvisudata(o) || tvisthread(o))) + return 1; + } + return 0; +} + +/* Record upvalue load/store. */ +static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val) +{ + GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv; + TRef fn = getcurrf(J); + IRRef uref; + int needbarrier = 0; + if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */ + TRef tr, kfunc; + lua_assert(val == 0); + if (!tref_isk(fn)) { /* Late specialization of current function. */ + if (J->pt->flags >= PROTO_CLC_POLY) + goto noconstify; + kfunc = lj_ir_kfunc(J, J->fn); + emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc); + J->base[-1] = TREF_FRAME | kfunc; + fn = kfunc; + } + tr = lj_record_constify(J, uvval(uvp)); + if (tr) + return tr; + } +noconstify: + /* Note: this effectively limits LJ_MAX_UPVAL to 127. */ + uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff); + if (!uvp->closed) { + /* In current stack? */ + if (uvval(uvp) >= tvref(J->L->stack) && + uvval(uvp) < tvref(J->L->maxstack)) { + int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot)); + if (slot >= 0) { /* Aliases an SSA slot? 
*/ + slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */ + /* NYI: add IR to guard that it's still aliasing the same slot. */ + if (val == 0) { + return getslot(J, slot); + } else { + J->base[slot] = val; + if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1); + return 0; + } + } + } + uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv)); + } else { + needbarrier = 1; + uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv)); + } + if (val == 0) { /* Upvalue load */ + IRType t = itype2irt(uvval(uvp)); + TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0); + if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */ + return res; + } else { /* Upvalue store. */ + /* Convert int to number before storing. */ + if (!LJ_DUALNUM && tref_isinteger(val)) + val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); + emitir(IRT(IR_USTORE, tref_type(val)), uref, val); + if (needbarrier && tref_isgcv(val)) + emitir(IRT(IR_OBAR, IRT_NIL), uref, val); + J->needsnap = 1; + return 0; + } +} + +/* -- Record calls to Lua functions --------------------------------------- */ + +/* Check unroll limits for calls. */ +static void check_call_unroll(jit_State *J, TraceNo lnk) +{ + cTValue *frame = J->L->base - 1; + void *pc = mref(frame_func(frame)->l.pc, void); + int32_t depth = J->framedepth; + int32_t count = 0; + if ((J->pt->flags & PROTO_VARARG)) depth--; /* Vararg frame still missing. */ + for (; depth > 0; depth--) { /* Count frames with same prototype. */ + if (frame_iscont(frame)) depth--; + frame = frame_prev(frame); + if (mref(frame_func(frame)->l.pc, void) == pc) + count++; + } + if (J->pc == J->startpc) { + if (count + J->tailcalled > J->param[JIT_P_recunroll]) { + J->pc++; + if (J->framedepth + J->retdepth == 0) + lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-rec. */ + else + lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */ + } + } else { + if (count > J->param[JIT_P_callunroll]) { + if (lnk) { /* Possible tail- or up-recursion. */ + lj_trace_flush(J, lnk); /* Flush trace that only returns. */ + /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */ + hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4)); + } + lj_trace_err(J, LJ_TRERR_CUNROLL); + } + } +} + +/* Record Lua function setup. */ +static void rec_func_setup(jit_State *J) +{ + GCproto *pt = J->pt; + BCReg s, numparams = pt->numparams; + if ((pt->flags & PROTO_NOJIT)) + lj_trace_err(J, LJ_TRERR_CJITOFF); + if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS) + lj_trace_err(J, LJ_TRERR_STACKOV); + /* Fill up missing parameters with nil. */ + for (s = J->maxslot; s < numparams; s++) + J->base[s] = TREF_NIL; + /* The remaining slots should never be read before they are written. */ + J->maxslot = numparams; +} + +/* Record Lua vararg function setup. */ +static void rec_func_vararg(jit_State *J) +{ + GCproto *pt = J->pt; + BCReg s, fixargs, vframe = J->maxslot+1; + lua_assert((pt->flags & PROTO_VARARG)); + if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS) + lj_trace_err(J, LJ_TRERR_STACKOV); + J->base[vframe-1] = J->base[-1]; /* Copy function up. */ + /* Copy fixarg slots up and set their original slots to nil. */ + fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot; + for (s = 0; s < fixargs; s++) { + J->base[vframe+s] = J->base[s]; + J->base[s] = TREF_NIL; + } + J->maxslot = fixargs; + J->framedepth++; + J->base += vframe; + J->baseslot += vframe; +} + +/* Record entry to a Lua function. 
*/ +static void rec_func_lua(jit_State *J) +{ + rec_func_setup(J); + check_call_unroll(J, 0); +} + +/* Record entry to an already compiled function. */ +static void rec_func_jit(jit_State *J, TraceNo lnk) +{ + GCtrace *T; + rec_func_setup(J); + T = traceref(J, lnk); + if (T->linktype == LJ_TRLINK_RETURN) { /* Trace returns to interpreter? */ + check_call_unroll(J, lnk); + /* Temporarily unpatch JFUNC* to continue recording across function. */ + J->patchins = *J->pc; + J->patchpc = (BCIns *)J->pc; + *J->patchpc = T->startins; + return; + } + J->instunroll = 0; /* Cannot continue across a compiled function. */ + if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) + lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-rec. */ + else + lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */ +} + +/* -- Vararg handling ----------------------------------------------------- */ + +/* Detect y = select(x, ...) idiom. */ +static int select_detect(jit_State *J) +{ + BCIns ins = J->pc[1]; + if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) { + cTValue *func = &J->L->base[bc_a(ins)]; + if (tvisfunc(func) && funcV(func)->c.ffid == FF_select) + return 1; + } + return 0; +} + +/* Record vararg instruction. */ +static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults) +{ + int32_t numparams = J->pt->numparams; + ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1; + lua_assert(frame_isvarg(J->L->base-1)); + if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */ + ptrdiff_t i; + if (nvararg < 0) nvararg = 0; + if (nresults == -1) { + nresults = nvararg; + J->maxslot = dst + (BCReg)nvararg; + } else if (dst + nresults > J->maxslot) { + J->maxslot = dst + (BCReg)nresults; + } + for (i = 0; i < nresults; i++) + J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1) : TREF_NIL; + } else { /* Unknown number of varargs passed to trace. */ + TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME); + int32_t frofs = 8*(1+numparams)+FRAME_VARG; + if (nresults >= 0) { /* Known fixed number of results. */ + ptrdiff_t i; + if (nvararg > 0) { + ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg; + TRef vbase; + if (nvararg >= nresults) + emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults)); + else + emitir(IRTGI(IR_EQ), fr, + lj_ir_kint(J, (int32_t)frame_ftsz(J->L->base-1))); + vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); + vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); + for (i = 0; i < nload; i++) { + IRType t = itype2irt(&J->L->base[i-1-nvararg]); + TRef aref = emitir(IRT(IR_AREF, IRT_P32), + vbase, lj_ir_kint(J, (int32_t)i)); + TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0); + if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ + J->base[dst+i] = tr; + } + } else { + emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs)); + nvararg = 0; + } + for (i = nvararg; i < nresults; i++) + J->base[dst+i] = TREF_NIL; + if (dst + (BCReg)nresults > J->maxslot) + J->maxslot = dst + (BCReg)nresults; + } else if (select_detect(J)) { /* y = select(x, ...) */ + TRef tridx = J->base[dst-1]; + TRef tr = TREF_NIL; + ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]); + if (idx < 0) goto nyivarg; + if (idx != 0 && !tref_isinteger(tridx)) + tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX); + if (idx != 0 && tref_isk(tridx)) { + emitir(IRTGI(idx <= nvararg ? 
IR_GE : IR_LT), + fr, lj_ir_kint(J, frofs+8*(int32_t)idx)); + frofs -= 8; /* Bias for 1-based index. */ + } else if (idx <= nvararg) { /* Compute size. */ + TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs)); + if (numparams) + emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0)); + tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3)); + if (idx != 0) { + tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1)); + rec_idx_abc(J, tr, tridx, (uint32_t)nvararg); + } + } else { + TRef tmp = lj_ir_kint(J, frofs); + if (idx != 0) { + TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3)); + tmp = emitir(IRTI(IR_ADD), tmp2, tmp); + } else { + tr = lj_ir_kint(J, 0); + } + emitir(IRTGI(IR_LT), fr, tmp); + } + if (idx != 0 && idx <= nvararg) { + IRType t; + TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); + vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); + t = itype2irt(&J->L->base[idx-2-nvararg]); + aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx); + tr = emitir(IRTG(IR_VLOAD, t), aref, 0); + if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ + } + J->base[dst-2] = tr; + J->maxslot = dst-1; + J->bcskip = 2; /* Skip CALLM + select. */ + } else { + nyivarg: + setintV(&J->errinfo, BC_VARG); + lj_trace_err_info(J, LJ_TRERR_NYIBC); + } + } +} + +/* -- Record allocations -------------------------------------------------- */ + +static TRef rec_tnew(jit_State *J, uint32_t ah) +{ + uint32_t asize = ah & 0x7ff; + uint32_t hbits = ah >> 11; + TRef tr; + if (asize == 0x7ff) asize = 0x801; + tr = emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits); +#ifdef LUAJIT_ENABLE_TABLE_BUMP + J->rbchash[(tr & (RBCHASH_SLOTS-1))].ref = tref_ref(tr); + setmref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pc, J->pc); + setgcref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt)); +#endif + return tr; +} + +/* -- Concatenation ------------------------------------------------------- */ + +static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot) +{ + TRef *top = &J->base[topslot]; + TValue savetv[5]; + BCReg s; + RecordIndex ix; + lua_assert(baseslot < topslot); + for (s = baseslot; s <= topslot; s++) + (void)getslot(J, s); /* Ensure all arguments have a reference. */ + if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) { + TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot]; + /* First convert numbers to strings. */ + for (trp = top; trp >= base; trp--) { + if (tref_isnumber(*trp)) + *trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp, + tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT); + else if (!tref_isstr(*trp)) + break; + } + xbase = ++trp; + tr = hdr = emitir(IRT(IR_BUFHDR, IRT_P32), + lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET); + do { + tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, *trp++); + } while (trp <= top); + tr = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr); + J->maxslot = (BCReg)(xbase - J->base); + if (xbase == base) return tr; /* Return simple concatenation result. */ + /* Pass partial result. */ + topslot = J->maxslot--; + *xbase = tr; + top = xbase; + setstrV(J->L, &ix.keyv, &J2G(J)->strempty); /* Simulate string result. */ + } else { + J->maxslot = topslot-1; + copyTV(J->L, &ix.keyv, &J->L->base[topslot]); + } + copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]); + ix.tab = top[-1]; + ix.key = top[0]; + memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv)); /* Save slots. */ + rec_mm_arith(J, &ix, MM_concat); /* Call __concat metamethod. */ + memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv)); /* Restore slots. */ + return 0; /* No result yet. 
*/ +} + +/* -- Record bytecode ops ------------------------------------------------- */ + +/* Prepare for comparison. */ +static void rec_comp_prep(jit_State *J) +{ + /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */ + if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins) + emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0); + lj_snap_add(J); +} + +/* Fixup comparison. */ +static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond) +{ + BCIns jmpins = pc[1]; + const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0); + SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; + /* Set PC to opposite target to avoid re-recording the comp. in side trace. */ + J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc); + J->needsnap = 1; + if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins); + lj_snap_shrink(J); /* Shrink last snapshot if possible. */ +} + +/* Record the next bytecode instruction (_before_ it's executed). */ +void lj_record_ins(jit_State *J) +{ + cTValue *lbase; + RecordIndex ix; + const BCIns *pc; + BCIns ins; + BCOp op; + TRef ra, rb, rc; + + /* Perform post-processing action before recording the next instruction. */ + if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) { + switch (J->postproc) { + case LJ_POST_FIXCOMP: /* Fixup comparison. */ + pc = (const BCIns *)(uintptr_t)J2G(J)->tmptv.u64; + rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1))); + /* fallthrough */ + case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */ + case LJ_POST_FIXGUARDSNAP: /* Fixup and emit pending guard and snapshot. */ + if (!tvistruecond(&J2G(J)->tmptv2)) { + J->fold.ins.o ^= 1; /* Flip guard to opposite. */ + if (J->postproc == LJ_POST_FIXGUARDSNAP) { + SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; + J->cur.snapmap[snap->mapofs+snap->nent-1]--; /* False -> true. */ + } + } + lj_opt_fold(J); /* Emit pending guard. */ + /* fallthrough */ + case LJ_POST_FIXBOOL: + if (!tvistruecond(&J2G(J)->tmptv2)) { + BCReg s; + TValue *tv = J->L->base; + for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */ + if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) { + J->base[s] = TREF_FALSE; + break; + } + } + break; + case LJ_POST_FIXCONST: + { + BCReg s; + TValue *tv = J->L->base; + for (s = 0; s < J->maxslot; s++) /* Constify stack slots (if any). */ + if (J->base[s] == TREF_NIL && !tvisnil(&tv[s])) + J->base[s] = lj_record_constify(J, &tv[s]); + } + break; + case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. */ + if (bc_op(*J->pc) >= BC__MAX) + return; + break; + default: lua_assert(0); break; + } + J->postproc = LJ_POST_NONE; + } + + /* Need snapshot before recording next bytecode (e.g. after a store). */ + if (J->needsnap) { + J->needsnap = 0; + lj_snap_purge(J); + lj_snap_add(J); + J->mergesnap = 1; + } + + /* Skip some bytecodes. */ + if (LJ_UNLIKELY(J->bcskip > 0)) { + J->bcskip--; + return; + } + + /* Record only closed loops for root traces. */ + pc = J->pc; + if (J->framedepth == 0 && + (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent) + lj_trace_err(J, LJ_TRERR_LLEAVE); + +#ifdef LUA_USE_ASSERT + rec_check_slots(J); + rec_check_ir(J); +#endif + +#if LJ_HASPROFILE + rec_profile_ins(J, pc); +#endif + + /* Keep a copy of the runtime values of var/num/str operands. 
*/ +#define rav (&ix.valv) +#define rbv (&ix.tabv) +#define rcv (&ix.keyv) + + lbase = J->L->base; + ins = *pc; + op = bc_op(ins); + ra = bc_a(ins); + ix.val = 0; + switch (bcmode_a(op)) { + case BCMvar: + copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break; + default: break; /* Handled later. */ + } + rb = bc_b(ins); + rc = bc_c(ins); + switch (bcmode_b(op)) { + case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */ + case BCMvar: + copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break; + default: break; /* Handled later. */ + } + switch (bcmode_c(op)) { + case BCMvar: + copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break; + case BCMpri: setpriV(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break; + case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc); + copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) : + lj_ir_knumint(J, numV(tv)); } break; + case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc)); + setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break; + default: break; /* Handled later. */ + } + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: +#if LJ_HASFFI + if (tref_iscdata(ra) || tref_iscdata(rc)) { + rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt); + break; + } +#endif + /* Emit nothing for two numeric or string consts. */ + if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) { + IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra); + IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc); + int irop; + if (ta != tc) { + /* Widen mixed number/int comparisons to number/number comparison. */ + if (ta == IRT_INT && tc == IRT_NUM) { + ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT); + ta = IRT_NUM; + } else if (ta == IRT_NUM && tc == IRT_INT) { + rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); + } else if (LJ_52) { + ta = IRT_NIL; /* Force metamethod for different types. */ + } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) && + (tc == IRT_FALSE || tc == IRT_TRUE))) { + break; /* Interpreter will throw for two different types. */ + } + } + rec_comp_prep(J); + irop = (int)op - (int)BC_ISLT + (int)IR_LT; + if (ta == IRT_NUM) { + if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. */ + if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop)) + irop ^= 5; + } else if (ta == IRT_INT) { + if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop)) + irop ^= 1; + } else if (ta == IRT_STR) { + if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1; + ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc); + rc = lj_ir_kint(J, 0); + ta = IRT_INT; + } else { + rec_mm_comp(J, &ix, (int)op); + break; + } + emitir(IRTG(irop, ta), ra, rc); + rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1); + } + break; + + case BC_ISEQV: case BC_ISNEV: + case BC_ISEQS: case BC_ISNES: + case BC_ISEQN: case BC_ISNEN: + case BC_ISEQP: case BC_ISNEP: +#if LJ_HASFFI + if (tref_iscdata(ra) || tref_iscdata(rc)) { + rec_mm_comp_cdata(J, &ix, op, MM_eq); + break; + } +#endif + /* Emit nothing for two non-table, non-udata consts. */ + if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) { + int diff; + rec_comp_prep(J); + diff = lj_record_objcmp(J, ra, rc, rav, rcv); + if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra))) + rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff); + else if (diff == 1) /* Only check __eq if different, but same type. 
*/ + rec_mm_equal(J, &ix, (int)op); + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: + if ((op & 1) == tref_istruecond(rc)) + rc = 0; /* Don't store if condition is not true. */ + /* fallthrough */ + case BC_IST: case BC_ISF: /* Type specialization suffices. */ + if (bc_a(pc[1]) < J->maxslot) + J->maxslot = bc_a(pc[1]); /* Shrink used slots. */ + break; + + case BC_ISTYPE: case BC_ISNUM: + /* These coercions need to correspond with lj_meta_istype(). */ + if (LJ_DUALNUM && rc == ~LJ_TNUMX+1) + ra = lj_opt_narrow_toint(J, ra); + else if (rc == ~LJ_TNUMX+2) + ra = lj_ir_tonum(J, ra); + else if (rc == ~LJ_TSTR+1) + ra = lj_ir_tostr(J, ra); + /* else: type specialization suffices. */ + J->base[bc_a(ins)] = ra; + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_NOT: + /* Type specialization already forces const result. */ + rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE; + break; + + case BC_LEN: + if (tref_isstr(rc)) + rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN); + else if (!LJ_52 && tref_istab(rc)) + rc = lj_ir_call(J, IRCALL_lj_tab_len, rc); + else + rc = rec_mm_len(J, rc, rcv); + break; + + /* -- Arithmetic ops ---------------------------------------------------- */ + + case BC_UNM: + if (tref_isnumber_str(rc)) { + rc = lj_opt_narrow_unm(J, rc, rcv); + } else { + ix.tab = rc; + copyTV(J->L, &ix.tabv, rcv); + rc = rec_mm_arith(J, &ix, MM_unm); + } + break; + + case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV: + /* Swap rb/rc and rbv/rcv. rav is temp. */ + ix.tab = rc; ix.key = rc = rb; rb = ix.tab; + copyTV(J->L, rav, rbv); + copyTV(J->L, rbv, rcv); + copyTV(J->L, rcv, rav); + if (op == BC_MODNV) + goto recmod; + /* fallthrough */ + case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN: + case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: { + MMS mm = bcmode_mm(op); + if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) + rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv, + (int)mm - (int)MM_add + (int)IR_ADD); + else + rc = rec_mm_arith(J, &ix, mm); + break; + } + + case BC_MODVN: case BC_MODVV: + recmod: + if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) + rc = lj_opt_narrow_mod(J, rb, rc, rcv); + else + rc = rec_mm_arith(J, &ix, MM_mod); + break; + + case BC_POW: + if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) + rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv); + else + rc = rec_mm_arith(J, &ix, MM_pow); + break; + + /* -- Miscellaneous ops ------------------------------------------------- */ + + case BC_CAT: + rc = rec_cat(J, rb, rc); + break; + + /* -- Constant and move ops --------------------------------------------- */ + + case BC_MOV: + /* Clear gap of method call to avoid resurrecting previous refs. 
*/ + if (ra > J->maxslot) J->base[ra-1] = 0; + break; + case BC_KSTR: case BC_KNUM: case BC_KPRI: + break; + case BC_KSHORT: + rc = lj_ir_kint(J, (int32_t)(int16_t)rc); + break; + case BC_KNIL: + while (ra <= rc) + J->base[ra++] = TREF_NIL; + if (rc >= J->maxslot) J->maxslot = rc+1; + break; +#if LJ_HASFFI + case BC_KCDATA: + rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA); + break; +#endif + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + rc = rec_upvalue(J, rc, 0); + break; + case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP: + rec_upvalue(J, ra, rc); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_GGET: case BC_GSET: + settabV(J->L, &ix.tabv, tabref(J->fn->l.env)); + ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV); + ix.idxchain = LJ_MAX_IDXCHAIN; + rc = lj_record_idx(J, &ix); + break; + + case BC_TGETB: case BC_TSETB: + setintV(&ix.keyv, (int32_t)rc); + ix.key = lj_ir_kint(J, (int32_t)rc); + /* fallthrough */ + case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS: + ix.idxchain = LJ_MAX_IDXCHAIN; + rc = lj_record_idx(J, &ix); + break; + case BC_TGETR: case BC_TSETR: + ix.idxchain = 0; + rc = lj_record_idx(J, &ix); + break; + + case BC_TSETM: + rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo); + break; + + case BC_TNEW: + rc = rec_tnew(J, rc); + break; + case BC_TDUP: + rc = emitir(IRTG(IR_TDUP, IRT_TAB), + lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0); +#ifdef LUAJIT_ENABLE_TABLE_BUMP + J->rbchash[(rc & (RBCHASH_SLOTS-1))].ref = tref_ref(rc); + setmref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pc, pc); + setgcref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt)); +#endif + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_ITERC: + J->base[ra] = getslot(J, ra-3-LJ_FR2); + J->base[ra+1] = getslot(J, ra-2-LJ_FR2); + J->base[ra+2] = getslot(J, ra-1-LJ_FR2); + { /* Do the actual copy now because lj_record_call needs the values. */ + TValue *b = &J->L->base[ra]; + copyTV(J->L, b, b-3-LJ_FR2); + copyTV(J->L, b+1, b-2-LJ_FR2); + copyTV(J->L, b+2, b-1-LJ_FR2); + } + lj_record_call(J, ra, (ptrdiff_t)rc-1); + break; + + /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */ + case BC_CALLM: + rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2; + /* fallthrough */ + case BC_CALL: + lj_record_call(J, ra, (ptrdiff_t)rc-1); + break; + + case BC_CALLMT: + rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2; + /* fallthrough */ + case BC_CALLT: + lj_record_tailcall(J, ra, (ptrdiff_t)rc-1); + break; + + case BC_VARG: + rec_varg(J, ra, (ptrdiff_t)rb-1); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */ + rc = (BCReg)(J->L->top - J->L->base) - ra + 1; + /* fallthrough */ + case BC_RET: case BC_RET0: case BC_RET1: +#if LJ_HASPROFILE + rec_profile_ret(J); +#endif + lj_record_ret(J, ra, (ptrdiff_t)rc-1); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + case BC_FORI: + if (rec_for(J, pc, 0) != LOOPEV_LEAVE) + J->loopref = J->cur.nins; + break; + case BC_JFORI: + lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL); + if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. 
*/ + lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J])); + /* Continue tracing if the loop is not entered. */ + break; + + case BC_FORL: + rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1)); + break; + case BC_ITERL: + rec_loop_interp(J, pc, rec_iterl(J, *pc)); + break; + case BC_LOOP: + rec_loop_interp(J, pc, rec_loop(J, ra)); + break; + + case BC_JFORL: + rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1)); + break; + case BC_JITERL: + rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins)); + break; + case BC_JLOOP: + rec_loop_jit(J, rc, rec_loop(J, ra)); + break; + + case BC_IFORL: + case BC_IITERL: + case BC_ILOOP: + case BC_IFUNCF: + case BC_IFUNCV: + lj_trace_err(J, LJ_TRERR_BLACKL); + break; + + case BC_JMP: + if (ra < J->maxslot) + J->maxslot = ra; /* Shrink used slots. */ + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: + rec_func_lua(J); + break; + case BC_JFUNCF: + rec_func_jit(J, rc); + break; + + case BC_FUNCV: + rec_func_vararg(J); + rec_func_lua(J); + break; + case BC_JFUNCV: + lua_assert(0); /* Cannot happen. No hotcall counting for varag funcs. */ + break; + + case BC_FUNCC: + case BC_FUNCCW: + lj_ffrecord_func(J); + break; + + default: + if (op >= BC__MAX) { + lj_ffrecord_func(J); + break; + } + /* fallthrough */ + case BC_ITERN: + case BC_ISNEXT: + case BC_UCLO: + case BC_FNEW: + setintV(&J->errinfo, (int32_t)op); + lj_trace_err_info(J, LJ_TRERR_NYIBC); + break; + } + + /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */ + if (bcmode_a(op) == BCMdst && rc) { + J->base[ra] = rc; + if (ra >= J->maxslot) J->maxslot = ra+1; + } + +#undef rav +#undef rbv +#undef rcv + + /* Limit the number of recorded IR instructions. */ + if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord]) + lj_trace_err(J, LJ_TRERR_TRACEOV); +} + +/* -- Recording setup ----------------------------------------------------- */ + +/* Setup recording for a root trace started by a hot loop. */ +static const BCIns *rec_setup_root(jit_State *J) +{ + /* Determine the next PC and the bytecode range for the loop. */ + const BCIns *pcj, *pc = J->pc; + BCIns ins = *pc; + BCReg ra = bc_a(ins); + switch (bc_op(ins)) { + case BC_FORL: + J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); + pc += 1+bc_j(ins); + J->bc_min = pc; + break; + case BC_ITERL: + lua_assert(bc_op(pc[-1]) == BC_ITERC); + J->maxslot = ra + bc_b(pc[-1]) - 1; + J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); + pc += 1+bc_j(ins); + lua_assert(bc_op(pc[-1]) == BC_JMP); + J->bc_min = pc; + break; + case BC_LOOP: + /* Only check BC range for real loops, but not for "repeat until true". */ + pcj = pc + bc_j(ins); + ins = *pcj; + if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) { + J->bc_min = pcj+1 + bc_j(ins); + J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); + } + J->maxslot = ra; + pc++; + break; + case BC_RET: + case BC_RET0: + case BC_RET1: + /* No bytecode range check for down-recursive root traces. */ + J->maxslot = ra + bc_d(ins) - 1; + break; + case BC_FUNCF: + /* No bytecode range check for root traces started by a hot call. */ + J->maxslot = J->pt->numparams; + pc++; + break; + case BC_CALLM: + case BC_CALL: + case BC_ITERC: + /* No bytecode range check for stitched traces. */ + pc++; + break; + default: + lua_assert(0); + break; + } + return pc; +} + +/* Setup for recording a new trace. */ +void lj_record_setup(jit_State *J) +{ + uint32_t i; + + /* Initialize state related to current trace. 
*/ + memset(J->slot, 0, sizeof(J->slot)); + memset(J->chain, 0, sizeof(J->chain)); +#ifdef LUAJIT_ENABLE_TABLE_BUMP + memset(J->rbchash, 0, sizeof(J->rbchash)); +#endif + memset(J->bpropcache, 0, sizeof(J->bpropcache)); + J->scev.idx = REF_NIL; + setmref(J->scev.pc, NULL); + + J->baseslot = 1; /* Invoking function is at base[-1]. */ + J->base = J->slot + J->baseslot; + J->maxslot = 0; + J->framedepth = 0; + J->retdepth = 0; + + J->instunroll = J->param[JIT_P_instunroll]; + J->loopunroll = J->param[JIT_P_loopunroll]; + J->tailcalled = 0; + J->loopref = 0; + + J->bc_min = NULL; /* Means no limit. */ + J->bc_extent = ~(MSize)0; + + /* Emit instructions for fixed references. Also triggers initial IR alloc. */ + emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno); + for (i = 0; i <= 2; i++) { + IRIns *ir = IR(REF_NIL-i); + ir->i = 0; + ir->t.irt = (uint8_t)(IRT_NIL+i); + ir->o = IR_KPRI; + ir->prev = 0; + } + J->cur.nk = REF_TRUE; + + J->startpc = J->pc; + setmref(J->cur.startpc, J->pc); + if (J->parent) { /* Side trace. */ + GCtrace *T = traceref(J, J->parent); + TraceNo root = T->root ? T->root : J->parent; + J->cur.root = (uint16_t)root; + J->cur.startins = BCINS_AD(BC_JMP, 0, 0); + /* Check whether we could at least potentially form an extra loop. */ + if (J->exitno == 0 && T->snap[0].nent == 0) { + /* We can narrow a FORL for some side traces, too. */ + if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI && + bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) { + lj_snap_add(J); + rec_for_loop(J, J->pc-1, &J->scev, 1); + goto sidecheck; + } + } else { + J->startpc = NULL; /* Prevent forming an extra loop. */ + } + lj_snap_replay(J, T); + sidecheck: + if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] || + T->snap[J->exitno].count >= J->param[JIT_P_hotexit] + + J->param[JIT_P_tryside]) { + lj_record_stop(J, LJ_TRLINK_INTERP, 0); + } + } else { /* Root trace. */ + J->cur.root = 0; + J->cur.startins = *J->pc; + J->pc = rec_setup_root(J); + /* Note: the loop instruction itself is recorded at the end and not + ** at the start! So snapshot #0 needs to point to the *next* instruction. + */ + lj_snap_add(J); + if (bc_op(J->cur.startins) == BC_FORL) + rec_for_loop(J, J->pc-1, &J->scev, 1); + else if (bc_op(J->cur.startins) == BC_ITERC) + J->startpc = NULL; + if (1 + J->pt->framesize >= LJ_MAX_JSLOTS) + lj_trace_err(J, LJ_TRERR_STACKOV); + } +#if LJ_HASPROFILE + J->prev_pt = NULL; + J->prev_line = -1; +#endif +#ifdef LUAJIT_ENABLE_CHECKHOOK + /* Regularly check for instruction/line hooks from compiled code and + ** exit to the interpreter if the hooks are set. + ** + ** This is a compile-time option and disabled by default, since the + ** hook checks may be quite expensive in tight loops. + ** + ** Note this is only useful if hooks are *not* set most of the time. + ** Use this only if you want to *asynchronously* interrupt the execution. + ** + ** You can set the instruction hook via lua_sethook() with a count of 1 + ** from a signal handler or another native thread. Please have a look + ** at the first few functions in luajit.c for an example (Ctrl-C handler). 
+ */ + { + TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), + lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE); + tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT))); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0)); + } +#endif +} + +#undef IR +#undef emitir_raw +#undef emitir + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_record.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_record.h new file mode 100644 index 00000000000..732adb47ce2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_record.h @@ -0,0 +1,45 @@ +/* +** Trace recorder (bytecode -> SSA IR). +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_RECORD_H +#define _LJ_RECORD_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +/* Context for recording an indexed load/store. */ +typedef struct RecordIndex { + TValue tabv; /* Runtime value of table (or indexed object). */ + TValue keyv; /* Runtime value of key. */ + TValue valv; /* Runtime value of stored value. */ + TValue mobjv; /* Runtime value of metamethod object. */ + GCtab *mtv; /* Runtime value of metatable object. */ + cTValue *oldv; /* Runtime value of previously stored value. */ + TRef tab; /* Table (or indexed object) reference. */ + TRef key; /* Key reference. */ + TRef val; /* Value reference for a store or 0 for a load. */ + TRef mt; /* Metatable reference. */ + TRef mobj; /* Metamethod object reference. */ + int idxchain; /* Index indirections left or 0 for raw lookup. */ +} RecordIndex; + +LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b, + cTValue *av, cTValue *bv); +LJ_FUNC void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk); +LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o); + +LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs); +LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs); +LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults); + +LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm); +LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix); + +LJ_FUNC void lj_record_ins(jit_State *J); +LJ_FUNC void lj_record_setup(jit_State *J); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c new file mode 100644 index 00000000000..d8e7987c5c2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c @@ -0,0 +1,870 @@ +/* +** Snapshot handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_snap_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_target.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#endif + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Emit raw IR without passing through optimizations. */ +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- Snapshot buffer allocation ------------------------------------------ */ + +/* Grow snapshot buffer. 
*/ +void lj_snap_grow_buf_(jit_State *J, MSize need) +{ + MSize maxsnap = (MSize)J->param[JIT_P_maxsnap]; + if (need > maxsnap) + lj_trace_err(J, LJ_TRERR_SNAPOV); + lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot); + J->cur.snap = J->snapbuf; +} + +/* Grow snapshot map buffer. */ +void lj_snap_grow_map_(jit_State *J, MSize need) +{ + if (need < 2*J->sizesnapmap) + need = 2*J->sizesnapmap; + else if (need < 64) + need = 64; + J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf, + J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry)); + J->cur.snapmap = J->snapmapbuf; + J->sizesnapmap = need; +} + +/* -- Snapshot generation ------------------------------------------------- */ + +/* Add all modified slots to the snapshot. */ +static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots) +{ + IRRef retf = J->chain[IR_RETF]; /* Limits SLOAD restore elimination. */ + BCReg s; + MSize n = 0; + for (s = 0; s < nslots; s++) { + TRef tr = J->slot[s]; + IRRef ref = tref_ref(tr); + if (ref) { + SnapEntry sn = SNAP_TR(s, tr); + IRIns *ir = IR(ref); + if (!(sn & (SNAP_CONT|SNAP_FRAME)) && + ir->o == IR_SLOAD && ir->op1 == s && ref > retf) { + /* No need to snapshot unmodified non-inherited slots. */ + if (!(ir->op2 & IRSLOAD_INHERIT)) + continue; + /* No need to restore readonly slots and unmodified non-parent slots. */ + if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) && + (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT) + sn |= SNAP_NORESTORE; + } + if (LJ_SOFTFP && irt_isnum(ir->t)) + sn |= SNAP_SOFTFPNUM; + map[n++] = sn; + } + } + return n; +} + +/* Add frame links at the end of the snapshot. */ +static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map) +{ + cTValue *frame = J->L->base - 1; + cTValue *lim = J->L->base - J->baseslot; + GCfunc *fn = frame_func(frame); + cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top; + MSize f = 0; + lua_assert(!LJ_FR2); /* TODO_FR2: store 64 bit PCs. */ + map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */ + while (frame > lim) { /* Backwards traversal of all frames above base. */ + if (frame_islua(frame)) { + map[f++] = SNAP_MKPC(frame_pc(frame)); + frame = frame_prevl(frame); + } else if (frame_iscont(frame)) { + map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); + map[f++] = SNAP_MKPC(frame_contpc(frame)); + frame = frame_prevd(frame); + } else { + lua_assert(!frame_isc(frame)); + map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); + frame = frame_prevd(frame); + continue; + } + if (frame + funcproto(frame_func(frame))->framesize > ftop) + ftop = frame + funcproto(frame_func(frame))->framesize; + } + lua_assert(f == (MSize)(1 + J->framedepth)); + return (BCReg)(ftop - lim); +} + +/* Take a snapshot of the current stack. */ +static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap) +{ + BCReg nslots = J->baseslot + J->maxslot; + MSize nent; + SnapEntry *p; + /* Conservative estimate. */ + lj_snap_grow_map(J, nsnapmap + nslots + (MSize)J->framedepth+1); + p = &J->cur.snapmap[nsnapmap]; + nent = snapshot_slots(J, p, nslots); + snap->topslot = (uint8_t)snapshot_framelinks(J, p + nent); + snap->mapofs = (uint16_t)nsnapmap; + snap->ref = (IRRef1)J->cur.nins; + snap->nent = (uint8_t)nent; + snap->nslots = (uint8_t)nslots; + snap->count = 0; + J->cur.nsnapmap = (uint16_t)(nsnapmap + nent + 1 + J->framedepth); +} + +/* Add or merge a snapshot. 
*/ +void lj_snap_add(jit_State *J) +{ + MSize nsnap = J->cur.nsnap; + MSize nsnapmap = J->cur.nsnapmap; + /* Merge if no ins. inbetween or if requested and no guard inbetween. */ + if (J->mergesnap ? !irt_isguard(J->guardemit) : + (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) { + if (nsnap == 1) { /* But preserve snap #0 PC. */ + emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0); + goto nomerge; + } + nsnapmap = J->cur.snap[--nsnap].mapofs; + } else { + nomerge: + lj_snap_grow_buf(J, nsnap+1); + J->cur.nsnap = (uint16_t)(nsnap+1); + } + J->mergesnap = 0; + J->guardemit.irt = 0; + snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap); +} + +/* -- Snapshot modification ----------------------------------------------- */ + +#define SNAP_USEDEF_SLOTS (LJ_MAX_JSLOTS+LJ_STACK_EXTRA) + +/* Find unused slots with reaching-definitions bytecode data-flow analysis. */ +static BCReg snap_usedef(jit_State *J, uint8_t *udf, + const BCIns *pc, BCReg maxslot) +{ + BCReg s; + GCobj *o; + + if (maxslot == 0) return 0; +#ifdef LUAJIT_USE_VALGRIND + /* Avoid errors for harmless reads beyond maxslot. */ + memset(udf, 1, SNAP_USEDEF_SLOTS); +#else + memset(udf, 1, maxslot); +#endif + + /* Treat open upvalues as used. */ + o = gcref(J->L->openupval); + while (o) { + if (uvval(gco2uv(o)) < J->L->base) break; + udf[uvval(gco2uv(o)) - J->L->base] = 0; + o = gcref(o->gch.nextgc); + } + +#define USE_SLOT(s) udf[(s)] &= ~1 +#define DEF_SLOT(s) udf[(s)] *= 3 + + /* Scan through following bytecode and check for uses/defs. */ + lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); + for (;;) { + BCIns ins = *pc++; + BCOp op = bc_op(ins); + switch (bcmode_b(op)) { + case BCMvar: USE_SLOT(bc_b(ins)); break; + default: break; + } + switch (bcmode_c(op)) { + case BCMvar: USE_SLOT(bc_c(ins)); break; + case BCMrbase: + lua_assert(op == BC_CAT); + for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s); + for (; s < maxslot; s++) DEF_SLOT(s); + break; + case BCMjump: + handle_jump: { + BCReg minslot = bc_a(ins); + if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT; + else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1; + else if (op == BC_UCLO) { pc += bc_j(ins); break; } + for (s = minslot; s < maxslot; s++) DEF_SLOT(s); + return minslot < maxslot ? minslot : maxslot; + } + case BCMlit: + if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { + goto handle_jump; + } else if (bc_isret(op)) { + BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1); + for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s); + for (; s < top; s++) USE_SLOT(s); + for (; s < maxslot; s++) DEF_SLOT(s); + return 0; + } + break; + case BCMfunc: return maxslot; /* NYI: will abort, anyway. */ + default: break; + } + switch (bcmode_a(op)) { + case BCMvar: USE_SLOT(bc_a(ins)); break; + case BCMdst: + if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins)); + break; + case BCMbase: + if (op >= BC_CALLM && op <= BC_VARG) { + BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ? + maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2); + if (LJ_FR2) DEF_SLOT(bc_a(ins)+1); + s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 
3 : 0); + for (; s < top; s++) USE_SLOT(s); + for (; s < maxslot; s++) DEF_SLOT(s); + if (op == BC_CALLT || op == BC_CALLMT) { + for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s); + return 0; + } + } else if (op == BC_KNIL) { + for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s); + } else if (op == BC_TSETM) { + for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s); + } + break; + default: break; + } + lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); + } + +#undef USE_SLOT +#undef DEF_SLOT + + return 0; /* unreachable */ +} + +/* Purge dead slots before the next snapshot. */ +void lj_snap_purge(jit_State *J) +{ + uint8_t udf[SNAP_USEDEF_SLOTS]; + BCReg maxslot = J->maxslot; + BCReg s = snap_usedef(J, udf, J->pc, maxslot); + for (; s < maxslot; s++) + if (udf[s] != 0) + J->base[s] = 0; /* Purge dead slots. */ +} + +/* Shrink last snapshot. */ +void lj_snap_shrink(jit_State *J) +{ + SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, m, nlim, nent = snap->nent; + uint8_t udf[SNAP_USEDEF_SLOTS]; + BCReg maxslot = J->maxslot; + BCReg minslot = snap_usedef(J, udf, snap_pc(map[nent]), maxslot); + BCReg baseslot = J->baseslot; + maxslot += baseslot; + minslot += baseslot; + snap->nslots = (uint8_t)maxslot; + for (n = m = 0; n < nent; n++) { /* Remove unused slots from snapshot. */ + BCReg s = snap_slot(map[n]); + if (s < minslot || (s < maxslot && udf[s-baseslot] == 0)) + map[m++] = map[n]; /* Only copy used slots. */ + } + snap->nent = (uint8_t)m; + nlim = J->cur.nsnapmap - snap->mapofs - 1; + while (n <= nlim) map[m++] = map[n++]; /* Move PC + frame links down. */ + J->cur.nsnapmap = (uint16_t)(snap->mapofs + m); /* Free up space in map. */ +} + +/* -- Snapshot access ----------------------------------------------------- */ + +/* Initialize a Bloom Filter with all renamed refs. +** There are very few renames (often none), so the filter has +** very few bits set. This makes it suitable for negative filtering. +*/ +static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim) +{ + BloomFilter rfilt = 0; + IRIns *ir; + for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--) + if (ir->op2 <= lim) + bloomset(rfilt, ir->op1); + return rfilt; +} + +/* Process matching renames to find the original RegSP. */ +static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs) +{ + IRIns *ir; + for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--) + if (ir->op1 == ref && ir->op2 <= lim) + rs = ir->prev; + return rs; +} + +/* Copy RegSP from parent snapshot to the parent links of the IR. */ +IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir) +{ + SnapShot *snap = &T->snap[snapno]; + SnapEntry *map = &T->snapmap[snap->mapofs]; + BloomFilter rfilt = snap_renamefilter(T, snapno); + MSize n = 0; + IRRef ref = 0; + for ( ; ; ir++) { + uint32_t rs; + if (ir->o == IR_SLOAD) { + if (!(ir->op2 & IRSLOAD_PARENT)) break; + for ( ; ; n++) { + lua_assert(n < snap->nent); + if (snap_slot(map[n]) == ir->op1) { + ref = snap_ref(map[n++]); + break; + } + } + } else if (LJ_SOFTFP && ir->o == IR_HIOP) { + ref++; + } else if (ir->o == IR_PVAL) { + ref = ir->op1 + REF_BIAS; + } else { + break; + } + rs = T->ir[ref].prev; + if (bloomtest(rfilt, ref)) + rs = snap_renameref(T, snapno, ref, rs); + ir->prev = (uint16_t)rs; + lua_assert(regsp_used(rs)); + } + return ir; +} + +/* -- Snapshot replay ----------------------------------------------------- */ + +/* Replay constant from parent trace. 
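/*
** snap_renamefilter()/bloomtest() above use a single-word Bloom filter purely
** for negative filtering: a failed bit test proves that no IR_RENAME touches
** the reference, so the linear scan over the RENAME chain can be skipped; a
** hit may be a false positive and merely costs a redundant scan. A minimal
** standalone sketch of that idea (illustrative names, not the actual
** BloomFilter macros from lj_def.h):
*/
#include <stdint.h>

typedef uint64_t Bloom;                      /* One machine word as a bit set. */
#define bloom_bit(x)     ((Bloom)1 << ((x) & 63))
#define bloom_set(b, x)  ((b) |= bloom_bit(x))
#define bloom_test(b, x) ((b) & bloom_bit(x))

/* Returns 0 only if 'ref' was definitely never added to the filter. */
static int bloom_maybe_contains(Bloom b, uint32_t ref)
{
  return bloom_test(b, ref) != 0;
}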
*/ +static TRef snap_replay_const(jit_State *J, IRIns *ir) +{ + /* Only have to deal with constants that can occur in stack slots. */ + switch ((IROp)ir->o) { + case IR_KPRI: return TREF_PRI(irt_type(ir->t)); + case IR_KINT: return lj_ir_kint(J, ir->i); + case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t)); + case IR_KNUM: return lj_ir_k64(J, IR_KNUM, ir_knum(ir)); + case IR_KINT64: return lj_ir_k64(J, IR_KINT64, ir_kint64(ir)); + case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */ + default: lua_assert(0); return TREF_NIL; break; + } +} + +/* De-duplicate parent reference. */ +static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref) +{ + MSize j; + for (j = 0; j < nmax; j++) + if (snap_ref(map[j]) == ref) + return J->slot[snap_slot(map[j])] & ~(SNAP_CONT|SNAP_FRAME); + return 0; +} + +/* Emit parent reference with de-duplication. */ +static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax, + BloomFilter seen, IRRef ref) +{ + IRIns *ir = &T->ir[ref]; + TRef tr; + if (irref_isk(ref)) + tr = snap_replay_const(J, ir); + else if (!regsp_used(ir->prev)) + tr = 0; + else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0) + tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0); + return tr; +} + +/* Check whether a sunk store corresponds to an allocation. Slow path. */ +static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs) +{ + if (irs->o == IR_ASTORE || irs->o == IR_HSTORE || + irs->o == IR_FSTORE || irs->o == IR_XSTORE) { + IRIns *irk = IR(irs->op1); + if (irk->o == IR_AREF || irk->o == IR_HREFK) + irk = IR(irk->op1); + return (IR(irk->op1) == ira); + } + return 0; +} + +/* Check whether a sunk store corresponds to an allocation. Fast path. */ +static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs) +{ + if (irs->s != 255) + return (ira + irs->s == irs); /* Fast check. */ + return snap_sunk_store2(J, ira, irs); +} + +/* Replay snapshot state to setup side trace. */ +void lj_snap_replay(jit_State *J, GCtrace *T) +{ + SnapShot *snap = &T->snap[J->exitno]; + SnapEntry *map = &T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + BloomFilter seen = 0; + int pass23 = 0; + J->framedepth = 0; + /* Emit IR for slots inherited from parent snapshot. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + IRRef ref = snap_ref(sn); + IRIns *ir = &T->ir[ref]; + TRef tr; + /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */ + if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0) + goto setslot; + bloomset(seen, ref); + if (irref_isk(ref)) { + tr = snap_replay_const(J, ir); + } else if (!regsp_used(ir->prev)) { + pass23 = 1; + lua_assert(s != 0); + tr = s; + } else { + IRType t = irt_type(ir->t); + uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT; + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM; + if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY); + tr = emitir_raw(IRT(IR_SLOAD, t), s, mode); + } + setslot: + J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME)); /* Same as TREF_* flags. */ + J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && s); + if ((sn & SNAP_FRAME)) + J->baseslot = s+1; + } + if (pass23) { + IRIns *irlast = &T->ir[snap->ref]; + pass23 = 0; + /* Emit dependent PVALs. 
*/ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef refp = snap_ref(sn); + IRIns *ir = &T->ir[refp]; + if (regsp_reg(ir->r) == RID_SUNK) { + if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue; + pass23 = 1; + lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || + ir->o == IR_CNEW || ir->o == IR_CNEWI); + if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1); + if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2); + if (LJ_HASFFI && ir->o == IR_CNEWI) { + if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) + snap_pref(J, T, map, nent, seen, (ir+1)->op2); + } else { + IRIns *irs; + for (irs = ir+1; irs < irlast; irs++) + if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { + if (snap_pref(J, T, map, nent, seen, irs->op2) == 0) + snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1); + else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && + irs+1 < irlast && (irs+1)->o == IR_HIOP) + snap_pref(J, T, map, nent, seen, (irs+1)->op2); + } + } + } else if (!irref_isk(refp) && !regsp_used(ir->prev)) { + lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); + J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1); + } + } + /* Replay sunk instructions. */ + for (n = 0; pass23 && n < nent; n++) { + SnapEntry sn = map[n]; + IRRef refp = snap_ref(sn); + IRIns *ir = &T->ir[refp]; + if (regsp_reg(ir->r) == RID_SUNK) { + TRef op1, op2; + if (J->slot[snap_slot(sn)] != snap_slot(sn)) { /* De-dup allocs. */ + J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]]; + continue; + } + op1 = ir->op1; + if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1); + op2 = ir->op2; + if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2); + if (LJ_HASFFI && ir->o == IR_CNEWI) { + if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) { + lj_needsplit(J); /* Emit joining HIOP. */ + op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2, + snap_pref(J, T, map, nent, seen, (ir+1)->op2)); + } + J->slot[snap_slot(sn)] = emitir(ir->ot, op1, op2); + } else { + IRIns *irs; + TRef tr = emitir(ir->ot, op1, op2); + J->slot[snap_slot(sn)] = tr; + for (irs = ir+1; irs < irlast; irs++) + if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { + IRIns *irr = &T->ir[irs->op1]; + TRef val, key = irr->op2, tmp = tr; + if (irr->o != IR_FREF) { + IRIns *irk = &T->ir[key]; + if (irr->o == IR_HREFK) + key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]), + irk->op2); + else + key = snap_replay_const(J, irk); + if (irr->o == IR_HREFK || irr->o == IR_AREF) { + IRIns *irf = &T->ir[irr->op1]; + tmp = emitir(irf->ot, tmp, irf->op2); + } + } + tmp = emitir(irr->ot, tmp, key); + val = snap_pref(J, T, map, nent, seen, irs->op2); + if (val == 0) { + IRIns *irc = &T->ir[irs->op2]; + lua_assert(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT); + val = snap_pref(J, T, map, nent, seen, irc->op1); + val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); + } else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && + irs+1 < irlast && (irs+1)->o == IR_HIOP) { + IRType t = IRT_I64; + if (LJ_SOFTFP && irt_type((irs+1)->t) == IRT_SOFTFP) + t = IRT_NUM; + lj_needsplit(J); + if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) { + uint64_t k = (uint32_t)T->ir[irs->op2].i + + ((uint64_t)T->ir[(irs+1)->op2].i << 32); + val = lj_ir_k64(J, t == IRT_I64 ? 
IR_KINT64 : IR_KNUM, + lj_ir_k64_find(J, k)); + } else { + val = emitir_raw(IRT(IR_HIOP, t), val, + snap_pref(J, T, map, nent, seen, (irs+1)->op2)); + } + tmp = emitir(IRT(irs->o, t), tmp, val); + continue; + } + tmp = emitir(irs->ot, tmp, val); + } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) { + emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); + } + } + } + } + } + J->base = J->slot + J->baseslot; + J->maxslot = snap->nslots - J->baseslot; + lj_snap_add(J); + if (pass23) /* Need explicit GC step _after_ initial snapshot. */ + emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0); +} + +/* -- Snapshot restore ---------------------------------------------------- */ + +static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, + SnapNo snapno, BloomFilter rfilt, + IRIns *ir, TValue *o); + +/* Restore a value from the trace exit state. */ +static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex, + SnapNo snapno, BloomFilter rfilt, + IRRef ref, TValue *o) +{ + IRIns *ir = &T->ir[ref]; + IRType1 t = ir->t; + RegSP rs = ir->prev; + if (irref_isk(ref)) { /* Restore constant slot. */ + lj_ir_kvalue(J->L, o, ir); + return; + } + if (LJ_UNLIKELY(bloomtest(rfilt, ref))) + rs = snap_renameref(T, snapno, ref, rs); + lua_assert(!LJ_GC64); /* TODO_GC64: handle 64 bit references. */ + if (ra_hasspill(regsp_spill(rs))) { /* Restore from spill slot. */ + int32_t *sps = &ex->spill[regsp_spill(rs)]; + if (irt_isinteger(t)) { + setintV(o, *sps); +#if !LJ_SOFTFP + } else if (irt_isnum(t)) { + o->u64 = *(uint64_t *)sps; +#endif + } else if (LJ_64 && irt_islightud(t)) { + /* 64 bit lightuserdata which may escape already has the tag bits. */ + o->u64 = *(uint64_t *)sps; + } else { + lua_assert(!irt_ispri(t)); /* PRI refs never have a spill slot. */ + setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t)); + } + } else { /* Restore from register. */ + Reg r = regsp_reg(rs); + if (ra_noreg(r)) { + lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); + snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o); + if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o)); + return; + } else if (irt_isinteger(t)) { + setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]); +#if !LJ_SOFTFP + } else if (irt_isnum(t)) { + setnumV(o, ex->fpr[r-RID_MIN_FPR]); +#endif + } else if (LJ_64 && irt_islightud(t)) { + /* 64 bit lightuserdata which may escape already has the tag bits. */ + o->u64 = ex->gpr[r-RID_MIN_GPR]; + } else if (irt_ispri(t)) { + setpriV(o, irt_toitype(t)); + } else { + setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t)); + } + } +} + +#if LJ_HASFFI +/* Restore raw data from the trace exit state. */ +static void snap_restoredata(GCtrace *T, ExitState *ex, + SnapNo snapno, BloomFilter rfilt, + IRRef ref, void *dst, CTSize sz) +{ + IRIns *ir = &T->ir[ref]; + RegSP rs = ir->prev; + int32_t *src; + uint64_t tmp; + if (irref_isk(ref)) { + if (ir->o == IR_KNUM || ir->o == IR_KINT64) { + src = mref(ir->ptr, int32_t); + } else if (sz == 8) { + tmp = (uint64_t)(uint32_t)ir->i; + src = (int32_t *)&tmp; + } else { + src = &ir->i; + } + } else { + if (LJ_UNLIKELY(bloomtest(rfilt, ref))) + rs = snap_renameref(T, snapno, ref, rs); + if (ra_hasspill(regsp_spill(rs))) { + src = &ex->spill[regsp_spill(rs)]; + if (sz == 8 && !irt_is64(ir->t)) { + tmp = (uint64_t)(uint32_t)*src; + src = (int32_t *)&tmp; + } + } else { + Reg r = regsp_reg(rs); + if (ra_noreg(r)) { + /* Note: this assumes CNEWI is never used for SOFTFP split numbers. 
*/ + lua_assert(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); + snap_restoredata(T, ex, snapno, rfilt, ir->op1, dst, 4); + *(lua_Number *)dst = (lua_Number)*(int32_t *)dst; + return; + } + src = (int32_t *)&ex->gpr[r-RID_MIN_GPR]; +#if !LJ_SOFTFP + if (r >= RID_MAX_GPR) { + src = (int32_t *)&ex->fpr[r-RID_MIN_FPR]; +#if LJ_TARGET_PPC + if (sz == 4) { /* PPC FPRs are always doubles. */ + *(float *)dst = (float)*(double *)src; + return; + } +#else + if (LJ_BE && sz == 4) src++; +#endif + } +#endif + } + } + lua_assert(sz == 1 || sz == 2 || sz == 4 || sz == 8); + if (sz == 4) *(int32_t *)dst = *src; + else if (sz == 8) *(int64_t *)dst = *(int64_t *)src; + else if (sz == 1) *(int8_t *)dst = (int8_t)*src; + else *(int16_t *)dst = (int16_t)*src; +} +#endif + +/* Unsink allocation from the trace exit state. Unsink sunk stores. */ +static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, + SnapNo snapno, BloomFilter rfilt, + IRIns *ir, TValue *o) +{ + lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || + ir->o == IR_CNEW || ir->o == IR_CNEWI); +#if LJ_HASFFI + if (ir->o == IR_CNEW || ir->o == IR_CNEWI) { + CTState *cts = ctype_cts(J->L); + CTypeID id = (CTypeID)T->ir[ir->op1].i; + CTSize sz = lj_ctype_size(cts, id); + GCcdata *cd = lj_cdata_new(cts, id, sz); + setcdataV(J->L, o, cd); + if (ir->o == IR_CNEWI) { + uint8_t *p = (uint8_t *)cdataptr(cd); + lua_assert(sz == 4 || sz == 8); + if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) { + snap_restoredata(T, ex, snapno, rfilt, (ir+1)->op2, LJ_LE?p+4:p, 4); + if (LJ_BE) p += 4; + sz = 4; + } + snap_restoredata(T, ex, snapno, rfilt, ir->op2, p, sz); + } else { + IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref]; + for (irs = ir+1; irs < irlast; irs++) + if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { + IRIns *iro = &T->ir[T->ir[irs->op1].op2]; + uint8_t *p = (uint8_t *)cd; + CTSize szs; + lua_assert(irs->o == IR_XSTORE && T->ir[irs->op1].o == IR_ADD); + lua_assert(iro->o == IR_KINT || iro->o == IR_KINT64); + if (irt_is64(irs->t)) szs = 8; + else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1; + else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2; + else szs = 4; + if (LJ_64 && iro->o == IR_KINT64) + p += (int64_t)ir_k64(iro)->u64; + else + p += iro->i; + lua_assert(p >= (uint8_t *)cdataptr(cd) && + p + szs <= (uint8_t *)cdataptr(cd) + sz); + if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { + lua_assert(szs == 4); + snap_restoredata(T, ex, snapno, rfilt, (irs+1)->op2, LJ_LE?p+4:p,4); + if (LJ_BE) p += 4; + } + snap_restoredata(T, ex, snapno, rfilt, irs->op2, p, szs); + } + } + } else +#endif + { + IRIns *irs, *irlast; + GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) : + lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1])); + settabV(J->L, o, t); + irlast = &T->ir[T->snap[snapno].ref]; + for (irs = ir+1; irs < irlast; irs++) + if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) { + IRIns *irk = &T->ir[irs->op1]; + TValue tmp, *val; + lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || + irs->o == IR_FSTORE); + if (irk->o == IR_FREF) { + lua_assert(irk->op2 == IRFL_TAB_META); + snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp); + /* NOBARRIER: The table is new (marked white). */ + setgcref(t->metatable, obj2gco(tabV(&tmp))); + } else { + irk = &T->ir[irk->op2]; + if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1]; + lj_ir_kvalue(J->L, &tmp, irk); + val = lj_tab_set(J->L, t, &tmp); + /* NOBARRIER: The table is new (marked white). 
*/ + snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val); + if (LJ_SOFTFP && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { + snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp); + val->u32.hi = tmp.u32.lo; + } + } + } + } +} + +/* Restore interpreter state from exit state with the help of a snapshot. */ +const BCIns *lj_snap_restore(jit_State *J, void *exptr) +{ + ExitState *ex = (ExitState *)exptr; + SnapNo snapno = J->exitno; /* For now, snapno == exitno. */ + GCtrace *T = traceref(J, J->parent); + SnapShot *snap = &T->snap[snapno]; + MSize n, nent = snap->nent; + SnapEntry *map = &T->snapmap[snap->mapofs]; + SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1]; + ptrdiff_t ftsz0; + TValue *frame; + BloomFilter rfilt = snap_renamefilter(T, snapno); + const BCIns *pc = snap_pc(map[nent]); + lua_State *L = J->L; + + /* Set interpreter PC to the next PC to get correct error messages. */ + setcframe_pc(cframe_raw(L->cframe), pc+1); + + /* Make sure the stack is big enough for the slots from the snapshot. */ + if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) { + L->top = curr_topL(L); + lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize); + } + + /* Fill stack slots with data from the registers and spill slots. */ + frame = L->base-1; + ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + if (!(sn & SNAP_NORESTORE)) { + TValue *o = &frame[snap_slot(sn)]; + IRRef ref = snap_ref(sn); + IRIns *ir = &T->ir[ref]; + if (ir->r == RID_SUNK) { + MSize j; + for (j = 0; j < n; j++) + if (snap_ref(map[j]) == ref) { /* De-duplicate sunk allocations. */ + copyTV(L, o, &frame[snap_slot(map[j])]); + goto dupslot; + } + snap_unsink(J, T, ex, snapno, rfilt, ir, o); + dupslot: + continue; + } + snap_restoreval(J, T, ex, snapno, rfilt, ref, o); + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && tvisint(o)) { + TValue tmp; + snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp); + o->u32.hi = tmp.u32.lo; + } else if ((sn & (SNAP_CONT|SNAP_FRAME))) { + lua_assert(!LJ_FR2); /* TODO_FR2: store 64 bit PCs. */ + /* Overwrite tag with frame link. */ + setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0); + L->base = o+1; + } + } + } + lua_assert(map + nent == flinks); + + /* Compute current stack top. */ + switch (bc_op(*pc)) { + default: + if (bc_op(*pc) < BC_FUNCF) { + L->top = curr_topL(L); + break; + } + /* fallthrough */ + case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM: + L->top = frame + snap->nslots; + break; + } + return pc; +} + +#undef IR +#undef emitir_raw +#undef emitir + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.h new file mode 100644 index 00000000000..9a125be7fb9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.h @@ -0,0 +1,34 @@ +/* +** Snapshot handling. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_SNAP_H +#define _LJ_SNAP_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +LJ_FUNC void lj_snap_add(jit_State *J); +LJ_FUNC void lj_snap_purge(jit_State *J); +LJ_FUNC void lj_snap_shrink(jit_State *J); +LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir); +LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T); +LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr); +LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need); +LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need); + +static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need) +{ + if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need); +} + +static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need) +{ + if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need); +} + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_state.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_state.c new file mode 100644 index 00000000000..84b4d113a2b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_state.c @@ -0,0 +1,300 @@ +/* +** State and stack handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_state_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_trace.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_lex.h" +#include "lj_alloc.h" +#include "luajit.h" + +/* -- Stack handling ------------------------------------------------------ */ + +/* Stack sizes. */ +#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */ +#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */ +#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */ +#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA) + +/* Explanation of LJ_STACK_EXTRA: +** +** Calls to metamethods store their arguments beyond the current top +** without checking for the stack limit. This avoids stack resizes which +** would invalidate passed TValue pointers. The stack check is performed +** later by the function header. This can safely resize the stack or raise +** an error. Thus we need some extra slots beyond the current stack limit. +** +** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus +** one extra slot if mobj is not a function. Only lj_meta_tset needs 5 +** slots above top, but then mobj is always a function. So we can get by +** with 5 extra slots. +** LJ_FR2: We need 2 more slots for the frame PC and the continuation PC. +*/ + +/* Resize stack slots and adjust pointers in state. */ +static void resizestack(lua_State *L, MSize n) +{ + TValue *st, *oldst = tvref(L->stack); + ptrdiff_t delta; + MSize oldsize = L->stacksize; + MSize realsize = n + 1 + LJ_STACK_EXTRA; + GCobj *up; + lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1); + st = (TValue *)lj_mem_realloc(L, tvref(L->stack), + (MSize)(oldsize*sizeof(TValue)), + (MSize)(realsize*sizeof(TValue))); + setmref(L->stack, st); + delta = (char *)st - (char *)oldst; + setmref(L->maxstack, st + n); + while (oldsize < realsize) /* Clear new slots. 
*/ + setnilV(st + oldsize++); + L->stacksize = realsize; + if ((size_t)(mref(G(L)->jit_base, char) - (char *)oldst) < oldsize) + setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta); + L->base = (TValue *)((char *)L->base + delta); + L->top = (TValue *)((char *)L->top + delta); + for (up = gcref(L->openupval); up != NULL; up = gcnext(up)) + setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta)); +} + +/* Relimit stack after error, in case the limit was overdrawn. */ +void lj_state_relimitstack(lua_State *L) +{ + if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1) + resizestack(L, LJ_STACK_MAX); +} + +/* Try to shrink the stack (called from GC). */ +void lj_state_shrinkstack(lua_State *L, MSize used) +{ + if (L->stacksize > LJ_STACK_MAXEX) + return; /* Avoid stack shrinking while handling stack overflow. */ + if (4*used < L->stacksize && + 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize && + /* Don't shrink stack of live trace. */ + (tvref(G(L)->jit_base) == NULL || obj2gco(L) != gcref(G(L)->cur_L))) + resizestack(L, L->stacksize >> 1); +} + +/* Try to grow stack. */ +void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need) +{ + MSize n; + if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */ + lj_err_throw(L, LUA_ERRERR); + n = L->stacksize + need; + if (n > LJ_STACK_MAX) { + n += 2*LUA_MINSTACK; + } else if (n < 2*L->stacksize) { + n = 2*L->stacksize; + if (n >= LJ_STACK_MAX) + n = LJ_STACK_MAX; + } + resizestack(L, n); + if (L->stacksize > LJ_STACK_MAXEX) + lj_err_msg(L, LJ_ERR_STKOV); +} + +void LJ_FASTCALL lj_state_growstack1(lua_State *L) +{ + lj_state_growstack(L, 1); +} + +/* Allocate basic stack for new state. */ +static void stack_init(lua_State *L1, lua_State *L) +{ + TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue); + setmref(L1->stack, st); + L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA; + stend = st + L1->stacksize; + setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1); + setthreadV(L1, st++, L1); /* Needed for curr_funcisL() on empty stack. */ + if (LJ_FR2) setnilV(st++); + L1->base = L1->top = st; + while (st < stend) /* Clear new slots. */ + setnilV(st++); +} + +/* -- State handling ------------------------------------------------------ */ + +/* Open parts that may cause memory-allocation errors. */ +static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud) +{ + global_State *g = G(L); + UNUSED(dummy); + UNUSED(ud); + stack_init(L, L); + /* NOBARRIER: State initialization, all objects are white. */ + setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL))); + settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY)); + lj_str_resize(L, LJ_MIN_STRTAB-1); + lj_meta_init(L); + lj_lex_init(L); + fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. 
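/*
** The sizing rule in lj_state_growstack() above at least doubles the stack,
** clamps the doubled size to LJ_STACK_MAX, and only overdraws the limit (by
** 2*LUA_MINSTACK) so there is still room to raise and handle LJ_ERR_STKOV.
** A standalone restatement of just the computation, with illustrative
** stand-in constants (the real values come from luaconf.h):
*/
#include <stdint.h>

#define STACK_MAX 65500u   /* Stand-in for LJ_STACK_MAX (LUAI_MAXSTACK). */
#define STACK_MIN 20u      /* Stand-in for LUA_MINSTACK. */

static uint32_t stack_newsize(uint32_t cur, uint32_t need)
{
  uint32_t n = cur + need;
  if (n > STACK_MAX) {
    n += 2*STACK_MIN;      /* Overdraw, so the overflow error can be handled. */
  } else if (n < 2*cur) {
    n = 2*cur;             /* At least double the stack. */
    if (n >= STACK_MAX)
      n = STACK_MAX;
  }
  return n;
}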
*/ + g->gc.threshold = 4*g->gc.total; + lj_trace_initstate(g); + return NULL; +} + +static void close_state(lua_State *L) +{ + global_State *g = G(L); + lj_func_closeuv(L, tvref(L->stack)); + lj_gc_freeall(g); + lua_assert(gcref(g->gc.root) == obj2gco(L)); + lua_assert(g->strnum == 0); + lj_trace_freestate(g); +#if LJ_HASFFI + lj_ctype_freestate(g); +#endif + lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); + lj_buf_free(g, &g->tmpbuf); + lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); + lua_assert(g->gc.total == sizeof(GG_State)); +#ifndef LUAJIT_USE_SYSMALLOC + if (g->allocf == lj_alloc_f) + lj_alloc_destroy(g->allocd); + else +#endif + g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0); +} + +#if LJ_64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC)) +lua_State *lj_state_newstate(lua_Alloc f, void *ud) +#else +LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) +#endif +{ + GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State)); + lua_State *L = &GG->L; + global_State *g = &GG->g; + if (GG == NULL || !checkptrGC(GG)) return NULL; + memset(GG, 0, sizeof(GG_State)); + L->gct = ~LJ_TTHREAD; + L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */ + L->dummy_ffid = FF_C; + setmref(L->glref, g); + g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED; + g->strempty.marked = LJ_GC_WHITE0; + g->strempty.gct = ~LJ_TSTR; + g->allocf = f; + g->allocd = ud; + setgcref(g->mainthref, obj2gco(L)); + setgcref(g->uvhead.prev, obj2gco(&g->uvhead)); + setgcref(g->uvhead.next, obj2gco(&g->uvhead)); + g->strmask = ~(MSize)0; + setnilV(registry(L)); + setnilV(&g->nilnode.val); + setnilV(&g->nilnode.key); +#if !LJ_GC64 + setmref(g->nilnode.freetop, &g->nilnode); +#endif + lj_buf_init(NULL, &g->tmpbuf); + g->gc.state = GCSpause; + setgcref(g->gc.root, obj2gco(L)); + setmref(g->gc.sweep, &g->gc.root); + g->gc.total = sizeof(GG_State); + g->gc.pause = LUAI_GCPAUSE; + g->gc.stepmul = LUAI_GCMUL; + lj_dispatch_init((GG_State *)L); + L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */ + if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) { + /* Memory allocation error: free partial state. */ + close_state(L); + return NULL; + } + L->status = 0; + return L; +} + +static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud) +{ + UNUSED(dummy); + UNUSED(ud); + lj_gc_finalize_cdata(L); + lj_gc_finalize_udata(L); + /* Frame pop omitted. */ + return NULL; +} + +LUA_API void lua_close(lua_State *L) +{ + global_State *g = G(L); + int i; + L = mainthread(g); /* Only the main thread can be closed. */ +#if LJ_HASPROFILE + luaJIT_profile_stop(L); +#endif + setgcrefnull(g->cur_L); + lj_func_closeuv(L, tvref(L->stack)); + lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */ +#if LJ_HASJIT + G2J(g)->flags &= ~JIT_F_ON; + G2J(g)->state = LJ_TRACE_IDLE; + lj_dispatch_update(g); +#endif + for (i = 0;;) { + hook_enter(g); + L->status = 0; + L->base = L->top = tvref(L->stack) + 1 + LJ_FR2; + L->cframe = NULL; + if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) { + if (++i >= 10) break; + lj_gc_separateudata(g, 1); /* Separate udata again. */ + if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */ + break; + } + } + close_state(L); +} + +lua_State *lj_state_new(lua_State *L) +{ + lua_State *L1 = lj_mem_newobj(L, lua_State); + L1->gct = ~LJ_TTHREAD; + L1->dummy_ffid = FF_C; + L1->status = 0; + L1->stacksize = 0; + setmref(L1->stack, NULL); + L1->cframe = NULL; + /* NOBARRIER: The lua_State is new (marked white). 
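/*
** For embedders, the allocator-driven constructor above is what sits behind
** the public lua_newstate()/lua_close() pair. A minimal host using the usual
** realloc-based lua_Alloc callback (a sketch; error handling trimmed). Note
** that 64-bit builds take the lj_state_newstate() branch above and do not
** accept a custom allocator through lua_newstate(), so hosts typically call
** luaL_newstate() there instead.
*/
#include <stdlib.h>
#include "lua.h"

static void *l_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
{
  (void)ud; (void)osize;
  if (nsize == 0) { free(ptr); return NULL; }
  return realloc(ptr, nsize);
}

int main(void)
{
  lua_State *L = lua_newstate(l_alloc, NULL);
  if (L == NULL) return 1;        /* Allocation failed (or 64-bit build). */
  /* ... open libraries, load and run chunks ... */
  lua_close(L);
  return 0;
}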
*/ + setgcrefnull(L1->openupval); + setmrefr(L1->glref, L->glref); + setgcrefr(L1->env, L->env); + stack_init(L1, L); /* init stack */ + lua_assert(iswhite(obj2gco(L1))); + return L1; +} + +void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L) +{ + lua_assert(L != mainthread(g)); + if (obj2gco(L) == gcref(g->cur_L)) + setgcrefnull(g->cur_L); + lj_func_closeuv(L, tvref(L->stack)); + lua_assert(gcref(L->openupval) == NULL); + lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); + lj_mem_freet(g, L); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_state.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_state.h new file mode 100644 index 00000000000..687889a2613 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_state.h @@ -0,0 +1,35 @@ +/* +** State and stack handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_STATE_H +#define _LJ_STATE_H + +#include "lj_obj.h" + +#define incr_top(L) \ + (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0)) + +#define savestack(L, p) ((char *)(p) - mref(L->stack, char)) +#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n))) + +LJ_FUNC void lj_state_relimitstack(lua_State *L); +LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used); +LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need); +LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L); + +static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need) +{ + if ((mref(L->maxstack, char) - (char *)L->top) <= + (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue)) + lj_state_growstack(L, need); +} + +LJ_FUNC lua_State *lj_state_new(lua_State *L); +LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L); +#if LJ_64 +LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_str.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_str.c new file mode 100644 index 00000000000..3bc8079f37c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_str.c @@ -0,0 +1,224 @@ +/* +** String handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_str_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_char.h" + +/* -- String helpers ------------------------------------------------------ */ + +/* Ordered compare of strings. Assumes string data is 4-byte aligned. */ +int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b) +{ + MSize i, n = a->len > b->len ? b->len : a->len; + for (i = 0; i < n; i += 4) { + /* Note: innocuous access up to end of string + 3. */ + uint32_t va = *(const uint32_t *)(strdata(a)+i); + uint32_t vb = *(const uint32_t *)(strdata(b)+i); + if (va != vb) { +#if LJ_LE + va = lj_bswap(va); vb = lj_bswap(vb); +#endif + i -= n; + if ((int32_t)i >= -3) { + va >>= 32+(i<<3); vb >>= 32+(i<<3); + if (va == vb) break; + } + return va < vb ? -1 : 1; + } + } + return (int32_t)(a->len - b->len); +} + +/* Fast string data comparison. Caveat: unaligned access to 1st string! */ +static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len) +{ + MSize i = 0; + lua_assert(len > 0); + lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4); + do { /* Note: innocuous access up to end of string + 3. */ + uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i); + if (v) { + i -= len; +#if LJ_LE + return (int32_t)i >= -3 ? 
(v << (32+(i<<3))) : 1; +#else + return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1; +#endif + } + i += 4; + } while (i < len); + return 0; +} + +/* Find fixed string p inside string s. */ +const char *lj_str_find(const char *s, const char *p, MSize slen, MSize plen) +{ + if (plen <= slen) { + if (plen == 0) { + return s; + } else { + int c = *(const uint8_t *)p++; + plen--; slen -= plen; + while (slen) { + const char *q = (const char *)memchr(s, c, slen); + if (!q) break; + if (memcmp(q+1, p, plen) == 0) return q; + q++; slen -= (MSize)(q-s); s = q; + } + } + } + return NULL; +} + +/* Check whether a string has a pattern matching character. */ +int lj_str_haspattern(GCstr *s) +{ + const char *p = strdata(s), *q = p + s->len; + while (p < q) { + int c = *(const uint8_t *)p++; + if (lj_char_ispunct(c) && strchr("^$*+?.([%-", c)) + return 1; /* Found a pattern matching char. */ + } + return 0; /* No pattern matching chars found. */ +} + +/* -- String interning ---------------------------------------------------- */ + +/* Resize the string hash table (grow and shrink). */ +void lj_str_resize(lua_State *L, MSize newmask) +{ + global_State *g = G(L); + GCRef *newhash; + MSize i; + if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1) + return; /* No resizing during GC traversal or if already too big. */ + newhash = lj_mem_newvec(L, newmask+1, GCRef); + memset(newhash, 0, (newmask+1)*sizeof(GCRef)); + for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */ + GCobj *p = gcref(g->strhash[i]); + while (p) { /* Follow each hash chain and reinsert all strings. */ + MSize h = gco2str(p)->hash & newmask; + GCobj *next = gcnext(p); + /* NOBARRIER: The string table is a GC root. */ + setgcrefr(p->gch.nextgc, newhash[h]); + setgcref(newhash[h], p); + p = next; + } + } + lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); + g->strmask = newmask; + g->strhash = newhash; +} + +/* +** Lua will use at most ~(2^LUAI_HASHLIMIT) bytes from a string to +** compute its hash +*/ +#if !defined(LUAI_HASHLIMIT) +#define LUAI_HASHLIMIT 5 +#endif + +#define cast(t, exp) ((t)(exp)) +int luajittex_choose_hash_function = 0 ; +/* Intern a string and return string object. */ +GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx) +{ + global_State *g; + GCstr *s; + GCobj *o; + MSize len = (MSize)lenx; + MSize a, b, h = len; + size_t step ; + size_t l1 ; + if (lenx >= LJ_MAX_STR) + lj_err_msg(L, LJ_ERR_STROV); + g = G(L); + + if (len==0) + return &g->strempty; + if (luajittex_choose_hash_function==0) { + /* Lua 5.1.5 hash function */ + /* for 5.2 max methods we also need to patch the vm eq */ + step = (len>>LUAI_HASHLIMIT)+1; /* if string is too long, don't hash all its chars */ + for (l1=len; l1>=step; l1-=step) /* compute hash */ + h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1])); + } else { + /* LuaJIT 2.0.2 hash function */ + /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */ + if (len >= 4) { /* Caveat: unaligned access! */ + a = lj_getu32(str); + h ^= lj_getu32(str+len-4); + b = lj_getu32(str+(len>>1)-2); + h ^= b; h -= lj_rol(b, 14); + b += lj_getu32(str+(len>>2)-1); + } else if (len > 0) { + a = *(const uint8_t *)str; + h ^= *(const uint8_t *)(str+len-1); + b = *(const uint8_t *)(str+(len>>1)); + h ^= b; h -= lj_rol(b, 14); + } else { + /* Already done, kept for reference */ + return &g->strempty; + } + a ^= h; a -= lj_rol(h, 11); + b ^= a; b -= lj_rol(a, 25); + h ^= b; h -= lj_rol(b, 16); + } + + + /* Check if the string has already been interned. 
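/*
** The first branch above keeps the Lua 5.1.5 string hash, which LuaJITTeX
** selects with luajittex_choose_hash_function == 0: long strings are only
** sampled every 'step' bytes rather than hashed in full. A standalone
** restatement of that loop (illustrative, constants inlined):
*/
#include <stddef.h>
#include <stdint.h>

#define HASHLIMIT 5   /* Same role as LUAI_HASHLIMIT above. */

static uint32_t lua51_hash(const char *str, size_t len)
{
  uint32_t h = (uint32_t)len;                   /* Seed with the length. */
  size_t step = (len >> HASHLIMIT) + 1;         /* Skip bytes if too long. */
  size_t l1;
  for (l1 = len; l1 >= step; l1 -= step)
    h ^= (h << 5) + (h >> 2) + (uint8_t)str[l1 - 1];
  return h;
}

/* Sampling keeps hashing of huge strings cheap at the cost of more collisions,
** which is why the LuaJIT 2.0.2 hash remains available as the alternative. */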
*/ + o = gcref(g->strhash[h & g->strmask]); + if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) { + while (o != NULL) { + GCstr *sx = gco2str(o); + if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) { + /* Resurrect if dead. Can only happen with fixstring() (keywords). */ + if (isdead(g, o)) flipwhite(o); + return sx; /* Return existing string. */ + } + o = gcnext(o); + } + } else { /* Slow path: end of string is too close to a page boundary. */ + while (o != NULL) { + GCstr *sx = gco2str(o); + if (sx->len == len && memcmp(str, strdata(sx), len) == 0) { + /* Resurrect if dead. Can only happen with fixstring() (keywords). */ + if (isdead(g, o)) flipwhite(o); + return sx; /* Return existing string. */ + } + o = gcnext(o); + } + } + /* Nope, create a new string. */ + s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr); + newwhite(g, s); + s->gct = ~LJ_TSTR; + s->len = len; + s->hash = h; + s->reserved = 0; + memcpy(strdatawr(s), str, len); + strdatawr(s)[len] = '\0'; /* Zero-terminate string. */ + /* Add it to string hash table. */ + h &= g->strmask; + s->nextgc = g->strhash[h]; + /* NOBARRIER: The string table is a GC root. */ + setgcref(g->strhash[h], obj2gco(s)); + if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */ + lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */ + return s; /* Return newly interned string. */ +} + +void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s) +{ + g->strnum--; + lj_mem_free(g, s, sizestring(s)); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_str.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_str.h new file mode 100644 index 00000000000..d8465de663b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_str.h @@ -0,0 +1,27 @@ +/* +** String handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_STR_H +#define _LJ_STR_H + +#include + +#include "lj_obj.h" + +/* String helpers. */ +LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b); +LJ_FUNC const char *lj_str_find(const char *s, const char *f, + MSize slen, MSize flen); +LJ_FUNC int lj_str_haspattern(GCstr *s); + +/* String interning. */ +LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask); +LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len); +LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s); + +#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s))) +#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1)) + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.c new file mode 100644 index 00000000000..d54e796ae2a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.c @@ -0,0 +1,554 @@ +/* +** String formatting. +** Copyright (C) 2005-2015 Mike Pall. 
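/*
** lj_str_newlit() above only accepts string literals: the "" s concatenation
** rejects pointer arguments at compile time and sizeof(s)-1 yields the length
** without a runtime strlen(). The same technique in standalone form
** (NEWLIT and lit_len are illustrative names):
*/
#include <stdio.h>

static size_t lit_len(const char *s, size_t len) { (void)s; return len; }
#define NEWLIT(s) lit_len("" s, sizeof(s)-1)

int main(void)
{
  printf("%u\n", (unsigned)NEWLIT("false"));  /* Prints 5; length is a constant. */
  return 0;
}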
See Copyright Notice in luajit.h +*/ + +#include + +#define lj_strfmt_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_buf.h" +#include "lj_str.h" +#include "lj_state.h" +#include "lj_char.h" +#include "lj_strfmt.h" + +/* -- Format parser ------------------------------------------------------- */ + +static const uint8_t strfmt_map[('x'-'A')+1] = { + STRFMT_A,0,0,0,STRFMT_E,0,STRFMT_G,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,STRFMT_X,0,0, + 0,0,0,0,0,0, + STRFMT_A,0,STRFMT_C,STRFMT_D,STRFMT_E,STRFMT_F,STRFMT_G,0,STRFMT_I,0,0,0,0, + 0,STRFMT_O,STRFMT_P,STRFMT_Q,0,STRFMT_S,0,STRFMT_U,0,0,STRFMT_X +}; + +SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs) +{ + const uint8_t *p = fs->p, *e = fs->e; + fs->str = (const char *)p; + for (; p < e; p++) { + if (*p == '%') { /* Escape char? */ + if (p[1] == '%') { /* '%%'? */ + fs->p = ++p+1; + goto retlit; + } else { + SFormat sf = 0; + uint32_t c; + if (p != (const uint8_t *)fs->str) + break; + for (p++; (uint32_t)*p - ' ' <= (uint32_t)('0' - ' '); p++) { + /* Parse flags. */ + if (*p == '-') sf |= STRFMT_F_LEFT; + else if (*p == '+') sf |= STRFMT_F_PLUS; + else if (*p == '0') sf |= STRFMT_F_ZERO; + else if (*p == ' ') sf |= STRFMT_F_SPACE; + else if (*p == '#') sf |= STRFMT_F_ALT; + else break; + } + if ((uint32_t)*p - '0' < 10) { /* Parse width. */ + uint32_t width = (uint32_t)*p++ - '0'; + if ((uint32_t)*p - '0' < 10) + width = (uint32_t)*p++ - '0' + width*10; + sf |= (width << STRFMT_SH_WIDTH); + } + if (*p == '.') { /* Parse precision. */ + uint32_t prec = 0; + p++; + if ((uint32_t)*p - '0' < 10) { + prec = (uint32_t)*p++ - '0'; + if ((uint32_t)*p - '0' < 10) + prec = (uint32_t)*p++ - '0' + prec*10; + } + sf |= ((prec+1) << STRFMT_SH_PREC); + } + /* Parse conversion. */ + c = (uint32_t)*p - 'A'; + if (LJ_LIKELY(c <= (uint32_t)('x' - 'A'))) { + uint32_t sx = strfmt_map[c]; + if (sx) { + fs->p = p+1; + return (sf | sx | ((c & 0x20) ? 0 : STRFMT_F_UPPER)); + } + } + /* Return error location. */ + if (*p >= 32) p++; + fs->len = (MSize)(p - (const uint8_t *)fs->str); + fs->p = fs->e; + return STRFMT_ERR; + } + } + } + fs->p = p; +retlit: + fs->len = (MSize)(p - (const uint8_t *)fs->str); + return fs->len ? STRFMT_LIT : STRFMT_EOF; +} + +/* -- Raw conversions ----------------------------------------------------- */ + +/* Write number to bufer. */ +char * LJ_FASTCALL lj_strfmt_wnum(char *p, cTValue *o) +{ + if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */ +#if __BIONIC__ + if (tvismzero(o)) { *p++ = '-'; *p++ = '0'; return p; } +#endif + return p + lua_number2str(p, o->n); + } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) { + *p++ = 'n'; *p++ = 'a'; *p++ = 'n'; + } else if ((o->u32.hi & 0x80000000) == 0) { + *p++ = 'i'; *p++ = 'n'; *p++ = 'f'; + } else { + *p++ = '-'; *p++ = 'i'; *p++ = 'n'; *p++ = 'f'; + } + return p; +} + +#define WINT_R(x, sh, sc) \ + { uint32_t d = (x*(((1<>sh; x -= d*sc; *p++ = (char)('0'+d); } + +/* Write integer to buffer. 
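/*
** The parser above leans on an unsigned-subtraction range test in several
** places, e.g. (uint32_t)*p - '0' < 10 for digits and the flag loop's
** (uint32_t)*p - ' ' <= (uint32_t)('0' - ' '). The idiom in isolation
** (in_range is an illustrative helper):
*/
#include <stdint.h>

/* True iff lo <= c <= hi, using one unsigned compare: if c < lo the
** subtraction wraps around to a huge value and the test fails. */
static int in_range(uint32_t c, uint32_t lo, uint32_t hi)
{
  return c - lo <= hi - lo;
}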
*/ +char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k) +{ + uint32_t u = (uint32_t)k; + if (k < 0) { u = (uint32_t)-k; *p++ = '-'; } + if (u < 10000) { + if (u < 10) goto dig1; if (u < 100) goto dig2; if (u < 1000) goto dig3; + } else { + uint32_t v = u / 10000; u -= v * 10000; + if (v < 10000) { + if (v < 10) goto dig5; if (v < 100) goto dig6; if (v < 1000) goto dig7; + } else { + uint32_t w = v / 10000; v -= w * 10000; + if (w >= 10) WINT_R(w, 10, 10) + *p++ = (char)('0'+w); + } + WINT_R(v, 23, 1000) + dig7: WINT_R(v, 12, 100) + dig6: WINT_R(v, 10, 10) + dig5: *p++ = (char)('0'+v); + } + WINT_R(u, 23, 1000) + dig3: WINT_R(u, 12, 100) + dig2: WINT_R(u, 10, 10) + dig1: *p++ = (char)('0'+u); + return p; +} +#undef WINT_R + +/* Write pointer to buffer. */ +char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v) +{ + ptrdiff_t x = (ptrdiff_t)v; + MSize i, n = STRFMT_MAXBUF_PTR; + if (x == 0) { + *p++ = 'N'; *p++ = 'U'; *p++ = 'L'; *p++ = 'L'; + return p; + } +#if LJ_64 + /* Shorten output for 64 bit pointers. */ + n = 2+2*4+((x >> 32) ? 2+2*(lj_fls((uint32_t)(x >> 32))>>3) : 0); +#endif + p[0] = '0'; + p[1] = 'x'; + for (i = n-1; i >= 2; i--, x >>= 4) + p[i] = "0123456789abcdef"[(x & 15)]; + return p+n; +} + +/* Write ULEB128 to buffer. */ +char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v) +{ + for (; v >= 0x80; v >>= 7) + *p++ = (char)((v & 0x7f) | 0x80); + *p++ = (char)v; + return p; +} + +/* Return string or write number to buffer and return pointer to start. */ +const char *lj_strfmt_wstrnum(char *buf, cTValue *o, MSize *lenp) +{ + if (tvisstr(o)) { + *lenp = strV(o)->len; + return strVdata(o); + } else if (tvisint(o)) { + *lenp = (MSize)(lj_strfmt_wint(buf, intV(o)) - buf); + return buf; + } else if (tvisnum(o)) { + *lenp = (MSize)(lj_strfmt_wnum(buf, o) - buf); + return buf; + } else { + return NULL; + } +} + +/* -- Unformatted conversions to buffer ----------------------------------- */ + +/* Add integer to buffer. */ +SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k) +{ + setsbufP(sb, lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT), k)); + return sb; +} + +#if LJ_HASJIT +/* Add number to buffer. */ +SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o) +{ + setsbufP(sb, lj_strfmt_wnum(lj_buf_more(sb, STRFMT_MAXBUF_NUM), o)); + return sb; +} +#endif + +SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v) +{ + setsbufP(sb, lj_strfmt_wptr(lj_buf_more(sb, STRFMT_MAXBUF_PTR), v)); + return sb; +} + +/* Add quoted string to buffer. */ +SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str) +{ + const char *s = strdata(str); + MSize len = str->len; + lj_buf_putb(sb, '"'); + while (len--) { + uint32_t c = (uint32_t)(uint8_t)*s++; + char *p = lj_buf_more(sb, 4); + if (c == '"' || c == '\\' || c == '\n') { + *p++ = '\\'; + } else if (lj_char_iscntrl(c)) { /* This can only be 0-31 or 127. */ + uint32_t d; + *p++ = '\\'; + if (c >= 100 || lj_char_isdigit((uint8_t)*s)) { + *p++ = (char)('0'+(c >= 100)); if (c >= 100) c -= 100; + goto tens; + } else if (c >= 10) { + tens: + d = (c * 205) >> 11; c -= d * 10; *p++ = (char)('0'+d); + } + c += '0'; + } + *p++ = (char)c; + setsbufP(sb, p); + } + lj_buf_putb(sb, '"'); + return sb; +} + +/* -- Formatted conversions to buffer ------------------------------------- */ + +/* Add formatted char to buffer. */ +SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat sf, int32_t c) +{ + MSize width = STRFMT_WIDTH(sf); + char *p = lj_buf_more(sb, width > 1 ? 
width : 1); + if ((sf & STRFMT_F_LEFT)) *p++ = (char)c; + while (width-- > 1) *p++ = ' '; + if (!(sf & STRFMT_F_LEFT)) *p++ = (char)c; + setsbufP(sb, p); + return sb; +} + +/* Add formatted string to buffer. */ +SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat sf, GCstr *str) +{ + MSize len = str->len <= STRFMT_PREC(sf) ? str->len : STRFMT_PREC(sf); + MSize width = STRFMT_WIDTH(sf); + char *p = lj_buf_more(sb, width > len ? width : len); + if ((sf & STRFMT_F_LEFT)) p = lj_buf_wmem(p, strdata(str), len); + while (width-- > len) *p++ = ' '; + if (!(sf & STRFMT_F_LEFT)) p = lj_buf_wmem(p, strdata(str), len); + setsbufP(sb, p); + return sb; +} + +/* Add formatted signed/unsigned integer to buffer. */ +SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k) +{ + char buf[STRFMT_MAXBUF_XINT], *q = buf + sizeof(buf), *p; +#ifdef LUA_USE_ASSERT + char *ps; +#endif + MSize prefix = 0, len, prec, pprec, width, need; + + /* Figure out signed prefixes. */ + if (STRFMT_TYPE(sf) == STRFMT_INT) { + if ((int64_t)k < 0) { + k = (uint64_t)-(int64_t)k; + prefix = 256 + '-'; + } else if ((sf & STRFMT_F_PLUS)) { + prefix = 256 + '+'; + } else if ((sf & STRFMT_F_SPACE)) { + prefix = 256 + ' '; + } + } + + /* Convert number and store to fixed-size buffer in reverse order. */ + prec = STRFMT_PREC(sf); + if ((int32_t)prec >= 0) sf &= ~STRFMT_F_ZERO; + if (k == 0) { /* Special-case zero argument. */ + if (prec != 0 || + (sf & (STRFMT_T_OCT|STRFMT_F_ALT)) == (STRFMT_T_OCT|STRFMT_F_ALT)) + *--q = '0'; + } else if (!(sf & (STRFMT_T_HEX|STRFMT_T_OCT))) { /* Decimal. */ + uint32_t k2; + while ((k >> 32)) { *--q = (char)('0' + k % 10); k /= 10; } + k2 = (uint32_t)k; + do { *--q = (char)('0' + k2 % 10); k2 /= 10; } while (k2); + } else if ((sf & STRFMT_T_HEX)) { /* Hex. */ + const char *hexdig = (sf & STRFMT_F_UPPER) ? "0123456789ABCDEF" : + "0123456789abcdef"; + do { *--q = hexdig[(k & 15)]; k >>= 4; } while (k); + if ((sf & STRFMT_F_ALT)) prefix = 512 + ((sf & STRFMT_F_UPPER) ? 'X' : 'x'); + } else { /* Octal. */ + do { *--q = (char)('0' + (uint32_t)(k & 7)); k >>= 3; } while (k); + if ((sf & STRFMT_F_ALT)) *--q = '0'; + } + + /* Calculate sizes. */ + len = (MSize)(buf + sizeof(buf) - q); + if ((int32_t)len >= (int32_t)prec) prec = len; + width = STRFMT_WIDTH(sf); + pprec = prec + (prefix >> 8); + need = width > pprec ? width : pprec; + p = lj_buf_more(sb, need); +#ifdef LUA_USE_ASSERT + ps = p; +#endif + + /* Format number with leading/trailing whitespace and zeros. */ + if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == 0) + while (width-- > pprec) *p++ = ' '; + if (prefix) { + if ((char)prefix >= 'X') *p++ = '0'; + *p++ = (char)prefix; + } + if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == STRFMT_F_ZERO) + while (width-- > pprec) *p++ = '0'; + while (prec-- > len) *p++ = '0'; + while (q < buf + sizeof(buf)) *p++ = *q++; /* Add number itself. */ + if ((sf & STRFMT_F_LEFT)) + while (width-- > pprec) *p++ = ' '; + + lua_assert(need == (MSize)(p - ps)); + setsbufP(sb, p); + return sb; +} + +/* Add number formatted as signed integer to buffer. */ +SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n) +{ + int64_t k = (int64_t)n; + if (checki32(k) && sf == STRFMT_INT) + return lj_strfmt_putint(sb, (int32_t)k); /* Shortcut for plain %d. */ + else + return lj_strfmt_putfxint(sb, sf, (uint64_t)k); +} + +/* Add number formatted as unsigned integer to buffer. 
*/ +SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n) +{ + int64_t k; + if (n >= 9223372036854775808.0) + k = (int64_t)(n - 18446744073709551616.0); + else + k = (int64_t)n; + return lj_strfmt_putfxint(sb, sf, (uint64_t)k); +} + +/* Max. sprintf buffer size needed. At least #string.format("%.99f", -1e308). */ +#define STRFMT_FMTNUMBUF 512 + +/* Add formatted floating-point number to buffer. */ +SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat sf, lua_Number n) +{ + TValue tv; + tv.n = n; + if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) { + /* Canonicalize output of non-finite values. */ + MSize width = STRFMT_WIDTH(sf), len = 3; + int prefix = 0, ch = (sf & STRFMT_F_UPPER) ? 0x202020 : 0; + char *p; + if (((tv.u32.hi & 0x000fffff) | tv.u32.lo) != 0) { + ch ^= ('n' << 16) | ('a' << 8) | 'n'; + if ((sf & STRFMT_F_SPACE)) prefix = ' '; + } else { + ch ^= ('i' << 16) | ('n' << 8) | 'f'; + if ((tv.u32.hi & 0x80000000)) prefix = '-'; + else if ((sf & STRFMT_F_PLUS)) prefix = '+'; + else if ((sf & STRFMT_F_SPACE)) prefix = ' '; + } + if (prefix) len = 4; + p = lj_buf_more(sb, width > len ? width : len); + if (!(sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' '; + if (prefix) *p++ = prefix; + *p++ = (char)(ch >> 16); *p++ = (char)(ch >> 8); *p++ = (char)ch; + if ((sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' '; + setsbufP(sb, p); + } else { /* Delegate to sprintf() for now. */ + uint8_t width = (uint8_t)STRFMT_WIDTH(sf), prec = (uint8_t)STRFMT_PREC(sf); + char fmt[1+5+2+3+1+1], *p = fmt; + *p++ = '%'; + if ((sf & STRFMT_F_LEFT)) *p++ = '-'; + if ((sf & STRFMT_F_PLUS)) *p++ = '+'; + if ((sf & STRFMT_F_ZERO)) *p++ = '0'; + if ((sf & STRFMT_F_SPACE)) *p++ = ' '; + if ((sf & STRFMT_F_ALT)) *p++ = '#'; + if (width) { + uint8_t x = width / 10, y = width % 10; + if (x) *p++ = '0' + x; + *p++ = '0' + y; + } + if (prec != 255) { + uint8_t x = prec / 10, y = prec % 10; + *p++ = '.'; + if (x) *p++ = '0' + x; + *p++ = '0' + y; + } + *p++ = (0x67666561 >> (STRFMT_FP(sf)<<3)) ^ ((sf & STRFMT_F_UPPER)?0x20:0); + *p = '\0'; + p = lj_buf_more(sb, STRFMT_FMTNUMBUF); + setsbufP(sb, p + sprintf(p, fmt, n)); + } + return sb; +} + +/* -- Conversions to strings ---------------------------------------------- */ + +/* Convert integer to string. */ +GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k) +{ + char buf[STRFMT_MAXBUF_INT]; + MSize len = (MSize)(lj_strfmt_wint(buf, k) - buf); + return lj_str_new(L, buf, len); +} + +/* Convert number to string. */ +GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o) +{ + char buf[STRFMT_MAXBUF_NUM]; + MSize len = (MSize)(lj_strfmt_wnum(buf, o) - buf); + return lj_str_new(L, buf, len); +} + +/* Convert integer or number to string. */ +GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o) +{ + return tvisint(o) ? lj_strfmt_int(L, intV(o)) : lj_strfmt_num(L, o); +} + +#if LJ_HASJIT +/* Convert char value to string. */ +GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c) +{ + char buf[1]; + buf[0] = c; + return lj_str_new(L, buf, 1); +} +#endif + +/* Raw conversion of object to string. 
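/*
** lj_strfmt_putfnum_uint() above cannot cast a double >= 2^63 directly to
** int64_t (that is undefined behaviour), so it subtracts 2^64 first and lets
** the later uint64_t cast wrap the negative value back. A standalone check of
** the trick (relies only on the standard modulo-2^64 unsigned conversion):
*/
#include <stdint.h>
#include <stdio.h>

static uint64_t num_to_u64(double n)
{
  int64_t k;
  if (n >= 9223372036854775808.0)                 /* 2^63 */
    k = (int64_t)(n - 18446744073709551616.0);    /* Subtract 2^64 first. */
  else
    k = (int64_t)n;
  return (uint64_t)k;                             /* Wraps back as intended. */
}

int main(void)
{
  /* Largest double below 2^64; survives the round trip unchanged. */
  printf("%llu\n", (unsigned long long)num_to_u64(18446744073709549568.0));
  return 0;
}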
*/ +GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o) +{ + if (tvisstr(o)) { + return strV(o); + } else if (tvisnumber(o)) { + return lj_strfmt_number(L, o); + } else if (tvisnil(o)) { + return lj_str_newlit(L, "nil"); + } else if (tvisfalse(o)) { + return lj_str_newlit(L, "false"); + } else if (tvistrue(o)) { + return lj_str_newlit(L, "true"); + } else { + char buf[8+2+2+16], *p = buf; + p = lj_buf_wmem(p, lj_typename(o), (MSize)strlen(lj_typename(o))); + *p++ = ':'; *p++ = ' '; + if (tvisfunc(o) && isffunc(funcV(o))) { + p = lj_buf_wmem(p, "builtin#", 8); + p = lj_strfmt_wint(p, funcV(o)->c.ffid); + } else { + p = lj_strfmt_wptr(p, lj_obj_ptr(o)); + } + return lj_str_new(L, buf, (size_t)(p - buf)); + } +} + +/* -- Internal string formatting ------------------------------------------ */ + +/* +** These functions are only used for lua_pushfstring(), lua_pushvfstring() +** and for internal string formatting (e.g. error messages). Caveat: unlike +** string.format(), only a limited subset of formats and flags are supported! +** +** LuaJIT has support for a couple more formats than Lua 5.1/5.2: +** - %d %u %o %x with full formatting, 32 bit integers only. +** - %f and other FP formats are really %.14g. +** - %s %c %p without formatting. +*/ + +/* Push formatted message as a string object to Lua stack. va_list variant. */ +const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, va_list argp) +{ + SBuf *sb = lj_buf_tmp_(L); + FormatState fs; + SFormat sf; + GCstr *str; + lj_strfmt_init(&fs, fmt, (MSize)strlen(fmt)); + while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { + switch (STRFMT_TYPE(sf)) { + case STRFMT_LIT: + lj_buf_putmem(sb, fs.str, fs.len); + break; + case STRFMT_INT: + lj_strfmt_putfxint(sb, sf, va_arg(argp, int32_t)); + break; + case STRFMT_UINT: + lj_strfmt_putfxint(sb, sf, va_arg(argp, uint32_t)); + break; + case STRFMT_NUM: { + TValue tv; + tv.n = va_arg(argp, lua_Number); + setsbufP(sb, lj_strfmt_wnum(lj_buf_more(sb, STRFMT_MAXBUF_NUM), &tv)); + break; + } + case STRFMT_STR: { + const char *s = va_arg(argp, char *); + if (s == NULL) s = "(null)"; + lj_buf_putmem(sb, s, (MSize)strlen(s)); + break; + } + case STRFMT_CHAR: + lj_buf_putb(sb, va_arg(argp, int)); + break; + case STRFMT_PTR: + lj_strfmt_putptr(sb, va_arg(argp, void *)); + break; + case STRFMT_ERR: + default: + lj_buf_putb(sb, '?'); + lua_assert(0); + break; + } + } + str = lj_buf_str(L, sb); + setstrV(L, L->top, str); + incr_top(L); + return strdata(str); +} + +/* Push formatted message as a string object to Lua stack. Vararg variant. */ +const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...) +{ + const char *msg; + va_list argp; + va_start(argp, fmt); + msg = lj_strfmt_pushvf(L, fmt, argp); + va_end(argp); + return msg; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.h new file mode 100644 index 00000000000..dcfaf2e393b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_strfmt.h @@ -0,0 +1,125 @@ +/* +** String formatting. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_STRFMT_H +#define _LJ_STRFMT_H + +#include "lj_obj.h" + +typedef uint32_t SFormat; /* Format indicator. */ + +/* Format parser state. */ +typedef struct FormatState { + const uint8_t *p; /* Current format string pointer. */ + const uint8_t *e; /* End of format string. */ + const char *str; /* Returned literal string. */ + MSize len; /* Size of literal string. */ +} FormatState; + +/* Format types (max. 
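/*
** The restriction described above matters to C API users: lua_pushfstring()
** and lua_pushvfstring() end up in lj_strfmt_pushvf(), so only the listed
** conversions work and %f behaves like %.14g. A typical call from an
** extension module (the message text is hypothetical):
*/
#include "lua.h"

static const char *field_error(lua_State *L, const char *name, int line)
{
  /* Only the %s/%d/%u/%o/%x/%c/%p/%f subset described above may appear here. */
  return lua_pushfstring(L, "field '%s' at line %d has the wrong type",
                         name, line);
}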
16). */ +typedef enum FormatType { + STRFMT_EOF, STRFMT_ERR, STRFMT_LIT, + STRFMT_INT, STRFMT_UINT, STRFMT_NUM, STRFMT_STR, STRFMT_CHAR, STRFMT_PTR +} FormatType; + +/* Format subtypes (bits are reused). */ +#define STRFMT_T_HEX 0x0010 /* STRFMT_UINT */ +#define STRFMT_T_OCT 0x0020 /* STRFMT_UINT */ +#define STRFMT_T_FP_A 0x0000 /* STRFMT_NUM */ +#define STRFMT_T_FP_E 0x0010 /* STRFMT_NUM */ +#define STRFMT_T_FP_F 0x0020 /* STRFMT_NUM */ +#define STRFMT_T_FP_G 0x0030 /* STRFMT_NUM */ +#define STRFMT_T_QUOTED 0x0010 /* STRFMT_STR */ + +/* Format flags. */ +#define STRFMT_F_LEFT 0x0100 +#define STRFMT_F_PLUS 0x0200 +#define STRFMT_F_ZERO 0x0400 +#define STRFMT_F_SPACE 0x0800 +#define STRFMT_F_ALT 0x1000 +#define STRFMT_F_UPPER 0x2000 + +/* Format indicator fields. */ +#define STRFMT_SH_WIDTH 16 +#define STRFMT_SH_PREC 24 + +#define STRFMT_TYPE(sf) ((FormatType)((sf) & 15)) +#define STRFMT_WIDTH(sf) (((sf) >> STRFMT_SH_WIDTH) & 255u) +#define STRFMT_PREC(sf) ((((sf) >> STRFMT_SH_PREC) & 255u) - 1u) +#define STRFMT_FP(sf) (((sf) >> 4) & 3) + +/* Formats for conversion characters. */ +#define STRFMT_A (STRFMT_NUM|STRFMT_T_FP_A) +#define STRFMT_C (STRFMT_CHAR) +#define STRFMT_D (STRFMT_INT) +#define STRFMT_E (STRFMT_NUM|STRFMT_T_FP_E) +#define STRFMT_F (STRFMT_NUM|STRFMT_T_FP_F) +#define STRFMT_G (STRFMT_NUM|STRFMT_T_FP_G) +#define STRFMT_I STRFMT_D +#define STRFMT_O (STRFMT_UINT|STRFMT_T_OCT) +#define STRFMT_P (STRFMT_PTR) +#define STRFMT_Q (STRFMT_STR|STRFMT_T_QUOTED) +#define STRFMT_S (STRFMT_STR) +#define STRFMT_U (STRFMT_UINT) +#define STRFMT_X (STRFMT_UINT|STRFMT_T_HEX) + +/* Maximum buffer sizes for conversions. */ +#define STRFMT_MAXBUF_XINT (1+22) /* '0' prefix + uint64_t in octal. */ +#define STRFMT_MAXBUF_INT (1+10) /* Sign + int32_t in decimal. */ +#define STRFMT_MAXBUF_NUM LUAI_MAXNUMBER2STR +#define STRFMT_MAXBUF_PTR (2+2*sizeof(ptrdiff_t)) /* "0x" + hex ptr. */ + +/* Format parser. */ +LJ_FUNC SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs); + +static LJ_AINLINE void lj_strfmt_init(FormatState *fs, const char *p, MSize len) +{ + fs->p = (const uint8_t *)p; + fs->e = (const uint8_t *)p + len; + lua_assert(*fs->e == 0); /* Must be NUL-terminated (may have NULs inside). */ +} + +/* Raw conversions. */ +LJ_FUNC char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k); +LJ_FUNC char * LJ_FASTCALL lj_strfmt_wnum(char *p, cTValue *o); +LJ_FUNC char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v); +LJ_FUNC char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v); +LJ_FUNC const char *lj_strfmt_wstrnum(char *buf, cTValue *o, MSize *lenp); + +/* Unformatted conversions to buffer. */ +LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k); +#if LJ_HASJIT +LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o); +#endif +LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v); +LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str); + +/* Formatted conversions to buffer. */ +LJ_FUNC SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k); +LJ_FUNC SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n); +LJ_FUNC SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n); +LJ_FUNC SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat, lua_Number n); +LJ_FUNC SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat, int32_t c); +LJ_FUNC SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat, GCstr *str); + +/* Conversions to strings. 
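/*
** Putting the fields above together: lj_strfmt_parse() stores the width at
** bit 16 and the precision plus one at bit 24, so a spec like "%-8.3f"
** becomes the word constructed below. A small sketch (assumes it is compiled
** inside the LuaJIT source tree so lj_strfmt.h and its dependencies resolve):
*/
#include <assert.h>
#include "lj_strfmt.h"

int main(void)
{
  SFormat sf = STRFMT_F | STRFMT_F_LEFT
             | (8u << STRFMT_SH_WIDTH) | ((3u+1u) << STRFMT_SH_PREC);
  assert(STRFMT_TYPE(sf) == STRFMT_NUM);   /* Low 4 bits select the type. */
  assert(STRFMT_WIDTH(sf) == 8);
  assert(STRFMT_PREC(sf) == 3);            /* Stored as prec+1 in bits 24..31. */
  assert(STRFMT_FP(sf) == 2);              /* 0..3 index 'a','e','f','g'. */
  return 0;
}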
*/ +LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k); +LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o); +LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o); +#if LJ_HASJIT +LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c); +#endif +LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o); + +/* Internal string formatting. */ +LJ_FUNC const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, + va_list argp); +LJ_FUNC const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...) +#ifdef __GNUC__ + __attribute__ ((format (printf, 2, 3))) +#endif + ; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.c new file mode 100644 index 00000000000..d3c5ba9124f --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.c @@ -0,0 +1,547 @@ +/* +** String scanning. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include + +#define lj_strscan_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_char.h" +#include "lj_strscan.h" + +/* -- Scanning numbers ---------------------------------------------------- */ + +/* +** Rationale for the builtin string to number conversion library: +** +** It removes a dependency on libc's strtod(), which is a true portability +** nightmare. Mainly due to the plethora of supported OS and toolchain +** combinations. Sadly, the various implementations +** a) are often buggy, incomplete (no hex floats) and/or imprecise, +** b) sometimes crash or hang on certain inputs, +** c) return non-standard NaNs that need to be filtered out, and +** d) fail if the locale-specific decimal separator is not a dot, +** which can only be fixed with atrocious workarounds. +** +** Also, most of the strtod() implementations are hopelessly bloated, +** which is not just an I-cache hog, but a problem for static linkage +** on embedded systems, too. +** +** OTOH the builtin conversion function is very compact. Even though it +** does a lot more, like parsing long longs, octal or imaginary numbers +** and returning the result in different formats: +** a) It needs less than 3 KB (!) of machine code (on x64 with -Os), +** b) it doesn't perform any dynamic allocation and, +** c) it needs only around 600 bytes of stack space. +** +** The builtin function is faster than strtod() for typical inputs, e.g. +** "123", "1.5" or "1e6". Arguably, it's slower for very large exponents, +** which are not very common (this could be fixed, if needed). +** +** And most importantly, the builtin function is equally precise on all +** platforms. It correctly converts and rounds any input to a double. +** If this is not the case, please send a bug report -- but PLEASE verify +** that the implementation you're comparing to is not the culprit! +** +** The implementation quickly pre-scans the entire string first and +** handles simple integers on-the-fly. Otherwise, it dispatches to the +** base-specific parser. Hex and octal is straightforward. +** +** Decimal to binary conversion uses a fixed-length circular buffer in +** base 100. Some simple cases are handled directly. For other cases, the +** number in the buffer is up-scaled or down-scaled until the integer part +** is in the proper range. Then the integer part is rounded and converted +** to a double which is finally rescaled to the result. Denormals need +** special treatment to prevent incorrect 'double rounding'. 
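To make the behaviour described above concrete, here is a minimal sketch of driving the scanner directly (illustrative only, not part of the upstream file; it assumes lj_obj.h and lj_strscan.h are in scope and that inputs are NUL-terminated, as the parser requires):

static void strscan_demo(void)
{
  TValue o;
  /* Simple 32 bit decimal integer: handled on the fly by the pre-scan. */
  StrScanFmt fmt = lj_strscan_scan((const uint8_t *)"123", &o, 0);
  /* fmt == STRSCAN_INT, o.i == 123 */

  /* A decimal point or exponent dispatches to the base-specific parser. */
  fmt = lj_strscan_scan((const uint8_t *)"1.5e2", &o, 0);
  /* fmt == STRSCAN_NUM, o.n == 150.0 */

  /* STRSCAN_OPT_TONUM forces a double result even for integral input. */
  fmt = lj_strscan_scan((const uint8_t *)"42", &o, STRSCAN_OPT_TONUM);
  /* fmt == STRSCAN_NUM, o.n == 42.0 */
  (void)fmt;
}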
+*/ + +/* Definitions for circular decimal digit buffer (base 100 = 2 digits/byte). */ +#define STRSCAN_DIG 1024 +#define STRSCAN_MAXDIG 800 /* 772 + extra are sufficient. */ +#define STRSCAN_DDIG (STRSCAN_DIG/2) +#define STRSCAN_DMASK (STRSCAN_DDIG-1) + +/* Helpers for circular buffer. */ +#define DNEXT(a) (((a)+1) & STRSCAN_DMASK) +#define DPREV(a) (((a)-1) & STRSCAN_DMASK) +#define DLEN(lo, hi) ((int32_t)(((lo)-(hi)) & STRSCAN_DMASK)) + +#define casecmp(c, k) (((c) | 0x20) == k) + +/* Final conversion to double. */ +static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg) +{ + double n; + + /* Avoid double rounding for denormals. */ + if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) { + /* NYI: all of this generates way too much code on 32 bit CPUs. */ +#if defined(__GNUC__) && LJ_64 + int32_t b = (int32_t)(__builtin_clzll(x)^63); +#else + int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) : + (int32_t)lj_fls((uint32_t)x); +#endif + if ((int32_t)b + ex2 <= -1023 && (int32_t)b + ex2 >= -1075) { + uint64_t rb = (uint64_t)1 << (-1075-ex2); + if ((x & rb) && ((x & (rb+rb+rb-1)))) x += rb+rb; + x = (x & ~(rb+rb-1)); + } + } + + /* Convert to double using a signed int64_t conversion, then rescale. */ + lua_assert((int64_t)x >= 0); + n = (double)(int64_t)x; + if (neg) n = -n; + if (ex2) n = ldexp(n, ex2); + o->n = n; +} + +/* Parse hexadecimal number. */ +static StrScanFmt strscan_hex(const uint8_t *p, TValue *o, + StrScanFmt fmt, uint32_t opt, + int32_t ex2, int32_t neg, uint32_t dig) +{ + uint64_t x = 0; + uint32_t i; + + /* Scan hex digits. */ + for (i = dig > 16 ? 16 : dig ; i; i--, p++) { + uint32_t d = (*p != '.' ? *p : *++p); if (d > '9') d += 9; + x = (x << 4) + (d & 15); + } + + /* Summarize rounding-effect of excess digits. */ + for (i = 16; i < dig; i++, p++) + x |= ((*p != '.' ? *p : *++p) != '0'), ex2 += 4; + + /* Format-specific handling. */ + switch (fmt) { + case STRSCAN_INT: + if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) { + o->i = neg ? -(int32_t)x : (int32_t)x; + return STRSCAN_INT; /* Fast path for 32 bit integers. */ + } + if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; } + /* fallthrough */ + case STRSCAN_U32: + if (dig > 8) return STRSCAN_ERROR; + o->i = neg ? -(int32_t)x : (int32_t)x; + return STRSCAN_U32; + case STRSCAN_I64: + case STRSCAN_U64: + if (dig > 16) return STRSCAN_ERROR; + o->u64 = neg ? (uint64_t)-(int64_t)x : x; + return fmt; + default: + break; + } + + /* Reduce range, then convert to double. */ + if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; } + strscan_double(x, o, ex2, neg); + return fmt; +} + +/* Parse octal number. */ +static StrScanFmt strscan_oct(const uint8_t *p, TValue *o, + StrScanFmt fmt, int32_t neg, uint32_t dig) +{ + uint64_t x = 0; + + /* Scan octal digits. */ + if (dig > 22 || (dig == 22 && *p > '1')) return STRSCAN_ERROR; + while (dig-- > 0) { + if (!(*p >= '0' && *p <= '7')) return STRSCAN_ERROR; + x = (x << 3) + (*p++ & 7); + } + + /* Format-specific handling. */ + switch (fmt) { + case STRSCAN_INT: + if (x >= 0x80000000u+neg) fmt = STRSCAN_U32; + /* fallthrough */ + case STRSCAN_U32: + if ((x >> 32)) return STRSCAN_ERROR; + o->i = neg ? -(int32_t)x : (int32_t)x; + break; + default: + case STRSCAN_I64: + case STRSCAN_U64: + o->u64 = neg ? (uint64_t)-(int64_t)x : x; + break; + } + return fmt; +} + +/* Parse decimal number. 
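Two ASCII tricks used above deserve a note: casecmp() folds a letter to lower case by OR-ing in 0x20, and strscan_hex() maps '0'-'9'/'a'-'f'/'A'-'F' to 0-15 with an add-and-mask instead of a lookup table. A small self-contained sketch (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint32_t hexdigit(uint32_t d)  /* Same trick as in strscan_hex(). */
{
  if (d > '9') d += 9;  /* 'a' (0x61) -> 0x6a, 'A' (0x41) -> 0x4a. */
  return d & 15;        /* The low nibble is the digit value. */
}

static void ascii_trick_demo(void)
{
  assert((('E') | 0x20) == 'e');  /* casecmp('E', 'e') */
  assert(hexdigit('7') == 7);
  assert(hexdigit('a') == 10 && hexdigit('F') == 15);
}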
*/ +static StrScanFmt strscan_dec(const uint8_t *p, TValue *o, + StrScanFmt fmt, uint32_t opt, + int32_t ex10, int32_t neg, uint32_t dig) +{ + uint8_t xi[STRSCAN_DDIG], *xip = xi; + + if (dig) { + uint32_t i = dig; + if (i > STRSCAN_MAXDIG) { + ex10 += (int32_t)(i - STRSCAN_MAXDIG); + i = STRSCAN_MAXDIG; + } + /* Scan unaligned leading digit. */ + if (((ex10^i) & 1)) + *xip++ = ((*p != '.' ? *p : *++p) & 15), i--, p++; + /* Scan aligned double-digits. */ + for ( ; i > 1; i -= 2) { + uint32_t d = 10 * ((*p != '.' ? *p : *++p) & 15); p++; + *xip++ = d + ((*p != '.' ? *p : *++p) & 15); p++; + } + /* Scan and realign trailing digit. */ + if (i) *xip++ = 10 * ((*p != '.' ? *p : *++p) & 15), ex10--, dig++, p++; + + /* Summarize rounding-effect of excess digits. */ + if (dig > STRSCAN_MAXDIG) { + do { + if ((*p != '.' ? *p : *++p) != '0') { xip[-1] |= 1; break; } + p++; + } while (--dig > STRSCAN_MAXDIG); + dig = STRSCAN_MAXDIG; + } else { /* Simplify exponent. */ + while (ex10 > 0 && dig <= 18) *xip++ = 0, ex10 -= 2, dig += 2; + } + } else { /* Only got zeros. */ + ex10 = 0; + xi[0] = 0; + } + + /* Fast path for numbers in integer format (but handles e.g. 1e6, too). */ + if (dig <= 20 && ex10 == 0) { + uint8_t *xis; + uint64_t x = xi[0]; + double n; + for (xis = xi+1; xis < xip; xis++) x = x * 100 + *xis; + if (!(dig == 20 && (xi[0] > 18 || (int64_t)x >= 0))) { /* No overflow? */ + /* Format-specific handling. */ + switch (fmt) { + case STRSCAN_INT: + if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) { + o->i = neg ? -(int32_t)x : (int32_t)x; + return STRSCAN_INT; /* Fast path for 32 bit integers. */ + } + if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; goto plainnumber; } + /* fallthrough */ + case STRSCAN_U32: + if ((x >> 32) != 0) return STRSCAN_ERROR; + o->i = neg ? -(int32_t)x : (int32_t)x; + return STRSCAN_U32; + case STRSCAN_I64: + case STRSCAN_U64: + o->u64 = neg ? (uint64_t)-(int64_t)x : x; + return fmt; + default: + plainnumber: /* Fast path for plain numbers < 2^63. */ + if ((int64_t)x < 0) break; + n = (double)(int64_t)x; + if (neg) n = -n; + o->n = n; + return fmt; + } + } + } + + /* Slow non-integer path. */ + if (fmt == STRSCAN_INT) { + if ((opt & STRSCAN_OPT_C)) return STRSCAN_ERROR; + fmt = STRSCAN_NUM; + } else if (fmt > STRSCAN_INT) { + return STRSCAN_ERROR; + } + { + uint32_t hi = 0, lo = (uint32_t)(xip-xi); + int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1); + + lua_assert(lo > 0 && (ex10 & 1) == 0); + + /* Handle simple overflow/underflow. */ + if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; } + else if (idig < -326/2) { o->n = neg ? -0.0 : 0.0; return fmt; } + + /* Scale up until we have at least 17 or 18 integer part digits. */ + while (idig < 9 && idig < DLEN(lo, hi)) { + uint32_t i, cy = 0; + ex2 -= 6; + for (i = DPREV(lo); ; i = DPREV(i)) { + uint32_t d = (xi[i] << 6) + cy; + cy = (((d >> 2) * 5243) >> 17); d = d - cy * 100; /* Div/mod 100. */ + xi[i] = (uint8_t)d; + if (i == hi) break; + if (d == 0 && i == DPREV(lo)) lo = i; + } + if (cy) { + hi = DPREV(hi); + if (xi[DPREV(lo)] == 0) lo = DPREV(lo); + else if (hi == lo) { lo = DPREV(lo); xi[DPREV(lo)] |= xi[lo]; } + xi[hi] = (uint8_t)cy; idig++; + } + } + + /* Scale down until no more than 17 or 18 integer part digits remain. 
*/ + while (idig > 9) { + uint32_t i = hi, cy = 0; + ex2 += 6; + do { + cy += xi[i]; + xi[i] = (cy >> 6); + cy = 100 * (cy & 0x3f); + if (xi[i] == 0 && i == hi) hi = DNEXT(hi), idig--; + i = DNEXT(i); + } while (i != lo); + while (cy) { + if (hi == lo) { xi[DPREV(lo)] |= 1; break; } + xi[lo] = (cy >> 6); lo = DNEXT(lo); + cy = 100 * (cy & 0x3f); + } + } + + /* Collect integer part digits and convert to rescaled double. */ + { + uint64_t x = xi[hi]; + uint32_t i; + for (i = DNEXT(hi); --idig > 0 && i != lo; i = DNEXT(i)) + x = x * 100 + xi[i]; + if (i == lo) { + while (--idig >= 0) x = x * 100; + } else { /* Gather round bit from remaining digits. */ + x <<= 1; ex2--; + do { + if (xi[i]) { x |= 1; break; } + i = DNEXT(i); + } while (i != lo); + } + strscan_double(x, o, ex2, neg); + } + } + return fmt; +} + +/* Parse binary number. */ +static StrScanFmt strscan_bin(const uint8_t *p, TValue *o, + StrScanFmt fmt, uint32_t opt, + int32_t ex2, int32_t neg, uint32_t dig) +{ + uint64_t x = 0; + uint32_t i; + + if (ex2 || dig > 64) return STRSCAN_ERROR; + + /* Scan binary digits. */ + for (i = dig; i; i--, p++) { + if ((*p & ~1) != '0') return STRSCAN_ERROR; + x = (x << 1) | (*p & 1); + } + + /* Format-specific handling. */ + switch (fmt) { + case STRSCAN_INT: + if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) { + o->i = neg ? -(int32_t)x : (int32_t)x; + return STRSCAN_INT; /* Fast path for 32 bit integers. */ + } + if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; } + /* fallthrough */ + case STRSCAN_U32: + if (dig > 32) return STRSCAN_ERROR; + o->i = neg ? -(int32_t)x : (int32_t)x; + return STRSCAN_U32; + case STRSCAN_I64: + case STRSCAN_U64: + o->u64 = neg ? (uint64_t)-(int64_t)x : x; + return fmt; + default: + break; + } + + /* Reduce range, then convert to double. */ + if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; } + strscan_double(x, o, ex2, neg); + return fmt; +} + +/* Scan string containing a number. Returns format. Returns value in o. */ +StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt) +{ + int32_t neg = 0; + + /* Remove leading space, parse sign and non-numbers. */ + if (LJ_UNLIKELY(!lj_char_isdigit(*p))) { + while (lj_char_isspace(*p)) p++; + if (*p == '+' || *p == '-') neg = (*p++ == '-'); + if (LJ_UNLIKELY(*p >= 'A')) { /* Parse "inf", "infinity" or "nan". */ + TValue tmp; + setnanV(&tmp); + if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'f')) { + if (neg) setminfV(&tmp); else setpinfV(&tmp); + p += 3; + if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'i') && + casecmp(p[3],'t') && casecmp(p[4],'y')) p += 5; + } else if (casecmp(p[0],'n') && casecmp(p[1],'a') && casecmp(p[2],'n')) { + p += 3; + } + while (lj_char_isspace(*p)) p++; + if (*p) return STRSCAN_ERROR; + o->u64 = tmp.u64; + return STRSCAN_NUM; + } + } + + /* Parse regular number. */ + { + StrScanFmt fmt = STRSCAN_INT; + int cmask = LJ_CHAR_DIGIT; + int base = (opt & STRSCAN_OPT_C) && *p == '0' ? 0 : 10; + const uint8_t *sp, *dp = NULL; + uint32_t dig = 0, hasdig = 0, x = 0; + int32_t ex = 0; + + /* Determine base and skip leading zeros. */ + if (LJ_UNLIKELY(*p <= '0')) { + if (*p == '0') { + if (casecmp(p[1], 'x')) + base = 16, cmask = LJ_CHAR_XDIGIT, p += 2; + else if (casecmp(p[1], 'b')) + base = 2, cmask = LJ_CHAR_DIGIT, p += 2; + } + for ( ; ; p++) { + if (*p == '0') { + hasdig = 1; + } else if (*p == '.') { + if (dp) return STRSCAN_ERROR; + dp = p; + } else { + break; + } + } + } + + /* Preliminary digit and decimal point scan. 
*/ + for (sp = p; ; p++) { + if (LJ_LIKELY(lj_char_isa(*p, cmask))) { + x = x * 10 + (*p & 15); /* For fast path below. */ + dig++; + } else if (*p == '.') { + if (dp) return STRSCAN_ERROR; + dp = p; + } else { + break; + } + } + if (!(hasdig | dig)) return STRSCAN_ERROR; + + /* Handle decimal point. */ + if (dp) { + fmt = STRSCAN_NUM; + if (dig) { + ex = (int32_t)(dp-(p-1)); dp = p-1; + while (ex < 0 && *dp-- == '0') ex++, dig--; /* Skip trailing zeros. */ + if (base == 16) ex *= 4; + } + } + + /* Parse exponent. */ + if (base >= 10 && casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) { + uint32_t xx; + int negx = 0; + fmt = STRSCAN_NUM; p++; + if (*p == '+' || *p == '-') negx = (*p++ == '-'); + if (!lj_char_isdigit(*p)) return STRSCAN_ERROR; + xx = (*p++ & 15); + while (lj_char_isdigit(*p)) { + if (xx < 65536) xx = xx * 10 + (*p & 15); + p++; + } + ex += negx ? -(int32_t)xx : (int32_t)xx; + } + + /* Parse suffix. */ + if (*p) { + /* I (IMAG), U (U32), LL (I64), ULL/LLU (U64), L (long), UL/LU (ulong). */ + /* NYI: f (float). Not needed until cp_number() handles non-integers. */ + if (casecmp(*p, 'i')) { + if (!(opt & STRSCAN_OPT_IMAG)) return STRSCAN_ERROR; + p++; fmt = STRSCAN_IMAG; + } else if (fmt == STRSCAN_INT) { + if (casecmp(*p, 'u')) p++, fmt = STRSCAN_U32; + if (casecmp(*p, 'l')) { + p++; + if (casecmp(*p, 'l')) p++, fmt += STRSCAN_I64 - STRSCAN_INT; + else if (!(opt & STRSCAN_OPT_C)) return STRSCAN_ERROR; + else if (sizeof(long) == 8) fmt += STRSCAN_I64 - STRSCAN_INT; + } + if (casecmp(*p, 'u') && (fmt == STRSCAN_INT || fmt == STRSCAN_I64)) + p++, fmt += STRSCAN_U32 - STRSCAN_INT; + if ((fmt == STRSCAN_U32 && !(opt & STRSCAN_OPT_C)) || + (fmt >= STRSCAN_I64 && !(opt & STRSCAN_OPT_LL))) + return STRSCAN_ERROR; + } + while (lj_char_isspace(*p)) p++; + if (*p) return STRSCAN_ERROR; + } + + /* Fast path for decimal 32 bit integers. */ + if (fmt == STRSCAN_INT && base == 10 && + (dig < 10 || (dig == 10 && *sp <= '2' && x < 0x80000000u+neg))) { + int32_t y = neg ? -(int32_t)x : (int32_t)x; + if ((opt & STRSCAN_OPT_TONUM)) { + o->n = (double)y; + return STRSCAN_NUM; + } else { + o->i = y; + return STRSCAN_INT; + } + } + + /* Dispatch to base-specific parser. */ + if (base == 0 && !(fmt == STRSCAN_NUM || fmt == STRSCAN_IMAG)) + return strscan_oct(sp, o, fmt, neg, dig); + if (base == 16) + fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig); + else if (base == 2) + fmt = strscan_bin(sp, o, fmt, opt, ex, neg, dig); + else + fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig); + + /* Try to convert number to integer, if requested. 
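With the C-literal options, the suffix handling above lets e.g. "10ULL" scan to an unsigned 64 bit value. A hedged sketch (illustrative only, assuming the LuaJIT core headers):

static void strscan_suffix_demo(void)
{
  TValue o;
  StrScanFmt fmt = lj_strscan_scan((const uint8_t *)"10ULL", &o,
                                   STRSCAN_OPT_C|STRSCAN_OPT_LL);
  /* fmt == STRSCAN_U64, o.u64 == 10 */
  (void)fmt;
}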
*/ + if (fmt == STRSCAN_NUM && (opt & STRSCAN_OPT_TOINT)) { + double n = o->n; + int32_t i = lj_num2int(n); + if (n == (lua_Number)i) { o->i = i; return STRSCAN_INT; } + } + return fmt; + } +} + +int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o) +{ + StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o, + STRSCAN_OPT_TONUM); + lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM); + return (fmt != STRSCAN_ERROR); +} + +#if LJ_DUALNUM +int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o) +{ + StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o, + STRSCAN_OPT_TOINT); + lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT); + if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM); + return (fmt != STRSCAN_ERROR); +} +#endif + +#undef DNEXT +#undef DPREV +#undef DLEN + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.h new file mode 100644 index 00000000000..77606893640 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_strscan.h @@ -0,0 +1,39 @@ +/* +** String scanning. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_STRSCAN_H +#define _LJ_STRSCAN_H + +#include "lj_obj.h" + +/* Options for accepted/returned formats. */ +#define STRSCAN_OPT_TOINT 0x01 /* Convert to int32_t, if possible. */ +#define STRSCAN_OPT_TONUM 0x02 /* Always convert to double. */ +#define STRSCAN_OPT_IMAG 0x04 +#define STRSCAN_OPT_LL 0x08 +#define STRSCAN_OPT_C 0x10 + +/* Returned format. */ +typedef enum { + STRSCAN_ERROR, + STRSCAN_NUM, STRSCAN_IMAG, + STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64, +} StrScanFmt; + +LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt); +LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o); +#if LJ_DUALNUM +LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o); +#else +#define lj_strscan_number(s, o) lj_strscan_num((s), (o)) +#endif + +/* Check for number or convert string to number/int in-place (!). */ +static LJ_AINLINE int lj_strscan_numberobj(TValue *o) +{ + return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o)); +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_tab.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_tab.c new file mode 100644 index 00000000000..88bf1089b5b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_tab.c @@ -0,0 +1,666 @@ +/* +** Table handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_tab_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" + +/* -- Object hashing ------------------------------------------------------ */ + +/* Hash values are masked with the table hash mask and used as an index. */ +static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash) +{ + Node *n = noderef(t->node); + return &n[hash & t->hmask]; +} + +/* String hashes are precomputed when they are interned. 
*/ +#define hashstr(t, s) hashmask(t, (s)->hash) + +#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi))) +#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1)) +#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS) +#if LJ_GC64 +#define hashgcref(t, r) \ + hashlohi((t), (uint32_t)gcrefu(r), (uint32_t)(gcrefu(r) >> 32)) +#else +#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS) +#endif + +/* Hash an arbitrary key and return its anchor position in the hash table. */ +static Node *hashkey(const GCtab *t, cTValue *key) +{ + lua_assert(!tvisint(key)); + if (tvisstr(key)) + return hashstr(t, strV(key)); + else if (tvisnum(key)) + return hashnum(t, key); + else if (tvisbool(key)) + return hashmask(t, boolV(key)); + else + return hashgcref(t, key->gcr); + /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */ +} + +/* -- Table creation and destruction -------------------------------------- */ + +/* Create new hash part for table. */ +static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits) +{ + uint32_t hsize; + Node *node; + lua_assert(hbits != 0); + if (hbits > LJ_MAX_HBITS) + lj_err_msg(L, LJ_ERR_TABOV); + hsize = 1u << hbits; + node = lj_mem_newvec(L, hsize, Node); + setmref(t->node, node); + setfreetop(t, node, &node[hsize]); + t->hmask = hsize-1; +} + +/* +** Q: Why all of these copies of t->hmask, t->node etc. to local variables? +** A: Because alias analysis for C is _really_ tough. +** Even state-of-the-art C compilers won't produce good code without this. +*/ + +/* Clear hash part of table. */ +static LJ_AINLINE void clearhpart(GCtab *t) +{ + uint32_t i, hmask = t->hmask; + Node *node = noderef(t->node); + lua_assert(t->hmask != 0); + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + setmref(n->next, NULL); + setnilV(&n->key); + setnilV(&n->val); + } +} + +/* Clear array part of table. */ +static LJ_AINLINE void clearapart(GCtab *t) +{ + uint32_t i, asize = t->asize; + TValue *array = tvref(t->array); + for (i = 0; i < asize; i++) + setnilV(&array[i]); +} + +/* Create a new table. Note: the slots are not initialized (yet). */ +static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits) +{ + GCtab *t; + /* First try to colocate the array part. */ + if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) { + Node *nilnode; + lua_assert((sizeof(GCtab) & 7) == 0); + t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize)); + t->gct = ~LJ_TTAB; + t->nomm = (uint8_t)~0; + t->colo = (int8_t)asize; + setmref(t->array, (TValue *)((char *)t + sizeof(GCtab))); + setgcrefnull(t->metatable); + t->asize = asize; + t->hmask = 0; + nilnode = &G(L)->nilnode; + setmref(t->node, nilnode); +#if LJ_GC64 + setmref(t->freetop, nilnode); +#endif + } else { /* Otherwise separately allocate the array part. */ + Node *nilnode; + t = lj_mem_newobj(L, GCtab); + t->gct = ~LJ_TTAB; + t->nomm = (uint8_t)~0; + t->colo = 0; + setmref(t->array, NULL); + setgcrefnull(t->metatable); + t->asize = 0; /* In case the array allocation fails. */ + t->hmask = 0; + nilnode = &G(L)->nilnode; + setmref(t->node, nilnode); +#if LJ_GC64 + setmref(t->freetop, nilnode); +#endif + if (asize > 0) { + if (asize > LJ_MAX_ASIZE) + lj_err_msg(L, LJ_ERR_TABOV); + setmref(t->array, lj_mem_newvec(L, asize, TValue)); + t->asize = asize; + } + } + if (hbits) + newhpart(L, t, hbits); + return t; +} + +/* Create a new table. +** +** IMPORTANT NOTE: The API differs from lua_createtable()! +** +** The array size is non-inclusive. E.g. 
asize=128 creates array slots +** for 0..127, but not for 128. If you need slots 1..128, pass asize=129 +** (slot 0 is wasted in this case). +** +** The hash size is given in hash bits. hbits=0 means no hash part. +** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on. +*/ +GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits) +{ + GCtab *t = newtab(L, asize, hbits); + clearapart(t); + if (t->hmask > 0) clearhpart(t); + return t; +} + +/* The API of this function conforms to lua_createtable(). */ +GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h) +{ + return lj_tab_new(L, (uint32_t)(a > 0 ? a+1 : 0), hsize2hbits(h)); +} + +#if LJ_HASJIT +GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize) +{ + GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24); + clearapart(t); + if (t->hmask > 0) clearhpart(t); + return t; +} +#endif + +/* Duplicate a table. */ +GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt) +{ + GCtab *t; + uint32_t asize, hmask; + t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0); + lua_assert(kt->asize == t->asize && kt->hmask == t->hmask); + t->nomm = 0; /* Keys with metamethod names may be present. */ + asize = kt->asize; + if (asize > 0) { + TValue *array = tvref(t->array); + TValue *karray = tvref(kt->array); + if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. */ + uint32_t i; + for (i = 0; i < asize; i++) + copyTV(L, &array[i], &karray[i]); + } else { + memcpy(array, karray, asize*sizeof(TValue)); + } + } + hmask = kt->hmask; + if (hmask > 0) { + uint32_t i; + Node *node = noderef(t->node); + Node *knode = noderef(kt->node); + ptrdiff_t d = (char *)node - (char *)knode; + setfreetop(t, node, (Node *)((char *)getfreetop(kt, knode) + d)); + for (i = 0; i <= hmask; i++) { + Node *kn = &knode[i]; + Node *n = &node[i]; + Node *next = nextnode(kn); + /* Don't use copyTV here, since it asserts on a copy of a dead key. */ + n->val = kn->val; n->key = kn->key; + setmref(n->next, next == NULL? next : (Node *)((char *)next + d)); + } + } + return t; +} + +/* Clear a table. */ +void LJ_FASTCALL lj_tab_clear(GCtab *t) +{ + clearapart(t); + if (t->hmask > 0) { + Node *node = noderef(t->node); + setfreetop(t, node, &node[t->hmask+1]); + clearhpart(t); + } +} + +/* Free a table. */ +void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t) +{ + if (t->hmask > 0) + lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node); + if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0) + lj_mem_freevec(g, tvref(t->array), t->asize, TValue); + if (LJ_MAX_COLOSIZE != 0 && t->colo) + lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f)); + else + lj_mem_freet(g, t); +} + +/* -- Table resizing ------------------------------------------------------ */ + +/* Resize a table to fit the new array/hash part sizes. */ +void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits) +{ + Node *oldnode = noderef(t->node); + uint32_t oldasize = t->asize; + uint32_t oldhmask = t->hmask; + if (asize > oldasize) { /* Array part grows? */ + TValue *array; + uint32_t i; + if (asize > LJ_MAX_ASIZE) + lj_err_msg(L, LJ_ERR_TABOV); + if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) { + /* A colocated array must be separated and copied. */ + TValue *oarray = tvref(t->array); + array = lj_mem_newvec(L, asize, TValue); + t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). 
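Referring back to the size note above lj_tab_new(): a brief sketch of the two creation entry points and of the setter caveat spelled out in lj_tab.h (illustrative only, not part of the upstream file; assumes the LuaJIT core headers):

static GCtab *tab_demo(lua_State *L, GCstr *key)
{
  GCtab *t, *t2;
  TValue *slot;
  /* Array slots 0..128 (asize is non-inclusive) plus 2^4 = 16 hash slots. */
  t = lj_tab_new(L, 129, 4);
  /* lua_createtable()-style sizes: this also yields asize 129 and 16 hash slots. */
  t2 = lj_tab_new_ah(L, 128, 16);
  /* Setters return the value slot; a plain number needs no write barrier. */
  slot = lj_tab_setstr(L, t, key);
  setnumV(slot, 42.0);
  /* Storing a GC object instead would require a barrier on t,
  ** e.g. lj_gc_anybarriert(L, t), as the caveat in lj_tab.h notes.
  */
  (void)t2;
  return t;
}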
*/ + for (i = 0; i < oldasize; i++) + copyTV(L, &array[i], &oarray[i]); + } else { + array = (TValue *)lj_mem_realloc(L, tvref(t->array), + oldasize*sizeof(TValue), asize*sizeof(TValue)); + } + setmref(t->array, array); + t->asize = asize; + for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */ + setnilV(&array[i]); + } + /* Create new (empty) hash part. */ + if (hbits) { + newhpart(L, t, hbits); + clearhpart(t); + } else { + global_State *g = G(L); + setmref(t->node, &g->nilnode); +#if LJ_GC64 + setmref(t->freetop, &g->nilnode); +#endif + t->hmask = 0; + } + if (asize < oldasize) { /* Array part shrinks? */ + TValue *array = tvref(t->array); + uint32_t i; + t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */ + for (i = asize; i < oldasize; i++) /* Reinsert old array values. */ + if (!tvisnil(&array[i])) + copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]); + /* Physically shrink only separated arrays. */ + if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0) + setmref(t->array, lj_mem_realloc(L, array, + oldasize*sizeof(TValue), asize*sizeof(TValue))); + } + if (oldhmask > 0) { /* Reinsert pairs from old hash part. */ + global_State *g; + uint32_t i; + for (i = 0; i <= oldhmask; i++) { + Node *n = &oldnode[i]; + if (!tvisnil(&n->val)) + copyTV(L, lj_tab_set(L, t, &n->key), &n->val); + } + g = G(L); + lj_mem_freevec(g, oldnode, oldhmask+1, Node); + } +} + +static uint32_t countint(cTValue *key, uint32_t *bins) +{ + lua_assert(!tvisint(key)); + if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) { + bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++; + return 1; + } + } + return 0; +} + +static uint32_t countarray(const GCtab *t, uint32_t *bins) +{ + uint32_t na, b, i; + if (t->asize == 0) return 0; + for (na = i = b = 0; b < LJ_MAX_ABITS; b++) { + uint32_t n, top = 2u << b; + TValue *array; + if (top >= t->asize) { + top = t->asize-1; + if (i > top) + break; + } + array = tvref(t->array); + for (n = 0; i <= top; i++) + if (!tvisnil(&array[i])) + n++; + bins[b] += n; + na += n; + } + return na; +} + +static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray) +{ + uint32_t total, na, i, hmask = t->hmask; + Node *node = noderef(t->node); + for (total = na = 0, i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (!tvisnil(&n->val)) { + na += countint(&n->key, bins); + total++; + } + } + *narray += na; + return total; +} + +static uint32_t bestasize(uint32_t bins[], uint32_t *narray) +{ + uint32_t b, sum, na = 0, sz = 0, nn = *narray; + for (b = 0, sum = 0; 2*nn > (1u< 0 && 2*(sum += bins[b]) > (1u<hmask > 0 ? 
lj_fls(t->hmask)+1 : 0); +} + +/* -- Table getters ------------------------------------------------------- */ + +cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key) +{ + TValue k; + Node *n; + k.n = (lua_Number)key; + n = hashnum(t, &k); + do { + if (tvisnum(&n->key) && n->key.n == k.n) + return &n->val; + } while ((n = nextnode(n))); + return NULL; +} + +cTValue *lj_tab_getstr(GCtab *t, GCstr *key) +{ + Node *n = hashstr(t, key); + do { + if (tvisstr(&n->key) && strV(&n->key) == key) + return &n->val; + } while ((n = nextnode(n))); + return NULL; +} + +cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key) +{ + if (tvisstr(key)) { + cTValue *tv = lj_tab_getstr(t, strV(key)); + if (tv) + return tv; + } else if (tvisint(key)) { + cTValue *tv = lj_tab_getint(t, intV(key)); + if (tv) + return tv; + } else if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if (nk == (lua_Number)k) { + cTValue *tv = lj_tab_getint(t, k); + if (tv) + return tv; + } else { + goto genlookup; /* Else use the generic lookup. */ + } + } else if (!tvisnil(key)) { + Node *n; + genlookup: + n = hashkey(t, key); + do { + if (lj_obj_equal(&n->key, key)) + return &n->val; + } while ((n = nextnode(n))); + } + return niltv(L); +} + +/* -- Table setters ------------------------------------------------------- */ + +/* Insert new key. Use Brent's variation to optimize the chain length. */ +TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key) +{ + Node *n = hashkey(t, key); + if (!tvisnil(&n->val) || t->hmask == 0) { + Node *nodebase = noderef(t->node); + Node *collide, *freenode = getfreetop(t, nodebase); + lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1); + do { + if (freenode == nodebase) { /* No free node found? */ + rehashtab(L, t, key); /* Rehash table. */ + return lj_tab_set(L, t, key); /* Retry key insertion. */ + } + } while (!tvisnil(&(--freenode)->key)); + setfreetop(t, nodebase, freenode); + lua_assert(freenode != &G(L)->nilnode); + collide = hashkey(t, &n->key); + if (collide != n) { /* Colliding node not the main node? */ + while (noderef(collide->next) != n) /* Find predecessor. */ + collide = nextnode(collide); + setmref(collide->next, freenode); /* Relink chain. */ + /* Copy colliding node into free node and free main node. */ + freenode->val = n->val; + freenode->key = n->key; + freenode->next = n->next; + setmref(n->next, NULL); + setnilV(&n->val); + /* Rechain pseudo-resurrected string keys with colliding hashes. */ + while (nextnode(freenode)) { + Node *nn = nextnode(freenode); + if (tvisstr(&nn->key) && !tvisnil(&nn->val) && + hashstr(t, strV(&nn->key)) == n) { + freenode->next = nn->next; + nn->next = n->next; + setmref(n->next, nn); + } else { + freenode = nn; + } + } + } else { /* Otherwise use free node. */ + setmrefr(freenode->next, n->next); /* Insert into chain. 
*/ + setmref(n->next, freenode); + n = freenode; + } + } + n->key.u64 = key->u64; + if (LJ_UNLIKELY(tvismzero(&n->key))) + n->key.u64 = 0; + lj_gc_anybarriert(L, t); + lua_assert(tvisnil(&n->val)); + return &n->val; +} + +TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key) +{ + TValue k; + Node *n; + k.n = (lua_Number)key; + n = hashnum(t, &k); + do { + if (tvisnum(&n->key) && n->key.n == k.n) + return &n->val; + } while ((n = nextnode(n))); + return lj_tab_newkey(L, t, &k); +} + +TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key) +{ + TValue k; + Node *n = hashstr(t, key); + do { + if (tvisstr(&n->key) && strV(&n->key) == key) + return &n->val; + } while ((n = nextnode(n))); + setstrV(L, &k, key); + return lj_tab_newkey(L, t, &k); +} + +TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key) +{ + Node *n; + t->nomm = 0; /* Invalidate negative metamethod cache. */ + if (tvisstr(key)) { + return lj_tab_setstr(L, t, strV(key)); + } else if (tvisint(key)) { + return lj_tab_setint(L, t, intV(key)); + } else if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if (nk == (lua_Number)k) + return lj_tab_setint(L, t, k); + if (tvisnan(key)) + lj_err_msg(L, LJ_ERR_NANIDX); + /* Else use the generic lookup. */ + } else if (tvisnil(key)) { + lj_err_msg(L, LJ_ERR_NILIDX); + } + n = hashkey(t, key); + do { + if (lj_obj_equal(&n->key, key)) + return &n->val; + } while ((n = nextnode(n))); + return lj_tab_newkey(L, t, key); +} + +/* -- Table traversal ----------------------------------------------------- */ + +/* Get the traversal index of a key. */ +static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key) +{ + TValue tmp; + if (tvisint(key)) { + int32_t k = intV(key); + if ((uint32_t)k < t->asize) + return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */ + setnumV(&tmp, (lua_Number)k); + key = &tmp; + } else if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if ((uint32_t)k < t->asize && nk == (lua_Number)k) + return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */ + } + if (!tvisnil(key)) { + Node *n = hashkey(t, key); + do { + if (lj_obj_equal(&n->key, key)) + return t->asize + (uint32_t)(n - noderef(t->node)); + /* Hash key indexes: [t->asize..t->asize+t->nmask] */ + } while ((n = nextnode(n))); + if (key->u32.hi == 0xfffe7fff) /* ITERN was despecialized while running. */ + return key->u32.lo - 1; + lj_err_msg(L, LJ_ERR_NEXTIDX); + return 0; /* unreachable */ + } + return ~0u; /* A nil key starts the traversal. */ +} + +/* Advance to the next step in a table traversal. */ +int lj_tab_next(lua_State *L, GCtab *t, TValue *key) +{ + uint32_t i = keyindex(L, t, key); /* Find predecessor key index. */ + for (i++; i < t->asize; i++) /* First traverse the array keys. */ + if (!tvisnil(arrayslot(t, i))) { + setintV(key, i); + copyTV(L, key+1, arrayslot(t, i)); + return 1; + } + for (i -= t->asize; i <= t->hmask; i++) { /* Then traverse the hash keys. */ + Node *n = &noderef(t->node)[i]; + if (!tvisnil(&n->val)) { + copyTV(L, key, &n->key); + copyTV(L, key+1, &n->val); + return 1; + } + } + return 0; /* End of traversal. */ +} + +/* -- Table length calculation -------------------------------------------- */ + +static MSize unbound_search(GCtab *t, MSize j) +{ + cTValue *tv; + MSize i = j; /* i is zero or a present index */ + j++; + /* find `i' and `j' such that i is present and j is not */ + while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) { + i = j; + j *= 2; + if (j > (MSize)(INT_MAX-2)) { /* overflow? 
*/ + /* table was built with bad purposes: resort to linear search */ + i = 1; + while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++; + return i - 1; + } + } + /* now do a binary search between them */ + while (j - i > 1) { + MSize m = (i+j)/2; + cTValue *tvb = lj_tab_getint(t, (int32_t)m); + if (tvb && !tvisnil(tvb)) i = m; else j = m; + } + return i; +} + +/* +** Try to find a boundary in table `t'. A `boundary' is an integer index +** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil). +*/ +MSize LJ_FASTCALL lj_tab_len(GCtab *t) +{ + MSize j = (MSize)t->asize; + if (j > 1 && tvisnil(arrayslot(t, j-1))) { + MSize i = 1; + while (j - i > 1) { + MSize m = (i+j)/2; + if (tvisnil(arrayslot(t, m-1))) j = m; else i = m; + } + return i-1; + } + if (j) j--; + if (t->hmask <= 0) + return j; + return unbound_search(t, j); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_tab.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_tab.h new file mode 100644 index 00000000000..7cf031be9af --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_tab.h @@ -0,0 +1,73 @@ +/* +** Table handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TAB_H +#define _LJ_TAB_H + +#include "lj_obj.h" + +/* Hash constants. Tuned using a brute force search. */ +#define HASH_BIAS (-0x04c11db7) +#define HASH_ROT1 14 +#define HASH_ROT2 5 +#define HASH_ROT3 13 + +/* Scramble the bits of numbers and pointers. */ +static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi) +{ +#if LJ_TARGET_X86ORX64 + /* Prefer variant that compiles well for a 2-operand CPU. */ + lo ^= hi; hi = lj_rol(hi, HASH_ROT1); + lo -= hi; hi = lj_rol(hi, HASH_ROT2); + hi ^= lo; hi -= lj_rol(lo, HASH_ROT3); +#else + lo ^= hi; + lo = lo - lj_rol(hi, HASH_ROT1); + hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2); + hi = hi - lj_rol(lo, HASH_ROT3); +#endif + return hi; +} + +#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0) + +LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits); +LJ_FUNC GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h); +#if LJ_HASJIT +LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize); +#endif +LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt); +LJ_FUNC void LJ_FASTCALL lj_tab_clear(GCtab *t); +LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t); +#if LJ_HASFFI +LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t); +#endif +LJ_FUNC void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits); +LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize); + +/* Caveat: all getters except lj_tab_get() can return NULL! */ + +LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key); +LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key); +LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key); + +/* Caveat: all setters require a write barrier for the stored value. */ + +LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key); +LJ_FUNCA TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key); +LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key); +LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key); + +#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize) +#define arrayslot(t, i) (&tvref((t)->array)[(i)]) +#define lj_tab_getint(t, key) \ + (inarray((t), (key)) ? 
arrayslot((t), (key)) : lj_tab_getinth((t), (key))) +#define lj_tab_setint(L, t, key) \ + (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key))) + +LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key); +LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_target.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_target.h new file mode 100644 index 00000000000..0daecb11c86 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_target.h @@ -0,0 +1,164 @@ +/* +** Definitions for target CPU. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_H +#define _LJ_TARGET_H + +#include "lj_def.h" +#include "lj_arch.h" + +/* -- Registers and spill slots ------------------------------------------- */ + +/* Register type (uint8_t in ir->r). */ +typedef uint32_t Reg; + +/* The hi-bit is NOT set for an allocated register. This means the value +** can be directly used without masking. The hi-bit is set for a register +** allocation hint or for RID_INIT, RID_SINK or RID_SUNK. +*/ +#define RID_NONE 0x80 +#define RID_MASK 0x7f +#define RID_INIT (RID_NONE|RID_MASK) +#define RID_SINK (RID_INIT-1) +#define RID_SUNK (RID_INIT-2) + +#define ra_noreg(r) ((r) & RID_NONE) +#define ra_hasreg(r) (!((r) & RID_NONE)) + +/* The ra_hashint() macro assumes a previous test for ra_noreg(). */ +#define ra_hashint(r) ((r) < RID_SUNK) +#define ra_gethint(r) ((Reg)((r) & RID_MASK)) +#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE) +#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0) + +/* Spill slot 0 means no spill slot has been allocated. */ +#define SPS_NONE 0 + +#define ra_hasspill(s) ((s) != SPS_NONE) + +/* Combined register and spill slot (uint16_t in ir->prev). */ +typedef uint32_t RegSP; + +#define REGSP(r, s) ((r) + ((s) << 8)) +#define REGSP_HINT(r) ((r)|RID_NONE) +#define REGSP_INIT REGSP(RID_INIT, 0) + +#define regsp_reg(rs) ((rs) & 255) +#define regsp_spill(rs) ((rs) >> 8) +#define regsp_used(rs) \ + (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0)) + +/* -- Register sets ------------------------------------------------------- */ + +/* Bitset for registers. 32 registers suffice for most architectures. +** Note that one set holds bits for both GPRs and FPRs. +*/ +#if LJ_TARGET_PPC || LJ_TARGET_MIPS +typedef uint64_t RegSet; +#else +typedef uint32_t RegSet; +#endif + +#define RID2RSET(r) (((RegSet)1) << (r)) +#define RSET_EMPTY ((RegSet)0) +#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo)) + +#define rset_test(rs, r) ((int)((rs) >> (r)) & 1) +#define rset_set(rs, r) (rs |= RID2RSET(r)) +#define rset_clear(rs, r) (rs &= ~RID2RSET(r)) +#define rset_exclude(rs, r) (rs & ~RID2RSET(r)) +#if LJ_TARGET_PPC || LJ_TARGET_MIPS +#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63)) +#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs)) +#else +#define rset_picktop(rs) ((Reg)lj_fls(rs)) +#define rset_pickbot(rs) ((Reg)lj_ffs(rs)) +#endif + +/* -- Register allocation cost -------------------------------------------- */ + +/* The register allocation heuristic keeps track of the cost for allocating +** a specific register: +** +** A free register (obviously) has a cost of 0 and a 1-bit in the free mask. +** +** An already allocated register has the (non-zero) IR reference in the lowest +** bits and the result of a blended cost-model in the higher bits. +** +** The allocator first checks the free mask for a hit. 
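A hedged sketch of the cost word this search operates on, using the REGCOST macros defined at the end of this comment block (illustrative only; IRRef1 and IRT_ISPHI come from the IR definitions elsewhere in the tree):

static void regcost_demo(void)
{
  /* Non-PHI instruction defined at IR reference 100: the reference sits in
  ** both the low and the high 16 bits, with no extra weight.
  */
  RegCost rc = REGCOST_REF_T(100, 0);
  /* A PHI at the same reference gets REGCOST_PHI_WEIGHT (64) added to the
  ** high half, so it is only evicted before a non-PHI whose reference is
  ** more than 64 IR instructions higher.
  */
  RegCost rcphi = REGCOST_REF_T(100, IRT_ISPHI);
  /* The minimum-cost search recovers the victim's IR reference: */
  IRRef1 victim = regcost_ref(rc < rcphi ? rc : rcphi);
  (void)victim;
}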
Otherwise an (unrolled) +** linear search for the minimum cost is used. The search doesn't need to +** keep track of the position of the minimum, which makes it very fast. +** The lowest bits of the minimum cost show the desired IR reference whose +** register is the one to evict. +** +** Without the cost-model this degenerates to the standard heuristics for +** (reverse) linear-scan register allocation. Since code generation is done +** in reverse, a live interval extends from the last use to the first def. +** For an SSA IR the IR reference is the first (and only) def and thus +** trivially marks the end of the interval. The LSRA heuristics says to pick +** the register whose live interval has the furthest extent, i.e. the lowest +** IR reference in our case. +** +** A cost-model should take into account other factors, like spill-cost and +** restore- or rematerialization-cost, which depend on the kind of instruction. +** E.g. constants have zero spill costs, variant instructions have higher +** costs than invariants and PHIs should preferably never be spilled. +** +** Here's a first cut at simple, but effective blended cost-model for R-LSRA: +** - Due to careful design of the IR, constants already have lower IR +** references than invariants and invariants have lower IR references +** than variants. +** - The cost in the upper 16 bits is the sum of the IR reference and a +** weighted score. The score currently only takes into account whether +** the IRT_ISPHI bit is set in the instruction type. +** - The PHI weight is the minimum distance (in IR instructions) a PHI +** reference has to be further apart from a non-PHI reference to be spilled. +** - It should be a power of two (for speed) and must be between 2 and 32768. +** Good values for the PHI weight seem to be between 40 and 150. +** - Further study is required. +*/ +#define REGCOST_PHI_WEIGHT 64 + +/* Cost for allocating a specific register. */ +typedef uint32_t RegCost; + +/* Note: assumes 16 bit IRRef1. */ +#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16)) +#define regcost_ref(rc) ((IRRef1)(rc)) + +#define REGCOST_T(t) \ + ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI)) +#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t))) + +/* -- Target-specific definitions ----------------------------------------- */ + +#if LJ_TARGET_X86ORX64 +#include "lj_target_x86.h" +#elif LJ_TARGET_ARM +#include "lj_target_arm.h" +#elif LJ_TARGET_ARM64 +#include "lj_target_arm64.h" +#elif LJ_TARGET_PPC +#include "lj_target_ppc.h" +#elif LJ_TARGET_MIPS +#include "lj_target_mips.h" +#else +#error "Missing include for target CPU" +#endif + +#ifdef EXITSTUBS_PER_GROUP +/* Return the address of an exit stub. */ +static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno) +{ + lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL); + return (char *)group[exitno / EXITSTUBS_PER_GROUP] + + EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP); +} +/* Avoid dependence on lj_jit.h if only including lj_target.h. */ +#define exitstub_addr(J, exitno) \ + ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno))) +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm.h new file mode 100644 index 00000000000..0a243b3740b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm.h @@ -0,0 +1,270 @@ +/* +** Definitions for ARM CPUs. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_ARM_H +#define _LJ_TARGET_ARM_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#define GPRDEF(_) \ + _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \ + _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC) +#if LJ_SOFTFP +#define FPRDEF(_) +#else +#define FPRDEF(_) \ + _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \ + _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15) +#endif +#define VRIDDEF(_) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_TMP = RID_LR, + + /* Calling conventions. */ + RID_RET = RID_R0, + RID_RETLO = RID_R0, + RID_RETHI = RID_R1, +#if LJ_SOFTFP + RID_FPRET = RID_R0, +#else + RID_FPRET = RID_D0, +#endif + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_R9, /* Interpreter BASE. */ + RID_LPC = RID_R6, /* Interpreter PC. */ + RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */ + RID_LREG = RID_R8, /* Interpreter L. */ + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_R0, + RID_MAX_GPR = RID_PC+1, + RID_MIN_FPR = RID_MAX_GPR, +#if LJ_SOFTFP + RID_MAX_FPR = RID_MIN_FPR, +#else + RID_MAX_FPR = RID_D15+1, +#endif + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR +}; + +#define RID_NUM_KREF RID_NUM_GPR +#define RID_MIN_KREF RID_R0 + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except sp, lr and pc. */ +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1)) +#define RSET_GPREVEN \ + (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \ + RID2RSET(RID_R8)|RID2RSET(RID_R10)) +#define RSET_GPRODD \ + (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \ + RID2RSET(RID_R9)|RID2RSET(RID_R11)) +#if LJ_SOFTFP +#define RSET_FPR 0 +#else +#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)) +#endif +#define RSET_ALL (RSET_GPR|RSET_FPR) +#define RSET_INIT RSET_ALL + +/* ABI-specific register sets. lr is an implicit scratch register. */ +#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12)) +#ifdef __APPLE__ +#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9)) +#else +#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_ +#endif +#if LJ_SOFTFP +#define RSET_SCRATCH_FPR 0 +#else +#define RSET_SCRATCH_FPR (RSET_RANGE(RID_D0, RID_D7+1)) +#endif +#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) +#define REGARG_FIRSTGPR RID_R0 +#define REGARG_LASTGPR RID_R3 +#define REGARG_NUMGPR 4 +#if LJ_ABI_SOFTFP +#define REGARG_FIRSTFPR 0 +#define REGARG_LASTFPR 0 +#define REGARG_NUMFPR 0 +#else +#define REGARG_FIRSTFPR RID_D0 +#define REGARG_LASTFPR RID_D7 +#define REGARG_NUMFPR 8 +#endif + +/* -- Spill slots --------------------------------------------------------- */ + +/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. +** +** SPS_FIXED: Available fixed spill slots in interpreter frame. +** This definition must match with the *.dasc file(s). +** +** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots. 
+*/ +#define SPS_FIXED 2 +#define SPS_FIRST 2 + +#define SPOFS_TMP 0 + +#define sps_scale(slot) (4 * (int32_t)(slot)) +#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1) + +/* -- Exit state ---------------------------------------------------------- */ + +/* This definition must match with the *.dasc file(s). */ +typedef struct { +#if !LJ_SOFTFP + lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ +#endif + int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ + int32_t spill[256]; /* Spill slots. */ +} ExitState; + +/* PC after instruction that caused an exit. Used to find the trace number. */ +#define EXITSTATE_PCREG RID_PC +/* Highest exit + 1 indicates stack check. */ +#define EXITSTATE_CHECKEXIT 1 + +#define EXITSTUB_SPACING 4 +#define EXITSTUBS_PER_GROUP 32 + +/* -- Instructions -------------------------------------------------------- */ + +/* Instruction fields. */ +#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28)) +#define ARMF_N(r) ((r) << 16) +#define ARMF_D(r) ((r) << 12) +#define ARMF_S(r) ((r) << 8) +#define ARMF_M(r) (r) +#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7)) +#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r)) + +typedef enum ARMIns { + ARMI_CCAL = 0xe0000000, + ARMI_S = 0x000100000, + ARMI_K12 = 0x02000000, + ARMI_KNEG = 0x00200000, + ARMI_LS_W = 0x00200000, + ARMI_LS_U = 0x00800000, + ARMI_LS_P = 0x01000000, + ARMI_LS_R = 0x02000000, + ARMI_LSX_I = 0x00400000, + + ARMI_AND = 0xe0000000, + ARMI_EOR = 0xe0200000, + ARMI_SUB = 0xe0400000, + ARMI_RSB = 0xe0600000, + ARMI_ADD = 0xe0800000, + ARMI_ADC = 0xe0a00000, + ARMI_SBC = 0xe0c00000, + ARMI_RSC = 0xe0e00000, + ARMI_TST = 0xe1100000, + ARMI_TEQ = 0xe1300000, + ARMI_CMP = 0xe1500000, + ARMI_CMN = 0xe1700000, + ARMI_ORR = 0xe1800000, + ARMI_MOV = 0xe1a00000, + ARMI_BIC = 0xe1c00000, + ARMI_MVN = 0xe1e00000, + + ARMI_NOP = 0xe1a00000, + + ARMI_MUL = 0xe0000090, + ARMI_SMULL = 0xe0c00090, + + ARMI_LDR = 0xe4100000, + ARMI_LDRB = 0xe4500000, + ARMI_LDRH = 0xe01000b0, + ARMI_LDRSB = 0xe01000d0, + ARMI_LDRSH = 0xe01000f0, + ARMI_LDRD = 0xe00000d0, + ARMI_STR = 0xe4000000, + ARMI_STRB = 0xe4400000, + ARMI_STRH = 0xe00000b0, + ARMI_STRD = 0xe00000f0, + ARMI_PUSH = 0xe92d0000, + + ARMI_B = 0xea000000, + ARMI_BL = 0xeb000000, + ARMI_BLX = 0xfa000000, + ARMI_BLXr = 0xe12fff30, + + /* ARMv6 */ + ARMI_REV = 0xe6bf0f30, + ARMI_SXTB = 0xe6af0070, + ARMI_SXTH = 0xe6bf0070, + ARMI_UXTB = 0xe6ef0070, + ARMI_UXTH = 0xe6ff0070, + + /* ARMv6T2 */ + ARMI_MOVW = 0xe3000000, + ARMI_MOVT = 0xe3400000, + + /* VFP */ + ARMI_VMOV_D = 0xeeb00b40, + ARMI_VMOV_S = 0xeeb00a40, + ARMI_VMOVI_D = 0xeeb00b00, + + ARMI_VMOV_R_S = 0xee100a10, + ARMI_VMOV_S_R = 0xee000a10, + ARMI_VMOV_RR_D = 0xec500b10, + ARMI_VMOV_D_RR = 0xec400b10, + + ARMI_VADD_D = 0xee300b00, + ARMI_VSUB_D = 0xee300b40, + ARMI_VMUL_D = 0xee200b00, + ARMI_VMLA_D = 0xee000b00, + ARMI_VMLS_D = 0xee000b40, + ARMI_VNMLS_D = 0xee100b00, + ARMI_VDIV_D = 0xee800b00, + + ARMI_VABS_D = 0xeeb00bc0, + ARMI_VNEG_D = 0xeeb10b40, + ARMI_VSQRT_D = 0xeeb10bc0, + + ARMI_VCMP_D = 0xeeb40b40, + ARMI_VCMPZ_D = 0xeeb50b40, + + ARMI_VMRS = 0xeef1fa10, + + ARMI_VCVT_S32_F32 = 0xeebd0ac0, + ARMI_VCVT_S32_F64 = 0xeebd0bc0, + ARMI_VCVT_U32_F32 = 0xeebc0ac0, + ARMI_VCVT_U32_F64 = 0xeebc0bc0, + ARMI_VCVT_F32_S32 = 0xeeb80ac0, + ARMI_VCVT_F64_S32 = 0xeeb80bc0, + ARMI_VCVT_F32_U32 = 0xeeb80a40, + ARMI_VCVT_F64_U32 = 0xeeb80b40, + ARMI_VCVT_F32_F64 = 0xeeb70bc0, + ARMI_VCVT_F64_F32 = 0xeeb70ac0, + + ARMI_VLDR_S = 0xed100a00, + ARMI_VLDR_D = 0xed100b00, + ARMI_VSTR_S = 
0xed000a00, + ARMI_VSTR_D = 0xed000b00, +} ARMIns; + +typedef enum ARMShift { + ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR +} ARMShift; + +/* ARM condition codes. */ +typedef enum ARMCC { + CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC, + CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL, + CC_HS = CC_CS, CC_LO = CC_CC +} ARMCC; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm64.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm64.h new file mode 100644 index 00000000000..99e0adc9d66 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_arm64.h @@ -0,0 +1,97 @@ +/* +** Definitions for ARM64 CPUs. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_ARM64_H +#define _LJ_TARGET_ARM64_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#define GPRDEF(_) \ + _(X0) _(X1) _(X2) _(X3) _(X4) _(X5) _(X6) _(X7) \ + _(X8) _(X9) _(X10) _(X11) _(X12) _(X13) _(X14) _(X15) \ + _(X16) _(X17) _(X18) _(X19) _(X20) _(X21) _(X22) _(X23) \ + _(X24) _(X25) _(X26) _(X27) _(X28) _(FP) _(LR) _(SP) +#define FPRDEF(_) \ + _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \ + _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15) \ + _(D16) _(D17) _(D18) _(D19) _(D20) _(D21) _(D22) _(D23) \ + _(D24) _(D25) _(D26) _(D27) _(D28) _(D29) _(D30) _(D31) +#define VRIDDEF(_) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_TMP = RID_LR, + RID_ZERO = RID_SP, + + /* Calling conventions. */ + RID_RET = RID_X0, + RID_FPRET = RID_D0, + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_X19, /* Interpreter BASE. */ + RID_LPC = RID_X21, /* Interpreter PC. */ + RID_GL = RID_X22, /* Interpreter GL. */ + RID_LREG = RID_X23, /* Interpreter L. */ + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_X0, + RID_MAX_GPR = RID_SP+1, + RID_MIN_FPR = RID_MAX_GPR, + RID_MAX_FPR = RID_D31+1, + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR +}; + +#define RID_NUM_KREF RID_NUM_GPR +#define RID_MIN_KREF RID_X0 + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except for x18, fp, lr and sp. */ +#define RSET_FIXED \ + (RID2RSET(RID_X18)|RID2RSET(RID_FP)|RID2RSET(RID_LR)|RID2RSET(RID_SP)) +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) +#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR) +#define RSET_ALL (RSET_GPR|RSET_FPR) +#define RSET_INIT RSET_ALL + +/* lr is an implicit scratch register. */ +#define RSET_SCRATCH_GPR (RSET_RANGE(RID_X0, RID_X17+1)) +#define RSET_SCRATCH_FPR \ + (RSET_RANGE(RID_D0, RID_D7+1)|RSET_RANGE(RID_D16, RID_D31+1)) +#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) +#define REGARG_FIRSTGPR RID_X0 +#define REGARG_LASTGPR RID_X7 +#define REGARG_NUMGPR 8 +#define REGARG_FIRSTFPR RID_D0 +#define REGARG_LASTFPR RID_D7 +#define REGARG_NUMFPR 8 + +/* -- Instructions -------------------------------------------------------- */ + +/* Instruction fields. 
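As a worked example of the field helpers defined just below (illustrative only, not part of the upstream file): composing the instruction word for "movz w0, #42".

static uint32_t a64_encode_demo(void)
{
  /* A64F_D() places the destination register at bit 0 and A64F_U16() the
  ** 16 bit immediate at bit 5: 0x52800000 | (42 << 5) | 0 == 0x52800540.
  */
  return (uint32_t)(A64I_MOVZw | A64F_U16(42) | A64F_D(RID_X0));  /* movz w0, #42 */
}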
*/ +#define A64F_D(r) (r) +#define A64F_N(r) ((r) << 5) +#define A64F_A(r) ((r) << 10) +#define A64F_M(r) ((r) << 16) +#define A64F_U16(x) ((x) << 5) +#define A64F_S26(x) (x) +#define A64F_S19(x) ((x) << 5) + +typedef enum A64Ins { + A64I_MOVZw = 0x52800000, + A64I_MOVZx = 0xd2800000, + A64I_LDRLw = 0x18000000, + A64I_LDRLx = 0x58000000, + A64I_NOP = 0xd503201f, + A64I_B = 0x14000000, + A64I_BR = 0xd61f0000, +} A64Ins; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_target_mips.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_mips.h new file mode 100644 index 00000000000..76645bcac5e --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_mips.h @@ -0,0 +1,260 @@ +/* +** Definitions for MIPS CPUs. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_MIPS_H +#define _LJ_TARGET_MIPS_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#define GPRDEF(_) \ + _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \ + _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \ + _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \ + _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA) +#define FPRDEF(_) \ + _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \ + _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \ + _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \ + _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31) +#define VRIDDEF(_) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_ZERO = RID_R0, + RID_TMP = RID_RA, + + /* Calling conventions. */ + RID_RET = RID_R2, +#if LJ_LE + RID_RETHI = RID_R3, + RID_RETLO = RID_R2, +#else + RID_RETHI = RID_R2, + RID_RETLO = RID_R3, +#endif + RID_FPRET = RID_F0, + RID_CFUNCADDR = RID_R25, + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_R16, /* Interpreter BASE. */ + RID_LPC = RID_R18, /* Interpreter PC. */ + RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */ + RID_LREG = RID_R20, /* Interpreter L. */ + RID_JGL = RID_R30, /* On-trace: global_State + 32768. */ + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_R0, + RID_MAX_GPR = RID_RA+1, + RID_MIN_FPR = RID_F0, + RID_MAX_FPR = RID_F31+1, + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */ +}; + +#define RID_NUM_KREF RID_NUM_GPR +#define RID_MIN_KREF RID_R0 + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2 and JGL. 
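The RegSet helpers from lj_target.h make the exclusions described above explicit; a brief sketch using the sets defined just below (illustrative only, assuming the LuaJIT core headers):

static void mips_rset_demo(void)
{
  RegSet gpr = RSET_GPR;                  /* All GPRs minus RSET_FIXED. */
  lua_assert(!rset_test(gpr, RID_SYS1));  /* Reserved registers are excluded. */
  lua_assert(rset_test(gpr, RID_R4));     /* Argument registers are not. */
  rset_clear(gpr, RID_R4);                /* Mark r4 as allocated. */
  (void)gpr;
}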
*/ +#define RSET_FIXED \ + (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\ + RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)) +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) +#define RSET_FPR \ + (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\ + RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\ + RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\ + RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30)) +#define RSET_ALL (RSET_GPR|RSET_FPR) +#define RSET_INIT RSET_ALL + +#define RSET_SCRATCH_GPR \ + (RSET_RANGE(RID_R1, RID_R15+1)|\ + RID2RSET(RID_R24)|RID2RSET(RID_R25)|RID2RSET(RID_R28)) +#define RSET_SCRATCH_FPR \ + (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\ + RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\ + RID2RSET(RID_F16)|RID2RSET(RID_F18)) +#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) +#define REGARG_FIRSTGPR RID_R4 +#define REGARG_LASTGPR RID_R7 +#define REGARG_NUMGPR 4 +#define REGARG_FIRSTFPR RID_F12 +#define REGARG_LASTFPR RID_F14 +#define REGARG_NUMFPR 2 + +/* -- Spill slots --------------------------------------------------------- */ + +/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. +** +** SPS_FIXED: Available fixed spill slots in interpreter frame. +** This definition must match with the *.dasc file(s). +** +** SPS_FIRST: First spill slot for general use. +*/ +#define SPS_FIXED 5 +#define SPS_FIRST 4 + +#define SPOFS_TMP 0 + +#define sps_scale(slot) (4 * (int32_t)(slot)) +#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1) + +/* -- Exit state ---------------------------------------------------------- */ + +/* This definition must match with the *.dasc file(s). */ +typedef struct { + lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ + int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ + int32_t spill[256]; /* Spill slots. */ +} ExitState; + +/* Highest exit + 1 indicates stack check. */ +#define EXITSTATE_CHECKEXIT 1 + +/* Return the address of a per-trace exit stub. */ +static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p) +{ + while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */ + return p; +} +/* Avoid dependence on lj_jit.h if only including lj_target.h. */ +#define exitstub_trace_addr(T, exitno) \ + exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode)) + +/* -- Instructions -------------------------------------------------------- */ + +/* Instruction fields. */ +#define MIPSF_S(r) ((r) << 21) +#define MIPSF_T(r) ((r) << 16) +#define MIPSF_D(r) ((r) << 11) +#define MIPSF_R(r) ((r) << 21) +#define MIPSF_H(r) ((r) << 16) +#define MIPSF_G(r) ((r) << 11) +#define MIPSF_F(r) ((r) << 6) +#define MIPSF_A(n) ((n) << 6) +#define MIPSF_M(n) ((n) << 11) + +typedef enum MIPSIns { + /* Integer instructions. 
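+  ** A worked example of composing the instruction fields defined above:
+  ** MIPSI_ADDIU | MIPSF_S(RID_R4) | MIPSF_T(RID_R3) | 1 encodes
+  ** "addiu $3, $4, 1" (rs in bits 25-21, rt in bits 20-16, immediate in
+  ** the low 16 bits).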
*/ + MIPSI_MOVE = 0x00000021, + MIPSI_NOP = 0x00000000, + + MIPSI_LI = 0x24000000, + MIPSI_LU = 0x34000000, + MIPSI_LUI = 0x3c000000, + + MIPSI_ADDIU = 0x24000000, + MIPSI_ANDI = 0x30000000, + MIPSI_ORI = 0x34000000, + MIPSI_XORI = 0x38000000, + MIPSI_SLTI = 0x28000000, + MIPSI_SLTIU = 0x2c000000, + + MIPSI_ADDU = 0x00000021, + MIPSI_SUBU = 0x00000023, + MIPSI_MUL = 0x70000002, + MIPSI_AND = 0x00000024, + MIPSI_OR = 0x00000025, + MIPSI_XOR = 0x00000026, + MIPSI_NOR = 0x00000027, + MIPSI_SLT = 0x0000002a, + MIPSI_SLTU = 0x0000002b, + MIPSI_MOVZ = 0x0000000a, + MIPSI_MOVN = 0x0000000b, + MIPSI_MFHI = 0x00000010, + MIPSI_MFLO = 0x00000012, + MIPSI_MULT = 0x00000018, + + MIPSI_SLL = 0x00000000, + MIPSI_SRL = 0x00000002, + MIPSI_SRA = 0x00000003, + MIPSI_ROTR = 0x00200002, /* MIPS32R2 */ + MIPSI_SLLV = 0x00000004, + MIPSI_SRLV = 0x00000006, + MIPSI_SRAV = 0x00000007, + MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */ + + MIPSI_SEB = 0x7c000420, /* MIPS32R2 */ + MIPSI_SEH = 0x7c000620, /* MIPS32R2 */ + MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */ + + MIPSI_B = 0x10000000, + MIPSI_J = 0x08000000, + MIPSI_JAL = 0x0c000000, + MIPSI_JR = 0x00000008, + MIPSI_JALR = 0x0000f809, + + MIPSI_BEQ = 0x10000000, + MIPSI_BNE = 0x14000000, + MIPSI_BLEZ = 0x18000000, + MIPSI_BGTZ = 0x1c000000, + MIPSI_BLTZ = 0x04000000, + MIPSI_BGEZ = 0x04010000, + + /* Load/store instructions. */ + MIPSI_LW = 0x8c000000, + MIPSI_SW = 0xac000000, + MIPSI_LB = 0x80000000, + MIPSI_SB = 0xa0000000, + MIPSI_LH = 0x84000000, + MIPSI_SH = 0xa4000000, + MIPSI_LBU = 0x90000000, + MIPSI_LHU = 0x94000000, + MIPSI_LWC1 = 0xc4000000, + MIPSI_SWC1 = 0xe4000000, + MIPSI_LDC1 = 0xd4000000, + MIPSI_SDC1 = 0xf4000000, + + /* FP instructions. */ + MIPSI_MOV_S = 0x46000006, + MIPSI_MOV_D = 0x46200006, + MIPSI_MOVT_D = 0x46210011, + MIPSI_MOVF_D = 0x46200011, + + MIPSI_ABS_D = 0x46200005, + MIPSI_NEG_D = 0x46200007, + + MIPSI_ADD_D = 0x46200000, + MIPSI_SUB_D = 0x46200001, + MIPSI_MUL_D = 0x46200002, + MIPSI_DIV_D = 0x46200003, + MIPSI_SQRT_D = 0x46200004, + + MIPSI_ADD_S = 0x46000000, + MIPSI_SUB_S = 0x46000001, + + MIPSI_CVT_D_S = 0x46000021, + MIPSI_CVT_W_S = 0x46000024, + MIPSI_CVT_S_D = 0x46200020, + MIPSI_CVT_W_D = 0x46200024, + MIPSI_CVT_S_W = 0x46800020, + MIPSI_CVT_D_W = 0x46800021, + + MIPSI_TRUNC_W_S = 0x4600000d, + MIPSI_TRUNC_W_D = 0x4620000d, + MIPSI_FLOOR_W_S = 0x4600000f, + MIPSI_FLOOR_W_D = 0x4620000f, + + MIPSI_MFC1 = 0x44000000, + MIPSI_MTC1 = 0x44800000, + + MIPSI_BC1F = 0x45000000, + MIPSI_BC1T = 0x45010000, + + MIPSI_C_EQ_D = 0x46200032, + MIPSI_C_OLT_D = 0x46200034, + MIPSI_C_ULT_D = 0x46200035, + MIPSI_C_OLE_D = 0x46200036, + MIPSI_C_ULE_D = 0x46200037, + +} MIPSIns; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_target_ppc.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_ppc.h new file mode 100644 index 00000000000..99867688b7d --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_ppc.h @@ -0,0 +1,280 @@ +/* +** Definitions for PPC CPUs. +** Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_PPC_H +#define _LJ_TARGET_PPC_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#define GPRDEF(_) \ + _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \ + _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \ + _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \ + _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31) +#define FPRDEF(_) \ + _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \ + _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \ + _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \ + _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31) +#define VRIDDEF(_) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_TMP = RID_R0, + + /* Calling conventions. */ + RID_RET = RID_R3, + RID_RETHI = RID_R3, + RID_RETLO = RID_R4, + RID_FPRET = RID_F1, + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_R14, /* Interpreter BASE. */ + RID_LPC = RID_R16, /* Interpreter PC. */ + RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */ + RID_LREG = RID_R18, /* Interpreter L. */ + RID_JGL = RID_R31, /* On-trace: global_State + 32768. */ + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_R0, + RID_MAX_GPR = RID_R31+1, + RID_MIN_FPR = RID_F0, + RID_MAX_FPR = RID_F31+1, + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR +}; + +#define RID_NUM_KREF RID_NUM_GPR +#define RID_MIN_KREF RID_R0 + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */ +#define RSET_FIXED \ + (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\ + RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)) +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) +#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR) +#define RSET_ALL (RSET_GPR|RSET_FPR) +#define RSET_INIT RSET_ALL + +#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1)) +#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1)) +#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) +#define REGARG_FIRSTGPR RID_R3 +#define REGARG_LASTGPR RID_R10 +#define REGARG_NUMGPR 8 +#define REGARG_FIRSTFPR RID_F1 +#define REGARG_LASTFPR RID_F8 +#define REGARG_NUMFPR 8 + +/* -- Spill slots --------------------------------------------------------- */ + +/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. +** +** SPS_FIXED: Available fixed spill slots in interpreter frame. +** This definition must match with the *.dasc file(s). +** +** SPS_FIRST: First spill slot for general use. +** [sp+12] tmplo word \ +** [sp+ 8] tmphi word / tmp dword, parameter area for callee +** [sp+ 4] tmpw, LR of callee +** [sp+ 0] stack chain +*/ +#define SPS_FIXED 7 +#define SPS_FIRST 4 + +/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */ +#define SPOFS_TMPW 4 +#define SPOFS_TMP 8 +#define SPOFS_TMPHI 8 +#define SPOFS_TMPLO 12 + +#define sps_scale(slot) (4 * (int32_t)(slot)) +#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3) + +/* -- Exit state ---------------------------------------------------------- */ + +/* This definition must match with the *.dasc file(s). */ +typedef struct { + lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ + intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. 
*/ + int32_t spill[256]; /* Spill slots. */ +} ExitState; + +/* Highest exit + 1 indicates stack check. */ +#define EXITSTATE_CHECKEXIT 1 + +/* Return the address of a per-trace exit stub. */ +static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno) +{ + while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */ + return p + 3 + exitno; +} +/* Avoid dependence on lj_jit.h if only including lj_target.h. */ +#define exitstub_trace_addr(T, exitno) \ + exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno)) + +/* -- Instructions -------------------------------------------------------- */ + +/* Instruction fields. */ +#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22)) +#define PPCF_T(r) ((r) << 21) +#define PPCF_A(r) ((r) << 16) +#define PPCF_B(r) ((r) << 11) +#define PPCF_C(r) ((r) << 6) +#define PPCF_MB(n) ((n) << 6) +#define PPCF_ME(n) ((n) << 1) +#define PPCF_Y 0x00200000 +#define PPCF_DOT 0x00000001 + +typedef enum PPCIns { + /* Integer instructions. */ + PPCI_MR = 0x7c000378, + PPCI_NOP = 0x60000000, + + PPCI_LI = 0x38000000, + PPCI_LIS = 0x3c000000, + + PPCI_ADD = 0x7c000214, + PPCI_ADDC = 0x7c000014, + PPCI_ADDO = 0x7c000614, + PPCI_ADDE = 0x7c000114, + PPCI_ADDZE = 0x7c000194, + PPCI_ADDME = 0x7c0001d4, + PPCI_ADDI = 0x38000000, + PPCI_ADDIS = 0x3c000000, + PPCI_ADDIC = 0x30000000, + PPCI_ADDICDOT = 0x34000000, + + PPCI_SUBF = 0x7c000050, + PPCI_SUBFC = 0x7c000010, + PPCI_SUBFO = 0x7c000450, + PPCI_SUBFE = 0x7c000110, + PPCI_SUBFZE = 0x7c000190, + PPCI_SUBFME = 0x7c0001d0, + PPCI_SUBFIC = 0x20000000, + + PPCI_NEG = 0x7c0000d0, + + PPCI_AND = 0x7c000038, + PPCI_ANDC = 0x7c000078, + PPCI_NAND = 0x7c0003b8, + PPCI_ANDIDOT = 0x70000000, + PPCI_ANDISDOT = 0x74000000, + + PPCI_OR = 0x7c000378, + PPCI_NOR = 0x7c0000f8, + PPCI_ORI = 0x60000000, + PPCI_ORIS = 0x64000000, + + PPCI_XOR = 0x7c000278, + PPCI_EQV = 0x7c000238, + PPCI_XORI = 0x68000000, + PPCI_XORIS = 0x6c000000, + + PPCI_CMPW = 0x7c000000, + PPCI_CMPLW = 0x7c000040, + PPCI_CMPWI = 0x2c000000, + PPCI_CMPLWI = 0x28000000, + + PPCI_MULLW = 0x7c0001d6, + PPCI_MULLI = 0x1c000000, + PPCI_MULLWO = 0x7c0005d6, + + PPCI_EXTSB = 0x7c000774, + PPCI_EXTSH = 0x7c000734, + + PPCI_SLW = 0x7c000030, + PPCI_SRW = 0x7c000430, + PPCI_SRAW = 0x7c000630, + PPCI_SRAWI = 0x7c000670, + + PPCI_RLWNM = 0x5c000000, + PPCI_RLWINM = 0x54000000, + PPCI_RLWIMI = 0x50000000, + + PPCI_B = 0x48000000, + PPCI_BL = 0x48000001, + PPCI_BC = 0x40800000, + PPCI_BCL = 0x40800001, + PPCI_BCTR = 0x4e800420, + PPCI_BCTRL = 0x4e800421, + + PPCI_CRANDC = 0x4c000102, + PPCI_CRXOR = 0x4c000182, + PPCI_CRAND = 0x4c000202, + PPCI_CREQV = 0x4c000242, + PPCI_CRORC = 0x4c000342, + PPCI_CROR = 0x4c000382, + + PPCI_MFLR = 0x7c0802a6, + PPCI_MTCTR = 0x7c0903a6, + + PPCI_MCRXR = 0x7c000400, + + /* Load/store instructions. */ + PPCI_LWZ = 0x80000000, + PPCI_LBZ = 0x88000000, + PPCI_STW = 0x90000000, + PPCI_STB = 0x98000000, + PPCI_LHZ = 0xa0000000, + PPCI_LHA = 0xa8000000, + PPCI_STH = 0xb0000000, + + PPCI_STWU = 0x94000000, + + PPCI_LFS = 0xc0000000, + PPCI_LFD = 0xc8000000, + PPCI_STFS = 0xd0000000, + PPCI_STFD = 0xd8000000, + + PPCI_LWZX = 0x7c00002e, + PPCI_LBZX = 0x7c0000ae, + PPCI_STWX = 0x7c00012e, + PPCI_STBX = 0x7c0001ae, + PPCI_LHZX = 0x7c00022e, + PPCI_LHAX = 0x7c0002ae, + PPCI_STHX = 0x7c00032e, + + PPCI_LWBRX = 0x7c00042c, + PPCI_STWBRX = 0x7c00052c, + + PPCI_LFSX = 0x7c00042e, + PPCI_LFDX = 0x7c0004ae, + PPCI_STFSX = 0x7c00052e, + PPCI_STFDX = 0x7c0005ae, + + /* FP instructions. 
*/ + PPCI_FMR = 0xfc000090, + PPCI_FNEG = 0xfc000050, + PPCI_FABS = 0xfc000210, + + PPCI_FRSP = 0xfc000018, + PPCI_FCTIWZ = 0xfc00001e, + + PPCI_FADD = 0xfc00002a, + PPCI_FSUB = 0xfc000028, + PPCI_FMUL = 0xfc000032, + PPCI_FDIV = 0xfc000024, + PPCI_FSQRT = 0xfc00002c, + + PPCI_FMADD = 0xfc00003a, + PPCI_FMSUB = 0xfc000038, + PPCI_FNMSUB = 0xfc00003c, + + PPCI_FCMPU = 0xfc000000, + PPCI_FSEL = 0xfc00002e, +} PPCIns; + +typedef enum PPCCC { + CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO +} PPCCC; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_target_x86.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_x86.h new file mode 100644 index 00000000000..fc9d37024b0 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_target_x86.h @@ -0,0 +1,345 @@ +/* +** Definitions for x86 and x64 CPUs. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_X86_H +#define _LJ_TARGET_X86_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#if LJ_64 +#define GPRDEF(_) \ + _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \ + _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D) +#define FPRDEF(_) \ + _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \ + _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15) +#else +#define GPRDEF(_) \ + _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) +#define FPRDEF(_) \ + _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) +#endif +#define VRIDDEF(_) \ + _(MRM) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */ + + /* Calling conventions. */ + RID_SP = RID_ESP, + RID_RET = RID_EAX, +#if LJ_64 + RID_FPRET = RID_XMM0, +#else + RID_RETLO = RID_EAX, + RID_RETHI = RID_EDX, +#endif + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_EDX, /* Interpreter BASE. */ +#if LJ_64 && !LJ_ABI_WIN + RID_LPC = RID_EBX, /* Interpreter PC. */ + RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */ +#else + RID_LPC = RID_ESI, /* Interpreter PC. */ + RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */ +#endif + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_EAX, + RID_MIN_FPR = RID_XMM0, + RID_MAX_GPR = RID_MIN_FPR, + RID_MAX_FPR = RID_MAX, + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR, +}; + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except the stack pointer. */ +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP)) +#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)) +#define RSET_ALL (RSET_GPR|RSET_FPR) +#define RSET_INIT RSET_ALL + +#if LJ_64 +/* Note: this requires the use of FORCE_REX! */ +#define RSET_GPR8 RSET_GPR +#else +#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1)) +#endif + +/* ABI-specific register sets. */ +#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX)) +#if LJ_64 +#if LJ_ABI_WIN +/* Windows x64 ABI. 
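+** Note: REGARG_GPRS below packs the four integer argument registers
+** rcx, rdx, r8 and r9 as successive 5-bit register ids, with the first
+** argument in the lowest field.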
*/ +#define RSET_SCRATCH \ + (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1)) +#define REGARG_GPRS \ + (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5)) +#define REGARG_NUMGPR 4 +#define REGARG_NUMFPR 4 +#define REGARG_FIRSTFPR RID_XMM0 +#define REGARG_LASTFPR RID_XMM3 +#define STACKARG_OFS (4*8) +#else +/* The rest of the civilized x64 world has a common ABI. */ +#define RSET_SCRATCH \ + (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR) +#define REGARG_GPRS \ + (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \ + <<5))<<5))<<5))<<5))<<5)) +#define REGARG_NUMGPR 6 +#define REGARG_NUMFPR 8 +#define REGARG_FIRSTFPR RID_XMM0 +#define REGARG_LASTFPR RID_XMM7 +#define STACKARG_OFS 0 +#endif +#else +/* Common x86 ABI. */ +#define RSET_SCRATCH (RSET_ACD|RSET_FPR) +#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */ +#define REGARG_NUMGPR 2 /* Fastcall only. */ +#define REGARG_NUMFPR 0 +#define STACKARG_OFS 0 +#endif + +#if LJ_64 +/* Prefer the low 8 regs of each type to reduce REX prefixes. */ +#undef rset_picktop +#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18) +#endif + +/* -- Spill slots --------------------------------------------------------- */ + +/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. +** +** SPS_FIXED: Available fixed spill slots in interpreter frame. +** This definition must match with the *.dasc file(s). +** +** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots. +*/ +#if LJ_64 +#if LJ_ABI_WIN +#define SPS_FIXED (4*2) +#define SPS_FIRST (4*2) /* Don't use callee register save area. */ +#else +#if LJ_GC64 +#define SPS_FIXED 2 +#else +#define SPS_FIXED 4 +#endif +#define SPS_FIRST 2 +#endif +#else +#define SPS_FIXED 6 +#define SPS_FIRST 2 +#endif + +#define SPOFS_TMP 0 + +#define sps_scale(slot) (4 * (int32_t)(slot)) +#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3) + +/* -- Exit state ---------------------------------------------------------- */ + +/* This definition must match with the *.dasc file(s). */ +typedef struct { + lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ + intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ + int32_t spill[256]; /* Spill slots. */ +} ExitState; + +/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */ +#define EXITSTUB_SPACING (2+2) +#define EXITSTUBS_PER_GROUP 32 + +/* -- x86 ModRM operand encoding ------------------------------------------ */ + +typedef enum { + XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0, + XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0, + XM_MASK = 0xc0 +} x86Mode; + +/* Structure to hold variable ModRM operand. */ +typedef struct { + int32_t ofs; /* Offset. */ + uint8_t base; /* Base register or RID_NONE. */ + uint8_t idx; /* Index register or RID_NONE. */ + uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */ +} x86ModRM; + +/* -- Opcodes ------------------------------------------------------------- */ + +/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. 
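+** For example, XO_660f(2e) evaluates to 0x2e0f66fc: the low byte 0xfc is
+** -(3+1), i.e. a 3-byte opcode, and the bytes 66 0f 2e are the prefix and
+** opcode of UCOMISD (cf. XO_UCOMISD below).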
*/ +#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24))) +#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24))) +#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24))) +#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24))) +#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24))) +#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24))) +#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24))) + +/* This list of x86 opcodes is not intended to be complete. Opcodes are only +** included when needed. Take a look at DynASM or jit.dis_x86 to see the +** whole mess. +*/ +typedef enum { + /* Fixed length opcodes. XI_* prefix. */ + XI_NOP = 0x90, + XI_XCHGa = 0x90, + XI_CALL = 0xe8, + XI_JMP = 0xe9, + XI_JMPs = 0xeb, + XI_PUSH = 0x50, /* Really 50+r. */ + XI_JCCs = 0x70, /* Really 7x. */ + XI_JCCn = 0x80, /* Really 0f8x. */ + XI_LEA = 0x8d, + XI_MOVrib = 0xb0, /* Really b0+r. */ + XI_MOVri = 0xb8, /* Really b8+r. */ + XI_ARITHib = 0x80, + XI_ARITHi = 0x81, + XI_ARITHi8 = 0x83, + XI_PUSHi8 = 0x6a, + XI_TESTb = 0x84, + XI_TEST = 0x85, + XI_MOVmi = 0xc7, + XI_GROUP5 = 0xff, + + /* Note: little-endian byte-order! */ + XI_FLDZ = 0xeed9, + XI_FLD1 = 0xe8d9, + XI_FLDLG2 = 0xecd9, + XI_FLDLN2 = 0xedd9, + XI_FDUP = 0xc0d9, /* Really fld st0. */ + XI_FPOP = 0xd8dd, /* Really fstp st0. */ + XI_FPOP1 = 0xd9dd, /* Really fstp st1. */ + XI_FRNDINT = 0xfcd9, + XI_FSIN = 0xfed9, + XI_FCOS = 0xffd9, + XI_FPTAN = 0xf2d9, + XI_FPATAN = 0xf3d9, + XI_FSCALE = 0xfdd9, + XI_FYL2X = 0xf1d9, + + /* Variable-length opcodes. XO_* prefix. */ + XO_MOV = XO_(8b), + XO_MOVto = XO_(89), + XO_MOVtow = XO_66(89), + XO_MOVtob = XO_(88), + XO_MOVmi = XO_(c7), + XO_MOVmib = XO_(c6), + XO_LEA = XO_(8d), + XO_ARITHib = XO_(80), + XO_ARITHi = XO_(81), + XO_ARITHi8 = XO_(83), + XO_ARITHiw8 = XO_66(83), + XO_SHIFTi = XO_(c1), + XO_SHIFT1 = XO_(d1), + XO_SHIFTcl = XO_(d3), + XO_IMUL = XO_0f(af), + XO_IMULi = XO_(69), + XO_IMULi8 = XO_(6b), + XO_CMP = XO_(3b), + XO_TESTb = XO_(84), + XO_TEST = XO_(85), + XO_GROUP3b = XO_(f6), + XO_GROUP3 = XO_(f7), + XO_GROUP5b = XO_(fe), + XO_GROUP5 = XO_(ff), + XO_MOVZXb = XO_0f(b6), + XO_MOVZXw = XO_0f(b7), + XO_MOVSXb = XO_0f(be), + XO_MOVSXw = XO_0f(bf), + XO_MOVSXd = XO_(63), + XO_BSWAP = XO_0f(c8), + XO_CMOV = XO_0f(40), + + XO_MOVSD = XO_f20f(10), + XO_MOVSDto = XO_f20f(11), + XO_MOVSS = XO_f30f(10), + XO_MOVSSto = XO_f30f(11), + XO_MOVLPD = XO_660f(12), + XO_MOVAPS = XO_0f(28), + XO_XORPS = XO_0f(57), + XO_ANDPS = XO_0f(54), + XO_ADDSD = XO_f20f(58), + XO_SUBSD = XO_f20f(5c), + XO_MULSD = XO_f20f(59), + XO_DIVSD = XO_f20f(5e), + XO_SQRTSD = XO_f20f(51), + XO_MINSD = XO_f20f(5d), + XO_MAXSD = XO_f20f(5f), + XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */ + XO_UCOMISD = XO_660f(2e), + XO_CVTSI2SD = XO_f20f(2a), + XO_CVTTSD2SI= XO_f20f(2c), + XO_CVTSI2SS = XO_f30f(2a), + XO_CVTTSS2SI= XO_f30f(2c), + XO_CVTSS2SD = XO_f30f(5a), + XO_CVTSD2SS = XO_f20f(5a), + XO_ADDSS = XO_f30f(58), + XO_MOVD = XO_660f(6e), + XO_MOVDto = XO_660f(7e), + + XO_FLDd = XO_(d9), XOg_FLDd = 0, + XO_FLDq = XO_(dd), XOg_FLDq = 0, + XO_FILDd = XO_(db), XOg_FILDd = 0, + XO_FILDq = XO_(df), XOg_FILDq = 5, + XO_FSTPd = XO_(d9), XOg_FSTPd = 3, + XO_FSTPq = XO_(dd), XOg_FSTPq = 3, + XO_FISTPq = XO_(df), XOg_FISTPq = 7, + XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1, + XO_FADDq = XO_(dc), XOg_FADDq = 0, + XO_FLDCW = XO_(d9), XOg_FLDCW = 5, + XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7 +} x86Op; + +/* x86 opcode groups. 
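+** For example, XG_ARITHi(XOg_ADD) packs XI_ARITHi8, XI_ARITHi and the /0
+** group digit for ADD into 0x00838100; XG_TOXOi() and XG_TOXOi8() below
+** turn that back into the variable-length opcodes XO_(81) and XO_(83).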
*/ +typedef uint32_t x86Group; + +#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g))) +#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g) +#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000))) +#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000))) + +#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27))) +#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27))) + +typedef enum { + XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP, + XOg_X_IMUL +} x86Arith; + +typedef enum { + XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR +} x86Shift; + +typedef enum { + XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV +} x86Group3; + +typedef enum { + XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH +} x86Group5; + +/* x86 condition codes. */ +typedef enum { + CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE, + CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE, + CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB, + CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE, + CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL, + CC_NG = CC_LE, CC_G = CC_NLE +} x86CC; + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_trace.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_trace.c new file mode 100644 index 00000000000..42f4321d5d2 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_trace.c @@ -0,0 +1,859 @@ +/* +** Trace management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_trace_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_frame.h" +#include "lj_state.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_mcode.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_gdbjit.h" +#include "lj_record.h" +#include "lj_asm.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_vmevent.h" +#include "lj_target.h" + +/* -- Error handling ------------------------------------------------------ */ + +/* Synchronous abort with error message. */ +void lj_trace_err(jit_State *J, TraceError e) +{ + setnilV(&J->errinfo); /* No error info. */ + setintV(J->L->top++, (int32_t)e); + lj_err_throw(J->L, LUA_ERRRUN); +} + +/* Synchronous abort with error message and error info. */ +void lj_trace_err_info(jit_State *J, TraceError e) +{ + setintV(J->L->top++, (int32_t)e); + lj_err_throw(J->L, LUA_ERRRUN); +} + +/* -- Trace management ---------------------------------------------------- */ + +/* The current trace is first assembled in J->cur. The variable length +** arrays point to shared, growable buffers (J->irbuf etc.). When trace +** recording ends successfully, the current trace and its data structures +** are copied to a new (compact) GCtrace object. +*/ + +/* Find a free trace number. */ +static TraceNo trace_findfree(jit_State *J) +{ + MSize osz, lim; + if (J->freetrace == 0) + J->freetrace = 1; + for (; J->freetrace < J->sizetrace; J->freetrace++) + if (traceref(J, J->freetrace) == NULL) + return J->freetrace++; + /* Need to grow trace array. */ + lim = (MSize)J->param[JIT_P_maxtrace] + 1; + if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535; + osz = J->sizetrace; + if (osz >= lim) + return 0; /* Too many traces. 
*/
+  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
+  for (; osz < J->sizetrace; osz++)
+    setgcrefnull(J->trace[osz]);
+  return J->freetrace;
+}
+
+#define TRACE_APPENDVEC(field, szfield, tp) \
+  T->field = (tp *)p; \
+  memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
+  p += J->cur.szfield*sizeof(tp);
+
+#ifdef LUAJIT_USE_PERFTOOLS
+/*
+** Create symbol table of JIT-compiled code. For use with Linux perf tools.
+** Example usage:
+**   perf record -f -e cycles luajit test.lua
+**   perf report -s symbol
+**   rm perf.data /tmp/perf-*.map
+*/
+#include <stdio.h>
+#include <unistd.h>
+
+static void perftools_addtrace(GCtrace *T)
+{
+  static FILE *fp;
+  GCproto *pt = &gcref(T->startpt)->pt;
+  const BCIns *startpc = mref(T->startpc, const BCIns);
+  const char *name = proto_chunknamestr(pt);
+  BCLine lineno;
+  if (name[0] == '@' || name[0] == '=')
+    name++;
+  else
+    name = "(string)";
+  lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc);
+  lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
+  if (!fp) {
+    char fname[40];
+    sprintf(fname, "/tmp/perf-%d.map", getpid());
+    if (!(fp = fopen(fname, "w"))) return;
+    setlinebuf(fp);
+  }
+  fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
+          (long)T->mcode, T->szmcode, T->traceno, name, lineno);
+}
+#endif
+
+/* Allocate space for copy of trace. */
+static GCtrace *trace_save_alloc(jit_State *J)
+{
+  size_t sztr = ((sizeof(GCtrace)+7)&~7);
+  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
+  size_t sz = sztr + szins +
+              J->cur.nsnap*sizeof(SnapShot) +
+              J->cur.nsnapmap*sizeof(SnapEntry);
+  return lj_mem_newt(J->L, (MSize)sz, GCtrace);
+}
+
+/* Save current trace by copying and compacting it. */
+static void trace_save(jit_State *J, GCtrace *T)
+{
+  size_t sztr = ((sizeof(GCtrace)+7)&~7);
+  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
+  char *p = (char *)T + sztr;
+  memcpy(T, &J->cur, sizeof(GCtrace));
+  setgcrefr(T->nextgc, J2G(J)->gc.root);
+  setgcrefp(J2G(J)->gc.root, T);
+  newwhite(J2G(J), T);
+  T->gct = ~LJ_TTRACE;
+  T->ir = (IRIns *)p - J->cur.nk;
+  memcpy(p, J->cur.ir+J->cur.nk, szins);
+  p += szins;
+  TRACE_APPENDVEC(snap, nsnap, SnapShot)
+  TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
+  J->cur.traceno = 0;
+  setgcrefp(J->trace[T->traceno], T);
+  lj_gc_barriertrace(J2G(J), T->traceno);
+  lj_gdbjit_addtrace(J, T);
+#ifdef LUAJIT_USE_PERFTOOLS
+  perftools_addtrace(T);
+#endif
+}
+
+void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
+{
+  jit_State *J = G2J(g);
+  if (T->traceno) {
+    lj_gdbjit_deltrace(J, T);
+    if (T->traceno < J->freetrace)
+      J->freetrace = T->traceno;
+    setgcrefnull(J->trace[T->traceno]);
+  }
+  lj_mem_free(g, T,
+    ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
+    T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
+}
+
+/* Re-enable compiling a prototype by unpatching any modified bytecode. */
+void lj_trace_reenableproto(GCproto *pt)
+{
+  if ((pt->flags & PROTO_ILOOP)) {
+    BCIns *bc = proto_bc(pt);
+    BCPos i, sizebc = pt->sizebc;
+    pt->flags &= ~PROTO_ILOOP;
+    if (bc_op(bc[0]) == BC_IFUNCF)
+      setbc_op(&bc[0], BC_FUNCF);
+    for (i = 1; i < sizebc; i++) {
+      BCOp op = bc_op(bc[i]);
+      if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
+        setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
+    }
+  }
+}
+
+/* Unpatch the bytecode modified by a root trace. */
+static void trace_unpatch(jit_State *J, GCtrace *T)
+{
+  BCOp op = bc_op(T->startins);
+  BCIns *pc = mref(T->startpc, BCIns);
+  UNUSED(J);
+  if (op == BC_JMP)
+    return;  /* No need to unpatch branches in parent traces (yet).
*/ + switch (bc_op(*pc)) { + case BC_JFORL: + lua_assert(traceref(J, bc_d(*pc)) == T); + *pc = T->startins; + pc += bc_j(T->startins); + lua_assert(bc_op(*pc) == BC_JFORI); + setbc_op(pc, BC_FORI); + break; + case BC_JITERL: + case BC_JLOOP: + lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op)); + *pc = T->startins; + break; + case BC_JMP: + lua_assert(op == BC_ITERL); + pc += bc_j(*pc)+2; + if (bc_op(*pc) == BC_JITERL) { + lua_assert(traceref(J, bc_d(*pc)) == T); + *pc = T->startins; + } + break; + case BC_JFUNCF: + lua_assert(op == BC_FUNCF); + *pc = T->startins; + break; + default: /* Already unpatched. */ + break; + } +} + +/* Flush a root trace. */ +static void trace_flushroot(jit_State *J, GCtrace *T) +{ + GCproto *pt = &gcref(T->startpt)->pt; + lua_assert(T->root == 0 && pt != NULL); + /* First unpatch any modified bytecode. */ + trace_unpatch(J, T); + /* Unlink root trace from chain anchored in prototype. */ + if (pt->trace == T->traceno) { /* Trace is first in chain. Easy. */ + pt->trace = T->nextroot; + } else if (pt->trace) { /* Otherwise search in chain of root traces. */ + GCtrace *T2 = traceref(J, pt->trace); + if (T2) { + for (; T2->nextroot; T2 = traceref(J, T2->nextroot)) + if (T2->nextroot == T->traceno) { + T2->nextroot = T->nextroot; /* Unlink from chain. */ + break; + } + } + } +} + +/* Flush a trace. Only root traces are considered. */ +void lj_trace_flush(jit_State *J, TraceNo traceno) +{ + if (traceno > 0 && traceno < J->sizetrace) { + GCtrace *T = traceref(J, traceno); + if (T && T->root == 0) + trace_flushroot(J, T); + } +} + +/* Flush all traces associated with a prototype. */ +void lj_trace_flushproto(global_State *g, GCproto *pt) +{ + while (pt->trace != 0) + trace_flushroot(G2J(g), traceref(G2J(g), pt->trace)); +} + +/* Flush all traces. */ +int lj_trace_flushall(lua_State *L) +{ + jit_State *J = L2J(L); + ptrdiff_t i; + if ((J2G(J)->hookmask & HOOK_GC)) + return 1; + for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) { + GCtrace *T = traceref(J, i); + if (T) { + if (T->root == 0) + trace_flushroot(J, T); + lj_gdbjit_deltrace(J, T); + T->traceno = 0; + setgcrefnull(J->trace[i]); + } + } + J->cur.traceno = 0; + J->freetrace = 0; + /* Clear penalty cache. */ + memset(J->penalty, 0, sizeof(J->penalty)); + /* Free the whole machine code and invalidate all exit stub groups. */ + lj_mcode_free(J); + memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup)); + lj_vmevent_send(L, TRACE, + setstrV(L, L->top++, lj_str_newlit(L, "flush")); + ); + return 0; +} + +/* Initialize JIT compiler state. */ +void lj_trace_initstate(global_State *g) +{ + jit_State *J = G2J(g); + TValue *tv; + /* Initialize SIMD constants. */ + tv = LJ_KSIMD(J, LJ_KSIMD_ABS); + tv[0].u64 = U64x(7fffffff,ffffffff); + tv[1].u64 = U64x(7fffffff,ffffffff); + tv = LJ_KSIMD(J, LJ_KSIMD_NEG); + tv[0].u64 = U64x(80000000,00000000); + tv[1].u64 = U64x(80000000,00000000); +} + +/* Free everything associated with the JIT compiler state. */ +void lj_trace_freestate(global_State *g) +{ + jit_State *J = G2J(g); +#ifdef LUA_USE_ASSERT + { /* This assumes all traces have already been freed. 
*/ + ptrdiff_t i; + for (i = 1; i < (ptrdiff_t)J->sizetrace; i++) + lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL); + } +#endif + lj_mcode_free(J); + lj_ir_k64_freeall(J); + lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry); + lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot); + lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns); + lj_mem_freevec(g, J->trace, J->sizetrace, GCRef); +} + +/* -- Penalties and blacklisting ------------------------------------------ */ + +/* Blacklist a bytecode instruction. */ +static void blacklist_pc(GCproto *pt, BCIns *pc) +{ + setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP); + pt->flags |= PROTO_ILOOP; +} + +/* Penalize a bytecode instruction. */ +static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e) +{ + uint32_t i, val = PENALTY_MIN; + for (i = 0; i < PENALTY_SLOTS; i++) + if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */ + /* First try to bump its hotcount several times. */ + val = ((uint32_t)J->penalty[i].val << 1) + + LJ_PRNG_BITS(J, PENALTY_RNDBITS); + if (val > PENALTY_MAX) { + blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */ + return; + } + goto setpenalty; + } + /* Assign a new penalty cache slot. */ + i = J->penaltyslot; + J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1); + setmref(J->penalty[i].pc, pc); +setpenalty: + J->penalty[i].val = (uint16_t)val; + J->penalty[i].reason = e; + hotcount_set(J2GG(J), pc+1, val); +} + +/* -- Trace compiler state machine ---------------------------------------- */ + +/* Start tracing. */ +static void trace_start(jit_State *J) +{ + lua_State *L; + TraceNo traceno; + + if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */ + if (J->parent == 0 && J->exitno == 0) { + /* Lazy bytecode patching to disable hotcount events. */ + lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL || + bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF); + setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP); + J->pt->flags |= PROTO_ILOOP; + } + J->state = LJ_TRACE_IDLE; /* Silently ignored. */ + return; + } + + /* Get a new trace number. */ + traceno = trace_findfree(J); + if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */ + lua_assert((J2G(J)->hookmask & HOOK_GC) == 0); + lj_trace_flushall(J->L); + J->state = LJ_TRACE_IDLE; /* Silently ignored. */ + return; + } + setgcrefp(J->trace[traceno], &J->cur); + + /* Setup enough of the current trace to be able to send the vmevent. */ + memset(&J->cur, 0, sizeof(GCtrace)); + J->cur.traceno = traceno; + J->cur.nins = J->cur.nk = REF_BASE; + J->cur.ir = J->irbuf; + J->cur.snap = J->snapbuf; + J->cur.snapmap = J->snapmapbuf; + J->mergesnap = 0; + J->needsnap = 0; + J->bcskip = 0; + J->guardemit.irt = 0; + J->postproc = LJ_POST_NONE; + lj_resetsplit(J); + J->retryrec = 0; + setgcref(J->cur.startpt, obj2gco(J->pt)); + + L = J->L; + lj_vmevent_send(L, TRACE, + setstrV(L, L->top++, lj_str_newlit(L, "start")); + setintV(L->top++, traceno); + setfuncV(L, L->top++, J->fn); + setintV(L->top++, proto_bcpos(J->pt, J->pc)); + if (J->parent) { + setintV(L->top++, J->parent); + setintV(L->top++, J->exitno); + } + ); + lj_record_setup(J); +} + +/* Stop tracing. */ +static void trace_stop(jit_State *J) +{ + BCIns *pc = mref(J->cur.startpc, BCIns); + BCOp op = bc_op(J->cur.startins); + GCproto *pt = &gcref(J->cur.startpt)->pt; + TraceNo traceno = J->cur.traceno; + GCtrace *T = trace_save_alloc(J); /* Do this first. May throw OOM. 
*/ + lua_State *L; + + switch (op) { + case BC_FORL: + setbc_op(pc+bc_j(J->cur.startins), BC_JFORI); /* Patch FORI, too. */ + /* fallthrough */ + case BC_LOOP: + case BC_ITERL: + case BC_FUNCF: + /* Patch bytecode of starting instruction in root trace. */ + setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP); + setbc_d(pc, traceno); + addroot: + /* Add to root trace chain in prototype. */ + J->cur.nextroot = pt->trace; + pt->trace = (TraceNo1)traceno; + break; + case BC_RET: + case BC_RET0: + case BC_RET1: + *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno); + goto addroot; + case BC_JMP: + /* Patch exit branch in parent to side trace entry. */ + lua_assert(J->parent != 0 && J->cur.root != 0); + lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode); + /* Avoid compiling a side trace twice (stack resizing uses parent exit). */ + traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE; + /* Add to side trace chain in root trace. */ + { + GCtrace *root = traceref(J, J->cur.root); + root->nchild++; + J->cur.nextside = root->nextside; + root->nextside = (TraceNo1)traceno; + } + break; + case BC_CALLM: + case BC_CALL: + case BC_ITERC: + /* Trace stitching: patch link of previous trace. */ + traceref(J, J->exitno)->link = traceno; + break; + default: + lua_assert(0); + break; + } + + /* Commit new mcode only after all patching is done. */ + lj_mcode_commit(J, J->cur.mcode); + J->postproc = LJ_POST_NONE; + trace_save(J, T); + + L = J->L; + lj_vmevent_send(L, TRACE, + setstrV(L, L->top++, lj_str_newlit(L, "stop")); + setintV(L->top++, traceno); + setfuncV(L, L->top++, J->fn); + ); +} + +/* Start a new root trace for down-recursion. */ +static int trace_downrec(jit_State *J) +{ + /* Restart recording at the return instruction. */ + lua_assert(J->pt != NULL); + lua_assert(bc_isret(bc_op(*J->pc))); + if (bc_op(*J->pc) == BC_RETM) + return 0; /* NYI: down-recursion with RETM. */ + J->parent = 0; + J->exitno = 0; + J->state = LJ_TRACE_RECORD; + trace_start(J); + return 1; +} + +/* Abort tracing. */ +static int trace_abort(jit_State *J) +{ + lua_State *L = J->L; + TraceError e = LJ_TRERR_RECERR; + TraceNo traceno; + + J->postproc = LJ_POST_NONE; + lj_mcode_abort(J); + if (tvisnumber(L->top-1)) + e = (TraceError)numberVint(L->top-1); + if (e == LJ_TRERR_MCODELM) { + L->top--; /* Remove error object */ + J->state = LJ_TRACE_ASM; + return 1; /* Retry ASM with new MCode area. */ + } + /* Penalize or blacklist starting bytecode instruction. */ + if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) { + if (J->exitno == 0) { + BCIns *startpc = mref(J->cur.startpc, BCIns); + if (e == LJ_TRERR_RETRY) + hotcount_set(J2GG(J), startpc+1, 1); /* Immediate retry. */ + else + penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e); + } else { + traceref(J, J->exitno)->link = J->exitno; /* Self-link is blacklisted. */ + } + } + + /* Is there anything to abort? */ + traceno = J->cur.traceno; + if (traceno) { + ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */ + J->cur.link = 0; + J->cur.linktype = LJ_TRLINK_NONE; + lj_vmevent_send(L, TRACE, + TValue *frame; + const BCIns *pc; + GCfunc *fn; + setstrV(L, L->top++, lj_str_newlit(L, "abort")); + setintV(L->top++, traceno); + /* Find original Lua function call to generate a better error message. */ + frame = J->L->base-1; + pc = J->pc; + while (!isluafunc(frame_func(frame))) { + pc = (frame_iscont(frame) ? 
frame_contpc(frame) : frame_pc(frame)) - 1; + frame = frame_prev(frame); + } + fn = frame_func(frame); + setfuncV(L, L->top++, fn); + setintV(L->top++, proto_bcpos(funcproto(fn), pc)); + copyTV(L, L->top++, restorestack(L, errobj)); + copyTV(L, L->top++, &J->errinfo); + ); + /* Drop aborted trace after the vmevent (which may still access it). */ + setgcrefnull(J->trace[traceno]); + if (traceno < J->freetrace) + J->freetrace = traceno; + J->cur.traceno = 0; + } + L->top--; /* Remove error object */ + if (e == LJ_TRERR_DOWNREC) + return trace_downrec(J); + else if (e == LJ_TRERR_MCODEAL) + lj_trace_flushall(L); + return 0; +} + +/* Perform pending re-patch of a bytecode instruction. */ +static LJ_AINLINE void trace_pendpatch(jit_State *J, int force) +{ + if (LJ_UNLIKELY(J->patchpc)) { + if (force || J->bcskip == 0) { + *J->patchpc = J->patchins; + J->patchpc = NULL; + } else { + J->bcskip = 0; + } + } +} + +/* State machine for the trace compiler. Protected callback. */ +static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + UNUSED(dummy); + do { + retry: + switch (J->state) { + case LJ_TRACE_START: + J->state = LJ_TRACE_RECORD; /* trace_start() may change state. */ + trace_start(J); + lj_dispatch_update(J2G(J)); + break; + + case LJ_TRACE_RECORD: + trace_pendpatch(J, 0); + setvmstate(J2G(J), RECORD); + lj_vmevent_send_(L, RECORD, + /* Save/restore tmptv state for trace recorder. */ + TValue savetv = J2G(J)->tmptv; + TValue savetv2 = J2G(J)->tmptv2; + setintV(L->top++, J->cur.traceno); + setfuncV(L, L->top++, J->fn); + setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1); + setintV(L->top++, J->framedepth); + , + J2G(J)->tmptv = savetv; + J2G(J)->tmptv2 = savetv2; + ); + lj_record_ins(J); + break; + + case LJ_TRACE_END: + trace_pendpatch(J, 1); + J->loopref = 0; + if ((J->flags & JIT_F_OPT_LOOP) && + J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) { + setvmstate(J2G(J), OPT); + lj_opt_dce(J); + if (lj_opt_loop(J)) { /* Loop optimization failed? */ + J->cur.link = 0; + J->cur.linktype = LJ_TRLINK_NONE; + J->loopref = J->cur.nins; + J->state = LJ_TRACE_RECORD; /* Try to continue recording. */ + break; + } + J->loopref = J->chain[IR_LOOP]; /* Needed by assembler. */ + } + lj_opt_split(J); + lj_opt_sink(J); + if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE; + J->state = LJ_TRACE_ASM; + break; + + case LJ_TRACE_ASM: + setvmstate(J2G(J), ASM); + lj_asm_trace(J, &J->cur); + trace_stop(J); + setvmstate(J2G(J), INTERP); + J->state = LJ_TRACE_IDLE; + lj_dispatch_update(J2G(J)); + return NULL; + + default: /* Trace aborted asynchronously. */ + setintV(L->top++, (int32_t)LJ_TRERR_RECERR); + /* fallthrough */ + case LJ_TRACE_ERR: + trace_pendpatch(J, 1); + if (trace_abort(J)) + goto retry; + setvmstate(J2G(J), INTERP); + J->state = LJ_TRACE_IDLE; + lj_dispatch_update(J2G(J)); + return NULL; + } + } while (J->state > LJ_TRACE_RECORD); + return NULL; +} + +/* -- Event handling ------------------------------------------------------ */ + +/* A bytecode instruction is about to be executed. Record it. */ +void lj_trace_ins(jit_State *J, const BCIns *pc) +{ + /* Note: J->L must already be set. pc is the true bytecode PC here. */ + J->pc = pc; + J->fn = curr_func(J->L); + J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL; + while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0) + J->state = LJ_TRACE_ERR; +} + +/* A hotcount triggered. Start recording a root trace. 
*/ +void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc) +{ + /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */ + ERRNO_SAVE + /* Reset hotcount. */ + hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP); + /* Only start a new trace if not recording or inside __gc call or vmevent. */ + if (J->state == LJ_TRACE_IDLE && + !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) { + J->parent = 0; /* Root trace. */ + J->exitno = 0; + J->state = LJ_TRACE_START; + lj_trace_ins(J, pc-1); + } + ERRNO_RESTORE +} + +/* Check for a hot side exit. If yes, start recording a side trace. */ +static void trace_hotside(jit_State *J, const BCIns *pc) +{ + SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno]; + if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) && + isluafunc(curr_func(J->L)) && + snap->count != SNAPCOUNT_DONE && + ++snap->count >= J->param[JIT_P_hotexit]) { + lua_assert(J->state == LJ_TRACE_IDLE); + /* J->parent is non-zero for a side trace. */ + J->state = LJ_TRACE_START; + lj_trace_ins(J, pc); + } +} + +/* Stitch a new trace to the previous trace. */ +void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc) +{ + /* Only start a new trace if not recording or inside __gc call or vmevent. */ + if (J->state == LJ_TRACE_IDLE && + !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) { + J->parent = 0; /* Have to treat it like a root trace. */ + /* J->exitno is set to the invoking trace. */ + J->state = LJ_TRACE_START; + lj_trace_ins(J, pc); + } +} + + +/* Tiny struct to pass data to protected call. */ +typedef struct ExitDataCP { + jit_State *J; + void *exptr; /* Pointer to exit state. */ + const BCIns *pc; /* Restart interpreter at this PC. */ +} ExitDataCP; + +/* Need to protect lj_snap_restore because it may throw. */ +static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud) +{ + ExitDataCP *exd = (ExitDataCP *)ud; + cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ + exd->pc = lj_snap_restore(exd->J, exd->exptr); + UNUSED(dummy); + return NULL; +} + +#ifndef LUAJIT_DISABLE_VMEVENT +/* Push all registers from exit state. */ +static void trace_exit_regs(lua_State *L, ExitState *ex) +{ + int32_t i; + setintV(L->top++, RID_NUM_GPR); + setintV(L->top++, RID_NUM_FPR); + for (i = 0; i < RID_NUM_GPR; i++) { + if (sizeof(ex->gpr[i]) == sizeof(int32_t)) + setintV(L->top++, (int32_t)ex->gpr[i]); + else + setnumV(L->top++, (lua_Number)ex->gpr[i]); + } +#if !LJ_SOFTFP + for (i = 0; i < RID_NUM_FPR; i++) { + setnumV(L->top, ex->fpr[i]); + if (LJ_UNLIKELY(tvisnan(L->top))) + setnanV(L->top); + L->top++; + } +#endif +} +#endif + +#ifdef EXITSTATE_PCREG +/* Determine trace number from pc of exit instruction. */ +static TraceNo trace_exit_find(jit_State *J, MCode *pc) +{ + TraceNo traceno; + for (traceno = 1; traceno < J->sizetrace; traceno++) { + GCtrace *T = traceref(J, traceno); + if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode)) + return traceno; + } + lua_assert(0); + return 0; +} +#endif + +/* A trace exited. Restore interpreter state. */ +int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr) +{ + ERRNO_SAVE + lua_State *L = J->L; + ExitState *ex = (ExitState *)exptr; + ExitDataCP exd; + int errcode; + const BCIns *pc; + void *cf; + GCtrace *T; +#ifdef EXITSTATE_PCREG + J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]); +#endif + T = traceref(J, J->parent); UNUSED(T); +#ifdef EXITSTATE_CHECKEXIT + if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. 
*/ + lua_assert(T->root != 0); + J->exitno = T->ir[REF_BASE].op2; + J->parent = T->ir[REF_BASE].op1; + T = traceref(J, J->parent); + } +#endif + lua_assert(T != NULL && J->exitno < T->nsnap); + exd.J = J; + exd.exptr = exptr; + errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp); + if (errcode) + return -errcode; /* Return negated error code. */ + + if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE))) + lj_vmevent_send(L, TEXIT, + lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK); + setintV(L->top++, J->parent); + setintV(L->top++, J->exitno); + trace_exit_regs(L, ex); + ); + + pc = exd.pc; + cf = cframe_raw(L->cframe); + setcframe_pc(cf, pc); + if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) { + /* Just exit to interpreter. */ + } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) { + if (!(G(L)->hookmask & HOOK_GC)) + lj_gc_step(L); /* Exited because of GC: drive GC forward. */ + } else { + trace_hotside(J, pc); + } + if (bc_op(*pc) == BC_JLOOP) { + BCIns *retpc = &traceref(J, bc_d(*pc))->startins; + if (bc_isret(bc_op(*retpc))) { + if (J->state == LJ_TRACE_RECORD) { + J->patchins = *pc; + J->patchpc = (BCIns *)pc; + *J->patchpc = *retpc; + J->bcskip = 1; + } else { + pc = retpc; + setcframe_pc(cf, pc); + } + } + } + /* Return MULTRES or 0. */ + ERRNO_RESTORE + switch (bc_op(*pc)) { + case BC_CALLM: case BC_CALLMT: + return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) + LJ_FR2); + case BC_RETM: + return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc)); + case BC_TSETM: + return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc)); + default: + if (bc_op(*pc) >= BC_FUNCF) + return (int)((BCReg)(L->top - L->base) + 1); + return 0; + } +} + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_trace.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_trace.h new file mode 100644 index 00000000000..9eaf91b0e33 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_trace.h @@ -0,0 +1,54 @@ +/* +** Trace management. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TRACE_H +#define _LJ_TRACE_H + +#include "lj_obj.h" + +#if LJ_HASJIT +#include "lj_jit.h" +#include "lj_dispatch.h" + +/* Trace errors. */ +typedef enum { +#define TREDEF(name, msg) LJ_TRERR_##name, +#include "lj_traceerr.h" + LJ_TRERR__MAX +} TraceError; + +LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e); +LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e); + +/* Trace management. */ +LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T); +LJ_FUNC void lj_trace_reenableproto(GCproto *pt); +LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt); +LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno); +LJ_FUNC int lj_trace_flushall(lua_State *L); +LJ_FUNC void lj_trace_initstate(global_State *g); +LJ_FUNC void lj_trace_freestate(global_State *g); + +/* Event handling. */ +LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc); +LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc); +LJ_FUNCA void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc); +LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr); + +/* Signal asynchronous abort of trace or end of trace. 
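+** (lj_trace_abort() merely clears the LJ_TRACE_ACTIVE bit in J->state;
+** lj_trace_end() switches the trace compiler state machine to LJ_TRACE_END.)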
*/ +#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE) +#define lj_trace_end(J) (J->state = LJ_TRACE_END) + +#else + +#define lj_trace_flushall(L) (UNUSED(L), 0) +#define lj_trace_initstate(g) UNUSED(g) +#define lj_trace_freestate(g) UNUSED(g) +#define lj_trace_abort(g) UNUSED(g) +#define lj_trace_end(J) UNUSED(J) + +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_traceerr.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_traceerr.h new file mode 100644 index 00000000000..12e90d0395c --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_traceerr.h @@ -0,0 +1,63 @@ +/* +** Trace compiler error messages. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* This file may be included multiple times with different TREDEF macros. */ + +/* Recording. */ +TREDEF(RECERR, "error thrown or hook called during recording") +TREDEF(TRACEUV, "trace too short") +TREDEF(TRACEOV, "trace too long") +TREDEF(STACKOV, "trace too deep") +TREDEF(SNAPOV, "too many snapshots") +TREDEF(BLACKL, "blacklisted") +TREDEF(RETRY, "retry recording") +TREDEF(NYIBC, "NYI: bytecode %d") + +/* Recording loop ops. */ +TREDEF(LLEAVE, "leaving loop in root trace") +TREDEF(LINNER, "inner loop in root trace") +TREDEF(LUNROLL, "loop unroll limit reached") + +/* Recording calls/returns. */ +TREDEF(BADTYPE, "bad argument type") +TREDEF(CJITOFF, "JIT compilation disabled for function") +TREDEF(CUNROLL, "call unroll limit reached") +TREDEF(DOWNREC, "down-recursion, restarting") +TREDEF(NYICF, "NYI: C function %s") +TREDEF(NYIFF, "NYI: FastFunc %s") +TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s") +TREDEF(NYIRETL, "NYI: return to lower frame") + +/* Recording indexed load/store. */ +TREDEF(STORENN, "store with nil or NaN key") +TREDEF(NOMM, "missing metamethod") +TREDEF(IDXLOOP, "looping index lookup") +TREDEF(NYITMIX, "NYI: mixed sparse/dense table") + +/* Recording C data operations. */ +TREDEF(NOCACHE, "symbol not in cache") +TREDEF(NYICONV, "NYI: unsupported C type conversion") +TREDEF(NYICALL, "NYI: unsupported C function type") + +/* Optimizations. */ +TREDEF(GFAIL, "guard would always fail") +TREDEF(PHIOV, "too many PHIs") +TREDEF(TYPEINS, "persistent type instability") + +/* Assembler. */ +TREDEF(MCODEAL, "failed to allocate mcode memory") +TREDEF(MCODEOV, "machine code too long") +TREDEF(MCODELM, "hit mcode limit (retrying)") +TREDEF(SPILLOV, "too many spill slots") +TREDEF(BADRA, "inconsistent register allocation") +TREDEF(NYIIR, "NYI: cannot assemble IR instruction %d") +TREDEF(NYIPHI, "NYI: PHI shuffling too complex") +TREDEF(NYICOAL, "NYI: register coalescing too complex") + +#undef TREDEF + +/* Detecting unused error messages: + awk -F, '/^TREDEF/ { gsub(/TREDEF./, ""); printf "grep -q LJ_TRERR_%s *.[ch] || echo %s\n", $1, $1}' lj_traceerr.h | sh +*/ diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_udata.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_udata.c new file mode 100644 index 00000000000..d401a3d8e85 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_udata.c @@ -0,0 +1,34 @@ +/* +** Userdata handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_udata_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_udata.h" + +GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env) +{ + GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata); + global_State *g = G(L); + newwhite(g, ud); /* Not finalized. 
*/ + ud->gct = ~LJ_TUDATA; + ud->udtype = UDTYPE_USERDATA; + ud->len = sz; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcrefnull(ud->metatable); + setgcref(ud->env, obj2gco(env)); + /* Chain to userdata list (after main thread). */ + setgcrefr(ud->nextgc, mainthread(g)->nextgc); + setgcref(mainthread(g)->nextgc, obj2gco(ud)); + return ud; +} + +void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud) +{ + lj_mem_free(g, ud, sizeudata(ud)); +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_udata.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_udata.h new file mode 100644 index 00000000000..676e970f488 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_udata.h @@ -0,0 +1,14 @@ +/* +** Userdata handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_UDATA_H +#define _LJ_UDATA_H + +#include "lj_obj.h" + +LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env); +LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_vm.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_vm.h new file mode 100644 index 00000000000..b31e22f70f7 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_vm.h @@ -0,0 +1,114 @@ +/* +** Assembler VM interface definitions. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_VM_H +#define _LJ_VM_H + +#include "lj_obj.h" + +/* Entry points for ASM parts of VM. */ +LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1); +LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); +typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud); +LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud, + lua_CPFunction cp); +LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); +LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode); +LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe); +LJ_ASMF void lj_vm_unwind_c_eh(void); +LJ_ASMF void lj_vm_unwind_ff_eh(void); +#if LJ_TARGET_X86ORX64 +LJ_ASMF void lj_vm_unwind_rethrow(void); +#endif + +/* Miscellaneous functions. */ +#if LJ_TARGET_X86ORX64 +LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]); +#endif +#if LJ_TARGET_PPC +void lj_vm_cachesync(void *start, void *end); +#endif +LJ_ASMF double lj_vm_foldarith(double x, double y, int op); +#if LJ_HASJIT +LJ_ASMF double lj_vm_foldfpm(double x, int op); +#endif +#if !LJ_ARCH_HASFPU +/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */ +#endif + +/* Dispatch targets for recording and hooks. */ +LJ_ASMF void lj_vm_record(void); +LJ_ASMF void lj_vm_inshook(void); +LJ_ASMF void lj_vm_rethook(void); +LJ_ASMF void lj_vm_callhook(void); +LJ_ASMF void lj_vm_profhook(void); + +/* Trace exit handling. */ +LJ_ASMF void lj_vm_exit_handler(void); +LJ_ASMF void lj_vm_exit_interp(void); + +/* Internal math helper functions. 
*/
+#if LJ_TARGET_PPC || LJ_TARGET_ARM64
+#define lj_vm_floor    floor
+#define lj_vm_ceil     ceil
+#else
+LJ_ASMF double lj_vm_floor(double);
+LJ_ASMF double lj_vm_ceil(double);
+#if LJ_TARGET_ARM
+LJ_ASMF double lj_vm_floor_sf(double);
+LJ_ASMF double lj_vm_ceil_sf(double);
+#endif
+#endif
+#ifdef LUAJIT_NO_LOG2
+LJ_ASMF double lj_vm_log2(double);
+#else
+#define lj_vm_log2     log2
+#endif
+
+#if LJ_HASJIT
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_floor_sse(void);
+LJ_ASMF void lj_vm_ceil_sse(void);
+LJ_ASMF void lj_vm_trunc_sse(void);
+LJ_ASMF void lj_vm_powi_sse(void);
+#define lj_vm_powi     NULL
+#else
+LJ_ASMF double lj_vm_powi(double, int32_t);
+#endif
+#if LJ_TARGET_PPC || LJ_TARGET_ARM64
+#define lj_vm_trunc    trunc
+#else
+LJ_ASMF double lj_vm_trunc(double);
+#if LJ_TARGET_ARM
+LJ_ASMF double lj_vm_trunc_sf(double);
+#endif
+#endif
+#ifdef LUAJIT_NO_EXP2
+LJ_ASMF double lj_vm_exp2(double);
+#else
+#define lj_vm_exp2     exp2
+#endif
+LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
+#if LJ_HASFFI
+LJ_ASMF int lj_vm_errno(void);
+#endif
+#endif
+
+/* Continuations for metamethods. */
+LJ_ASMF void lj_cont_cat(void);  /* Continue with concatenation. */
+LJ_ASMF void lj_cont_ra(void);  /* Store result in RA from instruction. */
+LJ_ASMF void lj_cont_nop(void);  /* Do nothing, just continue execution. */
+LJ_ASMF void lj_cont_condt(void);  /* Branch if result is true. */
+LJ_ASMF void lj_cont_condf(void);  /* Branch if result is false. */
+LJ_ASMF void lj_cont_hook(void);  /* Continue from hook yield. */
+LJ_ASMF void lj_cont_stitch(void);  /* Trace stitching. */
+
+/* Start of the ASM code. */
+LJ_ASMF char lj_vm_asm_begin[];
+
+/* Bytecode offsets are relative to lj_vm_asm_begin. */
+#define makeasmfunc(ofs)  ((ASMFunction)(lj_vm_asm_begin + (ofs)))
+
+#endif
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.c
new file mode 100644
index 00000000000..87ebcfbd286
--- /dev/null
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.c
@@ -0,0 +1,58 @@
+/*
+** VM event handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <stdio.h>
+
+#define lj_vmevent_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev)
+{
+  global_State *g = G(L);
+  GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY);
+  cTValue *tv = lj_tab_getstr(tabV(registry(L)), s);
+  if (tvistab(tv)) {
+    int hash = VMEVENT_HASH(ev);
+    tv = lj_tab_getint(tabV(tv), hash);
+    if (tv && tvisfunc(tv)) {
+      lj_state_checkstack(L, LUA_MINSTACK);
+      setfuncV(L, L->top++, funcV(tv));
+      if (LJ_FR2) setnilV(L->top++);
+      return savestack(L, L->top);
+    }
+  }
+  g->vmevmask &= ~VMEVENT_MASK(ev);  /* No handler: cache this fact. */
+  return 0;
+}
+
+void lj_vmevent_call(lua_State *L, ptrdiff_t argbase)
+{
+  global_State *g = G(L);
+  uint8_t oldmask = g->vmevmask;
+  uint8_t oldh = hook_save(g);
+  int status;
+  g->vmevmask = 0;  /* Disable all events. */
+  hook_vmevent(g);
+  status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0);
+  if (LJ_UNLIKELY(status)) {
+    /* Really shouldn't use stderr here, but where else to complain? */
+    L->top--;
+    fputs("VM handler failed: ", stderr);
+    fputs(tvisstr(L->top) ? strVdata(L->top) : "?", stderr);
strVdata(L->top) : "?", stderr); + fputc('\n', stderr); + } + hook_restore(g, oldh); + if (g->vmevmask != VMEVENT_NOCACHE) + g->vmevmask = oldmask; /* Restore event mask, but not if not modified. */ +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.h b/Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.h new file mode 100644 index 00000000000..231e00ecdf9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_vmevent.h @@ -0,0 +1,59 @@ +/* +** VM event handling. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_VMEVENT_H +#define _LJ_VMEVENT_H + +#include "lj_obj.h" + +/* Registry key for VM event handler table. */ +#define LJ_VMEVENTS_REGKEY "_VMEVENTS" +#define LJ_VMEVENTS_HSIZE 4 + +#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7)) +#define VMEVENT_HASH(ev) ((int)(ev) & ~7) +#define VMEVENT_HASHIDX(h) ((int)(h) << 3) +#define VMEVENT_NOCACHE 255 + +#define VMEVENT_DEF(name, hash) \ + LJ_VMEVENT_##name##_, \ + LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3) + +/* VM event IDs. */ +typedef enum { + VMEVENT_DEF(BC, 0x00003883), + VMEVENT_DEF(TRACE, 0xb2d91467), + VMEVENT_DEF(RECORD, 0x9284bf4f), + VMEVENT_DEF(TEXIT, 0xb29df2b0), + LJ_VMEVENT__MAX +} VMEvent; + +#ifdef LUAJIT_DISABLE_VMEVENT +#define lj_vmevent_send(L, ev, args) UNUSED(L) +#define lj_vmevent_send_(L, ev, args, post) UNUSED(L) +#else +#define lj_vmevent_send(L, ev, args) \ + if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \ + ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \ + if (argbase) { \ + args \ + lj_vmevent_call(L, argbase); \ + } \ + } +#define lj_vmevent_send_(L, ev, args, post) \ + if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \ + ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \ + if (argbase) { \ + args \ + lj_vmevent_call(L, argbase); \ + post \ + } \ + } + +LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev); +LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase); +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_vmmath.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_vmmath.c new file mode 100644 index 00000000000..ecad2950d93 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_vmmath.c @@ -0,0 +1,152 @@ +/* +** Math helper functions for assembler VM. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_vmmath_c +#define LUA_CORE + +#include +#include + +#include "lj_obj.h" +#include "lj_ir.h" +#include "lj_vm.h" + +/* -- Wrapper functions --------------------------------------------------- */ + +#if LJ_TARGET_X86 && __ELF__ && __PIC__ +/* Wrapper functions to deal with the ELF/x86 PIC disaster. 
*/ +LJ_FUNCA double lj_wrap_log(double x) { return log(x); } +LJ_FUNCA double lj_wrap_log10(double x) { return log10(x); } +LJ_FUNCA double lj_wrap_exp(double x) { return exp(x); } +LJ_FUNCA double lj_wrap_sin(double x) { return sin(x); } +LJ_FUNCA double lj_wrap_cos(double x) { return cos(x); } +LJ_FUNCA double lj_wrap_tan(double x) { return tan(x); } +LJ_FUNCA double lj_wrap_asin(double x) { return asin(x); } +LJ_FUNCA double lj_wrap_acos(double x) { return acos(x); } +LJ_FUNCA double lj_wrap_atan(double x) { return atan(x); } +LJ_FUNCA double lj_wrap_sinh(double x) { return sinh(x); } +LJ_FUNCA double lj_wrap_cosh(double x) { return cosh(x); } +LJ_FUNCA double lj_wrap_tanh(double x) { return tanh(x); } +LJ_FUNCA double lj_wrap_atan2(double x, double y) { return atan2(x, y); } +LJ_FUNCA double lj_wrap_pow(double x, double y) { return pow(x, y); } +LJ_FUNCA double lj_wrap_fmod(double x, double y) { return fmod(x, y); } +#endif + +/* -- Helper functions for generated machine code ------------------------- */ + +double lj_vm_foldarith(double x, double y, int op) +{ + switch (op) { + case IR_ADD - IR_ADD: return x+y; break; + case IR_SUB - IR_ADD: return x-y; break; + case IR_MUL - IR_ADD: return x*y; break; + case IR_DIV - IR_ADD: return x/y; break; + case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y; break; + case IR_POW - IR_ADD: return pow(x, y); break; + case IR_NEG - IR_ADD: return -x; break; + case IR_ABS - IR_ADD: return fabs(x); break; +#if LJ_HASJIT + case IR_ATAN2 - IR_ADD: return atan2(x, y); break; + case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break; + case IR_MIN - IR_ADD: return x > y ? y : x; break; + case IR_MAX - IR_ADD: return x < y ? y : x; break; +#endif + default: return x; + } +} + +#if LJ_HASJIT + +#ifdef LUAJIT_NO_LOG2 +double lj_vm_log2(double a) +{ + return log(a) * 1.4426950408889634074; +} +#endif + +#ifdef LUAJIT_NO_EXP2 +double lj_vm_exp2(double a) +{ + return exp(a * 0.6931471805599453); +} +#endif + +#if !(LJ_TARGET_ARM || LJ_TARGET_ARM64 || LJ_TARGET_PPC) +int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b) +{ + uint32_t y, ua, ub; + lua_assert(b != 0); /* This must be checked before using this function. */ + ua = a < 0 ? (uint32_t)-a : (uint32_t)a; + ub = b < 0 ? (uint32_t)-b : (uint32_t)b; + y = ua % ub; + if (y != 0 && (a^b) < 0) y = y - ub; + if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y; + return (int32_t)y; +} +#endif + +#if !LJ_TARGET_X86ORX64 +/* Unsigned x^k. */ +static double lj_vm_powui(double x, uint32_t k) +{ + double y; + lua_assert(k != 0); + for (; (k & 1) == 0; k >>= 1) x *= x; + y = x; + if ((k >>= 1) != 0) { + for (;;) { + x *= x; + if (k == 1) break; + if (k & 1) y *= x; + k >>= 1; + } + y *= x; + } + return y; +} + +/* Signed x^k. */ +double lj_vm_powi(double x, int32_t k) +{ + if (k > 1) + return lj_vm_powui(x, (uint32_t)k); + else if (k == 1) + return x; + else if (k == 0) + return 1.0; + else + return 1.0 / lj_vm_powui(x, (uint32_t)-k); +} +#endif + +/* Computes fpm(x) for extended math functions. 
*/ +double lj_vm_foldfpm(double x, int fpm) +{ + switch (fpm) { + case IRFPM_FLOOR: return lj_vm_floor(x); + case IRFPM_CEIL: return lj_vm_ceil(x); + case IRFPM_TRUNC: return lj_vm_trunc(x); + case IRFPM_SQRT: return sqrt(x); + case IRFPM_EXP: return exp(x); + case IRFPM_EXP2: return lj_vm_exp2(x); + case IRFPM_LOG: return log(x); + case IRFPM_LOG2: return lj_vm_log2(x); + case IRFPM_LOG10: return log10(x); + case IRFPM_SIN: return sin(x); + case IRFPM_COS: return cos(x); + case IRFPM_TAN: return tan(x); + default: lua_assert(0); + } + return 0; +} + +#if LJ_HASFFI +int lj_vm_errno(void) +{ + return errno; +} +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/ljamalg.c b/Build/source/libs/luajit/LuaJIT-src/src/ljamalg.c new file mode 100644 index 00000000000..be0c52d743e --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/ljamalg.c @@ -0,0 +1,96 @@ +/* +** LuaJIT core and libraries amalgamation. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* ++--------------------------------------------------------------------------+ +| WARNING: Compiling the amalgamation needs a lot of virtual memory | +| (around 300 MB with GCC 4.x)! If you don't have enough physical memory | +| your machine will start swapping to disk and the compile will not finish | +| within a reasonable amount of time. | +| So either compile on a bigger machine or use the non-amalgamated build. | ++--------------------------------------------------------------------------+ +*/ + +#define ljamalg_c +#define LUA_CORE + +/* To get the mremap prototype. Must be defined before any system includes. */ +#if defined(__linux__) && !defined(_GNU_SOURCE) +#define _GNU_SOURCE +#endif + +#ifndef WINVER +#define WINVER 0x0501 +#endif + +#include "lua.h" +#include "lauxlib.h" + +#include "lj_gc.c" +#include "lj_err.c" +#include "lj_char.c" +#include "lj_bc.c" +#include "lj_obj.c" +#include "lj_buf.c" +#include "lj_str.c" +#include "lj_tab.c" +#include "lj_func.c" +#include "lj_udata.c" +#include "lj_meta.c" +#include "lj_debug.c" +#include "lj_state.c" +#include "lj_dispatch.c" +#include "lj_vmevent.c" +#include "lj_vmmath.c" +#include "lj_strscan.c" +#include "lj_strfmt.c" +#include "lj_api.c" +#include "lj_profile.c" +#include "lj_lex.c" +#include "lj_parse.c" +#include "lj_bcread.c" +#include "lj_bcwrite.c" +#include "lj_load.c" +#include "lj_ctype.c" +#include "lj_cdata.c" +#include "lj_cconv.c" +#include "lj_ccall.c" +#include "lj_ccallback.c" +#include "lj_carith.c" +#include "lj_clib.c" +#include "lj_cparse.c" +#include "lj_lib.c" +#include "lj_ir.c" +#include "lj_opt_mem.c" +#include "lj_opt_fold.c" +#include "lj_opt_narrow.c" +#include "lj_opt_dce.c" +#include "lj_opt_loop.c" +#include "lj_opt_split.c" +#include "lj_opt_sink.c" +#include "lj_mcode.c" +#include "lj_snap.c" +#include "lj_record.c" +#include "lj_crecord.c" +#include "lj_ffrecord.c" +#include "lj_asm.c" +#include "lj_trace.c" +#include "lj_gdbjit.c" +#include "lj_alloc.c" + +#include "lib_aux.c" +#include "lib_base.c" +#include "lib_math.c" +#include "lib_string.c" +#include "lib_table.c" +#include "lib_io.c" +#include "lib_os.c" +#include "lib_package.c" +#include "lib_debug.c" +#include "lib_bit.c" +#include "lib_jit.c" +#include "lib_ffi.c" +#include "lib_init.c" + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lua.h b/Build/source/libs/luajit/LuaJIT-src/src/lua.h new file mode 100644 index 00000000000..64a930e3e38 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lua.h @@ -0,0 +1,406 @@ +/* 
+** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $ +** Lua - An Extensible Extension Language +** Lua.org, PUC-Rio, Brazil (http://www.lua.org) +** See Copyright Notice at the end of this file +*/ + + +#ifndef lua_h +#define lua_h + +#include +#include + + +#include "luaconf.h" + + +#define LUA_VERSION "Lua 5.1" +#define LUA_RELEASE "Lua 5.1.4" +#define LUA_VERSION_NUM 501 +#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio" +#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes" + + +/* mark for precompiled code (`Lua') */ +#define LUA_SIGNATURE "\033Lua" + +/* option for multiple returns in `lua_pcall' and `lua_call' */ +#define LUA_MULTRET (-1) + + +/* +** pseudo-indices +*/ +#define LUA_REGISTRYINDEX (-10000) +#define LUA_ENVIRONINDEX (-10001) +#define LUA_GLOBALSINDEX (-10002) +#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) + + +/* thread status; 0 is OK */ +#define LUA_YIELD 1 +#define LUA_ERRRUN 2 +#define LUA_ERRSYNTAX 3 +#define LUA_ERRMEM 4 +#define LUA_ERRERR 5 + + +typedef struct lua_State lua_State; + +typedef int (*lua_CFunction) (lua_State *L); + + +/* +** functions that read/write blocks when loading/dumping Lua chunks +*/ +typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); + +typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); + + +/* +** prototype for memory-allocation functions +*/ +typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); + + +/* +** basic types +*/ +#define LUA_TNONE (-1) + +#define LUA_TNIL 0 +#define LUA_TBOOLEAN 1 +#define LUA_TLIGHTUSERDATA 2 +#define LUA_TNUMBER 3 +#define LUA_TSTRING 4 +#define LUA_TTABLE 5 +#define LUA_TFUNCTION 6 +#define LUA_TUSERDATA 7 +#define LUA_TTHREAD 8 + + + +/* minimum Lua stack available to a C function */ +#define LUA_MINSTACK 20 + + +/* +** generic extra include file +*/ +#if defined(LUA_USER_H) +#include LUA_USER_H +#endif + + +/* type of numbers in Lua */ +typedef LUA_NUMBER lua_Number; + + +/* type for integer functions */ +typedef LUA_INTEGER lua_Integer; + + +/* communication with LuaJiTTeX */ +LUA_API int luajittex_choose_hash_function; + + +/* +** state manipulation +*/ +LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); +LUA_API void (lua_close) (lua_State *L); +LUA_API lua_State *(lua_newthread) (lua_State *L); + +LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); + + +/* +** basic stack manipulation +*/ +LUA_API int (lua_gettop) (lua_State *L); +LUA_API void (lua_settop) (lua_State *L, int idx); +LUA_API void (lua_pushvalue) (lua_State *L, int idx); +LUA_API void (lua_remove) (lua_State *L, int idx); +LUA_API void (lua_insert) (lua_State *L, int idx); +LUA_API void (lua_replace) (lua_State *L, int idx); +LUA_API int (lua_checkstack) (lua_State *L, int sz); + +LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n); + + +/* +** access functions (stack -> C) +*/ + +LUA_API int (lua_isnumber) (lua_State *L, int idx); +LUA_API int (lua_isstring) (lua_State *L, int idx); +LUA_API int (lua_iscfunction) (lua_State *L, int idx); +LUA_API int (lua_isuserdata) (lua_State *L, int idx); +LUA_API int (lua_type) (lua_State *L, int idx); +LUA_API const char *(lua_typename) (lua_State *L, int tp); + +LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2); + +LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); +LUA_API lua_Integer 
(lua_tointeger) (lua_State *L, int idx); +LUA_API int (lua_toboolean) (lua_State *L, int idx); +LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); +LUA_API size_t (lua_objlen) (lua_State *L, int idx); +LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); +LUA_API void *(lua_touserdata) (lua_State *L, int idx); +LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); +LUA_API const void *(lua_topointer) (lua_State *L, int idx); + + +/* +** push functions (C -> stack) +*/ +LUA_API void (lua_pushnil) (lua_State *L); +LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n); +LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); +LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); +LUA_API void (lua_pushstring) (lua_State *L, const char *s); +LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, + va_list argp); +LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); +LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); +LUA_API void (lua_pushboolean) (lua_State *L, int b); +LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); +LUA_API int (lua_pushthread) (lua_State *L); + + +/* +** get functions (Lua -> stack) +*/ +LUA_API void (lua_gettable) (lua_State *L, int idx); +LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawget) (lua_State *L, int idx); +LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); +LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); +LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); +LUA_API int (lua_getmetatable) (lua_State *L, int objindex); +LUA_API void (lua_getfenv) (lua_State *L, int idx); + + +/* +** set functions (stack -> Lua) +*/ +LUA_API void (lua_settable) (lua_State *L, int idx); +LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawset) (lua_State *L, int idx); +LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); +LUA_API int (lua_setmetatable) (lua_State *L, int objindex); +LUA_API int (lua_setfenv) (lua_State *L, int idx); + + +/* +** `load' and `call' functions (load and run Lua code) +*/ +LUA_API void (lua_call) (lua_State *L, int nargs, int nresults); +LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); +LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); +LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname); + +LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data); + + +/* +** coroutine functions +*/ +LUA_API int (lua_yield) (lua_State *L, int nresults); +LUA_API int (lua_resume) (lua_State *L, int narg); +LUA_API int (lua_status) (lua_State *L); + +/* +** garbage-collection function and options +*/ + +#define LUA_GCSTOP 0 +#define LUA_GCRESTART 1 +#define LUA_GCCOLLECT 2 +#define LUA_GCCOUNT 3 +#define LUA_GCCOUNTB 4 +#define LUA_GCSTEP 5 +#define LUA_GCSETPAUSE 6 +#define LUA_GCSETSTEPMUL 7 + +LUA_API int (lua_gc) (lua_State *L, int what, int data); + + +/* +** miscellaneous functions +*/ + +LUA_API int (lua_error) (lua_State *L); + +LUA_API int (lua_next) (lua_State *L, int idx); + +LUA_API void (lua_concat) (lua_State *L, int n); + +LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); + + + +/* +** =============================================================== +** some useful macros +** 
=============================================================== +*/ + +#define lua_pop(L,n) lua_settop(L, -(n)-1) + +#define lua_newtable(L) lua_createtable(L, 0, 0) + +#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) + +#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) + +#define lua_strlen(L,i) lua_objlen(L, (i)) + +#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) +#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) +#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) +#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) +#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN) +#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) +#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) +#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) + +#define lua_pushliteral(L, s) \ + lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) + +#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) +#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) + +#define lua_tostring(L,i) lua_tolstring(L, (i), NULL) + + + +/* +** compatibility macros and functions +*/ + +#define lua_open() luaL_newstate() + +#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) + +#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) + +#define lua_Chunkreader lua_Reader +#define lua_Chunkwriter lua_Writer + + +/* hack */ +LUA_API void lua_setlevel (lua_State *from, lua_State *to); + + +/* +** {====================================================================== +** Debug API +** ======================================================================= +*/ + + +/* +** Event codes +*/ +#define LUA_HOOKCALL 0 +#define LUA_HOOKRET 1 +#define LUA_HOOKLINE 2 +#define LUA_HOOKCOUNT 3 +#define LUA_HOOKTAILRET 4 + + +/* +** Event masks +*/ +#define LUA_MASKCALL (1 << LUA_HOOKCALL) +#define LUA_MASKRET (1 << LUA_HOOKRET) +#define LUA_MASKLINE (1 << LUA_HOOKLINE) +#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) + +typedef struct lua_Debug lua_Debug; /* activation record */ + + +/* Functions to be called by the debuger in specific events */ +typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); + + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); +LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n); +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); +LUA_API lua_Hook lua_gethook (lua_State *L); +LUA_API int lua_gethookmask (lua_State *L); +LUA_API int lua_gethookcount (lua_State *L); + +/* From Lua 5.2. 
*/ +LUA_API void *lua_upvalueid (lua_State *L, int idx, int n); +LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2); +LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname, const char *mode); + + +#define LUA_OPEQ 0 +#define LUA_OPLT 1 +#define LUA_OPLE 2 +#define LUA_OK 0 + +/* see http://comments.gmane.org/gmane.comp.programming.swig/18673 */ +# define lua_rawlen lua_objlen + + + +struct lua_Debug { + int event; + const char *name; /* (n) */ + const char *namewhat; /* (n) `global', `local', `field', `method' */ + const char *what; /* (S) `Lua', `C', `main', `tail' */ + const char *source; /* (S) */ + int currentline; /* (l) */ + int nups; /* (u) number of upvalues */ + int linedefined; /* (S) */ + int lastlinedefined; /* (S) */ + char short_src[LUA_IDSIZE]; /* (S) */ + /* private part */ + int i_ci; /* active function */ +}; + +/* }====================================================================== */ + + +/****************************************************************************** +* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lua.hpp b/Build/source/libs/luajit/LuaJIT-src/src/lua.hpp new file mode 100644 index 00000000000..07e9002dc5a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lua.hpp @@ -0,0 +1,9 @@ +// C++ wrapper for LuaJIT header files. + +extern "C" { +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" +#include "luajit.h" +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/luaconf.h b/Build/source/libs/luajit/LuaJIT-src/src/luaconf.h new file mode 100644 index 00000000000..79f514825f8 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/luaconf.h @@ -0,0 +1,156 @@ +/* +** Configuration header. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef luaconf_h +#define luaconf_h + +#ifndef WINVER +#define WINVER 0x0501 +#endif +#include +#include + +/* Default path for loading Lua and C modules with require(). */ +#if defined(_WIN32) +/* +** In Windows, any exclamation mark ('!') in the path is replaced by the +** path of the directory of the executable file of the current process. 
+*/ +#define LUA_LDIR "!\\lua\\" +#define LUA_CDIR "!\\" +#define LUA_PATH_DEFAULT \ + ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" +#define LUA_CPATH_DEFAULT \ + ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll" +#else +/* +** Note to distribution maintainers: do NOT patch the following lines! +** Please read ../doc/install.html#distro and pass PREFIX=/usr instead. +*/ +#ifndef LUA_MULTILIB +#define LUA_MULTILIB "lib" +#endif +#ifndef LUA_LMULTILIB +#define LUA_LMULTILIB "lib" +#endif +#define LUA_LROOT "/usr/local" +#define LUA_LUADIR "/lua/5.1/" +#define LUA_LJDIR "/luajit-2.1.0-beta1/" + +#ifdef LUA_ROOT +#define LUA_JROOT LUA_ROOT +#define LUA_RLDIR LUA_ROOT "/share" LUA_LUADIR +#define LUA_RCDIR LUA_ROOT "/" LUA_MULTILIB LUA_LUADIR +#define LUA_RLPATH ";" LUA_RLDIR "?.lua;" LUA_RLDIR "?/init.lua" +#define LUA_RCPATH ";" LUA_RCDIR "?.so" +#else +#define LUA_JROOT LUA_LROOT +#define LUA_RLPATH +#define LUA_RCPATH +#endif + +#define LUA_JPATH ";" LUA_JROOT "/share" LUA_LJDIR "?.lua" +#define LUA_LLDIR LUA_LROOT "/share" LUA_LUADIR +#define LUA_LCDIR LUA_LROOT "/" LUA_LMULTILIB LUA_LUADIR +#define LUA_LLPATH ";" LUA_LLDIR "?.lua;" LUA_LLDIR "?/init.lua" +#define LUA_LCPATH1 ";" LUA_LCDIR "?.so" +#define LUA_LCPATH2 ";" LUA_LCDIR "loadall.so" + +#define LUA_PATH_DEFAULT "./?.lua" LUA_JPATH LUA_LLPATH LUA_RLPATH +#define LUA_CPATH_DEFAULT "./?.so" LUA_LCPATH1 LUA_RCPATH LUA_LCPATH2 +#endif + +/* Environment variable names for path overrides and initialization code. */ +#define LUA_PATH "LUA_PATH" +#define LUA_CPATH "LUA_CPATH" +#define LUA_INIT "LUA_INIT" + +/* Special file system characters. */ +#if defined(_WIN32) +#define LUA_DIRSEP "\\" +#else +#define LUA_DIRSEP "/" +#endif +#define LUA_PATHSEP ";" +#define LUA_PATH_MARK "?" +#define LUA_EXECDIR "!" +#define LUA_IGMARK "-" +#define LUA_PATH_CONFIG \ + LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \ + LUA_EXECDIR "\n" LUA_IGMARK + +/* Quoting in error messages. */ +#define LUA_QL(x) "'" x "'" +#define LUA_QS LUA_QL("%s") + +/* Various tunables. */ +#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */ +#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */ +#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */ +#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */ +#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */ + +/* Compatibility with older library function names. */ +#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */ +#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */ + +/* Configuration for the frontend (the luajit executable). */ +#if defined(luajit_c) +#define LUA_PROGNAME "luajit" /* Fallback frontend name. */ +#define LUA_PROMPT "> " /* Interactive prompt. */ +#define LUA_PROMPT2 ">> " /* Continuation prompt. */ +#define LUA_MAXINPUT 512 /* Max. input line length. */ +#endif + +/* Note: changing the following defines breaks the Lua 5.1 ABI. */ +#define LUA_INTEGER ptrdiff_t +#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */ +/* +** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using +** unreasonable amounts of stack space, but still retain ABI compatibility. +** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it. +*/ +#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ) + +/* The following defines are here only for compatibility with luaconf.h +** from the standard Lua distribution. They must not be changed for LuaJIT. 
+*/ +#define LUA_NUMBER_DOUBLE +#define LUA_NUMBER double +#define LUAI_UACNUMBER double +#define LUA_NUMBER_SCAN "%lf" +#define LUA_NUMBER_FMT "%.14g" +#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n)) +#define LUAI_MAXNUMBER2STR 32 +#define LUA_INTFRMLEN "l" +#define LUA_INTFRM_T long + +/* Linkage of public API functions. */ +#if defined(LUA_BUILD_AS_DLL) +#if defined(LUA_CORE) || defined(LUA_LIB) +#define LUA_API __declspec(dllexport) +#else +#define LUA_API __declspec(dllimport) +#endif +#else +#define LUA_API extern +#endif + +#define LUALIB_API LUA_API + +/* Support for internal assertions. */ +#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) +#include +#endif +#ifdef LUA_USE_ASSERT +#define lua_assert(x) assert(x) +#endif +#ifdef LUA_USE_APICHECK +#define luai_apicheck(L, o) { (void)L; assert(o); } +#else +#define luai_apicheck(L, o) { (void)L; } +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/luajit.c b/Build/source/libs/luajit/LuaJIT-src/src/luajit.c new file mode 100644 index 00000000000..0ebc73005f8 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/luajit.c @@ -0,0 +1,570 @@ +/* +** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include +#include +#include + +#define luajit_c + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" +#include "luajit.h" + +#include "lj_arch.h" + +#if LJ_TARGET_POSIX +#include +#define lua_stdin_is_tty() isatty(0) +#elif LJ_TARGET_WINDOWS +#include +#ifdef __BORLANDC__ +#define lua_stdin_is_tty() isatty(_fileno(stdin)) +#else +#define lua_stdin_is_tty() _isatty(_fileno(stdin)) +#endif +#else +#define lua_stdin_is_tty() 1 +#endif + +#if !LJ_TARGET_CONSOLE +#include +#endif + +static lua_State *globalL = NULL; +static const char *progname = LUA_PROGNAME; + +#if !LJ_TARGET_CONSOLE +static void lstop(lua_State *L, lua_Debug *ar) +{ + (void)ar; /* unused arg. */ + lua_sethook(L, NULL, 0, 0); + /* Avoid luaL_error -- a C hook doesn't add an extra frame. */ + luaL_where(L, 0); + lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1)); + lua_error(L); +} + +static void laction(int i) +{ + signal(i, SIG_DFL); /* if another SIGINT happens before lstop, + terminate process (default action) */ + lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1); +} +#endif + +static void print_usage(void) +{ + fputs("usage: ", stderr); + fputs(progname, stderr); + fputs(" [options]... [script [args]...].\n" + "Available options are:\n" + " -e chunk Execute string " LUA_QL("chunk") ".\n" + " -l name Require library " LUA_QL("name") ".\n" + " -b ... 
Save or list bytecode.\n" + " -j cmd Perform LuaJIT control command.\n" + " -O[opt] Control LuaJIT optimizations.\n" + " -i Enter interactive mode after executing " LUA_QL("script") ".\n" + " -v Show version information.\n" + " -E Ignore environment variables.\n" + " -- Stop handling options.\n" + " - Execute stdin and stop handling options.\n", stderr); + fflush(stderr); +} + +static void l_message(const char *pname, const char *msg) +{ + if (pname) { fputs(pname, stderr); fputc(':', stderr); fputc(' ', stderr); } + fputs(msg, stderr); fputc('\n', stderr); + fflush(stderr); +} + +static int report(lua_State *L, int status) +{ + if (status && !lua_isnil(L, -1)) { + const char *msg = lua_tostring(L, -1); + if (msg == NULL) msg = "(error object is not a string)"; + l_message(progname, msg); + lua_pop(L, 1); + } + return status; +} + +static int traceback(lua_State *L) +{ + if (!lua_isstring(L, 1)) { /* Non-string error object? Try metamethod. */ + if (lua_isnoneornil(L, 1) || + !luaL_callmeta(L, 1, "__tostring") || + !lua_isstring(L, -1)) + return 1; /* Return non-string error object. */ + lua_remove(L, 1); /* Replace object by result of __tostring metamethod. */ + } + luaL_traceback(L, L, lua_tostring(L, 1), 1); + return 1; +} + +static int docall(lua_State *L, int narg, int clear) +{ + int status; + int base = lua_gettop(L) - narg; /* function index */ + lua_pushcfunction(L, traceback); /* push traceback function */ + lua_insert(L, base); /* put it under chunk and args */ +#if !LJ_TARGET_CONSOLE + signal(SIGINT, laction); +#endif + status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base); +#if !LJ_TARGET_CONSOLE + signal(SIGINT, SIG_DFL); +#endif + lua_remove(L, base); /* remove traceback function */ + /* force a complete garbage collection in case of errors */ + if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); + return status; +} + +static void print_version(void) +{ + fputs(LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n", stdout); +} + +static void print_jit_status(lua_State *L) +{ + int n; + const char *s; + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, -1, "jit"); /* Get jit.* module table. */ + lua_remove(L, -2); + lua_getfield(L, -1, "status"); + lua_remove(L, -2); + n = lua_gettop(L); + lua_call(L, 0, LUA_MULTRET); + fputs(lua_toboolean(L, n) ? 
"JIT: ON" : "JIT: OFF", stdout); + for (n++; (s = lua_tostring(L, n)); n++) { + putc(' ', stdout); + fputs(s, stdout); + } + putc('\n', stdout); +} + +static int getargs(lua_State *L, char **argv, int n) +{ + int narg; + int i; + int argc = 0; + while (argv[argc]) argc++; /* count total number of arguments */ + narg = argc - (n + 1); /* number of arguments to the script */ + luaL_checkstack(L, narg + 3, "too many arguments to script"); + for (i = n+1; i < argc; i++) + lua_pushstring(L, argv[i]); + lua_createtable(L, narg, n + 1); + for (i = 0; i < argc; i++) { + lua_pushstring(L, argv[i]); + lua_rawseti(L, -2, i - n); + } + return narg; +} + +static int dofile(lua_State *L, const char *name) +{ + int status = luaL_loadfile(L, name) || docall(L, 0, 1); + return report(L, status); +} + +static int dostring(lua_State *L, const char *s, const char *name) +{ + int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1); + return report(L, status); +} + +static int dolibrary(lua_State *L, const char *name) +{ + lua_getglobal(L, "require"); + lua_pushstring(L, name); + return report(L, docall(L, 1, 1)); +} + +static void write_prompt(lua_State *L, int firstline) +{ + const char *p; + lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2"); + p = lua_tostring(L, -1); + if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2; + fputs(p, stdout); + fflush(stdout); + lua_pop(L, 1); /* remove global */ +} + +static int incomplete(lua_State *L, int status) +{ + if (status == LUA_ERRSYNTAX) { + size_t lmsg; + const char *msg = lua_tolstring(L, -1, &lmsg); + const char *tp = msg + lmsg - (sizeof(LUA_QL("")) - 1); + if (strstr(msg, LUA_QL("")) == tp) { + lua_pop(L, 1); + return 1; + } + } + return 0; /* else... */ +} + +static int pushline(lua_State *L, int firstline) +{ + char buf[LUA_MAXINPUT]; + write_prompt(L, firstline); + if (fgets(buf, LUA_MAXINPUT, stdin)) { + size_t len = strlen(buf); + if (len > 0 && buf[len-1] == '\n') + buf[len-1] = '\0'; + if (firstline && buf[0] == '=') + lua_pushfstring(L, "return %s", buf+1); + else + lua_pushstring(L, buf); + return 1; + } + return 0; +} + +static int loadline(lua_State *L) +{ + int status; + lua_settop(L, 0); + if (!pushline(L, 1)) + return -1; /* no input */ + for (;;) { /* repeat until gets a complete line */ + status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin"); + if (!incomplete(L, status)) break; /* cannot try to add lines? */ + if (!pushline(L, 0)) /* no more input? */ + return -1; + lua_pushliteral(L, "\n"); /* add a new line... */ + lua_insert(L, -2); /* ...between the two lines */ + lua_concat(L, 3); /* join them */ + } + lua_remove(L, 1); /* remove line */ + return status; +} + +static void dotty(lua_State *L) +{ + int status; + const char *oldprogname = progname; + progname = NULL; + while ((status = loadline(L)) != -1) { + if (status == 0) status = docall(L, 0, 0); + report(L, status); + if (status == 0 && lua_gettop(L) > 0) { /* any result to print? 
*/ + lua_getglobal(L, "print"); + lua_insert(L, 1); + if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0) + l_message(progname, + lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)", + lua_tostring(L, -1))); + } + } + lua_settop(L, 0); /* clear stack */ + fputs("\n", stdout); + fflush(stdout); + progname = oldprogname; +} + +static int handle_script(lua_State *L, char **argv, int n) +{ + int status; + const char *fname; + int narg = getargs(L, argv, n); /* collect arguments */ + lua_setglobal(L, "arg"); + fname = argv[n]; + if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0) + fname = NULL; /* stdin */ + status = luaL_loadfile(L, fname); + lua_insert(L, -(narg+1)); + if (status == 0) + status = docall(L, narg, 0); + else + lua_pop(L, narg); + return report(L, status); +} + +/* Load add-on module. */ +static int loadjitmodule(lua_State *L) +{ + lua_getglobal(L, "require"); + lua_pushliteral(L, "jit."); + lua_pushvalue(L, -3); + lua_concat(L, 2); + if (lua_pcall(L, 1, 1, 0)) { + const char *msg = lua_tostring(L, -1); + if (msg && !strncmp(msg, "module ", 7)) + goto nomodule; + return report(L, 1); + } + lua_getfield(L, -1, "start"); + if (lua_isnil(L, -1)) { + nomodule: + l_message(progname, + "unknown luaJIT command or jit.* modules not installed"); + return 1; + } + lua_remove(L, -2); /* Drop module table. */ + return 0; +} + +/* Run command with options. */ +static int runcmdopt(lua_State *L, const char *opt) +{ + int narg = 0; + if (opt && *opt) { + for (;;) { /* Split arguments. */ + const char *p = strchr(opt, ','); + narg++; + if (!p) break; + if (p == opt) + lua_pushnil(L); + else + lua_pushlstring(L, opt, (size_t)(p - opt)); + opt = p + 1; + } + if (*opt) + lua_pushstring(L, opt); + else + lua_pushnil(L); + } + return report(L, lua_pcall(L, narg, 0, 0)); +} + +/* JIT engine control command: try jit library first or load add-on module. */ +static int dojitcmd(lua_State *L, const char *cmd) +{ + const char *opt = strchr(cmd, '='); + lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd)); + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, -1, "jit"); /* Get jit.* module table. */ + lua_remove(L, -2); + lua_pushvalue(L, -2); + lua_gettable(L, -2); /* Lookup library function. */ + if (!lua_isfunction(L, -1)) { + lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */ + if (loadjitmodule(L)) + return 1; + } else { + lua_remove(L, -2); /* Drop jit.* table. */ + } + lua_remove(L, -2); /* Drop module name. */ + return runcmdopt(L, opt ? opt+1 : opt); +} + +/* Optimization flags. */ +static int dojitopt(lua_State *L, const char *opt) +{ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */ + lua_remove(L, -2); + lua_getfield(L, -1, "start"); + lua_remove(L, -2); + return runcmdopt(L, opt); +} + +/* Save or list bytecode. 
*/ +static int dobytecode(lua_State *L, char **argv) +{ + int narg = 0; + lua_pushliteral(L, "bcsave"); + if (loadjitmodule(L)) + return 1; + if (argv[0][2]) { + narg++; + argv[0][1] = '-'; + lua_pushstring(L, argv[0]+1); + } + for (argv++; *argv != NULL; narg++, argv++) + lua_pushstring(L, *argv); + return report(L, lua_pcall(L, narg, 0, 0)); +} + +/* check that argument has no extra characters at the end */ +#define notail(x) {if ((x)[2] != '\0') return -1;} + +#define FLAGS_INTERACTIVE 1 +#define FLAGS_VERSION 2 +#define FLAGS_EXEC 4 +#define FLAGS_OPTION 8 +#define FLAGS_NOENV 16 + +static int collectargs(char **argv, int *flags) +{ + int i; + for (i = 1; argv[i] != NULL; i++) { + if (argv[i][0] != '-') /* Not an option? */ + return i; + switch (argv[i][1]) { /* Check option. */ + case '-': + notail(argv[i]); + return (argv[i+1] != NULL ? i+1 : 0); + case '\0': + return i; + case 'i': + notail(argv[i]); + *flags |= FLAGS_INTERACTIVE; + /* fallthrough */ + case 'v': + notail(argv[i]); + *flags |= FLAGS_VERSION; + break; + case 'e': + *flags |= FLAGS_EXEC; + case 'j': /* LuaJIT extension */ + case 'l': + *flags |= FLAGS_OPTION; + if (argv[i][2] == '\0') { + i++; + if (argv[i] == NULL) return -1; + } + break; + case 'O': break; /* LuaJIT extension */ + case 'b': /* LuaJIT extension */ + if (*flags) return -1; + *flags |= FLAGS_EXEC; + return 0; + case 'E': + *flags |= FLAGS_NOENV; + break; + default: return -1; /* invalid option */ + } + } + return 0; +} + +static int runargs(lua_State *L, char **argv, int n) +{ + int i; + for (i = 1; i < n; i++) { + if (argv[i] == NULL) continue; + lua_assert(argv[i][0] == '-'); + switch (argv[i][1]) { /* option */ + case 'e': { + const char *chunk = argv[i] + 2; + if (*chunk == '\0') chunk = argv[++i]; + lua_assert(chunk != NULL); + if (dostring(L, chunk, "=(command line)") != 0) + return 1; + break; + } + case 'l': { + const char *filename = argv[i] + 2; + if (*filename == '\0') filename = argv[++i]; + lua_assert(filename != NULL); + if (dolibrary(L, filename)) + return 1; /* stop if file fails */ + break; + } + case 'j': { /* LuaJIT extension */ + const char *cmd = argv[i] + 2; + if (*cmd == '\0') cmd = argv[++i]; + lua_assert(cmd != NULL); + if (dojitcmd(L, cmd)) + return 1; + break; + } + case 'O': /* LuaJIT extension */ + if (dojitopt(L, argv[i] + 2)) + return 1; + break; + case 'b': /* LuaJIT extension */ + return dobytecode(L, argv+i); + default: break; + } + } + return 0; +} + +static int handle_luainit(lua_State *L) +{ +#if LJ_TARGET_CONSOLE + const char *init = NULL; +#else + const char *init = getenv(LUA_INIT); +#endif + if (init == NULL) + return 0; /* status OK */ + else if (init[0] == '@') + return dofile(L, init+1); + else + return dostring(L, init, "=" LUA_INIT); +} + +static struct Smain { + char **argv; + int argc; + int status; +} smain; + +static int pmain(lua_State *L) +{ + struct Smain *s = &smain; + char **argv = s->argv; + int script; + int flags = 0; + globalL = L; + if (argv[0] && argv[0][0]) progname = argv[0]; + LUAJIT_VERSION_SYM(); /* linker-enforced version check */ + script = collectargs(argv, &flags); + if (script < 0) { /* invalid args? 
*/ + print_usage(); + s->status = 1; + return 0; + } + if ((flags & FLAGS_NOENV)) { + lua_pushboolean(L, 1); + lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV"); + } + lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ + luaL_openlibs(L); /* open libraries */ + lua_gc(L, LUA_GCRESTART, -1); + if (!(flags & FLAGS_NOENV)) { + s->status = handle_luainit(L); + if (s->status != 0) return 0; + } + if ((flags & FLAGS_VERSION)) print_version(); + s->status = runargs(L, argv, (script > 0) ? script : s->argc); + if (s->status != 0) return 0; + if (script) { + s->status = handle_script(L, argv, script); + if (s->status != 0) return 0; + } + if ((flags & FLAGS_INTERACTIVE)) { + print_jit_status(L); + dotty(L); + } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) { + if (lua_stdin_is_tty()) { + print_version(); + print_jit_status(L); + dotty(L); + } else { + dofile(L, NULL); /* executes stdin as a file */ + } + } + return 0; +} + +int main(int argc, char **argv) +{ + int status; + lua_State *L = lua_open(); /* create state */ + if (L == NULL) { + l_message(argv[0], "cannot create state: not enough memory"); + return EXIT_FAILURE; + } + smain.argc = argc; + smain.argv = argv; + status = lua_cpcall(L, pmain, NULL); + report(L, status); + lua_close(L); + return (status || smain.status) ? EXIT_FAILURE : EXIT_SUCCESS; +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/luajit.h b/Build/source/libs/luajit/LuaJIT-src/src/luajit.h new file mode 100644 index 00000000000..960418519ee --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/luajit.h @@ -0,0 +1,79 @@ +/* +** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ +** +** Copyright (C) 2005-2015 Mike Pall. All rights reserved. +** +** Permission is hereby granted, free of charge, to any person obtaining +** a copy of this software and associated documentation files (the +** "Software"), to deal in the Software without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Software, and to +** permit persons to whom the Software is furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be +** included in all copies or substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +** +** [ MIT license: http://www.opensource.org/licenses/mit-license.php ] +*/ + +#ifndef _LUAJIT_H +#define _LUAJIT_H + +#include "lua.h" + +#define LUAJIT_VERSION "LuaJIT 2.1.0-beta1" +#define LUAJIT_VERSION_NUM 20100 /* Version 2.1.0 = 02.01.00. */ +#define LUAJIT_VERSION_SYM luaJIT_version_2_1_0_beta1 +#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2015 Mike Pall" +#define LUAJIT_URL "http://luajit.org/" + +/* Modes for luaJIT_setmode. */ +#define LUAJIT_MODE_MASK 0x00ff + +enum { + LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */ + LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */ + + LUAJIT_MODE_FUNC, /* Change mode for a function. 
*/ + LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */ + LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */ + + LUAJIT_MODE_TRACE, /* Flush a compiled trace. */ + + LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */ + + LUAJIT_MODE_MAX +}; + +/* Flags or'ed in to the mode. */ +#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */ +#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */ +#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */ + +/* LuaJIT public C API. */ + +/* Control the JIT engine. */ +LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode); + +/* Low-overhead profiling API. */ +typedef void (*luaJIT_profile_callback)(void *data, lua_State *L, + int samples, int vmstate); +LUA_API void luaJIT_profile_start(lua_State *L, const char *mode, + luaJIT_profile_callback cb, void *data); +LUA_API void luaJIT_profile_stop(lua_State *L); +LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt, + int depth, size_t *len); + +/* Enforce (dynamic) linker error for version mismatches. Call from main. */ +LUA_API void LUAJIT_VERSION_SYM(void); + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lualib.h b/Build/source/libs/luajit/LuaJIT-src/src/lualib.h new file mode 100644 index 00000000000..58dd3349aa9 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/lualib.h @@ -0,0 +1,47 @@ +/* +** Standard library header. +** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LUALIB_H +#define _LUALIB_H + +#include "lua.h" + +#define LUA_FILEHANDLE "FILE*" + +#define LUA_COLIBNAME "coroutine" +#define LUA_MATHLIBNAME "math" +#define LUA_STRLIBNAME "string" +#define LUA_TABLIBNAME "table" +#define LUA_IOLIBNAME "io" +#define LUA_OSLIBNAME "os" +#define LUA_LOADLIBNAME "package" +#define LUA_DBLIBNAME "debug" +#define LUA_BITLIBNAME "bit" +#define LUA_JITLIBNAME "jit" +#define LUA_FFILIBNAME "ffi" + +#define LUA_BITLIBNAME_32 "bit32" + +LUALIB_API int luaopen_base(lua_State *L); +LUALIB_API int luaopen_math(lua_State *L); +LUALIB_API int luaopen_string(lua_State *L); +LUALIB_API int luaopen_table(lua_State *L); +LUALIB_API int luaopen_io(lua_State *L); +LUALIB_API int luaopen_os(lua_State *L); +LUALIB_API int luaopen_package(lua_State *L); +LUALIB_API int luaopen_debug(lua_State *L); +LUALIB_API int luaopen_bit(lua_State *L); +LUALIB_API int luaopen_jit(lua_State *L); +LUALIB_API int luaopen_ffi(lua_State *L); + +LUALIB_API int luaopen_bit32(lua_State *L); + +LUALIB_API void luaL_openlibs(lua_State *L); + +#ifndef lua_assert +#define lua_assert(x) ((void)0) +#endif + +#endif diff --git a/Build/source/libs/luajit/LuaJIT-src/src/msvcbuild.bat b/Build/source/libs/luajit/LuaJIT-src/src/msvcbuild.bat new file mode 100644 index 00000000000..0360d7e33b8 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/msvcbuild.bat @@ -0,0 +1,114 @@ +@rem Script to build LuaJIT with MSVC. +@rem Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +@rem +@rem Either open a "Visual Studio .NET Command Prompt" +@rem (Note that the Express Edition does not contain an x64 compiler) +@rem -or- +@rem Open a "Windows SDK Command Shell" and set the compiler environment: +@rem setenv /release /x86 +@rem -or- +@rem setenv /release /x64 +@rem +@rem Then cd to this directory and run this script. 
+ +@if not defined INCLUDE goto :FAIL + +@setlocal +@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE +@set LJLINK=link /nologo +@set LJMT=mt /nologo +@set LJLIB=lib /nologo /nodefaultlib +@set DASMDIR=..\dynasm +@set DASM=%DASMDIR%\dynasm.lua +@set LJDLLNAME=lua51.dll +@set LJLIBNAME=lua51.lib +@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +%LJCOMPILE% host\minilua.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:minilua.exe minilua.obj +@if errorlevel 1 goto :BAD +if exist minilua.exe.manifest^ + %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe + +@set DASMFLAGS=-D WIN -D JIT -D FFI -D P64 +@set LJARCH=x64 +@minilua +@if errorlevel 8 goto :X64 +@set DASMFLAGS=-D WIN -D JIT -D FFI +@set LJARCH=x86 +@set LJCOMPILE=%LJCOMPILE% /arch:SSE2 +:X64 +minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x86.dasc +@if errorlevel 1 goto :BAD + +%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:buildvm.exe buildvm*.obj +@if errorlevel 1 goto :BAD +if exist buildvm.exe.manifest^ + %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe + +buildvm -m peobj -o lj_vm.obj +@if errorlevel 1 goto :BAD +buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m libdef -o lj_libdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m recdef -o lj_recdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m folddef -o lj_folddef.h lj_opt_fold.c +@if errorlevel 1 goto :BAD + +@if "%1" neq "debug" goto :NODEBUG +@shift +@set LJCOMPILE=%LJCOMPILE% /Zi +@set LJLINK=%LJLINK% /debug +:NODEBUG +@if "%1"=="amalg" goto :AMALGDLL +@if "%1"=="static" goto :STATIC +%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c +@if errorlevel 1 goto :BAD +%LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj +@if errorlevel 1 goto :BAD +@goto :MTDLL +:STATIC +%LJCOMPILE% lj_*.c lib_*.c +@if errorlevel 1 goto :BAD +%LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj +@if errorlevel 1 goto :BAD +@goto :MTDLL +:AMALGDLL +%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c +@if errorlevel 1 goto :BAD +%LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj +@if errorlevel 1 goto :BAD +:MTDLL +if exist %LJDLLNAME%.manifest^ + %LJMT% -manifest %LJDLLNAME%.manifest -outputresource:%LJDLLNAME%;2 + +%LJCOMPILE% luajit.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:luajit.exe luajit.obj %LJLIBNAME% +@if errorlevel 1 goto :BAD +if exist luajit.exe.manifest^ + %LJMT% -manifest luajit.exe.manifest -outputresource:luajit.exe + +@del *.obj *.manifest minilua.exe buildvm.exe +@echo. +@echo === Successfully built LuaJIT for Windows/%LJARCH% === + +@goto :END +:BAD +@echo. +@echo ******************************************************* +@echo *** Build FAILED -- Please check the error messages *** +@echo ******************************************************* +@goto :END +:FAIL +@echo You must open a "Visual Studio .NET Command Prompt" to run this script +:END diff --git a/Build/source/libs/luajit/LuaJIT-src/src/ps4build.bat b/Build/source/libs/luajit/LuaJIT-src/src/ps4build.bat new file mode 100644 index 00000000000..337a44fa72d --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/ps4build.bat @@ -0,0 +1,103 @@ +@rem Script to build LuaJIT with the PS4 SDK. +@rem Donated to the public domain. 
+@rem +@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler) +@rem Then cd to this directory and run this script. + +@if not defined INCLUDE goto :FAIL +@if not defined SCE_ORBIS_SDK_DIR goto :FAIL + +@setlocal +@rem ---- Host compiler ---- +@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE +@set LJLINK=link /nologo +@set LJMT=mt /nologo +@set DASMDIR=..\dynasm +@set DASM=%DASMDIR%\dynasm.lua +@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +%LJCOMPILE% host\minilua.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:minilua.exe minilua.obj +@if errorlevel 1 goto :BAD +if exist minilua.exe.manifest^ + %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe + +@rem Check for 64 bit host compiler. +@minilua +@if not errorlevel 8 goto :FAIL + +@set DASMFLAGS=-D P64 -D NO_UNWIND +minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x86.dasc +@if errorlevel 1 goto :BAD + +%LJCOMPILE% /I "." /I %DASMDIR% -DLUAJIT_TARGET=LUAJIT_ARCH_X64 -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_NO_UNWIND host\buildvm*.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:buildvm.exe buildvm*.obj +@if errorlevel 1 goto :BAD +if exist buildvm.exe.manifest^ + %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe + +buildvm -m elfasm -o lj_vm.s +@if errorlevel 1 goto :BAD +buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m libdef -o lj_libdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m recdef -o lj_recdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m folddef -o lj_folddef.h lj_opt_fold.c +@if errorlevel 1 goto :BAD + +@rem ---- Cross compiler ---- +@set LJCOMPILE="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-clang" -c -Wall -DLUAJIT_DISABLE_FFI +@set LJLIB="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-ar" rcus +@set INCLUDE="" + +orbis-as -o lj_vm.o lj_vm.s + +@if "%1" neq "debug" goto :NODEBUG +@shift +@set LJCOMPILE=%LJCOMPILE% -g -O0 +@set TARGETLIB=libluajitD.a +goto :BUILD +:NODEBUG +@set LJCOMPILE=%LJCOMPILE% -O2 +@set TARGETLIB=libluajit.a +:BUILD +del %TARGETLIB% +@if "%1"=="amalg" goto :AMALG +for %%f in (lj_*.c lib_*.c) do ( + %LJCOMPILE% %%f + @if errorlevel 1 goto :BAD +) + +%LJLIB% %TARGETLIB% lj_*.o lib_*.o +@if errorlevel 1 goto :BAD +@goto :NOAMALG +:AMALG +%LJCOMPILE% ljamalg.c +@if errorlevel 1 goto :BAD +%LJLIB% %TARGETLIB% ljamalg.o lj_vm.o +@if errorlevel 1 goto :BAD +:NOAMALG + +@del *.o *.obj *.manifest minilua.exe buildvm.exe +@echo. +@echo === Successfully built LuaJIT for PS4 === + +@goto :END +:BAD +@echo. +@echo ******************************************************* +@echo *** Build FAILED -- Please check the error messages *** +@echo ******************************************************* +@goto :END +:FAIL +@echo To run this script you must open a "Visual Studio .NET Command Prompt" +@echo (64 bit host compiler). The PS4 Orbis SDK must be installed, too. +:END diff --git a/Build/source/libs/luajit/LuaJIT-src/src/psvitabuild.bat b/Build/source/libs/luajit/LuaJIT-src/src/psvitabuild.bat new file mode 100644 index 00000000000..3991dc65353 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/psvitabuild.bat @@ -0,0 +1,93 @@ +@rem Script to build LuaJIT with the PS Vita SDK. +@rem Donated to the public domain. 
+@rem +@rem Open a "Visual Studio .NET Command Prompt" (32 bit host compiler) +@rem Then cd to this directory and run this script. + +@if not defined INCLUDE goto :FAIL +@if not defined SCE_PSP2_SDK_DIR goto :FAIL + +@setlocal +@rem ---- Host compiler ---- +@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE +@set LJLINK=link /nologo +@set LJMT=mt /nologo +@set DASMDIR=..\dynasm +@set DASM=%DASMDIR%\dynasm.lua +@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +%LJCOMPILE% host\minilua.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:minilua.exe minilua.obj +@if errorlevel 1 goto :BAD +if exist minilua.exe.manifest^ + %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe + +@rem Check for 32 bit host compiler. +@minilua +@if errorlevel 8 goto :FAIL + +@set DASMFLAGS=-D FPU -D HFABI +minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_arm.dasc +@if errorlevel 1 goto :BAD + +%LJCOMPILE% /I "." /I %DASMDIR% -DLUAJIT_TARGET=LUAJIT_ARCH_ARM -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLJ_TARGET_PSVITA=1 host\buildvm*.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:buildvm.exe buildvm*.obj +@if errorlevel 1 goto :BAD +if exist buildvm.exe.manifest^ + %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe + +buildvm -m elfasm -o lj_vm.s +@if errorlevel 1 goto :BAD +buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m libdef -o lj_libdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m recdef -o lj_recdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m folddef -o lj_folddef.h lj_opt_fold.c +@if errorlevel 1 goto :BAD + +@rem ---- Cross compiler ---- +@set LJCOMPILE="%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2snc" -c -w -DLUAJIT_DISABLE_FFI -DLUAJIT_USE_SYSMALLOC +@set LJLIB="%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2ld32" -r --output= +@set INCLUDE="" + +"%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2as" -o lj_vm.o lj_vm.s + +@if "%1" neq "debug" goto :NODEBUG +@shift +@set LJCOMPILE=%LJCOMPILE% -g -O0 +@set TARGETLIB=libluajitD.a +goto :BUILD +:NODEBUG +@set LJCOMPILE=%LJCOMPILE% -O2 +@set TARGETLIB=libluajit.a +:BUILD +del %TARGETLIB% + +%LJCOMPILE% ljamalg.c +@if errorlevel 1 goto :BAD +%LJLIB%%TARGETLIB% ljamalg.o lj_vm.o +@if errorlevel 1 goto :BAD + +@del *.o *.obj *.manifest minilua.exe buildvm.exe +@echo. +@echo === Successfully built LuaJIT for PS Vita === + +@goto :END +:BAD +@echo. +@echo ******************************************************* +@echo *** Build FAILED -- Please check the error messages *** +@echo ******************************************************* +@goto :END +:FAIL +@echo To run this script you must open a "Visual Studio .NET Command Prompt" +@echo (32 bit host compiler). The PS Vita SDK must be installed, too. +:END diff --git a/Build/source/libs/luajit/LuaJIT-src/src/vm_arm.dasc b/Build/source/libs/luajit/LuaJIT-src/src/vm_arm.dasc new file mode 100644 index 00000000000..0bd9b147ad3 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/vm_arm.dasc @@ -0,0 +1,4585 @@ +|// Low-level VM code for ARM CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +| +|.arch arm +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|// Note: The ragged indentation of the instructions is intentional. +|// The starting columns indicate data dependencies. +| +|//----------------------------------------------------------------------- +| +|// Fixed register assignments for the interpreter. +| +|// The following must be C callee-save. +|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding. +|.define KBASE, r5 // Constants of current Lua function. +|.define PC, r6 // Next PC. +|.define DISPATCH, r7 // Opcode dispatch table. +|.define LREG, r8 // Register holding lua_State (also in SAVE_L). +| +|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+. +|.define BASE, r9 // Base of current Lua stack frame. +| +|// The following temporaries are not saved across C calls, except for RA/RC. +|.define RA, r10 // Callee-save. +|.define RC, r11 // Callee-save. +|.define RB, r12 +|.define OP, r12 // Overlaps RB, must not be lr. +|.define INS, lr +| +|// Calling conventions. Also used as temporaries. +|.define CARG1, r0 +|.define CARG2, r1 +|.define CARG3, r2 +|.define CARG4, r3 +|.define CARG12, r0 // For 1st soft-fp double. +|.define CARG34, r2 // For 2nd soft-fp double. +| +|.define CRET1, r0 +|.define CRET2, r1 +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|.define SAVE_R4, [sp, #28] +|.define CFRAME_SPACE, #28 +|.define SAVE_ERRF, [sp, #24] +|.define SAVE_NRES, [sp, #20] +|.define SAVE_CFRAME, [sp, #16] +|.define SAVE_L, [sp, #12] +|.define SAVE_PC, [sp, #8] +|.define SAVE_MULTRES, [sp, #4] +|.define ARG5, [sp] +| +|.define TMPDhi, [sp, #4] +|.define TMPDlo, [sp] +|.define TMPD, [sp] +|.define TMPDp, sp +| +|.if FPU +|.macro saveregs +| push {r5, r6, r7, r8, r9, r10, r11, lr} +| vpush {d8-d15} +| sub sp, sp, CFRAME_SPACE+4 +| str r4, SAVE_R4 +|.endmacro +|.macro restoreregs_ret +| ldr r4, SAVE_R4 +| add sp, sp, CFRAME_SPACE+4 +| vpop {d8-d15} +| pop {r5, r6, r7, r8, r9, r10, r11, pc} +|.endmacro +|.else +|.macro saveregs +| push {r4, r5, r6, r7, r8, r9, r10, r11, lr} +| sub sp, sp, CFRAME_SPACE +|.endmacro +|.macro restoreregs_ret +| add sp, sp, CFRAME_SPACE +| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} +|.endmacro +|.endif +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State, LREG +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS8, int +|.type TRACE, GCtrace +|.type SBUF, SBuf +| +|//----------------------------------------------------------------------- +| +|// Trap for not-yet-implemented parts. +|.macro NYI; ud; .endmacro +| +|//----------------------------------------------------------------------- +| +|// Access to frame relative to BASE. +|.define FRAME_FUNC, #-8 +|.define FRAME_PC, #-4 +| +|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro +|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro +|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro +|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro +|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro +| +|// Instruction fetch. +|.macro ins_NEXT1 +| ldrb OP, [PC] +|.endmacro +|.macro ins_NEXT2 +| ldr INS, [PC], #4 +|.endmacro +|// Instruction decode+dispatch. 
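+|// The opcode byte indexes the dispatch table at DISPATCH (one 32-bit
+|// handler address per opcode), so the next handler is reached with a
+|// single indirect branch after pre-decoding the A*8 and D operand fields.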
+|.macro ins_NEXT3 +| ldr OP, [DISPATCH, OP, lsl #2] +| decode_RA8 RA, INS +| decode_RD RC, INS +| bx OP +|.endmacro +|.macro ins_NEXT +| ins_NEXT1 +| ins_NEXT2 +| ins_NEXT3 +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +| .define ins_next1, ins_NEXT1 +| .define ins_next2, ins_NEXT2 +| .define ins_next3, ins_NEXT3 +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| .macro ins_next +| b ->ins_next +| .endmacro +| .macro ins_next1 +| .endmacro +| .macro ins_next2 +| .endmacro +| .macro ins_next3 +| b ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Avoid register name substitution for field name. +#define field_pc pc +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC +| ldr PC, LFUNC:CARG3->field_pc +| ldrb OP, [PC] // STALL: load PC. early PC. +| ldr INS, [PC], #4 +| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP. +| decode_RA8 RA, INS +| add RA, RA, BASE +| bx OP +|.endmacro +| +|.macro ins_call +| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC +| str PC, [BASE, FRAME_PC] +| ins_callt // STALL: locked PC. +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to test operand types. +|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro +|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro +|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro +|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro +|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro +|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|.macro hotcheck, delta +| lsr CARG1, PC, #1 +| and CARG1, CARG1, #126 +| sub CARG1, CARG1, #-GG_DISP2HOT +| ldrh CARG2, [DISPATCH, CARG1] +| subs CARG2, CARG2, #delta +| strh CARG2, [DISPATCH, CARG1] +|.endmacro +| +|.macro hotloop +| hotcheck HOTCOUNT_LOOP +| blo ->vm_hotloop +|.endmacro +| +|.macro hotcall +| hotcheck HOTCOUNT_CALL +| blo ->vm_hotcall +|.endmacro +| +|// Set current VM state. +|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro +|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro +| +|// Move table write barrier back. Overwrites mark and tmp. +|.macro barrierback, tab, mark, tmp +| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)] +| bic mark, mark, #LJ_GC_BLACK // black2gray(tab) +| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)] +| strb mark, tab->marked +| str tmp, tab->gclist +|.endmacro +| +|.macro .IOS, a, b +|.if IOS +| a, b +|.endif +|.endmacro +| +|//----------------------------------------------------------------------- + +#if !LJ_DUALNUM +#error "Only dual-number mode supported for ARM target" +#endif + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. 
*/ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | // See vm_return. Also: RB = previous base. + | tst PC, #FRAME_P + | beq ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame. + | mvn CARG2, #~LJ_TTRUE + | mov BASE, RB + | // Prepending may overwrite the pcall frame, so do it at the end. + | str CARG2, [RA, FRAME_PC] // Prepend true to results. + | sub RA, RA, #8 + | + |->vm_returnc: + | adds RC, RC, #8 // RC = (nresults+1)*8. + | mov CRET1, #LUA_YIELD + | beq ->vm_unwind_c_eh + | str RC, SAVE_MULTRES + | ands CARG1, PC, #FRAME_TYPE + | beq ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return + | // CARG1 = PC & FRAME_TYPE + | bic RB, PC, #FRAME_TYPEP + | cmp CARG1, #FRAME_C + | sub RB, BASE, RB // RB = previous base. + | bne ->vm_returnp + | + | str RB, L->base + | ldr KBASE, SAVE_NRES + | mv_vmstate CARG4, C + | sub BASE, BASE, #8 + | subs CARG3, RC, #8 + | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8 + | st_vmstate CARG4 + | beq >2 + |1: + | subs CARG3, CARG3, #8 + | ldrd CARG12, [RA], #8 + | strd CARG12, [BASE], #8 + | bne <1 + |2: + | cmp KBASE, RC // More/less results wanted? + | bne >6 + |3: + | str BASE, L->top // Store new top. + | + |->vm_leave_cp: + | ldr RC, SAVE_CFRAME // Restore previous C frame. + | mov CRET1, #0 // Ok return status for vm_pcall. + | str RC, L->cframe + | + |->vm_leave_unw: + | restoreregs_ret + | + |6: + | blt >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | ldr CARG3, L->maxstack + | mvn CARG2, #~LJ_TNIL + | cmp BASE, CARG3 + | bhs >8 + | str CARG2, [BASE, #4] + | add RC, RC, #8 + | add BASE, BASE, #8 + | b <2 + | + |7: // Less results wanted. + | sub CARG1, RC, KBASE + | cmp KBASE, #0 // LUA_MULTRET+1 case? + | subne BASE, BASE, CARG1 // Either keep top or shrink it. + | b <3 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | str BASE, L->top // Save current top held in BASE (yes). + | lsr CARG2, KBASE, #3 + | mov CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->top // Need the (realloced) L->top in BASE. + | b <2 + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + | mov sp, CARG1 + | mov CRET1, CARG2 + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | ldr L, SAVE_L + | mv_vmstate CARG4, C + | ldr GL:CARG3, L->glref + | str CARG4, GL:CARG3->vmstate + | b ->vm_leave_unw + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated. + | mov sp, CARG1 + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | ldr L, SAVE_L + | mov MASKR8, #255 + | mov RC, #16 // 2 results: false + error message. + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | ldr BASE, L->base + | ldr DISPATCH, L->glref // Setup pointer to dispatch table. 
+ | mvn CARG1, #~LJ_TFALSE + | sub RA, BASE, #8 // Results start at BASE-8. + | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame. + | add DISPATCH, DISPATCH, #GG_G2DISP + | mv_vmstate CARG2, INTERP + | str CARG1, [BASE, #-4] // Prepend false to error message. + | st_vmstate CARG2 + | b ->vm_returnc + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | // CARG1 = L + | mov CARG2, #LUA_MINSTACK + | b >2 + | + |->vm_growstack_l: // Grow stack for Lua function. + | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC + | add RC, BASE, RC + | sub RA, RA, BASE + | mov CARG1, L + | str BASE, L->base + | add PC, PC, #4 // Must point after first instruction. + | str RC, L->top + | lsr CARG2, RA, #3 + |2: + | // L->base = new base, L->top = top + | str PC, SAVE_PC + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | ldr RC, L->top + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | sub NARGS8:RC, RC, BASE + | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | mov L, CARG1 + | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table. + | mov BASE, CARG2 + | add DISPATCH, DISPATCH, #GG_G2DISP + | str L, SAVE_L + | mov PC, #FRAME_CP + | str CARG3, SAVE_NRES + | add CARG2, sp, #CFRAME_RESUME + | ldrb CARG1, L->status + | str CARG3, SAVE_ERRF + | str L, SAVE_PC // Any value outside of bytecode is ok. + | str CARG3, SAVE_CFRAME + | cmp CARG1, #0 + | str CARG2, L->cframe + | beq >3 + | + | // Resume after yield (like a return). + | str L, [DISPATCH, #DISPATCH_GL(cur_L)] + | mov RA, BASE + | ldr BASE, L->base + | ldr CARG1, L->top + | mov MASKR8, #255 + | strb CARG3, L->status + | sub RC, CARG1, BASE + | ldr PC, [BASE, FRAME_PC] + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | mv_vmstate CARG2, INTERP + | add RC, RC, #8 + | ands CARG1, PC, #FRAME_TYPE + | st_vmstate CARG2 + | str RC, SAVE_MULTRES + | beq ->BC_RET_Z + | b ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | mov PC, #FRAME_CP + | str CARG4, SAVE_ERRF + | b >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | mov PC, #FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + | ldr RC, L:CARG1->cframe + | str CARG3, SAVE_NRES + | mov L, CARG1 + | str CARG1, SAVE_L + | ldr DISPATCH, L->glref // Setup pointer to dispatch table. + | mov BASE, CARG2 + | str CARG1, SAVE_PC // Any value outside of bytecode is ok. + | str RC, SAVE_CFRAME + | add DISPATCH, DISPATCH, #GG_G2DISP + | str sp, L->cframe // Add our C frame to cframe chain. + | + |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). + | str L, [DISPATCH, #DISPATCH_GL(cur_L)] + | ldr RB, L->base // RB = old base (for vmeta_call). 
+ | ldr CARG1, L->top + | mov MASKR8, #255 + | add PC, PC, BASE + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | sub PC, PC, RB // PC = frame delta + frame type + | mv_vmstate CARG2, INTERP + | sub NARGS8:RC, CARG1, BASE + | st_vmstate CARG2 + | + |->vm_call_dispatch: + | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC + | ldrd CARG34, [BASE, FRAME_FUNC] + | checkfunc CARG4, ->vmeta_call + | + |->vm_call_dispatch_f: + | ins_call + | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | mov L, CARG1 + | ldr RA, L:CARG1->stack + | str CARG1, SAVE_L + | ldr DISPATCH, L->glref // Setup pointer to dispatch table. + | ldr RB, L->top + | str CARG1, SAVE_PC // Any value outside of bytecode is ok. + | ldr RC, L->cframe + | add DISPATCH, DISPATCH, #GG_G2DISP + | sub RA, RA, RB // Compute -savestack(L, L->top). + | mov RB, #0 + | str RA, SAVE_NRES // Neg. delta means cframe w/o frame. + | str RB, SAVE_ERRF // No error function. + | str RC, SAVE_CFRAME + | str sp, L->cframe // Add our C frame to cframe chain. + | str L, [DISPATCH, #DISPATCH_GL(cur_L)] + | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud) + | movs BASE, CRET1 + | mov PC, #FRAME_CP + | bne <3 // Else continue with the call. + | b ->vm_leave_cp // No base? Just remove C frame. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8 + | ldr LFUNC:CARG3, [RB, FRAME_FUNC] + | ldr CARG1, [BASE, #-16] // Get continuation. + | mov CARG4, BASE + | mov BASE, RB // Restore caller BASE. + |.if FFI + | cmp CARG1, #1 + |.endif + | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC]. + | ldr CARG3, LFUNC:CARG3->field_pc + | mvn INS, #~LJ_TNIL + | add CARG2, RA, RC + | str INS, [CARG2, #-4] // Ensure one valid arg. + |.if FFI + | bls >1 + |.endif + | ldr KBASE, [CARG3, #PC2PROTO(k)] + | // BASE = base, RA = resultptr, CARG4 = meta base + | bx CARG1 + | + |.if FFI + |1: + | beq ->cont_ffi_callback // cont = 1: return from FFI callback. + | // cont = 0: tailcall from C function. 
+ | sub CARG4, CARG4, #16 + | sub RC, CARG4, BASE + | b ->vm_call_tail + |.endif + | + |->cont_cat: // RA = resultptr, CARG4 = meta base + | ldr INS, [PC, #-4] + | sub CARG2, CARG4, #16 + | ldrd CARG34, [RA] + | str BASE, L->base + | decode_RB8 RC, INS + | decode_RA8 RA, INS + | add CARG1, BASE, RC + | subs CARG1, CARG2, CARG1 + | strdne CARG34, [CARG2] + | movne CARG3, CARG1 + | bne ->BC_CAT_Z + | strd CARG34, [BASE, RA] + | b ->cont_nop + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets1: + | add CARG2, BASE, RB + | b >2 + | + |->vmeta_tgets: + | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv) + | mvn CARG4, #~LJ_TTAB + | str TAB:RB, [CARG2] + | str CARG4, [CARG2, #4] + |2: + | mvn CARG4, #~LJ_TSTR + | str STR:RC, TMPDlo + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tgetb: // RC = index + | decode_RB8 RB, INS + | str RC, TMPDlo + | mvn CARG4, #~LJ_TISNUM + | add CARG2, BASE, RB + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tgetv: + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | .IOS ldr BASE, L->base + | cmp CRET1, #0 + | beq >3 + | ldrd CARG34, [CRET1] + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | rsb CARG1, BASE, #FRAME_CONT + | ldr BASE, L->top + | mov NARGS8:RC, #16 // 2 args for func(t, k). + | str PC, [BASE, #-12] // [cont|PC] + | add PC, CARG1, BASE + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | b ->vm_call_dispatch_f + | + |->vmeta_tgetr: + | .IOS mov RC, BASE + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. + | .IOS mov BASE, RC + | cmp CRET1, #0 + | ldrdne CARG12, [CRET1] + | mvneq CARG2, #~LJ_TNIL + | b ->BC_TGETR_Z + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets1: + | add CARG2, BASE, RB + | b >2 + | + |->vmeta_tsets: + | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv) + | mvn CARG4, #~LJ_TTAB + | str TAB:RB, [CARG2] + | str CARG4, [CARG2, #4] + |2: + | mvn CARG4, #~LJ_TSTR + | str STR:RC, TMPDlo + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tsetb: // RC = index + | decode_RB8 RB, INS + | str RC, TMPDlo + | mvn CARG4, #~LJ_TISNUM + | add CARG2, BASE, RB + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tsetv: + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | .IOS ldr BASE, L->base + | cmp CRET1, #0 + | ldrd CARG34, [BASE, RA] + | beq >3 + | ins_next1 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | strd CARG34, [CRET1] + | ins_next2 + | ins_next3 + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | rsb CARG1, BASE, #FRAME_CONT + | ldr BASE, L->top + | mov NARGS8:RC, #24 // 3 args for func(t, k, v). + | strd CARG34, [BASE, #16] // Copy value to third argument. + | str PC, [BASE, #-12] // [cont|PC] + | add PC, CARG1, BASE + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. 
+ | b ->vm_call_dispatch_f + | + |->vmeta_tsetr: + | str BASE, L->base + | .IOS mov RC, BASE + | str PC, SAVE_PC + | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) + | // Returns TValue *. + | .IOS mov BASE, RC + | b ->BC_TSETR_Z + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | mov CARG1, L + | sub PC, PC, #4 + | mov CARG2, RA + | str BASE, L->base + | mov CARG3, RC + | str PC, SAVE_PC + | decode_OP CARG4, INS + | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // Returns 0/1 or TValue * (metamethod). + |3: + | .IOS ldr BASE, L->base + | cmp CRET1, #1 + | bhi ->vmeta_binop + |4: + | ldrh RB, [PC, #2] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | subhs PC, RB, #0x20000 + |->cont_nop: + | ins_next + | + |->cont_ra: // RA = resultptr + | ldr INS, [PC, #-4] + | ldrd CARG12, [RA] + | decode_RA8 CARG3, INS + | strd CARG12, [BASE, CARG3] + | b ->cont_nop + | + |->cont_condt: // RA = resultptr + | ldr CARG2, [RA, #4] + | mvn CARG1, #~LJ_TTRUE + | cmp CARG1, CARG2 // Branch if result is true. + | b <4 + | + |->cont_condf: // RA = resultptr + | ldr CARG2, [RA, #4] + | checktp CARG2, LJ_TFALSE // Branch if result is false. + | b <4 + | + |->vmeta_equal: + | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + | + |->vmeta_equal_cd: + |.if FFI + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | mov CARG2, INS + | str PC, SAVE_PC + | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + |.endif + | + |->vmeta_istype: + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | lsr CARG2, RA, #3 + | mov CARG3, RC + | str PC, SAVE_PC + | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) + | .IOS ldr BASE, L->base + | b ->cont_nop + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vn: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG3, BASE, RB + | add CARG4, KBASE, RC + | b >1 + | + |->vmeta_arith_nv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG4, BASE, RB + | add CARG3, KBASE, RC + | b >1 + | + |->vmeta_unm: + | ldr INS, [PC, #-8] + | sub PC, PC, #4 + | add CARG3, BASE, RC + | add CARG4, BASE, RC + | b >1 + | + |->vmeta_arith_vv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG3, BASE, RB + | add CARG4, BASE, RC + |1: + | decode_OP OP, INS + | add CARG2, BASE, RA + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | str OP, ARG5 + | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // Returns NULL (finished) or TValue * (metamethod). + | .IOS ldr BASE, L->base + | cmp CRET1, #0 + | beq ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 + | sub CARG2, CRET1, BASE + | str PC, [CRET1, #-12] // [cont|PC] + | add PC, CARG2, #FRAME_CONT + | mov BASE, CRET1 + | mov NARGS8:RC, #16 // 2 args for func(o1, o2). + | b ->vm_call_dispatch + | + |->vmeta_len: + | add CARG2, BASE, RC + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_len // (lua_State *L, TValue *o) + | // Returns NULL (retry) or TValue * (metamethod base). 
+ | .IOS ldr BASE, L->base +#if LJ_52 + | cmp CRET1, #0 + | bne ->vmeta_binop // Binop call for compatibility. + | ldr TAB:CARG1, [BASE, RC] + | b ->BC_LEN_Z +#else + | b ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call: // Resolve and call __call metamethod. + | // RB = old base, BASE = new base, RC = nargs*8 + | mov CARG1, L + | str RB, L->base // This is the callers base! + | sub CARG2, BASE, #8 + | str PC, SAVE_PC + | add CARG3, BASE, NARGS8:RC + | .IOS mov RA, BASE + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | .IOS mov BASE, RA + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. + | ins_call + | + |->vmeta_callt: // Resolve __call for BC_CALLT. + | // BASE = old base, RA = new base, RC = nargs*8 + | mov CARG1, L + | str BASE, L->base + | sub CARG2, RA, #8 + | str PC, SAVE_PC + | add CARG3, RA, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | .IOS ldr BASE, L->base + | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here. + | ldr PC, [BASE, FRAME_PC] + | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. + | b ->BC_CALLT2_Z + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mov CARG1, L + | str BASE, L->base + | mov CARG2, RA + | str PC, SAVE_PC + | bl extern lj_meta_for // (lua_State *L, TValue *base) + | .IOS ldr BASE, L->base + |.if JIT + | ldrb OP, [PC, #-4] + |.endif + | ldr INS, [PC, #-4] + |.if JIT + | cmp OP, #BC_JFORI + |.endif + | decode_RA8 RA, INS + | decode_RD RC, INS + |.if JIT + | beq =>BC_JFORI + |.endif + | b =>BC_FORI + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | ldrd CARG12, [BASE] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | ldrd CARG12, [BASE] + | ldrd CARG34, [BASE, #8] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name + | .ffunc_1 name + | checktp CARG2, LJ_TISNUM + | bhs ->fff_fallback + |.endmacro + | + |.macro .ffunc_nn, name + | .ffunc_2 name + | checktp CARG2, LJ_TISNUM + | cmnlo CARG4, #-LJ_TISNUM + | bhs ->fff_fallback + |.endmacro + | + |.macro .ffunc_d, name + | .ffunc name + | ldr CARG2, [BASE, #4] + | cmp NARGS8:RC, #8 + | vldr d0, [BASE] + | blo ->fff_fallback + | checktp CARG2, LJ_TISNUM + | bhs ->fff_fallback + |.endmacro + | + |.macro .ffunc_dd, name + | .ffunc name + | ldr CARG2, [BASE, #4] + | ldr CARG4, [BASE, #12] + | cmp NARGS8:RC, #16 + | vldr d0, [BASE] + | vldr d1, [BASE, #8] + | blo ->fff_fallback + | checktp CARG2, LJ_TISNUM + | cmnlo CARG4, #-LJ_TISNUM + | bhs ->fff_fallback + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2. 
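+ |// Used by fast functions that may allocate: when gc.total has reached
+ |// gc.threshold, call ->fff_gcstep to run one GC step and reload BASE
+ |// before the fast function continues.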
+ |.macro ffgccheck + | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)] + | cmp CARG1, CARG2 + | blge ->fff_gcstep + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | checktp CARG2, LJ_TTRUE + | bhi ->fff_fallback + | ldr PC, [BASE, FRAME_PC] + | strd CARG12, [BASE, #-8] + | mov RB, BASE + | subs RA, NARGS8:RC, #8 + | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8. + | beq ->fff_res // Done if exactly 1 argument. + |1: + | ldrd CARG12, [RB, #8] + | subs RA, RA, #8 + | strd CARG12, [RB], #8 + | bne <1 + | b ->fff_res + | + |.ffunc type + | ldr CARG2, [BASE, #4] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + | checktp CARG2, LJ_TISNUM + | mvnlo CARG2, #~LJ_TISNUM + | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1 + | lsl CARG4, CARG4, #3 + | ldrd CARG12, [CFUNC:CARG3, CARG4] + | b ->fff_restv + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | checktp CARG2, LJ_TTAB + | cmnne CARG2, #-LJ_TUDATA + | bne >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | ldr TAB:RB, TAB:CARG1->metatable + |2: + | mvn CARG2, #~LJ_TNIL + | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])] + | cmp TAB:RB, #0 + | beq ->fff_restv + | ldr CARG3, TAB:RB->hmask + | ldr CARG4, STR:RC->hash + | ldr NODE:INS, TAB:RB->node + | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask + | add CARG3, CARG3, CARG3, lsl #1 + | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 + |3: // Rearranged logic, because we expect _not_ to find the key. + | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS. + | ldrd CARG12, NODE:INS->val + | ldr NODE:INS, NODE:INS->next + | checktp CARG4, LJ_TSTR + | cmpeq CARG3, STR:RC + | beq >5 + | cmp NODE:INS, #0 + | bne <3 + |4: + | mov CARG1, RB // Use metatable as default result. + | mvn CARG2, #~LJ_TTAB + | b ->fff_restv + |5: + | checktp CARG2, LJ_TNIL + | bne ->fff_restv + | b <4 + | + |6: + | checktp CARG2, LJ_TISNUM + | mvnhs CARG2, CARG2 + | movlo CARG2, #~LJ_TISNUM + | add CARG4, DISPATCH, CARG2, lsl #2 + | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])] + | b <2 + | + |.ffunc_2 setmetatable + | // Fast path: no mt for table yet and not clearing the mt. + | checktp CARG2, LJ_TTAB + | ldreq TAB:RB, TAB:CARG1->metatable + | checktpeq CARG4, LJ_TTAB + | ldrbeq CARG4, TAB:CARG1->marked + | cmpeq TAB:RB, #0 + | bne ->fff_fallback + | tst CARG4, #LJ_GC_BLACK // isblack(table) + | str TAB:CARG3, TAB:CARG1->metatable + | beq ->fff_restv + | barrierback TAB:CARG1, CARG4, CARG3 + | b ->fff_restv + | + |.ffunc rawget + | ldrd CARG34, [BASE] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | mov CARG2, CARG3 + | checktab CARG4, ->fff_fallback + | mov CARG1, L + | add CARG3, BASE, #8 + | .IOS mov RA, BASE + | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // Returns cTValue *. + | .IOS mov BASE, RA + | ldrd CARG12, [CRET1] + | b ->fff_restv + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | ldrd CARG12, [BASE] + | cmp NARGS8:RC, #8 + | bne ->fff_fallback + | checktp CARG2, LJ_TISNUM + | bls ->fff_restv + | b ->fff_fallback + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. 
+ | checktp CARG2, LJ_TSTR + | // A __tostring method in the string base metatable is ignored. + | beq ->fff_restv + | // Handle numbers inline, unless a number base metatable is present. + | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])] + | str BASE, L->base + | checktp CARG2, LJ_TISNUM + | cmpls CARG4, #0 + | str PC, SAVE_PC // Redundant (but a defined value). + | bhi ->fff_fallback + | ffgccheck + | mov CARG1, L + | mov CARG2, BASE + | bl extern lj_strfmt_number // (lua_State *L, cTValue *o) + | // Returns GCstr *. + | ldr BASE, L->base + | mvn CARG2, #~LJ_TSTR + | b ->fff_restv + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc_1 next + | mvn CARG4, #~LJ_TNIL + | checktab CARG2, ->fff_fallback + | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil. + | ldr PC, [BASE, FRAME_PC] + | mov CARG2, CARG1 + | str BASE, L->base // Add frame since C call can throw. + | mov CARG1, L + | str BASE, L->top // Dummy frame length is ok. + | add CARG3, BASE, #8 + | str PC, SAVE_PC + | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Returns 0 at end of traversal. + | .IOS ldr BASE, L->base + | cmp CRET1, #0 + | mvneq CRET2, #~LJ_TNIL + | beq ->fff_restv // End of traversal: return nil. + | ldrd CARG12, [BASE, #8] // Copy key and value to results. + | ldrd CARG34, [BASE, #16] + | mov RC, #(2+1)*8 + | strd CARG12, [BASE, #-8] + | strd CARG34, [BASE] + | b ->fff_res + | + |.ffunc_1 pairs + | checktab CARG2, ->fff_fallback +#if LJ_52 + | ldr TAB:RB, TAB:CARG1->metatable +#endif + | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0] + | ldr PC, [BASE, FRAME_PC] +#if LJ_52 + | cmp TAB:RB, #0 + | bne ->fff_fallback +#endif + | mvn CARG2, #~LJ_TNIL + | mov RC, #(3+1)*8 + | strd CFUNC:CARG34, [BASE, #-8] + | str CARG2, [BASE, #12] + | b ->fff_res + | + |.ffunc_2 ipairs_aux + | checktp CARG2, LJ_TTAB + | checktpeq CARG4, LJ_TISNUM + | bne ->fff_fallback + | ldr RB, TAB:CARG1->asize + | ldr RC, TAB:CARG1->array + | add CARG3, CARG3, #1 + | ldr PC, [BASE, FRAME_PC] + | cmp CARG3, RB + | add RC, RC, CARG3, lsl #3 + | strd CARG34, [BASE, #-8] + | ldrdlo CARG12, [RC] + | mov RC, #(0+1)*8 + | bhs >2 // Not in array part? + |1: + | checktp CARG2, LJ_TNIL + | movne RC, #(2+1)*8 + | strdne CARG12, [BASE] + | b ->fff_res + |2: // Check for empty hash part first. Otherwise call C function. + | ldr RB, TAB:CARG1->hmask + | mov CARG2, CARG3 + | cmp RB, #0 + | beq ->fff_res + | .IOS mov RA, BASE + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. + | .IOS mov BASE, RA + | cmp CRET1, #0 + | beq ->fff_res + | ldrd CARG12, [CRET1] + | b <1 + | + |.ffunc_1 ipairs + | checktab CARG2, ->fff_fallback +#if LJ_52 + | ldr TAB:RB, TAB:CARG1->metatable +#endif + | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0] + | ldr PC, [BASE, FRAME_PC] +#if LJ_52 + | cmp TAB:RB, #0 + | bne ->fff_fallback +#endif + | mov CARG1, #0 + | mvn CARG2, #~LJ_TISNUM + | mov RC, #(3+1)*8 + | strd CFUNC:CARG34, [BASE, #-8] + | strd CARG12, [BASE, #8] + | b ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall + | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + | tst RA, #HOOK_ACTIVE // Remember active hook before pcall. 
+ | mov RB, BASE + | add BASE, BASE, #8 + | moveq PC, #8+FRAME_PCALL + | movne PC, #8+FRAME_PCALLH + | sub NARGS8:RC, NARGS8:RC, #8 + | b ->vm_call_dispatch + | + |.ffunc_2 xpcall + | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)] + | checkfunc CARG4, ->fff_fallback // Traceback must be a function. + | mov RB, BASE + | strd CARG12, [BASE, #8] // Swap function and traceback. + | strd CARG34, [BASE] + | tst RA, #HOOK_ACTIVE // Remember active hook before pcall. + | add BASE, BASE, #16 + | moveq PC, #16+FRAME_PCALL + | movne PC, #16+FRAME_PCALLH + | sub NARGS8:RC, NARGS8:RC, #16 + | b ->vm_call_dispatch + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | checktp CARG2, LJ_TTHREAD + | bne ->fff_fallback + |.else + |.ffunc coroutine_wrap_aux + | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr + |.endif + | ldr PC, [BASE, FRAME_PC] + | str BASE, L->base + | ldr CARG2, L:CARG1->top + | ldrb RA, L:CARG1->status + | ldr RB, L:CARG1->base + | add CARG3, CARG2, NARGS8:RC + | add CARG4, CARG2, RA + | str PC, SAVE_PC + | cmp CARG4, RB + | beq ->fff_fallback + | ldr CARG4, L:CARG1->maxstack + | ldr RB, L:CARG1->cframe + | cmp RA, #LUA_YIELD + | cmpls CARG3, CARG4 + | cmpls RB, #0 + | bhi ->fff_fallback + |1: + |.if resume + | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC. + | add BASE, BASE, #8 + | sub NARGS8:RC, NARGS8:RC, #8 + |.endif + | str CARG3, L:CARG1->top + | str BASE, L->top + |2: // Move args to coroutine. + | ldrd CARG34, [BASE, RB] + | cmp RB, NARGS8:RC + | strdne CARG34, [CARG2, RB] + | add RB, RB, #8 + | bne <2 + | + | mov CARG3, #0 + | mov L:RA, L:CARG1 + | mov CARG4, #0 + | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | // Returns thread status. + |4: + | ldr CARG3, L:RA->base + | mv_vmstate CARG2, INTERP + | ldr CARG4, L:RA->top + | cmp CRET1, #LUA_YIELD + | ldr BASE, L->base + | str L, [DISPATCH, #DISPATCH_GL(cur_L)] + | st_vmstate CARG2 + | bhi >8 + | subs RC, CARG4, CARG3 + | ldr CARG1, L->maxstack + | add CARG2, BASE, RC + | beq >6 // No results? + | cmp CARG2, CARG1 + | mov RB, #0 + | bhi >9 // Need to grow stack? + | + | sub CARG4, RC, #8 + | str CARG3, L:RA->top // Clear coroutine stack. + |5: // Move results from coroutine. + | ldrd CARG12, [CARG3, RB] + | cmp RB, CARG4 + | strd CARG12, [BASE, RB] + | add RB, RB, #8 + | bne <5 + |6: + |.if resume + | mvn CARG3, #~LJ_TTRUE + | add RC, RC, #16 + |7: + | str CARG3, [BASE, #-4] // Prepend true/false to results. + | sub RA, BASE, #8 + |.else + | mov RA, BASE + | add RC, RC, #8 + |.endif + | ands CARG1, PC, #FRAME_TYPE + | str PC, SAVE_PC + | str RC, SAVE_MULTRES + | beq ->BC_RET_Z + | b ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | ldrd CARG12, [CARG4, #-8]! + | mvn CARG3, #~LJ_TFALSE + | mov RC, #(2+1)*8 + | str CARG4, L:RA->top // Remove error from coroutine stack. + | strd CARG12, [BASE] // Copy error message. + | b <7 + |.else + | mov CARG1, L + | mov CARG2, L:RA + | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + | // Never returns. + |.endif + | + |9: // Handle stack expansion on return from yield. 
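+ |// The RC/8 results returned by the coroutine do not fit in the resumer's
+ |// stack; grow it by that many slots and retry copying the results.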
+ | mov CARG1, L + | lsr CARG2, RC, #3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | mov CRET1, #0 + | b <4 + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | ldr CARG1, L->cframe + | add CARG2, BASE, NARGS8:RC + | str BASE, L->base + | tst CARG1, #CFRAME_RESUME + | str CARG2, L->top + | mov CRET1, #LUA_YIELD + | mov CARG3, #0 + | beq ->fff_fallback + | str CARG3, L->cframe + | strb CRET1, L->status + | b ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + |.macro math_round, func + | .ffunc_1 math_ .. func + | checktp CARG2, LJ_TISNUM + | beq ->fff_restv + | bhi ->fff_fallback + | // Round FP value and normalize result. + | lsl CARG3, CARG2, #1 + | adds RB, CARG3, #0x00200000 + | bpl >2 // |x| < 1? + | mvn CARG4, #0x3e0 + | subs RB, CARG4, RB, asr #21 + | lsl CARG4, CARG2, #11 + | lsl CARG3, CARG1, #11 + | orr CARG4, CARG4, #0x80000000 + | rsb INS, RB, #32 + | orr CARG4, CARG4, CARG1, lsr #21 + | bls >3 // |x| >= 2^31? + | orr CARG3, CARG3, CARG4, lsl INS + | lsr CARG1, CARG4, RB + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 + | addne CARG1, CARG1, #1 + |.else + | bics CARG3, CARG3, CARG2, asr #31 + | addsne CARG1, CARG1, #1 + | ldrdvs CARG12, >9 + | bvs ->fff_restv + |.endif + | cmp CARG2, #0 + | rsblt CARG1, CARG1, #0 + |1: + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |2: // |x| < 1 + | bcs ->fff_restv // |x| is not finite. + | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1 + | moveq CARG1, #0 + | mvnne CARG1, #0 + |.else + | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 0 : 1 + | moveq CARG1, #0 + | movne CARG1, #1 + |.endif + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |3: // |x| >= 2^31. Check for x == -(2^31). + | cmpeq CARG4, #0x80000000 + |.if "func" == "floor" + | cmpeq CARG3, #0 + |.endif + | bne >4 + | cmp CARG2, #0 + | movmi CARG1, #0x80000000 + | bmi <1 + |4: + | bl ->vm_..func.._sf + | b ->fff_restv + |.endmacro + | + | math_round floor + | math_round ceil + | + |.align 8 + |9: + | .long 0x00000000, 0x41e00000 // 2^31. + | + |.ffunc_1 math_abs + | checktp CARG2, LJ_TISNUM + | bhi ->fff_fallback + | bicne CARG2, CARG2, #0x80000000 + | bne ->fff_restv + | cmp CARG1, #0 + | rsbslt CARG1, CARG1, #0 + | ldrdvs CARG12, <9 + | // Fallthrough. + | + |->fff_restv: + | // CARG12 = TValue result. + | ldr PC, [BASE, FRAME_PC] + | strd CARG12, [BASE, #-8] + |->fff_res1: + | // PC = return. + | mov RC, #(1+1)*8 + |->fff_res: + | // RC = (nresults+1)*8, PC = return. + | ands CARG1, PC, #FRAME_TYPE + | ldreq INS, [PC, #-4] + | str RC, SAVE_MULTRES + | sub RA, BASE, #8 + | bne ->vm_return + | decode_RB8 RB, INS + |5: + | cmp RB, RC // More results expected? + | bhi >6 + | decode_RA8 CARG1, INS + | ins_next1 + | ins_next2 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | sub BASE, RA, CARG1 + | ins_next3 + | + |6: // Fill up results with nil. + | add CARG2, RA, RC + | mvn CARG1, #~LJ_TNIL + | add RC, RC, #8 + | str CARG1, [CARG2, #-4] + | b <5 + | + |.macro math_extern, func + |.if HFABI + | .ffunc_d math_ .. func + |.else + | .ffunc_n math_ .. func + |.endif + | .IOS mov RA, BASE + | bl extern func + | .IOS mov BASE, RA + |.if HFABI + | b ->fff_resd + |.else + | b ->fff_restv + |.endif + |.endmacro + | + |.macro math_extern2, func + |.if HFABI + | .ffunc_dd math_ .. 
func + |.else + | .ffunc_nn math_ .. func + |.endif + | .IOS mov RA, BASE + | bl extern func + | .IOS mov BASE, RA + |.if HFABI + | b ->fff_resd + |.else + | b ->fff_restv + |.endif + |.endmacro + | + |.if FPU + | .ffunc_d math_sqrt + | vsqrt.f64 d0, d0 + |->fff_resd: + | ldr PC, [BASE, FRAME_PC] + | vstr d0, [BASE, #-8] + | b ->fff_res1 + |.else + | math_extern sqrt + |.endif + | + |.ffunc math_log + |.if HFABI + | ldr CARG2, [BASE, #4] + | cmp NARGS8:RC, #8 // Need exactly 1 argument. + | vldr d0, [BASE] + | bne ->fff_fallback + |.else + | ldrd CARG12, [BASE] + | cmp NARGS8:RC, #8 // Need exactly 1 argument. + | bne ->fff_fallback + |.endif + | checktp CARG2, LJ_TISNUM + | bhs ->fff_fallback + | .IOS mov RA, BASE + | bl extern log + | .IOS mov BASE, RA + |.if HFABI + | b ->fff_resd + |.else + | b ->fff_restv + |.endif + | + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |.if HFABI + | .ffunc math_ldexp + | ldr CARG4, [BASE, #4] + | ldrd CARG12, [BASE, #8] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | vldr d0, [BASE] + | checktp CARG4, LJ_TISNUM + | bhs ->fff_fallback + | checktp CARG2, LJ_TISNUM + | bne ->fff_fallback + | .IOS mov RA, BASE + | bl extern ldexp // (double x, int exp) + | .IOS mov BASE, RA + | b ->fff_resd + |.else + |.ffunc_2 math_ldexp + | checktp CARG2, LJ_TISNUM + | bhs ->fff_fallback + | checktp CARG4, LJ_TISNUM + | bne ->fff_fallback + | .IOS mov RA, BASE + | bl extern ldexp // (double x, int exp) + | .IOS mov BASE, RA + | b ->fff_restv + |.endif + | + |.if HFABI + |.ffunc_d math_frexp + | mov CARG1, sp + | .IOS mov RA, BASE + | bl extern frexp + | .IOS mov BASE, RA + | ldr CARG3, [sp] + | mvn CARG4, #~LJ_TISNUM + | ldr PC, [BASE, FRAME_PC] + | vstr d0, [BASE, #-8] + | mov RC, #(2+1)*8 + | strd CARG34, [BASE] + | b ->fff_res + |.else + |.ffunc_n math_frexp + | mov CARG3, sp + | .IOS mov RA, BASE + | bl extern frexp + | .IOS mov BASE, RA + | ldr CARG3, [sp] + | mvn CARG4, #~LJ_TISNUM + | ldr PC, [BASE, FRAME_PC] + | strd CARG12, [BASE, #-8] + | mov RC, #(2+1)*8 + | strd CARG34, [BASE] + | b ->fff_res + |.endif + | + |.if HFABI + |.ffunc_d math_modf + | sub CARG1, BASE, #8 + | ldr PC, [BASE, FRAME_PC] + | .IOS mov RA, BASE + | bl extern modf + | .IOS mov BASE, RA + | mov RC, #(2+1)*8 + | vstr d0, [BASE] + | b ->fff_res + |.else + |.ffunc_n math_modf + | sub CARG3, BASE, #8 + | ldr PC, [BASE, FRAME_PC] + | .IOS mov RA, BASE + | bl extern modf + | .IOS mov BASE, RA + | mov RC, #(2+1)*8 + | strd CARG12, [BASE] + | b ->fff_res + |.endif + | + |.macro math_minmax, name, cond, fcond + |.if FPU + | .ffunc_1 name + | add RB, BASE, RC + | checktp CARG2, LJ_TISNUM + | add RA, BASE, #8 + | bne >4 + |1: // Handle integers. + | ldrd CARG34, [RA] + | cmp RA, RB + | bhs ->fff_restv + | checktp CARG4, LJ_TISNUM + | bne >3 + | cmp CARG1, CARG3 + | add RA, RA, #8 + | mov..cond CARG1, CARG3 + | b <1 + |3: // Convert intermediate result to number and continue below. + | vmov s4, CARG1 + | bhi ->fff_fallback + | vldr d1, [RA] + | vcvt.f64.s32 d0, s4 + | b >6 + | + |4: + | vldr d0, [BASE] + | bhi ->fff_fallback + |5: // Handle numbers. 
+ | ldrd CARG34, [RA] + | vldr d1, [RA] + | cmp RA, RB + | bhs ->fff_resd + | checktp CARG4, LJ_TISNUM + | bhs >7 + |6: + | vcmp.f64 d0, d1 + | vmrs + | add RA, RA, #8 + | vmov..fcond.f64 d0, d1 + | b <5 + |7: // Convert integer to number and continue above. + | vmov s4, CARG3 + | bhi ->fff_fallback + | vcvt.f64.s32 d1, s4 + | b <6 + | + |.else + | + | .ffunc_1 name + | checktp CARG2, LJ_TISNUM + | mov RA, #8 + | bne >4 + |1: // Handle integers. + | ldrd CARG34, [BASE, RA] + | cmp RA, RC + | bhs ->fff_restv + | checktp CARG4, LJ_TISNUM + | bne >3 + | cmp CARG1, CARG3 + | add RA, RA, #8 + | mov..cond CARG1, CARG3 + | b <1 + |3: // Convert intermediate result to number and continue below. + | bhi ->fff_fallback + | bl extern __aeabi_i2d + | ldrd CARG34, [BASE, RA] + | b >6 + | + |4: + | bhi ->fff_fallback + |5: // Handle numbers. + | ldrd CARG34, [BASE, RA] + | cmp RA, RC + | bhs ->fff_restv + | checktp CARG4, LJ_TISNUM + | bhs >7 + |6: + | bl extern __aeabi_cdcmple + | add RA, RA, #8 + | mov..fcond CARG1, CARG3 + | mov..fcond CARG2, CARG4 + | b <5 + |7: // Convert integer to number and continue above. + | bhi ->fff_fallback + | strd CARG12, TMPD + | mov CARG1, CARG3 + | bl extern __aeabi_i2d + | ldrd CARG34, TMPD + | b <6 + |.endif + |.endmacro + | + | math_minmax math_min, gt, hi + | math_minmax math_max, lt, lo + | + |//-- String library ----------------------------------------------------- + | + |.ffunc string_byte // Only handle the 1-arg case here. + | ldrd CARG12, [BASE] + | ldr PC, [BASE, FRAME_PC] + | cmp NARGS8:RC, #8 + | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument. + | bne ->fff_fallback + | ldr CARG3, STR:CARG1->len + | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end). + | mvn CARG2, #~LJ_TISNUM + | cmp CARG3, #0 + | moveq RC, #(0+1)*8 + | movne RC, #(1+1)*8 + | strd CARG12, [BASE, #-8] + | b ->fff_res + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | ldrd CARG12, [BASE] + | ldr PC, [BASE, FRAME_PC] + | cmp NARGS8:RC, #8 // Need exactly 1 argument. + | checktpeq CARG2, LJ_TISNUM + | bicseq CARG4, CARG1, #255 + | mov CARG3, #1 + | bne ->fff_fallback + | str CARG1, TMPD + | mov CARG2, TMPDp // Points to stack. Little-endian. + |->fff_newstr: + | // CARG2 = str, CARG3 = len. + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_str_new // (lua_State *L, char *str, size_t l) + |->fff_resstr: + | // Returns GCstr *. 
+ | ldr BASE, L->base + | mvn CARG2, #~LJ_TSTR + | b ->fff_restv + | + |.ffunc string_sub + | ffgccheck + | ldrd CARG12, [BASE] + | ldrd CARG34, [BASE, #16] + | cmp NARGS8:RC, #16 + | mvn RB, #0 + | beq >1 + | blo ->fff_fallback + | checktp CARG4, LJ_TISNUM + | mov RB, CARG3 + | bne ->fff_fallback + |1: + | ldrd CARG34, [BASE, #8] + | checktp CARG2, LJ_TSTR + | ldreq CARG2, STR:CARG1->len + | checktpeq CARG4, LJ_TISNUM + | bne ->fff_fallback + | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end + | add CARG4, CARG2, #1 + | cmp CARG3, #0 // if (start < 0) start += len+1 + | addlt CARG3, CARG3, CARG4 + | cmp CARG3, #1 // if (start < 1) start = 1 + | movlt CARG3, #1 + | cmp RB, #0 // if (end < 0) end += len+1 + | addlt RB, RB, CARG4 + | bic RB, RB, RB, asr #31 // if (end < 0) end = 0 + | cmp RB, CARG2 // if (end > len) end = len + | add CARG1, STR:CARG1, #sizeof(GCstr)-1 + | movgt RB, CARG2 + | add CARG2, CARG1, CARG3 + | subs CARG3, RB, CARG3 // len = end - start + | add CARG3, CARG3, #1 // len += 1 + | bge ->fff_newstr + |->fff_emptystr: + | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty) + | mvn CARG2, #~LJ_TSTR + | b ->fff_restv + | + |.macro ffstring_op, name + | .ffunc string_ .. name + | ffgccheck + | ldr CARG3, [BASE, #4] + | cmp NARGS8:RC, #8 + | ldr STR:CARG2, [BASE] + | blo ->fff_fallback + | sub SBUF:CARG1, DISPATCH, #-DISPATCH_GL(tmpbuf) + | checkstr CARG3, ->fff_fallback + | ldr CARG4, SBUF:CARG1->b + | str BASE, L->base + | str PC, SAVE_PC + | str L, SBUF:CARG1->L + | str CARG4, SBUF:CARG1->p + | bl extern lj_buf_putstr_ .. name + | bl extern lj_buf_tostr + | b ->fff_resstr + |.endmacro + | + |ffstring_op reverse + |ffstring_op lower + |ffstring_op upper + | + |//-- Bit library -------------------------------------------------------- + | + |// FP number to bit conversion for soft-float. Clobbers r0-r3. + |->vm_tobit_fb: + | bhi ->fff_fallback + |->vm_tobit: + | lsl RB, CARG2, #1 + | adds RB, RB, #0x00200000 + | movpl CARG1, #0 // |x| < 1? + | bxpl lr + | mvn CARG4, #0x3e0 + | subs RB, CARG4, RB, asr #21 + | bmi >1 // |x| >= 2^32? 
+ | lsl CARG4, CARG2, #11 + | orr CARG4, CARG4, #0x80000000 + | orr CARG4, CARG4, CARG1, lsr #21 + | cmp CARG2, #0 + | lsr CARG1, CARG4, RB + | rsblt CARG1, CARG1, #0 + | bx lr + |1: + | add RB, RB, #21 + | lsr CARG4, CARG1, RB + | rsb RB, RB, #20 + | lsl CARG1, CARG2, #12 + | cmp CARG2, #0 + | orr CARG1, CARG4, CARG1, lsl RB + | rsblt CARG1, CARG1, #0 + | bx lr + | + |.macro .ffunc_bit, name + | .ffunc_1 bit_..name + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + |.endmacro + | + |.ffunc_bit tobit + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name + | mov CARG3, CARG1 + | mov RA, #8 + |1: + | ldrd CARG12, [BASE, RA] + | cmp RA, NARGS8:RC + | add RA, RA, #8 + | bge >2 + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + | ins CARG3, CARG3, CARG1 + | b <1 + |.endmacro + | + |.ffunc_bit_op band, and + |.ffunc_bit_op bor, orr + |.ffunc_bit_op bxor, eor + | + |2: + | mvn CARG4, #~LJ_TISNUM + | ldr PC, [BASE, FRAME_PC] + | strd CARG34, [BASE, #-8] + | b ->fff_res1 + | + |.ffunc_bit bswap + | eor CARG3, CARG1, CARG1, ror #16 + | bic CARG3, CARG3, #0x00ff0000 + | ror CARG1, CARG1, #8 + | mvn CARG2, #~LJ_TISNUM + | eor CARG1, CARG1, CARG3, lsr #8 + | b ->fff_restv + | + |.ffunc_bit bnot + | mvn CARG1, CARG1 + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |.macro .ffunc_bit_sh, name, ins, shmod + | .ffunc bit_..name + | ldrd CARG12, [BASE, #8] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + |.if shmod == 0 + | and RA, CARG1, #31 + |.else + | rsb RA, CARG1, #0 + |.endif + | ldrd CARG12, [BASE] + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + | ins CARG1, CARG1, RA + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + |.endmacro + | + |.ffunc_bit_sh lshift, lsl, 0 + |.ffunc_bit_sh rshift, lsr, 0 + |.ffunc_bit_sh arshift, asr, 0 + |.ffunc_bit_sh rol, ror, 1 + |.ffunc_bit_sh ror, ror, 0 + | + |//----------------------------------------------------------------------- + | + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RC = nargs*8 + | ldr CARG3, [BASE, FRAME_FUNC] + | ldr CARG2, L->maxstack + | add CARG1, BASE, NARGS8:RC + | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC. + | str CARG1, L->top + | ldr CARG3, CFUNC:CARG3->f + | str BASE, L->base + | add CARG1, CARG1, #8*LUA_MINSTACK + | str PC, SAVE_PC // Redundant (but a defined value). + | cmp CARG1, CARG2 + | mov CARG1, L + | bhi >5 // Need to grow stack. + | blx CARG3 // (lua_State *L) + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | ldr BASE, L->base + | cmp CRET1, #0 + | lsl RC, CRET1, #3 + | sub RA, BASE, #8 + | bgt ->fff_res // Returned nresults+1? + |1: // Returned 0 or -1: retry fast path. + | ldr CARG1, L->top + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | sub NARGS8:RC, CARG1, BASE + | bne ->vm_call_tail // Returned -1? + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | ands CARG1, PC, #FRAME_TYPE + | bic CARG2, PC, #FRAME_TYPEP + | ldreq INS, [PC, #-4] + | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8. + | addeq CARG2, CARG2, #8 + | sub RB, BASE, CARG2 + | b ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | mov CARG2, #LUA_MINSTACK + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | cmp CARG1, CARG1 // Set zero-flag to force retry. 
+ | b <1 + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RC = nargs*8 + | mov RA, lr + | str BASE, L->base + | add CARG2, BASE, NARGS8:RC + | str PC, SAVE_PC // Redundant (but a defined value). + | str CARG2, L->top + | mov CARG1, L + | bl extern lj_gc_step // (lua_State *L) + | ldr BASE, L->base + | mov lr, RA // Help return address predictor. + | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] + | bx lr + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. + |.if JIT + | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] + | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent. + | bne >5 + | // Decrement the hookcount for consistency, but always do the call. + | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | tst CARG1, #HOOK_ACTIVE + | bne >1 + | sub CARG2, CARG2, #1 + | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT + | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | b >1 + |.endif + | + |->vm_rethook: // Dispatch target for return hooks. + | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] + | tst CARG1, #HOOK_ACTIVE // Hook already active? + | beq >1 + |5: // Re-dispatch to static ins. + | decode_OP OP, INS + | add OP, DISPATCH, OP, lsl #2 + | ldr pc, [OP, #GG_DISP2STATIC] + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | tst CARG1, #HOOK_ACTIVE // Hook already active? + | bne <5 + | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT + | beq <5 + | subs CARG2, CARG2, #1 + | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | beq >1 + | tst CARG1, #LUA_MASKLINE + | beq <5 + |1: + | mov CARG1, L + | str BASE, L->base + | mov CARG2, PC + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |3: + | ldr BASE, L->base + |4: // Re-dispatch to static ins. + | ldrb OP, [PC, #-4] + | ldr INS, [PC, #-4] + | add OP, DISPATCH, OP, lsl #2 + | ldr OP, [OP, #GG_DISP2STATIC] + | decode_RA8 RA, INS + | decode_RD RC, INS + | bx OP + | + |->cont_hook: // Continue from hook yield. + | ldr CARG1, [CARG4, #-24] + | add PC, PC, #4 + | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins. + | b <4 + | + |->vm_hotloop: // Hot loop counter underflow. + |.if JIT + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L). + | sub CARG1, DISPATCH, #-GG_DISP2J + | str PC, SAVE_PC + | ldr CARG3, LFUNC:CARG3->field_pc + | mov CARG2, PC + | str L, [DISPATCH, #DISPATCH_J(L)] + | ldrb CARG3, [CARG3, #PC2PROTO(framesize)] + | str BASE, L->base + | add CARG3, BASE, CARG3, lsl #3 + | str CARG3, L->top + | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc) + | b <3 + |.endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mov CARG2, PC + |.if JIT + | b >1 + |.endif + | + |->vm_hotcall: // Hot call counter underflow. + |.if JIT + | orr CARG2, PC, #1 + |1: + |.endif + | add CARG4, BASE, RC + | str PC, SAVE_PC + | mov CARG1, L + | str BASE, L->base + | sub RA, RA, BASE + | str CARG4, L->top + | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) + | // Returns ASMFunction. + | ldr BASE, L->base + | ldr CARG4, L->top + | mov CARG2, #0 + | add RA, BASE, RA + | sub NARGS8:RC, CARG4, BASE + | str CARG2, SAVE_PC // Invalidate for subsequent line hook. 
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | ldr INS, [PC, #-4] + | bx CRET1 + | + |->cont_stitch: // Trace stitching. + |.if JIT + | // RA = resultptr, CARG4 = meta base + | ldr RB, SAVE_MULTRES + | ldr INS, [PC, #-4] + | ldr CARG3, [CARG4, #-24] // Save previous trace number. + | subs RB, RB, #8 + | decode_RA8 RC, INS // Call base. + | beq >2 + |1: // Move results down. + | ldrd CARG12, [RA] + | add RA, RA, #8 + | subs RB, RB, #8 + | strd CARG12, [BASE, RC] + | add RC, RC, #8 + | bne <1 + |2: + | decode_RA8 RA, INS + | decode_RB8 RB, INS + | add RA, RA, RB + | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)] + |3: + | cmp RA, RC + | mvn CARG2, #~LJ_TNIL + | bhi >9 // More results wanted? + | + | ldr TRACE:RA, [CARG1, CARG3, lsl #2] + | cmp TRACE:RA, #0 + | beq ->cont_nop + | ldrh RC, TRACE:RA->link + | cmp RC, CARG3 + | beq ->cont_nop // Blacklisted. + | cmp RC, #0 + | bne =>BC_JLOOP // Jump to stitched trace. + | + | // Stitch a new trace to the previous trace. + | str CARG3, [DISPATCH, #DISPATCH_J(exitno)] + | str L, [DISPATCH, #DISPATCH_J(L)] + | str BASE, L->base + | sub CARG1, DISPATCH, #-GG_DISP2J + | mov CARG2, PC + | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) + | ldr BASE, L->base + | b ->cont_nop + | + |9: // Fill up results with nil. + | strd CARG12, [BASE, RC] + | add RC, RC, #8 + | b <3 + |.endif + | + |->vm_profhook: // Dispatch target for profiler hook. +#if LJ_HASPROFILE + | mov CARG1, L + | str BASE, L->base + | mov CARG2, PC + | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) + | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. + | ldr BASE, L->base + | sub PC, PC, #4 + | b ->cont_nop +#endif + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_exit_handler: + |.if JIT + | sub sp, sp, #12 + | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12} + | ldr CARG1, [sp, #64] // Load original value of lr. + | ldr DISPATCH, [lr] // Load DISPATCH. + | add CARG3, sp, #64 // Recompute original value of sp. + | mv_vmstate CARG4, EXIT + | str CARG3, [sp, #52] // Store sp in RID_SP + | st_vmstate CARG4 + | ldr CARG2, [CARG1, #-4]! // Get exit instruction. + | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC. + | str CARG1, [sp, #60] + |.if FPU + | vpush {d0-d15} + |.endif + | lsl CARG2, CARG2, #8 + | add CARG1, CARG1, CARG2, asr #6 + | ldr CARG2, [lr, #4] // Load exit stub group offset. + | sub CARG1, CARG1, lr + | ldr L, [DISPATCH, #DISPATCH_GL(cur_L)] + | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number. + | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)] + | str CARG1, [DISPATCH, #DISPATCH_J(exitno)] + | mov CARG4, #0 + | str BASE, L->base + | str L, [DISPATCH, #DISPATCH_J(L)] + | str CARG4, [DISPATCH, #DISPATCH_GL(jit_base)] + | sub CARG1, DISPATCH, #-GG_DISP2J + | mov CARG2, sp + | bl extern lj_trace_exit // (jit_State *J, ExitState *ex) + | // Returns MULTRES (unscaled) or negated error code. + | ldr CARG2, L->cframe + | ldr BASE, L->base + | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated. + | mov sp, CARG2 + | ldr PC, SAVE_PC // Get SAVE_PC. + | str L, SAVE_L // Set SAVE_L (on-trace resume/yield). + | b >1 + |.endif + |->vm_exit_interp: + | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set. + |.if JIT + | ldr L, SAVE_L + |1: + | cmp CARG1, #0 + | blt >9 // Check for error from exit. 
+ | lsl RC, CARG1, #3 + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | str RC, SAVE_MULTRES + | mov CARG3, #0 + | str BASE, L->base + | ldr CARG2, LFUNC:CARG2->field_pc + | str CARG3, [DISPATCH, #DISPATCH_GL(jit_base)] + | mv_vmstate CARG4, INTERP + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | // Modified copy of ins_next which handles function header dispatch, too. + | ldrb OP, [PC] + | mov MASKR8, #255 + | ldr INS, [PC], #4 + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | st_vmstate CARG4 + | cmp OP, #BC_FUNCC+2 // Fast function? + | bhs >4 + |2: + | cmp OP, #BC_FUNCF // Function header? + | ldr OP, [DISPATCH, OP, lsl #2] + | decode_RA8 RA, INS + | lsrlo RC, INS, #16 // No: Decode operands A*8 and D. + | subhs RC, RC, #8 + | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8 + | ldrhs CARG3, [BASE, FRAME_FUNC] + | bx OP + | + |4: // Check frame below fast function. + | ldr CARG1, [BASE, FRAME_PC] + | ands CARG2, CARG1, #FRAME_TYPE + | bne <2 // Trace stitching continuation? + | // Otherwise set KBASE for Lua function below fast function. + | ldr CARG3, [CARG1, #-4] + | decode_RA8 CARG1, CARG3 + | sub CARG2, BASE, CARG1 + | ldr LFUNC:CARG3, [CARG2, #-16] + | ldr CARG3, LFUNC:CARG3->field_pc + | ldr KBASE, [CARG3, #PC2PROTO(k)] + | b <2 + | + |9: // Rethrow error from the right C frame. + | rsb CARG2, CARG1, #0 + | mov CARG1, L + | bl extern lj_err_throw // (lua_State *L, int errcode) + |.endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// FP value rounding. Called from JIT code. + |// + |// double lj_vm_floor/ceil/trunc(double x); + |.macro vm_round, func, hf + |.if hf == 1 + | vmov CARG1, CARG2, d0 + |.endif + | lsl CARG3, CARG2, #1 + | adds RB, CARG3, #0x00200000 + | bpl >2 // |x| < 1? + | mvn CARG4, #0x3cc + | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0. + | bxlo lr // |x| >= 2^52: done. + | mvn CARG4, #1 + | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask + | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask + | subs RB, RB, #32 + | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask + | orrpl CARG3, CARG3, CARG4 + | mvnpl CARG4, #1 + | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0) + |.else + | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0) + |.endif + |.if hf == 1 + | vmoveq d0, CARG1, CARG2 + |.endif + | bxeq lr // iszero: done. + | mvn CARG4, #1 + | cmp RB, #0 + | lslpl CARG3, CARG4, RB + | mvnmi CARG3, #0 + | add RB, RB, #32 + | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask + | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry + |.if hf == 1 + | vmov d0, CARG1, CARG2 + |.endif + | bx lr + | + |2: // |x| < 1: + | bxcs lr // |x| is not finite. + | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0) + |.else + | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0) + |.endif + | mov CARG1, #0 // lo = 0 + | and CARG2, CARG2, #0x80000000 + | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 
0.0 : 1.0) + | orrne CARG2, CARG2, CARG4 + |.if hf == 1 + | vmov d0, CARG1, CARG2 + |.endif + | bx lr + |.endmacro + | + |9: + | .long 0x3ff00000 // hiword(+1.0) + | + |->vm_floor: + |.if HFABI + | vm_round floor, 1 + |.endif + |->vm_floor_sf: + | vm_round floor, 0 + | + |->vm_ceil: + |.if HFABI + | vm_round ceil, 1 + |.endif + |->vm_ceil_sf: + | vm_round ceil, 0 + | + |.macro vm_trunc, hf + |.if JIT + |.if hf == 1 + | vmov CARG1, CARG2, d0 + |.endif + | lsl CARG3, CARG2, #1 + | adds RB, CARG3, #0x00200000 + | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0. + | movpl CARG1, #0 + |.if hf == 1 + | vmovpl d0, CARG1, CARG2 + |.endif + | bxpl lr + | mvn CARG4, #0x3cc + | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0. + | bxlo lr // |x| >= 2^52: already done. + | mvn CARG4, #1 + | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask + | subs RB, RB, #32 + | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask + |.if hf == 1 + | vmov d0, CARG1, CARG2 + |.endif + | bx lr + |.endif + |.endmacro + | + |->vm_trunc: + |.if HFABI + | vm_trunc 1 + |.endif + |->vm_trunc_sf: + | vm_trunc 0 + | + | // double lj_vm_mod(double dividend, double divisor); + |->vm_mod: + |.if FPU + | // Special calling convention. Also, RC (r11) is not preserved. + | vdiv.f64 d0, d6, d7 + | mov RC, lr + | vmov CARG1, CARG2, d0 + | bl ->vm_floor_sf + | vmov d0, CARG1, CARG2 + | vmul.f64 d0, d0, d7 + | mov lr, RC + | vsub.f64 d6, d6, d0 + | bx lr + |.else + | push {r0, r1, r2, r3, r4, lr} + | bl extern __aeabi_ddiv + | bl ->vm_floor_sf + | ldrd CARG34, [sp, #8] + | bl extern __aeabi_dmul + | ldrd CARG34, [sp] + | eor CARG2, CARG2, #0x80000000 + | bl extern __aeabi_dadd + | add sp, sp, #20 + | pop {pc} + |.endif + | + | // int lj_vm_modi(int dividend, int divisor); + |->vm_modi: + | ands RB, CARG1, #0x80000000 + | rsbmi CARG1, CARG1, #0 // a = |dividend| + | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor). + | cmp CARG2, #0 + | rsbmi CARG2, CARG2, #0 // b = |divisor| + | subs CARG4, CARG2, #1 + | cmpne CARG1, CARG2 + | moveq CARG1, #0 // if (b == 1 || a == b) a = 0 + | tsthi CARG2, CARG4 + | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1 + | bls >1 + | // Use repeated subtraction to get the remainder. + | clz CARG3, CARG1 + | clz CARG4, CARG2 + | sub CARG4, CARG4, CARG3 + | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8 + | addne pc, pc, CARG3, lsl #3 // Duff's device. + | nop + { + int i; + for (i = 31; i >= 0; i--) { + | cmp CARG1, CARG2, lsl #i + | subhs CARG1, CARG1, CARG2, lsl #i + } + } + |1: + | cmp CARG1, #0 + | cmpne RB, #0 + | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b + | eors CARG2, CARG1, RB, lsl #1 + | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y + | bx lr + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Handler for callback functions. + |// Saveregs already performed. Callback slot number in [sp], g in r12. 
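For reference, the two helpers above implement Lua's floored modulo: lj_vm_mod computes x - floor(x/y)*y on doubles, and lj_vm_modi produces the same result for 32-bit integers via a magnitude remainder (the unrolled subtract ladder is a restoring division) plus a sign fixup. A plain C sketch of the intended semantics, not of the code itself; overflow corner cases are ignored:

#include <stdint.h>
#include <math.h>

static double vm_mod_ref(double x, double y)
{
  return x - floor(x / y) * y;           /* Lua's x % y for numbers. */
}

static int32_t vm_modi_ref(int32_t a, int32_t b)   /* b != 0 */
{
  int32_t r = a % b;                     /* C remainder truncates toward 0. */
  if (r != 0 && ((r ^ b) < 0)) r += b;   /* Fix the sign for floored modulo. */
  return r;
}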
+ |->vm_ffi_callback: + |.if FFI + |.type CTSTATE, CTState, PC + | ldr CTSTATE, GL:r12->ctype_state + | add DISPATCH, r12, #GG_G2DISP + |.if FPU + | str r4, SAVE_R4 + | add r4, sp, CFRAME_SPACE+4+8*8 + | vstmdb r4!, {d8-d15} + |.endif + |.if HFABI + | add r12, CTSTATE, #offsetof(CTState, cb.fpr[8]) + |.endif + | strd CARG34, CTSTATE->cb.gpr[2] + | strd CARG12, CTSTATE->cb.gpr[0] + |.if HFABI + | vstmdb r12!, {d0-d7} + |.endif + | ldr CARG4, [sp] + | add CARG3, sp, #CFRAME_SIZE + | mov CARG1, CTSTATE + | lsr CARG4, CARG4, #3 + | str CARG3, CTSTATE->cb.stack + | mov CARG2, sp + | str CARG4, CTSTATE->cb.slot + | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok. + | bl extern lj_ccallback_enter // (CTState *cts, void *cf) + | // Returns lua_State *. + | ldr BASE, L:CRET1->base + | mv_vmstate CARG2, INTERP + | ldr RC, L:CRET1->top + | mov MASKR8, #255 + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | mov L, CRET1 + | sub RC, RC, BASE + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | st_vmstate CARG2 + | ins_callt + |.endif + | + |->cont_ffi_callback: // Return from FFI callback. + |.if FFI + | ldr CTSTATE, [DISPATCH, #DISPATCH_GL(ctype_state)] + | str BASE, L->base + | str CARG4, L->top + | str L, CTSTATE->L + | mov CARG1, CTSTATE + | mov CARG2, RA + | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) + | ldrd CARG12, CTSTATE->cb.gpr[0] + |.if HFABI + | vldr d0, CTSTATE->cb.fpr[0] + |.endif + | b ->vm_leave_unw + |.endif + | + |->vm_ffi_call: // Call C function via FFI. + | // Caveat: needs special frame unwinding, see below. + |.if FFI + | .type CCSTATE, CCallState, r4 + | push {CCSTATE, r5, r11, lr} + | mov CCSTATE, CARG1 + | ldr CARG1, CCSTATE:CARG1->spadj + | ldrb CARG2, CCSTATE->nsp + | add CARG3, CCSTATE, #offsetof(CCallState, stack) + |.if HFABI + | add RB, CCSTATE, #offsetof(CCallState, fpr[0]) + |.endif + | mov r11, sp + | sub sp, sp, CARG1 // Readjust stack. + | subs CARG2, CARG2, #1 + |.if HFABI + | vldm RB, {d0-d7} + |.endif + | ldr RB, CCSTATE->func + | bmi >2 + |1: // Copy stack slots. + | ldr CARG4, [CARG3, CARG2, lsl #2] + | str CARG4, [sp, CARG2, lsl #2] + | subs CARG2, CARG2, #1 + | bpl <1 + |2: + | ldrd CARG12, CCSTATE->gpr[0] + | ldrd CARG34, CCSTATE->gpr[2] + | blx RB + | mov sp, r11 + |.if HFABI + | add r12, CCSTATE, #offsetof(CCallState, fpr[4]) + |.endif + | strd CRET1, CCSTATE->gpr[0] + |.if HFABI + | vstmdb r12!, {d0-d3} + |.endif + | pop {CCSTATE, r5, r11, pc} + |.endif + |// Note: vm_ffi_call must be the last function in this object file! + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1*8, RC = src2, JMP with RC = target + | lsl RC, RC, #3 + | ldrd CARG12, [RA, BASE]! + | ldrh RB, [PC, #2] + | ldrd CARG34, [RC, BASE]! + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TISNUM + | bne >3 + | checktp CARG4, LJ_TISNUM + | bne >4 + | cmp CARG1, CARG3 + if (op == BC_ISLT) { + | sublt PC, RB, #0x20000 + } else if (op == BC_ISGE) { + | subge PC, RB, #0x20000 + } else if (op == BC_ISLE) { + | suble PC, RB, #0x20000 + } else { + | subgt PC, RB, #0x20000 + } + |1: + | ins_next + | + |3: // CARG12 is not an integer. 
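The comparison ops above, and every other branching op in this file, compute the taken target with the recurring `sub PC, RB, #0x20000` pattern: the 16-bit jump operand is biased by 0x8000 (BCBIAS_J) and scaled to bytes, so 0x20000 is simply the bias times the 4-byte instruction size. A one-line C sketch of the same computation on a plain bytecode array (the helper name is made up):

#include <stdint.h>

#define BCBIAS_J 0x8000   /* Jump offset bias, as in lj_bc.h. */

/* Sketch: pc already points past the branch ins, rd is its 16-bit D operand. */
static const uint32_t *branch_target(const uint32_t *pc, uint32_t rd)
{
  return pc + (int32_t)(rd - BCBIAS_J);  /* 0x20000 bytes == BCBIAS_J * 4. */
}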
+ |.if FPU + | vldr d0, [RA] + | bhi ->vmeta_comp + | // d0 is a number. + | checktp CARG4, LJ_TISNUM + | vldr d1, [RC] + | blo >5 + | bhi ->vmeta_comp + | // d0 is a number, CARG3 is an integer. + | vmov s4, CARG3 + | vcvt.f64.s32 d1, s4 + | b >5 + |4: // CARG1 is an integer, CARG34 is not an integer. + | vldr d1, [RC] + | bhi ->vmeta_comp + | // CARG1 is an integer, d1 is a number. + | vmov s4, CARG1 + | vcvt.f64.s32 d0, s4 + |5: // d0 and d1 are numbers. + | vcmp.f64 d0, d1 + | vmrs + | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + if (op == BC_ISLT) { + | sublo PC, RB, #0x20000 + } else if (op == BC_ISGE) { + | subhs PC, RB, #0x20000 + } else if (op == BC_ISLE) { + | subls PC, RB, #0x20000 + } else { + | subhi PC, RB, #0x20000 + } + | b <1 + |.else + | bhi ->vmeta_comp + | // CARG12 is a number. + | checktp CARG4, LJ_TISNUM + | movlo RA, RB // Save RB. + | blo >5 + | bhi ->vmeta_comp + | // CARG12 is a number, CARG3 is an integer. + | mov CARG1, CARG3 + | mov RC, RA + | mov RA, RB // Save RB. + | bl extern __aeabi_i2d + | mov CARG3, CARG1 + | mov CARG4, CARG2 + | ldrd CARG12, [RC] // Restore first operand. + | b >5 + |4: // CARG1 is an integer, CARG34 is not an integer. + | bhi ->vmeta_comp + | // CARG1 is an integer, CARG34 is a number. + | mov RA, RB // Save RB. + | bl extern __aeabi_i2d + | ldrd CARG34, [RC] // Restore second operand. + |5: // CARG12 and CARG34 are numbers. + | bl extern __aeabi_cdcmple + | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + if (op == BC_ISLT) { + | sublo PC, RA, #0x20000 + } else if (op == BC_ISGE) { + | subhs PC, RA, #0x20000 + } else if (op == BC_ISLE) { + | subls PC, RA, #0x20000 + } else { + | subhi PC, RA, #0x20000 + } + | b <1 + |.endif + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | // RA = src1*8, RC = src2, JMP with RC = target + | lsl RC, RC, #3 + | ldrd CARG12, [RA, BASE]! + | ldrh RB, [PC, #2] + | ldrd CARG34, [RC, BASE]! + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TISNUM + | cmnls CARG4, #-LJ_TISNUM + if (vk) { + | bls ->BC_ISEQN_Z + } else { + | bls ->BC_ISNEN_Z + } + | // Either or both types are not numbers. + |.if FFI + | checktp CARG2, LJ_TCDATA + | checktpne CARG4, LJ_TCDATA + | beq ->vmeta_equal_cd + |.endif + | cmp CARG2, CARG4 // Compare types. + | bne >2 // Not the same type? + | checktp CARG2, LJ_TISPRI + | bhs >1 // Same type and primitive type? + | + | // Same types and not a primitive type. Compare GCobj or pvalue. + | cmp CARG1, CARG3 + if (vk) { + | bne >3 // Different GCobjs or pvalues? + |1: // Branch if same. + | sub PC, RB, #0x20000 + |2: // Different. + | ins_next + |3: + | checktp CARG2, LJ_TISTABUD + | bhi <2 // Different objects and not table/ud? + } else { + | beq >1 // Same GCobjs or pvalues? + | checktp CARG2, LJ_TISTABUD + | bhi >2 // Different objects and not table/ud? + } + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | ldr TAB:RA, TAB:CARG1->metatable + | cmp TAB:RA, #0 + if (vk) { + | beq <2 // No metatable? + } else { + | beq >2 // No metatable? + } + | ldrb RA, TAB:RA->nomm + | mov CARG4, #1-vk // ne = 0 or 1. + | mov CARG2, CARG1 + | tst RA, #1<vmeta_equal // 'no __eq' flag not set? + if (vk) { + | b <2 + } else { + |2: // Branch if different. + | sub PC, RB, #0x20000 + |1: // Same. 
+ | ins_next + } + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | // RA = src*8, RC = str_const (~), JMP with RC = target + | mvn RC, RC + | ldrd CARG12, [BASE, RA] + | ldrh RB, [PC, #2] + | ldr STR:CARG3, [KBASE, RC, lsl #2] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TSTR + |.if FFI + | bne >7 + | cmp CARG1, CARG3 + |.else + | cmpeq CARG1, CARG3 + |.endif + if (vk) { + | subeq PC, RB, #0x20000 + |1: + } else { + |1: + | subne PC, RB, #0x20000 + } + | ins_next + | + |.if FFI + |7: + | checktp CARG2, LJ_TCDATA + | bne <1 + | b ->vmeta_equal_cd + |.endif + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | // RA = src*8, RC = num_const (~), JMP with RC = target + | lsl RC, RC, #3 + | ldrd CARG12, [RA, BASE]! + | ldrh RB, [PC, #2] + | ldrd CARG34, [RC, KBASE]! + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + if (vk) { + |->BC_ISEQN_Z: + } else { + |->BC_ISNEN_Z: + } + | checktp CARG2, LJ_TISNUM + | bne >3 + | checktp CARG4, LJ_TISNUM + | bne >4 + | cmp CARG1, CARG3 + if (vk) { + | subeq PC, RB, #0x20000 + |1: + } else { + |1: + | subne PC, RB, #0x20000 + } + |2: + | ins_next + | + |3: // CARG12 is not an integer. + |.if FFI + | bhi >7 + |.else + if (!vk) { + | subhi PC, RB, #0x20000 + } + | bhi <2 + |.endif + |.if FPU + | checktp CARG4, LJ_TISNUM + | vmov s4, CARG3 + | vldr d0, [RA] + | vldrlo d1, [RC] + | vcvths.f64.s32 d1, s4 + | b >5 + |4: // CARG1 is an integer, d1 is a number. + | vmov s4, CARG1 + | vldr d1, [RC] + | vcvt.f64.s32 d0, s4 + |5: // d0 and d1 are numbers. + | vcmp.f64 d0, d1 + | vmrs + if (vk) { + | subeq PC, RB, #0x20000 + } else { + | subne PC, RB, #0x20000 + } + | b <2 + |.else + | // CARG12 is a number. + | checktp CARG4, LJ_TISNUM + | movlo RA, RB // Save RB. + | blo >5 + | // CARG12 is a number, CARG3 is an integer. + | mov CARG1, CARG3 + | mov RC, RA + |4: // CARG1 is an integer, CARG34 is a number. + | mov RA, RB // Save RB. + | bl extern __aeabi_i2d + | ldrd CARG34, [RC] // Restore other operand. + |5: // CARG12 and CARG34 are numbers. 
+ | bl extern __aeabi_cdcmpeq + if (vk) { + | subeq PC, RA, #0x20000 + } else { + | subne PC, RA, #0x20000 + } + | b <2 + |.endif + | + |.if FFI + |7: + | checktp CARG2, LJ_TCDATA + | bne <1 + | b ->vmeta_equal_cd + |.endif + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | // RA = src*8, RC = primitive_type (~), JMP with RC = target + | ldrd CARG12, [BASE, RA] + | ldrh RB, [PC, #2] + | add PC, PC, #4 + | mvn RC, RC + | add RB, PC, RB, lsl #2 + |.if FFI + | checktp CARG2, LJ_TCDATA + | beq ->vmeta_equal_cd + |.endif + | cmp CARG2, RC + if (vk) { + | subeq PC, RB, #0x20000 + } else { + | subne PC, RB, #0x20000 + } + | ins_next + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | // RA = dst*8 or unused, RC = src, JMP with RC = target + | add RC, BASE, RC, lsl #3 + | ldrh RB, [PC, #2] + | ldrd CARG12, [RC] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TTRUE + if (op == BC_ISTC || op == BC_IST) { + | subls PC, RB, #0x20000 + if (op == BC_ISTC) { + | strdls CARG12, [BASE, RA] + } + } else { + | subhi PC, RB, #0x20000 + if (op == BC_ISFC) { + | strdhi CARG12, [BASE, RA] + } + } + | ins_next + break; + + case BC_ISTYPE: + | // RA = src*8, RC = -type + | ldrd CARG12, [BASE, RA] + | ins_next1 + | cmn CARG2, RC + | ins_next2 + | bne ->vmeta_istype + | ins_next3 + break; + case BC_ISNUM: + | // RA = src*8, RC = -(TISNUM-1) + | ldrd CARG12, [BASE, RA] + | ins_next1 + | checktp CARG2, LJ_TISNUM + | ins_next2 + | bhs ->vmeta_istype + | ins_next3 + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | // RA = dst*8, RC = src + | lsl RC, RC, #3 + | ins_next1 + | ldrd CARG12, [BASE, RC] + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_NOT: + | // RA = dst*8, RC = src + | add RC, BASE, RC, lsl #3 + | ins_next1 + | ldr CARG1, [RC, #4] + | add RA, BASE, RA + | ins_next2 + | checktp CARG1, LJ_TTRUE + | mvnls CARG2, #~LJ_TFALSE + | mvnhi CARG2, #~LJ_TTRUE + | str CARG2, [RA, #4] + | ins_next3 + break; + case BC_UNM: + | // RA = dst*8, RC = src + | lsl RC, RC, #3 + | ldrd CARG12, [BASE, RC] + | ins_next1 + | ins_next2 + | checktp CARG2, LJ_TISNUM + | bhi ->vmeta_unm + | eorne CARG2, CARG2, #0x80000000 + | bne >5 + | rsbseq CARG1, CARG1, #0 + | ldrdvs CARG12, >9 + |5: + | strd CARG12, [BASE, RA] + | ins_next3 + | + |.align 8 + |9: + | .long 0x00000000, 0x41e00000 // 2^31. + break; + case BC_LEN: + | // RA = dst*8, RC = src + | lsl RC, RC, #3 + | ldrd CARG12, [BASE, RC] + | checkstr CARG2, >2 + | ldr CARG1, STR:CARG1->len + |1: + | mvn CARG2, #~LJ_TISNUM + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |2: + | checktab CARG2, ->vmeta_len +#if LJ_52 + | ldr TAB:CARG3, TAB:CARG1->metatable + | cmp TAB:CARG3, #0 + | bne >9 + |3: +#endif + |->BC_LEN_Z: + | .IOS mov RC, BASE + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). 
+ | .IOS mov BASE, RC + | b <1 +#if LJ_52 + |9: + | ldrb CARG4, TAB:CARG3->nomm + | tst CARG4, #1<vmeta_len +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithcheck, cond, ncond, target + ||if (vk == 1) { + | cmn CARG4, #-LJ_TISNUM + | cmn..cond CARG2, #-LJ_TISNUM + ||} else { + | cmn CARG2, #-LJ_TISNUM + | cmn..cond CARG4, #-LJ_TISNUM + ||} + | b..ncond target + |.endmacro + |.macro ins_arithcheck_int, target + | ins_arithcheck eq, ne, target + |.endmacro + |.macro ins_arithcheck_num, target + | ins_arithcheck lo, hs, target + |.endmacro + | + |.macro ins_arithpre + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | .if FPU + | ldrd CARG12, [RB, BASE]! + | ldrd CARG34, [RC, KBASE]! + | .else + | ldrd CARG12, [BASE, RB] + | ldrd CARG34, [KBASE, RC] + | .endif + || break; + ||case 1: + | .if FPU + | ldrd CARG34, [RB, BASE]! + | ldrd CARG12, [RC, KBASE]! + | .else + | ldrd CARG34, [BASE, RB] + | ldrd CARG12, [KBASE, RC] + | .endif + || break; + ||default: + | .if FPU + | ldrd CARG12, [RB, BASE]! + | ldrd CARG34, [RC, BASE]! + | .else + | ldrd CARG12, [BASE, RB] + | ldrd CARG34, [BASE, RC] + | .endif + || break; + ||} + |.endmacro + | + |.macro ins_arithpre_fpu, reg1, reg2 + |.if FPU + ||if (vk == 1) { + | vldr reg2, [RB] + | vldr reg1, [RC] + ||} else { + | vldr reg1, [RB] + | vldr reg2, [RC] + ||} + |.endif + |.endmacro + | + |.macro ins_arithpost_fpu, reg + | ins_next1 + | add RA, BASE, RA + | ins_next2 + | vstr reg, [RA] + | ins_next3 + |.endmacro + | + |.macro ins_arithfallback, ins + ||switch (vk) { + ||case 0: + | ins ->vmeta_arith_vn + || break; + ||case 1: + | ins ->vmeta_arith_nv + || break; + ||default: + | ins ->vmeta_arith_vv + || break; + ||} + |.endmacro + | + |.macro ins_arithdn, intins, fpins, fpcall + | ins_arithpre + |.if "intins" ~= "vm_modi" and not FPU + | ins_next1 + |.endif + | ins_arithcheck_int >5 + |.if "intins" == "smull" + | smull CARG1, RC, CARG3, CARG1 + | cmp RC, CARG1, asr #31 + | ins_arithfallback bne + |.elif "intins" == "vm_modi" + | movs CARG2, CARG3 + | ins_arithfallback beq + | bl ->vm_modi + | mvn CARG2, #~LJ_TISNUM + |.else + | intins CARG1, CARG1, CARG3 + | ins_arithfallback bvs + |.endif + |4: + |.if "intins" == "vm_modi" or FPU + | ins_next1 + |.endif + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |5: // FP variant. 
+ | ins_arithpre_fpu d6, d7 + | ins_arithfallback ins_arithcheck_num + |.if FPU + |.if "intins" == "vm_modi" + | bl fpcall + |.else + | fpins d6, d6, d7 + |.endif + | ins_arithpost_fpu d6 + |.else + | bl fpcall + |.if "intins" ~= "vm_modi" + | ins_next1 + |.endif + | b <4 + |.endif + |.endmacro + | + |.macro ins_arithfp, fpins, fpcall + | ins_arithpre + |.if "fpins" ~= "extern" or HFABI + | ins_arithpre_fpu d0, d1 + |.endif + | ins_arithfallback ins_arithcheck_num + |.if "fpins" == "extern" + | .IOS mov RC, BASE + | bl fpcall + | .IOS mov BASE, RC + |.elif FPU + | fpins d0, d0, d1 + |.else + | bl fpcall + |.endif + |.if ("fpins" ~= "extern" or HFABI) and FPU + | ins_arithpost_fpu d0 + |.else + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |.endif + |.endmacro + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arithdn adds, vadd.f64, extern __aeabi_dadd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arithdn subs, vsub.f64, extern __aeabi_dsub + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arithdn smull, vmul.f64, extern __aeabi_dmul + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arithfp vdiv.f64, extern __aeabi_ddiv + break; + case BC_MODVN: case BC_MODNV: case BC_MODVV: + | ins_arithdn vm_modi, vm_mod, ->vm_mod + break; + case BC_POW: + | // NYI: (partial) integer arithmetic. + | ins_arithfp extern, extern pow + break; + + case BC_CAT: + | decode_RB8 RC, INS + | decode_RC8 RB, INS + | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!) + | sub CARG3, RB, RC + | str BASE, L->base + | add CARG2, BASE, RB + |->BC_CAT_Z: + | // RA = dst*8, RC = src_start*8, CARG2 = top-1 + | mov CARG1, L + | str PC, SAVE_PC + | lsr CARG3, CARG3, #3 + | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // Returns NULL (finished) or TValue * (metamethod). + | ldr BASE, L->base + | cmp CRET1, #0 + | bne ->vmeta_binop + | ldrd CARG34, [BASE, RC] + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] // Copy result to RA. + | ins_next3 + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | // RA = dst*8, RC = str_const (~) + | mvn RC, RC + | ins_next1 + | ldr CARG1, [KBASE, RC, lsl #2] + | mvn CARG2, #~LJ_TSTR + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_KCDATA: + |.if FFI + | // RA = dst*8, RC = cdata_const (~) + | mvn RC, RC + | ins_next1 + | ldr CARG1, [KBASE, RC, lsl #2] + | mvn CARG2, #~LJ_TCDATA + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |.endif + break; + case BC_KSHORT: + | // RA = dst*8, (RC = int16_literal) + | mov CARG1, INS, asr #16 // Refetch sign-extended reg. 
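The ins_arithdn macro above keeps a result on the integer fast path only when it provably fits: adds/subs fall back to the FP/metamethod path on the V flag, and multiplication uses smull and compares the high word of the 64-bit product with the sign-extended low word. A hedged C equivalent of that multiply check:

#include <stdint.h>

/* Sketch: 32-bit multiply with the same overflow test as the smull/cmp pair.
** Returns 0 (and leaves *res alone) when the product does not fit. */
static int mul_int32_checked(int32_t a, int32_t b, int32_t *res)
{
  int64_t p = (int64_t)a * (int64_t)b;
  if (p != (int64_t)(int32_t)p) return 0;  /* High word != low word asr #31. */
  *res = (int32_t)p;
  return 1;
}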
+ | mvn CARG2, #~LJ_TISNUM + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_KNUM: + | // RA = dst*8, RC = num_const + | lsl RC, RC, #3 + | ins_next1 + | ldrd CARG12, [KBASE, RC] + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_KPRI: + | // RA = dst*8, RC = primitive_type (~) + | add RA, BASE, RA + | mvn RC, RC + | ins_next1 + | ins_next2 + | str RC, [RA, #4] + | ins_next3 + break; + case BC_KNIL: + | // RA = base*8, RC = end + | add RA, BASE, RA + | add RC, BASE, RC, lsl #3 + | mvn CARG1, #~LJ_TNIL + | str CARG1, [RA, #4] + | add RA, RA, #8 + |1: + | str CARG1, [RA, #4] + | cmp RA, RC + | add RA, RA, #8 + | blt <1 + | ins_next_ + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | // RA = dst*8, RC = uvnum + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsl RC, RC, #2 + | add RC, RC, #offsetof(GCfuncL, uvptr) + | ldr UPVAL:CARG2, [LFUNC:CARG2, RC] + | ldr CARG2, UPVAL:CARG2->v + | ldrd CARG34, [CARG2] + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + break; + case BC_USETV: + | // RA = uvnum*8, RC = src + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | lsl RC, RC, #3 + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | ldrd CARG34, [BASE, RC] + | ldrb RB, UPVAL:CARG2->marked + | ldrb RC, UPVAL:CARG2->closed + | ldr CARG2, UPVAL:CARG2->v + | tst RB, #LJ_GC_BLACK // isblack(uv) + | add RB, CARG4, #-LJ_TISGCV + | cmpne RC, #0 + | strd CARG34, [CARG2] + | bne >2 // Upvalue is closed and black? + |1: + | ins_next + | + |2: // Check if new value is collectable. + | cmn RB, #-(LJ_TNUMX - LJ_TISGCV) + | ldrbhi RC, GCOBJ:CARG3->gch.marked + | bls <1 // tvisgcv(v) + | sub CARG1, DISPATCH, #-GG_DISP2G + | tst RC, #LJ_GC_WHITES + | // Crossed a write barrier. Move the barrier forward. + |.if IOS + | beq <1 + | mov RC, BASE + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | mov BASE, RC + |.else + | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv) + |.endif + | b <1 + break; + case BC_USETS: + | // RA = uvnum*8, RC = str_const (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | mvn RC, RC + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | ldr STR:CARG3, [KBASE, RC, lsl #2] + | ldrb RB, UPVAL:CARG2->marked + | ldrb RC, UPVAL:CARG2->closed + | ldr CARG2, UPVAL:CARG2->v + | mvn CARG4, #~LJ_TSTR + | tst RB, #LJ_GC_BLACK // isblack(uv) + | ldrb RB, STR:CARG3->marked + | strd CARG34, [CARG2] + | bne >2 + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | tst RB, #LJ_GC_WHITES // iswhite(str) + | cmpne RC, #0 + | sub CARG1, DISPATCH, #-GG_DISP2G + | // Crossed a write barrier. Move the barrier forward. 
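The upvalue stores above (BC_USETV/BC_USETS) only take the lj_gc_barrieruv slow path when the upvalue is closed, its object is black, and the stored value is a collectable object that is still white. A C sketch of that predicate; the mark bits follow lj_obj.h, while the flattened parameters are stand-ins for the real object fields:

#include <stdint.h>

#define LJ_GC_WHITES  0x03   /* LJ_GC_WHITE0 | LJ_GC_WHITE1 */
#define LJ_GC_BLACK   0x04

/* Sketch: does this upvalue store have to move the write barrier forward? */
static int usetv_needs_barrier(uint8_t uv_marked, uint8_t uv_closed,
                               int val_is_collectable, uint8_t val_marked)
{
  return (uv_marked & LJ_GC_BLACK) != 0 && uv_closed != 0 &&
         val_is_collectable && (val_marked & LJ_GC_WHITES) != 0;
}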
+ |.if IOS + | beq <1 + | mov RC, BASE + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | mov BASE, RC + |.else + | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv) + |.endif + | b <1 + break; + case BC_USETN: + | // RA = uvnum*8, RC = num_const + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | lsl RC, RC, #3 + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | ldrd CARG34, [KBASE, RC] + | ldr CARG2, UPVAL:CARG2->v + | ins_next1 + | ins_next2 + | strd CARG34, [CARG2] + | ins_next3 + break; + case BC_USETP: + | // RA = uvnum*8, RC = primitive_type (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | mvn RC, RC + | ldr CARG2, UPVAL:CARG2->v + | ins_next1 + | ins_next2 + | str RC, [CARG2, #4] + | ins_next3 + break; + + case BC_UCLO: + | // RA = level*8, RC = target + | ldr CARG3, L->openupval + | add RC, PC, RC, lsl #2 + | str BASE, L->base + | cmp CARG3, #0 + | sub PC, RC, #0x20000 + | beq >1 + | mov CARG1, L + | add CARG2, BASE, RA + | bl extern lj_func_closeuv // (lua_State *L, TValue *level) + | ldr BASE, L->base + |1: + | ins_next + break; + + case BC_FNEW: + | // RA = dst*8, RC = proto_const (~) (holding function prototype) + | mvn RC, RC + | str BASE, L->base + | ldr CARG2, [KBASE, RC, lsl #2] + | str PC, SAVE_PC + | ldr CARG3, [BASE, FRAME_FUNC] + | mov CARG1, L + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | bl extern lj_func_newL_gc + | // Returns GCfuncL *. + | ldr BASE, L->base + | mvn CARG2, #~LJ_TFUNC + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + | // RA = dst*8, RC = (hbits|asize) | tab_const (~) + if (op == BC_TDUP) { + | mvn RC, RC + } + | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)] + | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)] + | str BASE, L->base + | str PC, SAVE_PC + | cmp CARG3, CARG4 + | mov CARG1, L + | bhs >5 + |1: + if (op == BC_TNEW) { + | lsl CARG2, RC, #21 + | lsr CARG3, RC, #11 + | asr RC, CARG2, #21 + | lsr CARG2, CARG2, #21 + | cmn RC, #1 + | addeq CARG2, CARG2, #2 + | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Returns GCtab *. + } else { + | ldr CARG2, [KBASE, RC, lsl #2] + | bl extern lj_tab_dup // (lua_State *L, Table *kt) + | // Returns GCtab *. + } + | ldr BASE, L->base + | mvn CARG2, #~LJ_TTAB + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |5: + | bl extern lj_gc_step_fixtop // (lua_State *L) + | mov CARG1, L + | b <1 + break; + + case BC_GGET: + | // RA = dst*8, RC = str_const (~) + case BC_GSET: + | // RA = dst*8, RC = str_const (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | mvn RC, RC + | ldr TAB:CARG1, LFUNC:CARG2->env + | ldr STR:RC, [KBASE, RC, lsl #2] + if (op == BC_GGET) { + | b ->BC_TGETS_Z + } else { + | b ->BC_TSETS_Z + } + break; + + case BC_TGETV: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = dst*8, RB = table*8, RC = key*8 + | ldrd TAB:CARG12, [BASE, RB] + | ldrd CARG34, [BASE, RC] + | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12. + | checktp CARG4, LJ_TISNUM // Integer key? + | ldreq CARG4, TAB:CARG1->array + | ldreq CARG2, TAB:CARG1->asize + | bne >9 + | + | add CARG4, CARG4, CARG3, lsl #3 + | cmp CARG3, CARG2 // In array part? + | ldrdlo CARG34, [CARG4] + | bhs ->vmeta_tgetv + | ins_next1 // Overwrites RB! 
+ | checktp CARG4, LJ_TNIL + | beq >5 + |1: + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG2, TAB:CARG1->metatable + | cmp TAB:CARG2, #0 + | beq <1 // No metatable: done. + | ldrb CARG2, TAB:CARG2->nomm + | tst CARG2, #1<vmeta_tgetv + | + |9: + | checktp CARG4, LJ_TSTR // String key? + | moveq STR:RC, CARG3 + | beq ->BC_TGETS_Z + | b ->vmeta_tgetv + break; + case BC_TGETS: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = dst*8, RB = table*8, RC = str_const (~) + | ldrd CARG12, [BASE, RB] + | mvn RC, RC + | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC. + | checktab CARG2, ->vmeta_tgets1 + |->BC_TGETS_Z: + | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 + | ldr CARG3, TAB:CARG1->hmask + | ldr CARG4, STR:RC->hash + | ldr NODE:INS, TAB:CARG1->node + | mov TAB:RB, TAB:CARG1 + | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask + | add CARG3, CARG3, CARG3, lsl #1 + | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 + |1: + | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS. + | ldrd CARG34, NODE:INS->val + | ldr NODE:INS, NODE:INS->next + | checktp CARG2, LJ_TSTR + | cmpeq CARG1, STR:RC + | bne >4 + | checktp CARG4, LJ_TNIL + | beq >5 + |3: + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |4: // Follow hash chain. + | cmp NODE:INS, #0 + | bne <1 + | // End of hash chain: key not found, nil result. + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG1, TAB:RB->metatable + | mov CARG3, #0 // Optional clear of undef. value (during load stall). + | mvn CARG4, #~LJ_TNIL + | cmp TAB:CARG1, #0 + | beq <3 // No metatable: done. + | ldrb CARG2, TAB:CARG1->nomm + | tst CARG2, #1<vmeta_tgets + break; + case BC_TGETB: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = dst*8, RB = table*8, RC = index + | ldrd CARG12, [BASE, RB] + | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12. + | ldr CARG3, TAB:CARG1->asize + | ldr CARG4, TAB:CARG1->array + | lsl CARG2, RC, #3 + | cmp RC, CARG3 + | ldrdlo CARG34, [CARG4, CARG2] + | bhs ->vmeta_tgetb + | ins_next1 // Overwrites RB! + | checktp CARG4, LJ_TNIL + | beq >5 + |1: + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG2, TAB:CARG1->metatable + | cmp TAB:CARG2, #0 + | beq <1 // No metatable: done. + | ldrb CARG2, TAB:CARG2->nomm + | tst CARG2, #1<vmeta_tgetb + break; + case BC_TGETR: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = dst*8, RB = table*8, RC = key*8 + | ldr TAB:CARG1, [BASE, RB] + | ldr CARG2, [BASE, RC] + | ldr CARG4, TAB:CARG1->array + | ldr CARG3, TAB:CARG1->asize + | add CARG4, CARG4, CARG2, lsl #3 + | cmp CARG2, CARG3 // In array part? + | bhs ->vmeta_tgetr + | ldrd CARG12, [CARG4] + |->BC_TGETR_Z: + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + + case BC_TSETV: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = src*8, RB = table*8, RC = key*8 + | ldrd TAB:CARG12, [BASE, RB] + | ldrd CARG34, [BASE, RC] + | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12. + | checktp CARG4, LJ_TISNUM // Integer key? + | ldreq CARG2, TAB:CARG1->array + | ldreq CARG4, TAB:CARG1->asize + | bne >9 + | + | add CARG2, CARG2, CARG3, lsl #3 + | cmp CARG3, CARG4 // In array part? + | ldrlo INS, [CARG2, #4] + | bhs ->vmeta_tsetv + | ins_next1 // Overwrites RB! 
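->BC_TGETS_Z above is a textbook open-hash lookup: the string's precomputed hash masked with hmask selects the main-position node (24 bytes each, hence the idx*3*8 addressing), interned string keys are compared by pointer identity, and the next links are followed on a miss. A simplified C sketch; the structs are illustrative stand-ins for GCtab/Node, not the real layouts:

#include <stdint.h>
#include <stddef.h>

typedef struct MiniStr { uint32_t hash; } MiniStr;
typedef struct MiniNode {
  const MiniStr *key;       /* Interned string key, or NULL. */
  int64_t val;              /* Stand-in for a TValue. */
  struct MiniNode *next;    /* Hash chain. */
} MiniNode;
typedef struct MiniTab { MiniNode *node; uint32_t hmask; } MiniTab;

/* Sketch of the lookup loop in ->BC_TGETS_Z. */
static MiniNode *tab_getstr(MiniTab *t, const MiniStr *k)
{
  MiniNode *n = &t->node[k->hash & t->hmask];  /* Main position. */
  do {
    if (n->key == k) return n;                 /* Interned: pointer compare. */
    n = n->next;
  } while (n != NULL);
  return NULL;                                 /* Not found: nil result. */
}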
+ | checktp INS, LJ_TNIL + | ldrb INS, TAB:CARG1->marked + | ldrd CARG34, [BASE, RA] + | beq >5 + |1: + | tst INS, #LJ_GC_BLACK // isblack(table) + | strd CARG34, [CARG2] + | bne >7 + |2: + | ins_next2 + | ins_next3 + | + |5: // Check for __newindex if previous value is nil. + | ldr TAB:RA, TAB:CARG1->metatable + | cmp TAB:RA, #0 + | beq <1 // No metatable: done. + | ldrb RA, TAB:RA->nomm + | tst RA, #1<vmeta_tsetv + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG1, INS, CARG3 + | b <2 + | + |9: + | checktp CARG4, LJ_TSTR // String key? + | moveq STR:RC, CARG3 + | beq ->BC_TSETS_Z + | b ->vmeta_tsetv + break; + case BC_TSETS: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = src*8, RB = table*8, RC = str_const (~) + | ldrd CARG12, [BASE, RB] + | mvn RC, RC + | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC. + | checktab CARG2, ->vmeta_tsets1 + |->BC_TSETS_Z: + | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 + | ldr CARG3, TAB:CARG1->hmask + | ldr CARG4, STR:RC->hash + | ldr NODE:INS, TAB:CARG1->node + | mov TAB:RB, TAB:CARG1 + | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask + | add CARG3, CARG3, CARG3, lsl #1 + | mov CARG4, #0 + | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 + | strb CARG4, TAB:RB->nomm // Clear metamethod cache. + |1: + | ldrd CARG12, NODE:INS->key + | ldr CARG4, NODE:INS->val.it + | ldr NODE:CARG3, NODE:INS->next + | checktp CARG2, LJ_TSTR + | cmpeq CARG1, STR:RC + | bne >5 + | ldrb CARG2, TAB:RB->marked + | checktp CARG4, LJ_TNIL // Key found, but nil value? + | ldrd CARG34, [BASE, RA] + | beq >4 + |2: + | tst CARG2, #LJ_GC_BLACK // isblack(table) + | strd CARG34, NODE:INS->val + | bne >7 + |3: + | ins_next + | + |4: // Check for __newindex if previous value is nil. + | ldr TAB:CARG1, TAB:RB->metatable + | cmp TAB:CARG1, #0 + | beq <2 // No metatable: done. + | ldrb CARG1, TAB:CARG1->nomm + | tst CARG1, #1<vmeta_tsets + | + |5: // Follow hash chain. + | movs NODE:INS, NODE:CARG3 + | bne <1 + | // End of hash chain: key not found, add a new one. + | + | // But check for __newindex first. + | ldr TAB:CARG1, TAB:RB->metatable + | mov CARG3, TMPDp + | str PC, SAVE_PC + | cmp TAB:CARG1, #0 // No metatable: continue. + | str BASE, L->base + | ldrbne CARG2, TAB:CARG1->nomm + | mov CARG1, L + | beq >6 + | tst CARG2, #1<vmeta_tsets // 'no __newindex' flag NOT set: check. + |6: + | mvn CARG4, #~LJ_TSTR + | str STR:RC, TMPDlo + | mov CARG2, TAB:RB + | str CARG4, TMPDhi + | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) + | // Returns TValue *. + | ldr BASE, L->base + | ldrd CARG34, [BASE, RA] + | strd CARG34, [CRET1] + | b <3 // No 2nd write barrier needed. + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, CARG2, CARG3 + | b <3 + break; + case BC_TSETB: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = src*8, RB = table*8, RC = index + | ldrd CARG12, [BASE, RB] + | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12. + | ldr CARG3, TAB:CARG1->asize + | ldr RB, TAB:CARG1->array + | lsl CARG2, RC, #3 + | cmp RC, CARG3 + | ldrdlo CARG34, [CARG2, RB]! + | bhs ->vmeta_tsetb + | ins_next1 // Overwrites RB! + | checktp CARG4, LJ_TNIL + | ldrb INS, TAB:CARG1->marked + | ldrd CARG34, [BASE, RA] + | beq >5 + |1: + | tst INS, #LJ_GC_BLACK // isblack(table) + | strd CARG34, [CARG2] + | bne >7 + |2: + | ins_next2 + | ins_next3 + | + |5: // Check for __newindex if previous value is nil. 
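The raw store paths above (BC_TSETV/TSETS/TSETB) all share one shape: store directly when the previous slot was non-nil, when the table has no metatable, or when the nomm cache says there is no __newindex, and then apply barrierback only if the table is black; otherwise bail out to the vmeta handler. A condensed C sketch of that decision; the struct and the __newindex bit value are illustrative, only the field names follow GCtab:

#include <stdint.h>
#include <stddef.h>

#define MM_NEWINDEX_BIT  0x02   /* Illustrative bit for __newindex in nomm. */
#define LJ_GC_BLACK      0x04

typedef struct MiniTab2 {
  uint8_t marked, nomm;
  struct MiniTab2 *metatable;
} MiniTab2;

/* Sketch: 1 = store raw (caller adds barrierback if the table is black),
** 0 = defer to the __newindex metamethod path. */
static int tset_store_raw(const MiniTab2 *t, int prev_slot_was_nil)
{
  if (!prev_slot_was_nil || t->metatable == NULL) return 1;
  return (t->metatable->nomm & MM_NEWINDEX_BIT) != 0;  /* 'no __newindex' set. */
}

static int tset_needs_barrier(const MiniTab2 *t)
{
  return (t->marked & LJ_GC_BLACK) != 0;
}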
+ | ldr TAB:RA, TAB:CARG1->metatable + | cmp TAB:RA, #0 + | beq <1 // No metatable: done. + | ldrb RA, TAB:RA->nomm + | tst RA, #1<vmeta_tsetb + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG1, INS, CARG3 + | b <2 + break; + case BC_TSETR: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = src*8, RB = table*8, RC = key*8 + | ldr TAB:CARG2, [BASE, RB] + | ldr CARG3, [BASE, RC] + | ldrb INS, TAB:CARG2->marked + | ldr CARG1, TAB:CARG2->array + | ldr CARG4, TAB:CARG2->asize + | tst INS, #LJ_GC_BLACK // isblack(table) + | add CARG1, CARG1, CARG3, lsl #3 + | bne >7 + |2: + | cmp CARG3, CARG4 // In array part? + | bhs ->vmeta_tsetr + |->BC_TSETR_Z: + | ldrd CARG34, [BASE, RA] + | ins_next1 + | ins_next2 + | strd CARG34, [CARG1] + | ins_next3 + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG2, INS, RB + | b <2 + break; + + case BC_TSETM: + | // RA = base*8 (table at base-1), RC = num_const (start index) + | add RA, BASE, RA + |1: + | ldr RB, SAVE_MULTRES + | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table. + | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word. + | subs RB, RB, #8 + | ldr CARG4, TAB:CARG2->asize + | beq >4 // Nothing to copy? + | add CARG3, CARG1, RB, lsr #3 + | cmp CARG3, CARG4 + | ldr CARG4, TAB:CARG2->array + | add RB, RA, RB + | bhi >5 + | add INS, CARG4, CARG1, lsl #3 + | ldrb CARG1, TAB:CARG2->marked + |3: // Copy result slots to table. + | ldrd CARG34, [RA], #8 + | strd CARG34, [INS], #8 + | cmp RA, RB + | blo <3 + | tst CARG1, #LJ_GC_BLACK // isblack(table) + | bne >7 + |4: + | ins_next + | + |5: // Need to resize array part. + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) + | // Must not reallocate the stack. + | .IOS ldr BASE, L->base + | b <1 + | + |7: // Possible table write barrier for any value. Skip valiswhite check. + | barrierback TAB:CARG2, CARG1, CARG3 + | b <4 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALLM: + | // RA = base*8, (RB = nresults+1,) RC = extra_nargs + | ldr CARG1, SAVE_MULTRES + | decode_RC8 NARGS8:RC, INS + | add NARGS8:RC, NARGS8:RC, CARG1 + | b ->BC_CALL_Z + break; + case BC_CALL: + | decode_RC8 NARGS8:RC, INS + | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8 + |->BC_CALL_Z: + | mov RB, BASE // Save old BASE for vmeta_call. + | ldrd CARG34, [BASE, RA]! + | sub NARGS8:RC, NARGS8:RC, #8 + | add BASE, BASE, #8 + | checkfunc CARG4, ->vmeta_call + | ins_call + break; + + case BC_CALLMT: + | // RA = base*8, (RB = 0,) RC = extra_nargs + | ldr CARG1, SAVE_MULTRES + | add NARGS8:RC, CARG1, RC, lsl #3 + | b ->BC_CALLT1_Z + break; + case BC_CALLT: + | lsl NARGS8:RC, RC, #3 + | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 + |->BC_CALLT1_Z: + | ldrd LFUNC:CARG34, [RA, BASE]! + | sub NARGS8:RC, NARGS8:RC, #8 + | add RA, RA, #8 + | checkfunc CARG4, ->vmeta_callt + | ldr PC, [BASE, FRAME_PC] + |->BC_CALLT2_Z: + | mov RB, #0 + | ldrb CARG4, LFUNC:CARG3->ffid + | tst PC, #FRAME_TYPE + | bne >7 + |1: + | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC. + | cmp NARGS8:RC, #0 + | beq >3 + |2: + | ldrd CARG12, [RA, RB] + | add INS, RB, #8 + | cmp INS, NARGS8:RC + | strd CARG12, [BASE, RB] + | mov RB, INS + | bne <2 + |3: + | cmp CARG4, #1 // (> FF_C) Calling a fast function? 
+ | bhi >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function with a Lua frame below. + | ldr INS, [PC, #-4] + | decode_RA8 RA, INS + | sub CARG1, BASE, RA + | ldr LFUNC:CARG1, [CARG1, #-16] + | ldr CARG1, LFUNC:CARG1->field_pc + | ldr KBASE, [CARG1, #PC2PROTO(k)] + | b <4 + | + |7: // Tailcall from a vararg function. + | eor PC, PC, #FRAME_VARG + | tst PC, #FRAME_TYPEP // Vararg frame below? + | movne CARG4, #0 // Clear ffid if no Lua function below. + | bne <1 + | sub BASE, BASE, PC + | ldr PC, [BASE, FRAME_PC] + | tst PC, #FRAME_TYPE + | movne CARG4, #0 // Clear ffid if no Lua function below. + | b <1 + break; + + case BC_ITERC: + | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1)) + | add RA, BASE, RA + | mov RB, BASE // Save old BASE for vmeta_call. + | ldrd CARG34, [RA, #-16] + | ldrd CARG12, [RA, #-8] + | add BASE, RA, #8 + | strd CARG34, [RA, #8] // Copy state. + | strd CARG12, [RA, #16] // Copy control var. + | // STALL: locked CARG34. + | ldrd LFUNC:CARG34, [RA, #-24] + | mov NARGS8:RC, #16 // Iterators get 2 arguments. + | // STALL: load CARG34. + | strd LFUNC:CARG34, [RA] // Copy callable. + | checkfunc CARG4, ->vmeta_call + | ins_call + break; + + case BC_ITERN: + | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1)) + |.if JIT + | // NYI: add hotloop, record BC_ITERN. + |.endif + | add RA, BASE, RA + | ldr TAB:RB, [RA, #-16] + | ldr CARG1, [RA, #-8] // Get index from control var. + | ldr INS, TAB:RB->asize + | ldr CARG2, TAB:RB->array + | add PC, PC, #4 + |1: // Traverse array part. + | subs RC, CARG1, INS + | add CARG3, CARG2, CARG1, lsl #3 + | bhs >5 // Index points after array part? + | ldrd CARG34, [CARG3] + | checktp CARG4, LJ_TNIL + | addeq CARG1, CARG1, #1 // Skip holes in array part. + | beq <1 + | ldrh RC, [PC, #-2] + | mvn CARG2, #~LJ_TISNUM + | strd CARG34, [RA, #8] + | add RC, PC, RC, lsl #2 + | add RB, CARG1, #1 + | strd CARG12, [RA] + | sub PC, RC, #0x20000 + | str RB, [RA, #-8] // Update control var. + |3: + | ins_next + | + |5: // Traverse hash part. + | ldr CARG4, TAB:RB->hmask + | ldr NODE:RB, TAB:RB->node + |6: + | add CARG1, RC, RC, lsl #1 + | cmp RC, CARG4 // End of iteration? Branch to ITERL+1. + | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8 + | bhi <3 + | ldrd CARG12, NODE:CARG3->val + | checktp CARG2, LJ_TNIL + | add RC, RC, #1 + | beq <6 // Skip holes in hash part. + | ldrh RB, [PC, #-2] + | add RC, RC, INS + | ldrd CARG34, NODE:CARG3->key + | str RC, [RA, #-8] // Update control var. + | strd CARG12, [RA, #8] + | add RC, PC, RB, lsl #2 + | sub PC, RC, #0x20000 + | strd CARG34, [RA] + | b <3 + break; + + case BC_ISNEXT: + | // RA = base*8, RC = target (points to ITERN) + | add RA, BASE, RA + | add RC, PC, RC, lsl #2 + | ldrd CFUNC:CARG12, [RA, #-24] + | ldr CARG3, [RA, #-12] + | ldr CARG4, [RA, #-4] + | checktp CARG2, LJ_TFUNC + | ldrbeq CARG1, CFUNC:CARG1->ffid + | checktpeq CARG3, LJ_TTAB + | checktpeq CARG4, LJ_TNIL + | cmpeq CARG1, #FF_next_N + | subeq PC, RC, #0x20000 + | bne >5 + | ins_next1 + | ins_next2 + | mov CARG1, #0 + | mvn CARG2, #0x00018000 + | strd CARG1, [RA, #-8] // Initialize control var. + |1: + | ins_next3 + |5: // Despecialize bytecode if any of the checks fail. + | mov CARG1, #BC_JMP + | mov OP, #BC_ITERC + | strb CARG1, [PC, #-4] + | sub PC, RC, #0x20000 + | strb OP, [PC] // Subsumes ins_next1. 
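BC_ITERN above specializes pairs()/next(): the hidden control variable is a running index that first walks the array part, skipping nil holes, then continues through the hash nodes, and iteration stops once the index runs past hmask. A compact C sketch of that traversal order with simplified stand-in types:

#include <stdint.h>
#include <stddef.h>

typedef struct MiniSlot { int is_nil; int64_t key, val; } MiniSlot;
typedef struct IterTab {
  MiniSlot *array; uint32_t asize;   /* Array part, indices 0..asize-1. */
  MiniSlot *node;  uint32_t hmask;   /* Hash part, hmask+1 nodes. */
} IterTab;

/* Sketch: return the next non-nil slot starting at *ctrl, or NULL at the end.
** *ctrl plays the role of the BC_ITERN control variable. */
static MiniSlot *itern_step(IterTab *t, uint32_t *ctrl)
{
  uint32_t i = *ctrl;
  for (; i < t->asize; i++)                   /* Traverse array part. */
    if (!t->array[i].is_nil) { *ctrl = i + 1; return &t->array[i]; }
  for (i -= t->asize; i <= t->hmask; i++)     /* Then traverse hash part. */
    if (!t->node[i].is_nil) { *ctrl = t->asize + i + 1; return &t->node[i]; }
  return NULL;                                /* End of iteration. */
}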
+ | ins_next2 + | b <1 + break; + + case BC_VARG: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 + | ldr CARG1, [BASE, FRAME_PC] + | add RC, BASE, RC + | add RA, BASE, RA + | add RC, RC, #FRAME_VARG + | add CARG4, RA, RB + | sub CARG3, BASE, #8 // CARG3 = vtop + | sub RC, RC, CARG1 // RC = vbase + | // Note: RC may now be even _above_ BASE if nargs was < numparams. + | cmp RB, #0 + | sub CARG1, CARG3, RC + | beq >5 // Copy all varargs? + | sub CARG4, CARG4, #16 + |1: // Copy vararg slots to destination slots. + | cmp RC, CARG3 + | ldrdlo CARG12, [RC], #8 + | mvnhs CARG2, #~LJ_TNIL + | cmp RA, CARG4 + | strd CARG12, [RA], #8 + | blo <1 + |2: + | ins_next + | + |5: // Copy all varargs. + | ldr CARG4, L->maxstack + | cmp CARG1, #0 + | movle RB, #8 // MULTRES = (0+1)*8 + | addgt RB, CARG1, #8 + | add CARG2, RA, CARG1 + | str RB, SAVE_MULTRES + | ble <2 + | cmp CARG2, CARG4 + | bhi >7 + |6: + | ldrd CARG12, [RC], #8 + | strd CARG12, [RA], #8 + | cmp RC, CARG3 + | blo <6 + | b <2 + | + |7: // Grow stack for varargs. + | lsr CARG2, CARG1, #3 + | str RA, L->top + | mov CARG1, L + | str BASE, L->base + | sub RC, RC, BASE // Need delta, because BASE may change. + | str PC, SAVE_PC + | sub RA, RA, BASE + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | add RA, BASE, RA + | add RC, BASE, RC + | sub CARG3, BASE, #8 + | b <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | // RA = results*8, RC = extra results + | ldr CARG1, SAVE_MULTRES + | ldr PC, [BASE, FRAME_PC] + | add RA, BASE, RA + | add RC, CARG1, RC, lsl #3 + | b ->BC_RETM_Z + break; + + case BC_RET: + | // RA = results*8, RC = nresults+1 + | ldr PC, [BASE, FRAME_PC] + | lsl RC, RC, #3 + | add RA, BASE, RA + |->BC_RETM_Z: + | str RC, SAVE_MULTRES + |1: + | ands CARG1, PC, #FRAME_TYPE + | eor CARG2, PC, #FRAME_VARG + | bne ->BC_RETV2_Z + | + |->BC_RET_Z: + | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return + | ldr INS, [PC, #-4] + | subs CARG4, RC, #8 + | sub CARG3, BASE, #8 + | beq >3 + |2: + | ldrd CARG12, [RA], #8 + | add BASE, BASE, #8 + | subs CARG4, CARG4, #8 + | strd CARG12, [BASE, #-16] + | bne <2 + |3: + | decode_RA8 RA, INS + | sub CARG4, CARG3, RA + | decode_RB8 RB, INS + | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC] + |5: + | cmp RB, RC // More results expected? + | bhi >6 + | mov BASE, CARG4 + | ldr CARG2, LFUNC:CARG1->field_pc + | ins_next1 + | ins_next2 + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | ins_next3 + | + |6: // Fill up results with nil. + | mvn CARG2, #~LJ_TNIL + | add BASE, BASE, #8 + | add RC, RC, #8 + | str CARG2, [BASE, #-12] + | b <5 + | + |->BC_RETV1_Z: // Non-standard return case. + | add RA, BASE, RA + |->BC_RETV2_Z: + | tst CARG2, #FRAME_TYPEP + | bne ->vm_return + | // Return from vararg function: relocate BASE down. 
+ | sub BASE, BASE, CARG2 + | ldr PC, [BASE, FRAME_PC] + | b <1 + break; + + case BC_RET0: case BC_RET1: + | // RA = results*8, RC = nresults+1 + | ldr PC, [BASE, FRAME_PC] + | lsl RC, RC, #3 + | str RC, SAVE_MULTRES + | ands CARG1, PC, #FRAME_TYPE + | eor CARG2, PC, #FRAME_VARG + | ldreq INS, [PC, #-4] + | bne ->BC_RETV1_Z + if (op == BC_RET1) { + | ldrd CARG12, [BASE, RA] + } + | sub CARG4, BASE, #8 + | decode_RA8 RA, INS + if (op == BC_RET1) { + | strd CARG12, [CARG4] + } + | sub BASE, CARG4, RA + | decode_RB8 RB, INS + | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] + |5: + | cmp RB, RC + | bhi >6 + | ldr CARG2, LFUNC:CARG1->field_pc + | ins_next1 + | ins_next2 + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | ins_next3 + | + |6: // Fill up results with nil. + | sub CARG2, CARG4, #4 + | mvn CARG3, #~LJ_TNIL + | str CARG3, [CARG2, RC] + | add RC, RC, #8 + | b <5 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4] + |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12] + |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20] + |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28] + + case BC_FORL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IFORL follows. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + | // RA = base*8, RC = target (after end of loop or start of loop) + vk = (op == BC_IFORL || op == BC_JFORL); + | ldrd CARG12, [RA, BASE]! + if (op != BC_JFORL) { + | add RC, PC, RC, lsl #2 + } + if (!vk) { + | ldrd CARG34, FOR_STOP + | checktp CARG2, LJ_TISNUM + | ldr RB, FOR_TSTEP + | bne >5 + | checktp CARG4, LJ_TISNUM + | ldr CARG4, FOR_STEP + | checktpeq RB, LJ_TISNUM + | bne ->vmeta_for + | cmp CARG4, #0 + | blt >4 + | cmp CARG1, CARG3 + } else { + | ldrd CARG34, FOR_STEP + | checktp CARG2, LJ_TISNUM + | bne >5 + | adds CARG1, CARG1, CARG3 + | ldr CARG4, FOR_STOP + if (op == BC_IFORL) { + | addvs RC, PC, #0x20000 // Overflow: prevent branch. + } else { + | bvs >2 // Overflow: do not enter mcode. + } + | cmp CARG3, #0 + | blt >4 + | cmp CARG1, CARG4 + } + |1: + if (op == BC_FORI) { + | subgt PC, RC, #0x20000 + } else if (op == BC_JFORI) { + | sub PC, RC, #0x20000 + | ldrhle RC, [PC, #-2] + } else if (op == BC_IFORL) { + | suble PC, RC, #0x20000 + } + if (vk) { + | strd CARG12, FOR_IDX + } + |2: + | ins_next1 + | ins_next2 + | strd CARG12, FOR_EXT + if (op == BC_JFORI || op == BC_JFORL) { + | ble =>BC_JLOOP + } + |3: + | ins_next3 + | + |4: // Invert check for negative step. + if (!vk) { + | cmp CARG3, CARG1 + } else { + | cmp CARG4, CARG1 + } + | b <1 + | + |5: // FP loop. 
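The integer path of BC_FORI/BC_IFORL above adds the step with adds so that signed overflow (the addvs/bvs cases) keeps the backward branch from being taken, and the idx/stop comparison is swapped when the step is negative. A reference C version of one loop step, as a sketch only:

#include <stdint.h>

/* Sketch: one IFORL step on the integer fast path.
** Returns 1 to run the loop body again, 0 to leave the loop. */
static int forl_int_step(int32_t *idx, int32_t stop, int32_t step)
{
  int64_t next = (int64_t)*idx + step;
  if (next != (int32_t)next) return 0;     /* Overflow: branch not taken. */
  *idx = (int32_t)next;
  return step >= 0 ? (*idx <= stop)        /* Normal check... */
                   : (stop <= *idx);       /* ...inverted for negative step. */
}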
+ if (!vk) { + | cmnlo CARG4, #-LJ_TISNUM + | cmnlo RB, #-LJ_TISNUM + | bhs ->vmeta_for + |.if FPU + | vldr d0, FOR_IDX + | vldr d1, FOR_STOP + | cmp RB, #0 + | vstr d0, FOR_EXT + |.else + | cmp RB, #0 + | strd CARG12, FOR_EXT + | blt >8 + |.endif + } else { + |.if FPU + | vldr d0, FOR_IDX + | vldr d2, FOR_STEP + | vldr d1, FOR_STOP + | cmp CARG4, #0 + | vadd.f64 d0, d0, d2 + |.else + | cmp CARG4, #0 + | blt >8 + | bl extern __aeabi_dadd + | strd CARG12, FOR_IDX + | ldrd CARG34, FOR_STOP + | strd CARG12, FOR_EXT + |.endif + } + |6: + |.if FPU + | vcmpge.f64 d0, d1 + | vcmplt.f64 d1, d0 + | vmrs + |.else + | bl extern __aeabi_cdcmple + |.endif + if (vk) { + |.if FPU + | vstr d0, FOR_IDX + | vstr d0, FOR_EXT + |.endif + } + if (op == BC_FORI) { + | subhi PC, RC, #0x20000 + } else if (op == BC_JFORI) { + | sub PC, RC, #0x20000 + | ldrhls RC, [PC, #-2] + | bls =>BC_JLOOP + } else if (op == BC_IFORL) { + | subls PC, RC, #0x20000 + } else { + | bls =>BC_JLOOP + } + | ins_next1 + | ins_next2 + | b <3 + | + |.if not FPU + |8: // Invert check for negative step. + if (vk) { + | bl extern __aeabi_dadd + | strd CARG12, FOR_IDX + | strd CARG12, FOR_EXT + } + | mov CARG3, CARG1 + | mov CARG4, CARG2 + | ldrd CARG12, FOR_STOP + | b <6 + |.endif + break; + + case BC_ITERL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IITERL follows. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | // RA = base*8, RC = target + | ldrd CARG12, [RA, BASE]! + if (op == BC_JITERL) { + | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil. + | strdne CARG12, [RA, #-8] + | bne =>BC_JLOOP + } else { + | add RC, PC, RC, lsl #2 + | // STALL: load CARG12. + | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil. + | subne PC, RC, #0x20000 // Otherwise save control var + branch. + | strdne CARG12, [RA, #-8] + } + | ins_next + break; + + case BC_LOOP: + | // RA = base*8, RC = target (loop extent) + | // Note: RA/RC is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_ILOOP follows. + break; + + case BC_ILOOP: + | // RA = base*8, RC = target (loop extent) + | ins_next + break; + + case BC_JLOOP: + |.if JIT + | // RA = base (ignored), RC = traceno + | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)] + | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0. + | ldr TRACE:RC, [CARG1, RC, lsl #2] + | st_vmstate CARG2 + | ldr RA, TRACE:RC->mcode + | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)] + | str L, [DISPATCH, #DISPATCH_GL(tmpbuf.L)] + | bx RA + |.endif + break; + + case BC_JMP: + | // RA = base*8 (only used by trace recorder), RC = target + | add RC, PC, RC, lsl #2 + | sub PC, RC, #0x20000 + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: + |.if JIT + | hotcall + |.endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 + | ldr CARG1, L->maxstack + | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)] + | ldr KBASE, [PC, #-4+PC2PROTO(k)] + | cmp RA, CARG1 + | bhi ->vm_growstack_l + if (op != BC_JFUNCF) { + | ins_next1 + | ins_next2 + } + |2: + | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters. 
+ | mvn CARG4, #~LJ_TNIL + | blo >3 + if (op == BC_JFUNCF) { + | decode_RD RC, INS + | b =>BC_JLOOP + } else { + | ins_next3 + } + | + |3: // Clear missing parameters. + | strd CARG34, [BASE, NARGS8:RC] + | add NARGS8:RC, NARGS8:RC, #8 + | b <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | NYI // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 + | ldr CARG1, L->maxstack + | add CARG4, BASE, RC + | add RA, RA, RC + | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC. + | add CARG2, RC, #8+FRAME_VARG + | ldr KBASE, [PC, #-4+PC2PROTO(k)] + | cmp RA, CARG1 + | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG. + | bhs ->vm_growstack_l + | ldrb RB, [PC, #-4+PC2PROTO(numparams)] + | mov RA, BASE + | mov RC, CARG4 + | cmp RB, #0 + | add BASE, CARG4, #8 + | beq >3 + | mvn CARG3, #~LJ_TNIL + |1: + | cmp RA, RC // Less args than parameters? + | ldrdlo CARG12, [RA], #8 + | movhs CARG2, CARG3 + | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC). + |2: + | subs RB, RB, #1 + | strd CARG12, [CARG4, #8]! + | bne <1 + |3: + | ins_next + break; + + case BC_FUNCC: + case BC_FUNCCW: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8 + if (op == BC_FUNCC) { + | ldr CARG4, CFUNC:CARG3->f + } else { + | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)] + } + | add CARG2, RA, NARGS8:RC + | ldr CARG1, L->maxstack + | add RC, BASE, NARGS8:RC + | str BASE, L->base + | cmp CARG2, CARG1 + | str RC, L->top + if (op == BC_FUNCCW) { + | ldr CARG2, CFUNC:CARG3->f + } + | mv_vmstate CARG3, C + | mov CARG1, L + | bhi ->vm_growstack_c // Need to grow stack. + | st_vmstate CARG3 + | blx CARG4 // (lua_State *L [, lua_CFunction f]) + | // Returns nresults. + | ldr BASE, L->base + | mv_vmstate CARG3, INTERP + | ldr CRET2, L->top + | str L, [DISPATCH, #DISPATCH_GL(cur_L)] + | lsl RC, CRET1, #3 + | st_vmstate CARG3 + | ldr PC, [BASE, FRAME_PC] + | sub RA, CRET2, RC // RA = L->top - nresults*8 + | b ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. */ +static void emit_asm_debug(BuildCtx *ctx) +{ + int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 0xe\n" /* Return address is in lr. */ + "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */ + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */ + fcofs, CFRAME_SIZE); + for (i = 11; i >= (LJ_ARCH_HASFPU ? 
5 : 4); i--) /* offset r4-r11 */ + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i)); +#if LJ_ARCH_HASFPU + for (i = 15; i >= 8; i--) /* offset d8-d15 */ + fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 %d, %d\n", + 64+2*i, 10+2*(15-i)); + fprintf(ctx->fp, "\t.byte 0x84\n\t.uleb128 %d\n", 25); /* offset r4 */ +#endif + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .Lframe0\n" + "\t.long lj_vm_ffi_call\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ + "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */ + "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */ + "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */ + "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */ + "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */ + "\t.align 2\n" + ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); +#endif + break; + default: + break; + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/vm_arm64.dasc b/Build/source/libs/luajit/LuaJIT-src/src/vm_arm64.dasc new file mode 100644 index 00000000000..f1251f2c48a --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/vm_arm64.dasc @@ -0,0 +1,3764 @@ +|// Low-level VM code for ARM64 CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +| +|.arch arm64 +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|// Note: The ragged indentation of the instructions is intentional. +|// The starting columns indicate data dependencies. +| +|//----------------------------------------------------------------------- +| +|// ARM64 registers and the AAPCS64 ABI 1.0 at a glance: +|// +|// x0-x17 temp, x19-x28 callee-saved, x29 fp, x30 lr +|// x18 is reserved on most platforms. Don't use it, save it or restore it. +|// x31 doesn't exist. Register number 31 either means xzr/wzr (zero) or sp, +|// depending on the instruction. +|// v0-v7 temp, v8-v15 callee-saved (only d8-d15 preserved), v16-v31 temp +|// +|// x0-x7/v0-v7 hold parameters and results. +| +|// Fixed register assignments for the interpreter. +| +|// The following must be C callee-save. +|.define BASE, x19 // Base of current Lua stack frame. +|.define KBASE, x20 // Constants of current Lua function. +|.define PC, x21 // Next PC. +|.define GLREG, x22 // Global state. +|.define LREG, x23 // Register holding lua_State (also in SAVE_L). +|.define TISNUM, x24 // Constant LJ_TISNUM << 47. +|.define TISNUMhi, x25 // Constant LJ_TISNUM << 15. +|.define TISNIL, x26 // Constant -1LL. +|.define fp, x29 // Yes, we have to maintain a frame pointer. +| +|.define ST_INTERP, w26 // Constant -1. +| +|// The following temporaries are not saved across C calls, except for RA/RC. +|.define RA, x27 +|.define RC, x28 +|.define RB, x17 +|.define RAw, w27 +|.define RCw, w28 +|.define RBw, w17 +|.define INS, x16 +|.define INSw, w16 +|.define ITYPE, x15 +|.define TMP0, x8 +|.define TMP1, x9 +|.define TMP2, x10 +|.define TMP3, x11 +|.define TMP0w, w8 +|.define TMP1w, w9 +|.define TMP2w, w10 +|.define TMP3w, w11 +| +|// Calling conventions. Also used as temporaries. 
+|.define CARG1, x0 +|.define CARG2, x1 +|.define CARG3, x2 +|.define CARG4, x3 +|.define CARG5, x4 +|.define CARG1w, w0 +|.define CARG2w, w1 +|.define CARG3w, w2 +|.define CARG4w, w3 +|.define CARG5w, w4 +| +|.define FARG1, d0 +|.define FARG2, d1 +| +|.define CRET1, x0 +|.define CRET1w, w0 +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +| +|.define CFRAME_SPACE, 208 +|//----- 16 byte aligned, <-- sp entering interpreter +|// Unused [sp, #204] // 32 bit values +|.define SAVE_NRES, [sp, #200] +|.define SAVE_ERRF, [sp, #196] +|.define SAVE_MULTRES, [sp, #192] +|.define TMPD, [sp, #184] // 64 bit values +|.define SAVE_L, [sp, #176] +|.define SAVE_PC, [sp, #168] +|.define SAVE_CFRAME, [sp, #160] +|.define SAVE_FPR_, 96 // 96+8*8: 64 bit FPR saves +|.define SAVE_GPR_, 16 // 16+10*8: 64 bit GPR saves +|.define SAVE_LR, [sp, #8] +|.define SAVE_FP, [sp] +|//----- 16 byte aligned, <-- sp while in interpreter. +| +|.define TMPDofs, #184 +| +|.macro save_, gpr1, gpr2, fpr1, fpr2 +| stp d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(fpr1-8)*8] +| stp x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(gpr1-19)*8] +|.endmacro +|.macro rest_, gpr1, gpr2, fpr1, fpr2 +| ldp d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(fpr1-8)*8] +| ldp x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(gpr1-19)*8] +|.endmacro +| +|.macro saveregs +| stp fp, lr, [sp, #-CFRAME_SPACE]! +| add fp, sp, #0 +| stp x19, x20, [sp, # SAVE_GPR_] +| save_ 21, 22, 8, 9 +| save_ 23, 24, 10, 11 +| save_ 25, 26, 12, 13 +| save_ 27, 28, 14, 15 +|.endmacro +|.macro restoreregs +| ldp x19, x20, [sp, # SAVE_GPR_] +| rest_ 21, 22, 8, 9 +| rest_ 23, 24, 10, 11 +| rest_ 25, 26, 12, 13 +| rest_ 27, 28, 14, 15 +| ldp fp, lr, [sp], # CFRAME_SPACE +|.endmacro +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State, LREG +|.type GL, global_State, GLREG +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS8, int +|.type TRACE, GCtrace +|.type SBUF, SBuf +| +|//----------------------------------------------------------------------- +| +|// Trap for not-yet-implemented parts. +|.macro NYI; brk; .endmacro +| +|//----------------------------------------------------------------------- +| +|// Access to frame relative to BASE. +|.define FRAME_FUNC, #-16 +|.define FRAME_PC, #-8 +| +|.macro decode_RA, dst, ins; ubfx dst, ins, #8, #8; .endmacro +|.macro decode_RB, dst, ins; ubfx dst, ins, #24, #8; .endmacro +|.macro decode_RC, dst, ins; ubfx dst, ins, #16, #8; .endmacro +|.macro decode_RD, dst, ins; ubfx dst, ins, #16, #16; .endmacro +|.macro decode_RC8RD, dst, src; ubfiz dst, src, #3, #8; .endmacro +| +|// Instruction decode+dispatch. +|.macro ins_NEXT +| ldr INSw, [PC], #4 +| add TMP1, GL, INS, uxtb #3 +| decode_RA RA, INS +| ldr TMP0, [TMP1, #GG_G2DISP] +| decode_RD RC, INS +| br TMP0 +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| .macro ins_next +| b ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. 
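+|// ins_callt loads the callee's bytecode pointer, fetches its first instruction
+|// and branches to the handler from the dispatch table at GL+GG_G2DISP
+|// (indexed by the opcode in the low byte of the instruction, scaled by 8).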
+|.macro ins_callt +| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC +| ldr PC, LFUNC:CARG3->pc +| ldr INSw, [PC], #4 +| add TMP1, GL, INS, uxtb #3 +| decode_RA RA, INS +| ldr TMP0, [TMP1, #GG_G2DISP] +| add RA, BASE, RA, lsl #3 +| br TMP0 +|.endmacro +| +|.macro ins_call +| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC +| str PC, [BASE, FRAME_PC] +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to check the TValue type and extract the GCobj. Branch on failure. +|.macro checktp, reg, tp, target +| asr ITYPE, reg, #47 +| cmn ITYPE, #-tp +| and reg, reg, #LJ_GCVMASK +| bne target +|.endmacro +|.macro checktp, dst, reg, tp, target +| asr ITYPE, reg, #47 +| cmn ITYPE, #-tp +| and dst, reg, #LJ_GCVMASK +| bne target +|.endmacro +|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro +|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro +|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro +|.macro checkint, reg, target +| cmp TISNUMhi, reg, lsr #32 +| bne target +|.endmacro +|.macro checknum, reg, target +| cmp TISNUMhi, reg, lsr #32 +| bls target +|.endmacro +|.macro checknumber, reg, target +| cmp TISNUMhi, reg, lsr #32 +| blo target +|.endmacro +| +|.macro mov_false, reg; movn reg, #0x8000, lsl #32; .endmacro +|.macro mov_true, reg; movn reg, #0x0001, lsl #48; .endmacro +| +#define GL_J(field) (GG_OFS(J) + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|.macro hotcheck, delta +| NYI +|.endmacro +| +|.macro hotloop +| hotcheck HOTCOUNT_LOOP +| blo ->vm_hotloop +|.endmacro +| +|.macro hotcall +| hotcheck HOTCOUNT_CALL +| blo ->vm_hotcall +|.endmacro +| +|// Set current VM state. +|.macro mv_vmstate, reg, st; movn reg, #LJ_VMST_..st; .endmacro +|.macro st_vmstate, reg; str reg, GL->vmstate; .endmacro +| +|// Move table write barrier back. Overwrites mark and tmp. +|.macro barrierback, tab, mark, tmp +| ldr tmp, GL->gc.grayagain +| and mark, mark, #~LJ_GC_BLACK // black2gray(tab) +| str tab, GL->gc.grayagain +| strb mark, tab->marked +| str tmp, tab->gclist +|.endmacro +| +|//----------------------------------------------------------------------- + +#if !LJ_DUALNUM +#error "Only dual-number mode supported for ARM64 target" +#endif + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | // See vm_return. Also: RB = previous base. + | tbz PC, #2, ->cont_dispatch // (PC & FRAME_P) == 0? + | + | // Return from pcall or xpcall fast func. + | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame. + | mov_true TMP0 + | mov BASE, RB + | // Prepending may overwrite the pcall frame, so do it at the end. + | str TMP0, [RA, #-8]! // Prepend true to results. + | + |->vm_returnc: + | adds RC, RC, #8 // RC = (nresults+1)*8. + | mov CRET1, #LUA_YIELD + | beq ->vm_unwind_c_eh + | str RCw, SAVE_MULTRES + | ands CARG1, PC, #FRAME_TYPE + | beq ->BC_RET_Z // Handle regular return to Lua. 
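+ |// Otherwise fall through to ->vm_return for non-Lua frame types.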
+ | + |->vm_return: + | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return + | // CARG1 = PC & FRAME_TYPE + | and RB, PC, #~FRAME_TYPEP + | cmp CARG1, #FRAME_C + | sub RB, BASE, RB // RB = previous base. + | bne ->vm_returnp + | + | str RB, L->base + | ldrsw CARG2, SAVE_NRES // CARG2 = nresults+1. + | mv_vmstate TMP0w, C + | sub BASE, BASE, #16 + | subs TMP2, RC, #8 + | st_vmstate TMP0w + | beq >2 + |1: + | subs TMP2, TMP2, #8 + | ldr TMP0, [RA], #8 + | str TMP0, [BASE], #8 + | bne <1 + |2: + | cmp RC, CARG2, lsl #3 // More/less results wanted? + | bne >6 + |3: + | str BASE, L->top // Store new top. + | + |->vm_leave_cp: + | ldr RC, SAVE_CFRAME // Restore previous C frame. + | mov CRET1, #0 // Ok return status for vm_pcall. + | str RC, L->cframe + | + |->vm_leave_unw: + | restoreregs + | ret + | + |6: + | bgt >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | ldr CARG3, L->maxstack + | cmp BASE, CARG3 + | bhs >8 + | str TISNIL, [BASE], #8 + | add RC, RC, #8 + | b <2 + | + |7: // Less results wanted. + | cbz CARG2, <3 // LUA_MULTRET+1 case? + | sub CARG1, RC, CARG2, lsl #3 + | sub BASE, BASE, CARG1 // Shrink top. + | b <3 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | str BASE, L->top // Save current top held in BASE (yes). + | mov CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->top // Need the (realloced) L->top in BASE. + | ldrsw CARG2, SAVE_NRES + | b <2 + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + | mov sp, CARG1 + | mov CRET1, CARG2 + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | ldr L, SAVE_L + | mv_vmstate TMP0w, C + | ldr GL, L->glref + | st_vmstate TMP0w + | b ->vm_leave_unw + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + | and sp, CARG1, #CFRAME_RAWMASK + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | ldr L, SAVE_L + | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 + | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 + | movn TISNIL, #0 + | mov RC, #16 // 2 results: false + error message. + | ldr BASE, L->base + | ldr GL, L->glref // Setup pointer to global state. + | mov_false TMP0 + | sub RA, BASE, #8 // Results start at BASE-8. + | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame. + | str TMP0, [BASE, #-8] // Prepend false to error message. + | st_vmstate ST_INTERP + | b ->vm_returnc + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | // CARG1 = L + | mov CARG2, #LUA_MINSTACK + | b >2 + | + |->vm_growstack_l: // Grow stack for Lua function. + | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC + | add RC, BASE, RC + | sub RA, RA, BASE + | mov CARG1, L + | stp BASE, RC, L->base + | add PC, PC, #4 // Must point after first instruction. 
+ | lsr CARG2, RA, #3 + |2: + | // L->base = new base, L->top = top + | str PC, SAVE_PC + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldp BASE, RC, L->base + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | sub NARGS8:RC, RC, BASE + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | mov L, CARG1 + | ldr GL, L->glref // Setup pointer to global state. + | mov BASE, CARG2 + | str L, SAVE_L + | mov PC, #FRAME_CP + | str wzr, SAVE_NRES + | add TMP0, sp, #CFRAME_RESUME + | ldrb TMP1w, L->status + | str wzr, SAVE_ERRF + | str L, SAVE_PC // Any value outside of bytecode is ok. + | str xzr, SAVE_CFRAME + | str TMP0, L->cframe + | cbz TMP1w, >3 + | + | // Resume after yield (like a return). + | str L, GL->cur_L + | mov RA, BASE + | ldp BASE, CARG1, L->base + | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 + | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 + | ldr PC, [BASE, FRAME_PC] + | strb wzr, L->status + | movn TISNIL, #0 + | sub RC, CARG1, BASE + | ands CARG1, PC, #FRAME_TYPE + | add RC, RC, #8 + | st_vmstate ST_INTERP + | str RCw, SAVE_MULTRES + | beq ->BC_RET_Z + | b ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | mov PC, #FRAME_CP + | str CARG4w, SAVE_ERRF + | b >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | mov PC, #FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + | ldr RC, L:CARG1->cframe + | str CARG3w, SAVE_NRES + | mov L, CARG1 + | str CARG1, SAVE_L + | ldr GL, L->glref // Setup pointer to global state. + | mov BASE, CARG2 + | str CARG1, SAVE_PC // Any value outside of bytecode is ok. + | str RC, SAVE_CFRAME + | str fp, L->cframe // Add our C frame to cframe chain. + | + |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). + | str L, GL->cur_L + | ldp RB, CARG1, L->base // RB = old base (for vmeta_call). + | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 + | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 + | add PC, PC, BASE + | movn TISNIL, #0 + | sub PC, PC, RB // PC = frame delta + frame type + | sub NARGS8:RC, CARG1, BASE + | st_vmstate ST_INTERP + | + |->vm_call_dispatch: + | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC + | ldr CARG3, [BASE, FRAME_FUNC] + | checkfunc CARG3, ->vmeta_call + | + |->vm_call_dispatch_f: + | ins_call + | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | mov L, CARG1 + | ldr RA, L:CARG1->stack + | str CARG1, SAVE_L + | ldr GL, L->glref // Setup pointer to global state. + | ldr RB, L->top + | str CARG1, SAVE_PC // Any value outside of bytecode is ok. + | ldr RC, L->cframe + | sub RA, RA, RB // Compute -savestack(L, L->top). + | str RAw, SAVE_NRES // Neg. delta means cframe w/o frame. + | str wzr, SAVE_ERRF // No error function. 
+ | str RC, SAVE_CFRAME + | str fp, L->cframe // Add our C frame to cframe chain. + | str L, GL->cur_L + | blr CARG4 // (lua_State *L, lua_CFunction func, void *ud) + | mov BASE, CRET1 + | mov PC, #FRAME_CP + | cbnz BASE, <3 // Else continue with the call. + | b ->vm_leave_cp // No base? Just remove C frame. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8 + | ldr LFUNC:CARG3, [RB, FRAME_FUNC] + | ldr CARG1, [BASE, #-32] // Get continuation. + | mov CARG4, BASE + | mov BASE, RB // Restore caller BASE. + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + |.if FFI + | cmp CARG1, #1 + |.endif + | ldr PC, [CARG4, #-24] // Restore PC from [cont|PC]. + | ldr CARG3, LFUNC:CARG3->pc + | add TMP0, RA, RC + | str TISNIL, [TMP0, #-8] // Ensure one valid arg. + |.if FFI + | bls >1 + |.endif + | ldr KBASE, [CARG3, #PC2PROTO(k)] + | // BASE = base, RA = resultptr, CARG4 = meta base + | br CARG1 + | + |.if FFI + |1: + | beq ->cont_ffi_callback // cont = 1: return from FFI callback. + | // cont = 0: tailcall from C function. + | sub CARG4, CARG4, #32 + | sub RC, CARG4, BASE + | b ->vm_call_tail + |.endif + | + |->cont_cat: // RA = resultptr, CARG4 = meta base + | ldr INSw, [PC, #-4] + | sub CARG2, CARG4, #32 + | ldr TMP0, [RA] + | str BASE, L->base + | decode_RB RB, INS + | decode_RA RA, INS + | add TMP1, BASE, RB, lsl #3 + | subs TMP1, CARG2, TMP1 + | beq >1 + | str TMP0, [CARG2] + | lsr CARG3, TMP1, #3 + | b ->BC_CAT_Z + | + |1: + | str TMP0, [BASE, RA, lsl #3] + | b ->cont_nop + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets1: + | movn CARG4, #~LJ_TSTR + | add CARG2, BASE, RB, lsl #3 + | add CARG4, STR:RC, CARG4, lsl #47 + | b >2 + | + |->vmeta_tgets: + | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48 + | str CARG2, GL->tmptv + | add CARG2, GL, #offsetof(global_State, tmptv) + |2: + | add CARG3, sp, TMPDofs + | str CARG4, TMPD + | b >1 + | + |->vmeta_tgetb: // RB = table, RC = index + | add RC, RC, TISNUM + | add CARG2, BASE, RB, lsl #3 + | add CARG3, sp, TMPDofs + | str RC, TMPD + | b >1 + | + |->vmeta_tgetv: // RB = table, RC = key + | add CARG2, BASE, RB, lsl #3 + | add CARG3, BASE, RC, lsl #3 + |1: + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | cbz CRET1, >3 + | ldr TMP0, [CRET1] + | str TMP0, [BASE, RA, lsl #3] + | ins_next + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | sub TMP1, BASE, #FRAME_CONT + | ldr BASE, L->top + | mov NARGS8:RC, #16 // 2 args for func(t, k). + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | str PC, [BASE, #-24] // [cont|PC] + | sub PC, BASE, TMP1 + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | b ->vm_call_dispatch_f + | + |->vmeta_tgetr: + | sxtw CARG2, TMP1w + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. 
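+ |// Preload nil as the result in case the array slot does not exist.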
+ | mov TMP0, TISNIL + | cbz CRET1, ->BC_TGETR_Z + | ldr TMP0, [CRET1] + | b ->BC_TGETR_Z + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets1: + | movn CARG4, #~LJ_TSTR + | add CARG2, BASE, RB, lsl #3 + | add CARG4, STR:RC, CARG4, lsl #47 + | b >2 + | + |->vmeta_tsets: + | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48 + | str CARG2, GL->tmptv + | add CARG2, GL, #offsetof(global_State, tmptv) + |2: + | add CARG3, sp, TMPDofs + | str CARG4, TMPD + | b >1 + | + |->vmeta_tsetb: // RB = table, RC = index + | add RC, RC, TISNUM + | add CARG2, BASE, RB, lsl #3 + | add CARG3, sp, TMPDofs + | str RC, TMPD + | b >1 + | + |->vmeta_tsetv: + | add CARG2, BASE, RB, lsl #3 + | add CARG3, BASE, RC, lsl #3 + |1: + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | ldr TMP0, [BASE, RA, lsl #3] + | cbz CRET1, >3 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | str TMP0, [CRET1] + | ins_next + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | sub TMP1, BASE, #FRAME_CONT + | ldr BASE, L->top + | mov NARGS8:RC, #24 // 3 args for func(t, k, v). + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | str TMP0, [BASE, #16] // Copy value to third argument. + | str PC, [BASE, #-24] // [cont|PC] + | sub PC, BASE, TMP1 + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | b ->vm_call_dispatch_f + | + |->vmeta_tsetr: + | sxtw CARG3, TMP1w + | str BASE, L->base + | str PC, SAVE_PC + | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) + | // Returns TValue *. + | b ->BC_TSETR_Z + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | add CARG2, BASE, RA, lsl #3 + | sub PC, PC, #4 + | add CARG3, BASE, RC, lsl #3 + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | uxtb CARG4w, INSw + | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // Returns 0/1 or TValue * (metamethod). + |3: + | cmp CRET1, #1 + | bhi ->vmeta_binop + |4: + | ldrh RBw, [PC, #2] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | sub RB, RB, #0x20000 + | csel PC, PC, RB, lo + |->cont_nop: + | ins_next + | + |->cont_ra: // RA = resultptr + | ldr INSw, [PC, #-4] + | ldr TMP0, [RA] + | decode_RA TMP1, INS + | str TMP0, [BASE, TMP1, lsl #3] + | b ->cont_nop + | + |->cont_condt: // RA = resultptr + | ldr TMP0, [RA] + | mov_true TMP1 + | cmp TMP1, TMP0 // Branch if result is true. + | b <4 + | + |->cont_condf: // RA = resultptr + | ldr TMP0, [RA] + | mov_false TMP1 + | cmp TMP0, TMP1 // Branch if result is false. + | b <4 + | + |->vmeta_equal: + | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. + | and TAB:CARG3, CARG3, #LJ_GCVMASK + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + | + |->vmeta_equal_cd: + |.if FFI + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | mov CARG2, INS + | str PC, SAVE_PC + | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) + | // Returns 0/1 or TValue * (metamethod). 
+ | b <3 + |.endif + | + |->vmeta_istype: + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | mov CARG2, RA + | mov CARG3, RC + | str PC, SAVE_PC + | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) + | b ->cont_nop + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vn: + | add CARG3, BASE, RB, lsl #3 + | add CARG4, KBASE, RC, lsl #3 + | b >1 + | + |->vmeta_arith_nv: + | add CARG4, BASE, RB, lsl #3 + | add CARG3, KBASE, RC, lsl #3 + | b >1 + | + |->vmeta_unm: + | add CARG3, BASE, RC, lsl #3 + | mov CARG4, CARG3 + | b >1 + | + |->vmeta_arith_vv: + | add CARG3, BASE, RB, lsl #3 + | add CARG4, BASE, RC, lsl #3 + |1: + | uxtb CARG5w, INSw + | add CARG2, BASE, RA, lsl #3 + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // Returns NULL (finished) or TValue * (metamethod). + | cbz CRET1, ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 + | sub TMP1, CRET1, BASE + | str PC, [CRET1, #-24] // [cont|PC] + | add PC, TMP1, #FRAME_CONT + | mov BASE, CRET1 + | mov NARGS8:RC, #16 // 2 args for func(o1, o2). + | b ->vm_call_dispatch + | + |->vmeta_len: + | add CARG2, BASE, RC, lsl #3 +#if LJ_52 + | mov TAB:RC, TAB:CARG1 // Save table (ignored for other types). +#endif + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_len // (lua_State *L, TValue *o) + | // Returns NULL (retry) or TValue * (metamethod base). +#if LJ_52 + | cbnz CRET1, ->vmeta_binop // Binop call for compatibility. + | mov TAB:CARG1, TAB:RC + | b ->BC_LEN_Z +#else + | b ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call: // Resolve and call __call metamethod. + | // RB = old base, BASE = new base, RC = nargs*8 + | mov CARG1, L + | str RB, L->base // This is the callers base! + | sub CARG2, BASE, #16 + | str PC, SAVE_PC + | add CARG3, BASE, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | ins_call + | + |->vmeta_callt: // Resolve __call for BC_CALLT. + | // BASE = old base, RA = new base, RC = nargs*8 + | mov CARG1, L + | str BASE, L->base + | sub CARG2, RA, #16 + | str PC, SAVE_PC + | add CARG3, RA, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | ldr TMP1, [RA, FRAME_FUNC] // Guaranteed to be a function here. + | ldr PC, [BASE, FRAME_PC] + | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. 
+ | and LFUNC:CARG3, TMP1, #LJ_GCVMASK + | b ->BC_CALLT2_Z + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mov CARG1, L + | str BASE, L->base + | mov CARG2, RA + | str PC, SAVE_PC + | bl extern lj_meta_for // (lua_State *L, TValue *base) + | ldr INSw, [PC, #-4] + |.if JIT + | uxtb TMP0, INS + |.endif + | decode_RA RA, INS + | decode_RD RC, INS + |.if JIT + | cmp TMP0, #BC_JFORI + | beq =>BC_JFORI + |.endif + | b =>BC_FORI + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | ldr CARG1, [BASE] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | ldp CARG1, CARG2, [BASE] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name + | .ffunc name + | ldr CARG1, [BASE] + | cmp NARGS8:RC, #8 + | ldr FARG1, [BASE] + | blo ->fff_fallback + | checknum CARG1, ->fff_fallback + |.endmacro + | + |.macro .ffunc_nn, name + | .ffunc name + | ldp CARG1, CARG2, [BASE] + | cmp NARGS8:RC, #16 + | ldp FARG1, FARG2, [BASE] + | blo ->fff_fallback + | checknum CARG1, ->fff_fallback + | checknum CARG2, ->fff_fallback + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2. + |.macro ffgccheck + | ldp CARG1, CARG2, GL->gc.total // Assumes threshold follows total. + | cmp CARG1, CARG2 + | blt >1 + | bl ->fff_gcstep + |1: + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | ldr PC, [BASE, FRAME_PC] + | mov_false TMP1 + | cmp CARG1, TMP1 + | bhs ->fff_fallback + | str CARG1, [BASE, #-16] + | sub RB, BASE, #8 + | subs RA, NARGS8:RC, #8 + | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8. + | cbz RA, ->fff_res // Done if exactly 1 argument. + |1: + | ldr CARG1, [RB, #16] + | sub RA, RA, #8 + | str CARG1, [RB], #8 + | cbnz RA, <1 + | b ->fff_res + | + |.ffunc_1 type + | mov TMP0, #~LJ_TISNUM + | asr ITYPE, CARG1, #47 + | cmn ITYPE, #~LJ_TISNUM + | csinv TMP1, TMP0, ITYPE, lo + | add TMP1, TMP1, #offsetof(GCfuncC, upvalue)/8 + | ldr CARG1, [CFUNC:CARG3, TMP1, lsl #3] + | b ->fff_restv + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | asr ITYPE, CARG1, #47 + | cmn ITYPE, #-LJ_TTAB + | ccmn ITYPE, #-LJ_TUDATA, #4, ne + | and TAB:CARG1, CARG1, #LJ_GCVMASK + | bne >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | ldr TAB:RB, TAB:CARG1->metatable + |2: + | mov CARG1, TISNIL + | ldr STR:RC, GL->gcroot[GCROOT_MMNAME+MM_metatable] + | cbz TAB:RB, ->fff_restv + | ldr TMP1w, TAB:RB->hmask + | ldr TMP2w, STR:RC->hash + | ldr NODE:CARG3, TAB:RB->node + | and TMP1w, TMP1w, TMP2w // idx = str->hash & tab->hmask + | add TMP1, TMP1, TMP1, lsl #1 + | movn CARG4, #~LJ_TSTR + | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8 + | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for. + |3: // Rearranged logic, because we expect _not_ to find the key. + | ldp CARG1, TMP0, NODE:CARG3->val + | ldr NODE:CARG3, NODE:CARG3->next + | cmp TMP0, CARG4 + | beq >5 + | cbnz NODE:CARG3, <3 + |4: + | mov CARG1, RB // Use metatable as default result. 
+ | movk CARG1, #(LJ_TTAB>>1)&0xffff, lsl #48 + | b ->fff_restv + |5: + | cmp TMP0, TISNIL + | bne ->fff_restv + | b <4 + | + |6: + | movn TMP0, #~LJ_TISNUM + | cmp ITYPE, TMP0 + | csel ITYPE, ITYPE, TMP0, hs + | sub TMP1, GL, ITYPE, lsl #3 + | ldr TAB:RB, [TMP1, #offsetof(global_State, gcroot[GCROOT_BASEMT])-8] + | b <2 + | + |.ffunc_2 setmetatable + | // Fast path: no mt for table yet and not clearing the mt. + | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback + | ldr TAB:TMP0, TAB:TMP1->metatable + | asr ITYPE, CARG2, #47 + | ldrb TMP2w, TAB:TMP1->marked + | cmn ITYPE, #-LJ_TTAB + | and TAB:CARG2, CARG2, #LJ_GCVMASK + | ccmp TAB:TMP0, #0, #0, eq + | bne ->fff_fallback + | str TAB:CARG2, TAB:TMP1->metatable + | tbz TMP2w, #2, ->fff_restv // isblack(table) + | barrierback TAB:TMP1, TMP2w, TMP0 + | b ->fff_restv + | + |.ffunc rawget + | ldr CARG2, [BASE] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | checktab CARG2, ->fff_fallback + | mov CARG1, L + | add CARG3, BASE, #8 + | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // Returns cTValue *. + | ldr CARG1, [CRET1] + | b ->fff_restv + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | ldr CARG1, [BASE] + | cmp NARGS8:RC, #8 + | bne ->fff_fallback + | checknumber CARG1, ->fff_fallback + | b ->fff_restv + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | asr ITYPE, CARG1, #47 + | cmn ITYPE, #-LJ_TSTR + | // A __tostring method in the string base metatable is ignored. + | beq ->fff_restv + | // Handle numbers inline, unless a number base metatable is present. + | ldr TMP1, GL->gcroot[GCROOT_BASEMT_NUM] + | str BASE, L->base + | cmn ITYPE, #-LJ_TISNUM + | ccmp TMP1, #0, #0, ls + | str PC, SAVE_PC // Redundant (but a defined value). + | bne ->fff_fallback + | ffgccheck + | mov CARG1, L + | mov CARG2, BASE + | bl extern lj_strfmt_number // (lua_State *L, cTValue *o) + | // Returns GCstr *. + | movn TMP1, #~LJ_TSTR + | ldr BASE, L->base + | add CARG1, CARG1, TMP1, lsl #47 + | b ->fff_restv + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc_1 next + | checktp CARG2, CARG1, LJ_TTAB, ->fff_fallback + | str TISNIL, [BASE, NARGS8:RC] // Set missing 2nd arg to nil. + | ldr PC, [BASE, FRAME_PC] + | stp BASE, BASE, L->base // Add frame since C call can throw. + | mov CARG1, L + | add CARG3, BASE, #8 + | str PC, SAVE_PC + | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Returns 0 at end of traversal. + | str TISNIL, [BASE, #-16] + | cbz CRET1, ->fff_res1 // End of traversal: return nil. + | ldp CARG1, CARG2, [BASE, #8] // Copy key and value to results. 
+ | mov RC, #(2+1)*8 + | stp CARG1, CARG2, [BASE, #-16] + | b ->fff_res + | + |.ffunc_1 pairs + | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback +#if LJ_52 + | ldr TAB:CARG2, TAB:TMP1->metatable +#endif + | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0] + | ldr PC, [BASE, FRAME_PC] +#if LJ_52 + | cbnz TAB:CARG2, ->fff_fallback +#endif + | mov RC, #(3+1)*8 + | stp CARG1, TISNIL, [BASE, #-8] + | str CFUNC:CARG4, [BASE, #-16] + | b ->fff_res + | + |.ffunc_2 ipairs_aux + | checktab CARG1, ->fff_fallback + | checkint CARG2, ->fff_fallback + | ldr TMP1w, TAB:CARG1->asize + | ldr CARG3, TAB:CARG1->array + | ldr TMP0w, TAB:CARG1->hmask + | add CARG2w, CARG2w, #1 + | cmp CARG2w, TMP1w + | ldr PC, [BASE, FRAME_PC] + | add TMP2, CARG2, TISNUM + | mov RC, #(0+1)*8 + | str TMP2, [BASE, #-16] + | bhs >2 // Not in array part? + | ldr TMP0, [CARG3, CARG2, lsl #3] + |1: + | mov TMP1, #(2+1)*8 + | cmp TMP0, TISNIL + | str TMP0, [BASE, #-8] + | csel RC, RC, TMP1, eq + | b ->fff_res + |2: // Check for empty hash part first. Otherwise call C function. + | cbz TMP0w, ->fff_res + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. + | cbz CRET1, ->fff_res + | ldr TMP0, [CRET1] + | b <1 + | + |.ffunc_1 ipairs + | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback +#if LJ_52 + | ldr TAB:CARG2, TAB:TMP1->metatable +#endif + | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0] + | ldr PC, [BASE, FRAME_PC] +#if LJ_52 + | cbnz TAB:CARG2, ->fff_fallback +#endif + | mov RC, #(3+1)*8 + | stp CARG1, TISNUM, [BASE, #-8] + | str CFUNC:CARG4, [BASE, #-16] + | b ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall + | ldrb TMP0w, GL->hookmask + | subs NARGS8:RC, NARGS8:RC, #8 + | blo ->fff_fallback + | mov RB, BASE + | add BASE, BASE, #16 + | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1 + | add PC, TMP0, #16+FRAME_PCALL + | beq ->vm_call_dispatch + |1: + | add TMP2, BASE, NARGS8:RC + |2: + | ldr TMP0, [TMP2, #-16] + | str TMP0, [TMP2, #-8]! + | cmp TMP2, BASE + | bne <2 + | b ->vm_call_dispatch + | + |.ffunc xpcall + | ldp CARG1, CARG2, [BASE] + | ldrb TMP0w, GL->hookmask + | subs NARGS8:RC, NARGS8:RC, #16 + | blo ->fff_fallback + | mov RB, BASE + | add BASE, BASE, #24 + | asr ITYPE, CARG2, #47 + | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1 + | cmn ITYPE, #-LJ_TFUNC + | add PC, TMP0, #24+FRAME_PCALL + | bne ->fff_fallback // Traceback must be a function. + | stp CARG2, CARG1, [RB] // Swap function and traceback. + | cbz NARGS8:RC, ->vm_call_dispatch + | b <1 + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | checktp CARG1, LJ_TTHREAD, ->fff_fallback + |.else + |.ffunc coroutine_wrap_aux + | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr + | and L:CARG1, CARG1, #LJ_GCVMASK + |.endif + | ldr PC, [BASE, FRAME_PC] + | str BASE, L->base + | ldp RB, CARG2, L:CARG1->base + | ldrb TMP1w, L:CARG1->status + | add TMP0, CARG2, TMP1 + | str PC, SAVE_PC + | cmp TMP0, RB + | beq ->fff_fallback + | cmp TMP1, #LUA_YIELD + | add TMP0, CARG2, #8 + | csel CARG2, CARG2, TMP0, hs + | ldr CARG4, L:CARG1->maxstack + | add CARG3, CARG2, NARGS8:RC + | ldr RB, L:CARG1->cframe + | ccmp CARG3, CARG4, #2, ls + | ccmp RB, #0, #2, ls + | bhi ->fff_fallback + |.if resume + | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC. 
+ | add BASE, BASE, #8 + | sub NARGS8:RC, NARGS8:RC, #8 + |.endif + | str CARG3, L:CARG1->top + | str BASE, L->top + | cbz NARGS8:RC, >3 + |2: // Move args to coroutine. + | ldr TMP0, [BASE, RB] + | cmp RB, NARGS8:RC + | str TMP0, [CARG2, RB] + | add RB, RB, #8 + | bne <2 + |3: + | mov CARG3, #0 + | mov L:RA, L:CARG1 + | mov CARG4, #0 + | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | // Returns thread status. + |4: + | ldp CARG3, CARG4, L:RA->base + | cmp CRET1, #LUA_YIELD + | ldr BASE, L->base + | str L, GL->cur_L + | st_vmstate ST_INTERP + | bhi >8 + | sub RC, CARG4, CARG3 + | ldr CARG1, L->maxstack + | add CARG2, BASE, RC + | cbz RC, >6 // No results? + | cmp CARG2, CARG1 + | mov RB, #0 + | bhi >9 // Need to grow stack? + | + | sub CARG4, RC, #8 + | str CARG3, L:RA->top // Clear coroutine stack. + |5: // Move results from coroutine. + | ldr TMP0, [CARG3, RB] + | cmp RB, CARG4 + | str TMP0, [BASE, RB] + | add RB, RB, #8 + | bne <5 + |6: + |.if resume + | mov_true TMP1 + | add RC, RC, #16 + |7: + | str TMP1, [BASE, #-8] // Prepend true/false to results. + | sub RA, BASE, #8 + |.else + | mov RA, BASE + | add RC, RC, #8 + |.endif + | ands CARG1, PC, #FRAME_TYPE + | str PC, SAVE_PC + | str RCw, SAVE_MULTRES + | beq ->BC_RET_Z + | b ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | ldr TMP0, [CARG4, #-8]! + | mov_false TMP1 + | mov RC, #(2+1)*8 + | str CARG4, L:RA->top // Remove error from coroutine stack. + | str TMP0, [BASE] // Copy error message. + | b <7 + |.else + | mov CARG1, L + | mov CARG2, L:RA + | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + | // Never returns. + |.endif + | + |9: // Handle stack expansion on return from yield. + | mov CARG1, L + | lsr CARG2, RC, #3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | mov CRET1, #0 + | b <4 + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | ldr TMP0, L->cframe + | add TMP1, BASE, NARGS8:RC + | mov CRET1, #LUA_YIELD + | stp BASE, TMP1, L->base + | tbz TMP0, #0, ->fff_fallback + | str xzr, L->cframe + | strb CRET1w, L->status + | b ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + |.macro math_round, func, round + | .ffunc math_ .. func + | ldr CARG1, [BASE] + | cmp NARGS8:RC, #8 + | ldr d0, [BASE] + | blo ->fff_fallback + | cmp TISNUMhi, CARG1, lsr #32 + | beq ->fff_restv + | blo ->fff_fallback + | round d0, d0 + | b ->fff_resn + |.endmacro + | + | math_round floor, frintm + | math_round ceil, frintp + | + |.ffunc_1 math_abs + | checknumber CARG1, ->fff_fallback + | and CARG1, CARG1, #U64x(7fffffff,ffffffff) + | bne ->fff_restv + | eor CARG2w, CARG1w, CARG1w, asr #31 + | movz CARG3, #0x41e0, lsl #48 // 2^31. + | subs CARG1w, CARG2w, CARG1w, asr #31 + | add CARG1, CARG1, TISNUM + | csel CARG1, CARG1, CARG3, pl + | // Fallthrough. + | + |->fff_restv: + | // CARG1 = TValue result. + | ldr PC, [BASE, FRAME_PC] + | str CARG1, [BASE, #-16] + |->fff_res1: + | // PC = return. + | mov RC, #(1+1)*8 + |->fff_res: + | // RC = (nresults+1)*8, PC = return. + | ands CARG1, PC, #FRAME_TYPE + | str RCw, SAVE_MULTRES + | sub RA, BASE, #16 + | bne ->vm_return + | ldr INSw, [PC, #-4] + | decode_RB RB, INS + |5: + | cmp RC, RB, lsl #3 // More results expected? + | blo >6 + | decode_RA TMP1, INS + | // Adjust BASE. KBASE is assumed to be set for the calling frame. 
+ | sub BASE, RA, TMP1, lsl #3 + | ins_next + | + |6: // Fill up results with nil. + | add TMP1, RA, RC + | add RC, RC, #8 + | str TISNIL, [TMP1, #-8] + | b <5 + | + |.macro math_extern, func + | .ffunc_n math_ .. func + | bl extern func + | b ->fff_resn + |.endmacro + | + |.macro math_extern2, func + | .ffunc_nn math_ .. func + | bl extern func + | b ->fff_resn + |.endmacro + | + |.ffunc_n math_sqrt + | fsqrt d0, d0 + |->fff_resn: + | ldr PC, [BASE, FRAME_PC] + | str d0, [BASE, #-16] + | b ->fff_res1 + | + |.ffunc math_log + | ldr CARG1, [BASE] + | cmp NARGS8:RC, #8 + | ldr FARG1, [BASE] + | bne ->fff_fallback // Need exactly 1 argument. + | checknum CARG1, ->fff_fallback + | bl extern log + | b ->fff_resn + | + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |.ffunc_2 math_ldexp + | ldr FARG1, [BASE] + | checknum CARG1, ->fff_fallback + | checkint CARG2, ->fff_fallback + | sxtw CARG1, CARG2w + | bl extern ldexp // (double x, int exp) + | b ->fff_resn + | + |.ffunc_n math_frexp + | add CARG1, sp, TMPDofs + | bl extern frexp + | ldr CARG2w, TMPD + | ldr PC, [BASE, FRAME_PC] + | str d0, [BASE, #-16] + | mov RC, #(2+1)*8 + | add CARG2, CARG2, TISNUM + | str CARG2, [BASE, #-8] + | b ->fff_res + | + |.ffunc_n math_modf + | sub CARG1, BASE, #16 + | ldr PC, [BASE, FRAME_PC] + | bl extern modf + | mov RC, #(2+1)*8 + | str d0, [BASE, #-8] + | b ->fff_res + | + |.macro math_minmax, name, cond, fcond + | .ffunc_1 name + | add RB, BASE, RC + | add RA, BASE, #8 + | checkint CARG1, >4 + |1: // Handle integers. + | ldr CARG2, [RA] + | cmp RA, RB + | bhs ->fff_restv + | checkint CARG2, >3 + | cmp CARG1w, CARG2w + | add RA, RA, #8 + | csel CARG1, CARG2, CARG1, cond + | b <1 + |3: // Convert intermediate result to number and continue below. + | scvtf d0, CARG1w + | blo ->fff_fallback + | ldr d1, [RA] + | b >6 + | + |4: + | ldr d0, [BASE] + | blo ->fff_fallback + |5: // Handle numbers. + | ldr CARG2, [RA] + | ldr d1, [RA] + | cmp RA, RB + | bhs ->fff_resn + | checknum CARG2, >7 + |6: + | fcmp d0, d1 + | add RA, RA, #8 + | fcsel d0, d1, d0, fcond + | b <5 + |7: // Convert integer to number and continue above. + | scvtf d1, CARG2w + | blo ->fff_fallback + | b <6 + |.endmacro + | + | math_minmax math_min, gt, hi + | math_minmax math_max, lt, lo + | + |//-- String library ----------------------------------------------------- + | + |.ffunc string_byte // Only handle the 1-arg case here. + | ldp PC, CARG1, [BASE, FRAME_PC] + | cmp NARGS8:RC, #8 + | asr ITYPE, CARG1, #47 + | ccmn ITYPE, #-LJ_TSTR, #0, eq + | and STR:CARG1, CARG1, #LJ_GCVMASK + | bne ->fff_fallback + | ldrb TMP0w, STR:CARG1[1] // Access is always ok (NUL at end). + | ldr CARG3w, STR:CARG1->len + | add TMP0, TMP0, TISNUM + | str TMP0, [BASE, #-16] + | mov RC, #(0+1)*8 + | cbz CARG3, ->fff_res + | b ->fff_res1 + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | ldp PC, CARG1, [BASE, FRAME_PC] + | cmp CARG1w, #255 + | ccmp NARGS8:RC, #8, #0, ls // Need exactly 1 argument. + | bne ->fff_fallback + | checkint CARG1, ->fff_fallback + | mov CARG3, #1 + | mov CARG2, BASE // Points to stack. Little-endian. + |->fff_newstr: + | // CARG2 = str, CARG3 = len. 
+ | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_str_new // (lua_State *L, char *str, size_t l) + |->fff_resstr: + | // Returns GCstr *. + | ldr BASE, L->base + | movn TMP1, #~LJ_TSTR + | add CARG1, CARG1, TMP1, lsl #47 + | b ->fff_restv + | + |.ffunc string_sub + | ffgccheck + | ldr CARG1, [BASE] + | ldr CARG3, [BASE, #16] + | cmp NARGS8:RC, #16 + | movn RB, #0 + | beq >1 + | blo ->fff_fallback + | checkint CARG3, ->fff_fallback + | sxtw RB, CARG3w + |1: + | ldr CARG2, [BASE, #8] + | checkstr CARG1, ->fff_fallback + | ldr TMP1w, STR:CARG1->len + | checkint CARG2, ->fff_fallback + | sxtw CARG2, CARG2w + | // CARG1 = str, TMP1 = str->len, CARG2 = start, RB = end + | add TMP2, RB, TMP1 + | cmp RB, #0 + | add TMP0, CARG2, TMP1 + | csinc RB, RB, TMP2, ge // if (end < 0) end += len+1 + | cmp CARG2, #0 + | csinc CARG2, CARG2, TMP0, ge // if (start < 0) start += len+1 + | cmp RB, #0 + | csel RB, RB, xzr, ge // if (end < 0) end = 0 + | cmp CARG2, #1 + | csinc CARG2, CARG2, xzr, ge // if (start < 1) start = 1 + | cmp RB, TMP1 + | csel RB, RB, TMP1, le // if (end > len) end = len + | add CARG1, STR:CARG1, #sizeof(GCstr)-1 + | subs CARG3, RB, CARG2 // len = end - start + | add CARG2, CARG1, CARG2 + | add CARG3, CARG3, #1 // len += 1 + | bge ->fff_newstr + | add STR:CARG1, GL, #offsetof(global_State, strempty) + | movn TMP1, #~LJ_TSTR + | add CARG1, CARG1, TMP1, lsl #47 + | b ->fff_restv + | + |.macro ffstring_op, name + | .ffunc string_ .. name + | ffgccheck + | ldr CARG2, [BASE] + | cmp NARGS8:RC, #8 + | asr ITYPE, CARG2, #47 + | ccmn ITYPE, #-LJ_TSTR, #0, hs + | and STR:CARG2, CARG2, #LJ_GCVMASK + | bne ->fff_fallback + | ldr TMP0, GL->tmpbuf.b + | add SBUF:CARG1, GL, #offsetof(global_State, tmpbuf) + | str BASE, L->base + | str PC, SAVE_PC + | str L, GL->tmpbuf.L + | str TMP0, GL->tmpbuf.p + | bl extern lj_buf_putstr_ .. name + | bl extern lj_buf_tostr + | b ->fff_resstr + |.endmacro + | + |ffstring_op reverse + |ffstring_op lower + |ffstring_op upper + | + |//-- Bit library -------------------------------------------------------- + | + |// FP number to bit conversion for soft-float. Clobbers CARG1-CARG3 + |->vm_tobit_fb: + | bls ->fff_fallback + | add CARG2, CARG1, CARG1 + | mov CARG3, #1076 + | sub CARG3, CARG3, CARG2, lsr #53 + | cmp CARG3, #53 + | bhi >1 + | and CARG2, CARG2, #U64x(001fffff,ffffffff) + | orr CARG2, CARG2, #U64x(00200000,00000000) + | cmp CARG1, #0 + | lsr CARG2, CARG2, CARG3 + | cneg CARG1w, CARG2w, mi + | br lr + |1: + | mov CARG1w, #0 + | br lr + | + |.macro .ffunc_bit, name + | .ffunc_1 bit_..name + | adr lr, >1 + | checkint CARG1, ->vm_tobit_fb + |1: + |.endmacro + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name + | mov RA, #8 + | mov TMP0w, CARG1w + | adr lr, >2 + |1: + | ldr CARG1, [BASE, RA] + | cmp RA, NARGS8:RC + | add RA, RA, #8 + | bge >9 + | checkint CARG1, ->vm_tobit_fb + |2: + | ins TMP0w, TMP0w, CARG1w + | b <1 + |.endmacro + | + |.ffunc_bit_op band, and + |.ffunc_bit_op bor, orr + |.ffunc_bit_op bxor, eor + | + |.ffunc_bit tobit + | mov TMP0w, CARG1w + |9: // Label reused by .ffunc_bit_op users. 
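+ |// Box the 32 bit result by adding the TISNUM tag (LJ_TISNUM << 47).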
+ | add CARG1, TMP0, TISNUM + | b ->fff_restv + | + |.ffunc_bit bswap + | rev TMP0w, CARG1w + | add CARG1, TMP0, TISNUM + | b ->fff_restv + | + |.ffunc_bit bnot + | mvn TMP0w, CARG1w + | add CARG1, TMP0, TISNUM + | b ->fff_restv + | + |.macro .ffunc_bit_sh, name, ins, shmod + | .ffunc bit_..name + | ldp TMP0, CARG1, [BASE] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | adr lr, >1 + | checkint CARG1, ->vm_tobit_fb + |1: + |.if shmod == 0 + | mov TMP1, CARG1 + |.else + | neg TMP1, CARG1 + |.endif + | mov CARG1, TMP0 + | adr lr, >2 + | checkint CARG1, ->vm_tobit_fb + |2: + | ins TMP0w, CARG1w, TMP1w + | add CARG1, TMP0, TISNUM + | b ->fff_restv + |.endmacro + | + |.ffunc_bit_sh lshift, lsl, 0 + |.ffunc_bit_sh rshift, lsr, 0 + |.ffunc_bit_sh arshift, asr, 0 + |.ffunc_bit_sh rol, ror, 1 + |.ffunc_bit_sh ror, ror, 0 + | + |//----------------------------------------------------------------------- + | + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RC = nargs*8 + | ldp CFUNC:CARG3, PC, [BASE, FRAME_FUNC] // Fallback may overwrite PC. + | ldr TMP2, L->maxstack + | add TMP1, BASE, NARGS8:RC + | stp BASE, TMP1, L->base + | and CFUNC:CARG3, CARG3, #LJ_GCVMASK + | add TMP1, TMP1, #8*LUA_MINSTACK + | ldr CARG3, CFUNC:CARG3->f + | str PC, SAVE_PC // Redundant (but a defined value). + | cmp TMP1, TMP2 + | mov CARG1, L + | bhi >5 // Need to grow stack. + | blr CARG3 // (lua_State *L) + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | ldr BASE, L->base + | cmp CRET1w, #0 + | lsl RC, CRET1, #3 + | sub RA, BASE, #16 + | bgt ->fff_res // Returned nresults+1? + |1: // Returned 0 or -1: retry fast path. + | ldr CARG1, L->top + | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] + | sub NARGS8:RC, CARG1, BASE + | bne ->vm_call_tail // Returned -1? + | and CFUNC:CARG3, CARG3, #LJ_GCVMASK + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | ands TMP0, PC, #FRAME_TYPE + | and TMP1, PC, #~FRAME_TYPEP + | bne >3 + | ldrb RAw, [PC, #-3] + | lsl RA, RA, #3 + | add TMP1, RA, #16 + |3: + | sub RB, BASE, TMP1 + | b ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | mov CARG2, #LUA_MINSTACK + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | cmp CARG1, CARG1 // Set zero-flag to force retry. + | b <1 + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RC = nargs*8 + | add CARG2, BASE, NARGS8:RC // Calculate L->top. + | mov RA, lr + | stp BASE, CARG2, L->base + | str PC, SAVE_PC // Redundant (but a defined value). + | mov CARG1, L + | bl extern lj_gc_step // (lua_State *L) + | ldp BASE, CARG2, L->base + | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] + | mov lr, RA // Help return address predictor. + | sub NARGS8:RC, CARG2, BASE // Calculate nargs*8. + | and CFUNC:CARG3, CARG3, #LJ_GCVMASK + | ret + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. + | NYI + | + |->vm_rethook: // Dispatch target for return hooks. + | ldrb TMP2w, GL->hookmask + | tbz TMP2w, #HOOK_ACTIVE_SHIFT, >1 // Hook already active? + |5: // Re-dispatch to static ins. + | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC] + | br TMP0 + | + |->vm_inshook: // Dispatch target for instr/line hooks. 
+ | ldrb TMP2w, GL->hookmask + | ldr TMP3w, GL->hookcount + | tbnz TMP2w, #HOOK_ACTIVE_SHIFT, <5 // Hook already active? + | tst TMP2w, #LUA_MASKLINE|LUA_MASKCOUNT + | beq <5 + | sub TMP3w, TMP3w, #1 + | str TMP3w, GL->hookcount + | cbz TMP3w, >1 + | tbz TMP2w, #LUA_HOOKLINE, <5 + |1: + | mov CARG1, L + | str BASE, L->base + | mov CARG2, PC + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |3: + | ldr BASE, L->base + |4: // Re-dispatch to static ins. + | ldr INSw, [PC, #-4] + | add TMP1, GL, INS, uxtb #3 + | decode_RA RA, INS + | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC] + | decode_RD RC, INS + | br TMP0 + | + |->cont_hook: // Continue from hook yield. + | ldr CARG1, [CARG4, #-40] + | add PC, PC, #4 + | str CARG1w, SAVE_MULTRES // Restore MULTRES for *M ins. + | b <4 + | + |->vm_hotloop: // Hot loop counter underflow. + | NYI + | + |->vm_callhook: // Dispatch target for call hooks. + | mov CARG2, PC + |.if JIT + | b >1 + |.endif + | + |->vm_hotcall: // Hot call counter underflow. + |.if JIT + | orr CARG2, PC, #1 + |1: + |.endif + | add TMP1, BASE, NARGS8:RC + | str PC, SAVE_PC + | mov CARG1, L + | sub RA, RA, BASE + | stp BASE, TMP1, L->base + | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) + | // Returns ASMFunction. + | ldp BASE, TMP1, L->base + | str xzr, SAVE_PC // Invalidate for subsequent line hook. + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | add RA, BASE, RA + | sub NARGS8:RC, TMP1, BASE + | ldr INSw, [PC, #-4] + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | br CRET1 + | + |->cont_stitch: // Trace stitching. + | NYI + | + |->vm_profhook: // Dispatch target for profiler hook. +#if LJ_HASPROFILE + | mov CARG1, L + | str BASE, L->base + | mov CARG2, PC + | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) + | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. 
+ | ldr BASE, L->base + | sub PC, PC, #4 + | b ->cont_nop +#endif + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_exit_handler: + | NYI + |->vm_exit_interp: + | NYI + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + | // int lj_vm_modi(int dividend, int divisor); + |->vm_modi: + | eor CARG4w, CARG1w, CARG2w + | cmp CARG4w, #0 + | eor CARG3w, CARG1w, CARG1w, asr #31 + | eor CARG4w, CARG2w, CARG2w, asr #31 + | sub CARG3w, CARG3w, CARG1w, asr #31 + | sub CARG4w, CARG4w, CARG2w, asr #31 + | udiv CARG1w, CARG3w, CARG4w + | msub CARG1w, CARG1w, CARG4w, CARG3w + | ccmp CARG1w, #0, #4, mi + | sub CARG3w, CARG1w, CARG4w + | csel CARG1w, CARG1w, CARG3w, eq + | eor CARG3w, CARG1w, CARG2w + | cmp CARG3w, #0 + | cneg CARG1w, CARG1w, mi + | ret + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Handler for callback functions. + |// Saveregs already performed. Callback slot number in [sp], g in r12. + |->vm_ffi_callback: + |.if FFI + |.type CTSTATE, CTState, PC + | saveregs + | ldr CTSTATE, GL:x10->ctype_state + | mov GL, x10 + | add x10, sp, # CFRAME_SPACE + | str w9, CTSTATE->cb.slot + | stp x0, x1, CTSTATE->cb.gpr[0] + | stp d0, d1, CTSTATE->cb.fpr[0] + | stp x2, x3, CTSTATE->cb.gpr[2] + | stp d2, d3, CTSTATE->cb.fpr[2] + | stp x4, x5, CTSTATE->cb.gpr[4] + | stp d4, d5, CTSTATE->cb.fpr[4] + | stp x6, x7, CTSTATE->cb.gpr[6] + | stp d6, d7, CTSTATE->cb.fpr[6] + | str x10, CTSTATE->cb.stack + | mov CARG1, CTSTATE + | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok. + | mov CARG2, sp + | bl extern lj_ccallback_enter // (CTState *cts, void *cf) + | // Returns lua_State *. + | ldp BASE, RC, L:CRET1->base + | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48 + | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16 + | movn TISNIL, #0 + | mov L, CRET1 + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | sub RC, RC, BASE + | st_vmstate ST_INTERP + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | ins_callt + |.endif + | + |->cont_ffi_callback: // Return from FFI callback. + |.if FFI + | ldr CTSTATE, GL->ctype_state + | stp BASE, CARG4, L->base + | str L, CTSTATE->L + | mov CARG1, CTSTATE + | mov CARG2, RA + | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) + | ldp x0, x1, CTSTATE->cb.gpr[0] + | ldp d0, d1, CTSTATE->cb.fpr[0] + | b ->vm_leave_unw + |.endif + | + |->vm_ffi_call: // Call C function via FFI. + | // Caveat: needs special frame unwinding, see below. + |.if FFI + | .type CCSTATE, CCallState, x19 + | stp fp, lr, [sp, #-32]! 
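+ |// 32 byte frame: fp/lr plus a save slot for x19, which is reused as CCSTATE.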
+ | add fp, sp, #0 + | str CCSTATE, [sp, #16] + | mov CCSTATE, x0 + | ldr TMP0w, CCSTATE:x0->spadj + | ldrb TMP1w, CCSTATE->nsp + | add TMP2, CCSTATE, #offsetof(CCallState, stack) + | subs TMP1, TMP1, #1 + | ldr TMP3, CCSTATE->func + | sub sp, fp, TMP0 + | bmi >2 + |1: // Copy stack slots + | ldr TMP0, [TMP2, TMP1, lsl #3] + | str TMP0, [sp, TMP1, lsl #3] + | subs TMP1, TMP1, #1 + | bpl <1 + |2: + | ldp x0, x1, CCSTATE->gpr[0] + | ldp d0, d1, CCSTATE->fpr[0] + | ldp x2, x3, CCSTATE->gpr[2] + | ldp d2, d3, CCSTATE->fpr[2] + | ldp x4, x5, CCSTATE->gpr[4] + | ldp d4, d5, CCSTATE->fpr[4] + | ldp x6, x7, CCSTATE->gpr[6] + | ldp d6, d7, CCSTATE->fpr[6] + | ldr x8, CCSTATE->retp + | blr TMP3 + | mov sp, fp + | stp x0, x1, CCSTATE->gpr[0] + | stp d0, d1, CCSTATE->fpr[0] + | stp d2, d3, CCSTATE->fpr[2] + | ldr CCSTATE, [sp, #16] + | ldp fp, lr, [sp], #32 + | ret + |.endif + |// Note: vm_ffi_call must be the last function in this object file! + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1, RC = src2, JMP with RC = target + | ldr CARG1, [BASE, RA, lsl #3] + | ldrh RBw, [PC, #2] + | ldr CARG2, [BASE, RC, lsl #3] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | sub RB, RB, #0x20000 + | checkint CARG1, >3 + | checkint CARG2, >4 + | cmp CARG1w, CARG2w + if (op == BC_ISLT) { + | csel PC, RB, PC, lt + } else if (op == BC_ISGE) { + | csel PC, RB, PC, ge + } else if (op == BC_ISLE) { + | csel PC, RB, PC, le + } else { + | csel PC, RB, PC, gt + } + |1: + | ins_next + | + |3: // RA not int. + | ldr FARG1, [BASE, RA, lsl #3] + | blo ->vmeta_comp + | ldr FARG2, [BASE, RC, lsl #3] + | cmp TISNUMhi, CARG2, lsr #32 + | bhi >5 + | bne ->vmeta_comp + | // RA number, RC int. + | scvtf FARG2, CARG2w + | b >5 + | + |4: // RA int, RC not int + | ldr FARG2, [BASE, RC, lsl #3] + | blo ->vmeta_comp + | // RA int, RC number. + | scvtf FARG1, CARG1w + | + |5: // RA number, RC number + | fcmp FARG1, FARG2 + | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + if (op == BC_ISLT) { + | csel PC, RB, PC, lo + } else if (op == BC_ISGE) { + | csel PC, RB, PC, hs + } else if (op == BC_ISLE) { + | csel PC, RB, PC, ls + } else { + | csel PC, RB, PC, hi + } + | b <1 + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | // RA = src1, RC = src2, JMP with RC = target + | ldr CARG1, [BASE, RA, lsl #3] + | add RC, BASE, RC, lsl #3 + | ldrh RBw, [PC, #2] + | ldr CARG3, [RC] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | sub RB, RB, #0x20000 + | asr ITYPE, CARG3, #47 + | cmn ITYPE, #-LJ_TISNUM + if (vk) { + | bls ->BC_ISEQN_Z + } else { + | bls ->BC_ISNEN_Z + } + | // RC is not a number. + | asr TMP0, CARG1, #47 + |.if FFI + | // Check if RC or RA is a cdata. + | cmn ITYPE, #-LJ_TCDATA + | ccmn TMP0, #-LJ_TCDATA, #4, ne + | beq ->vmeta_equal_cd + |.endif + | cmp CARG1, CARG3 + | bne >2 + | // Tag and value are equal. + if (vk) { + |->BC_ISEQV_Z: + | mov PC, RB // Perform branch. + } + |1: + | ins_next + | + |2: // Check if the tags are the same and it's a table or userdata. 
+ | cmp ITYPE, TMP0 + | ccmn ITYPE, #-LJ_TISTABUD, #2, eq + if (vk) { + | bhi <1 + } else { + | bhi ->BC_ISEQV_Z // Reuse code from opposite instruction. + } + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | and TAB:CARG2, CARG1, #LJ_GCVMASK + | ldr TAB:TMP2, TAB:CARG2->metatable + if (vk) { + | cbz TAB:TMP2, <1 // No metatable? + | ldrb TMP1w, TAB:TMP2->nomm + | mov CARG4, #0 // ne = 0 + | tbnz TMP1w, #MM_eq, <1 // 'no __eq' flag set: done. + } else { + | cbz TAB:TMP2, ->BC_ISEQV_Z // No metatable? + | ldrb TMP1w, TAB:TMP2->nomm + | mov CARG4, #1 // ne = 1. + | tbnz TMP1w, #MM_eq, ->BC_ISEQV_Z // 'no __eq' flag set: done. + } + | b ->vmeta_equal + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | // RA = src, RC = str_const (~), JMP with RC = target + | ldr CARG1, [BASE, RA, lsl #3] + | mvn RC, RC + | ldrh RBw, [PC, #2] + | ldr CARG2, [KBASE, RC, lsl #3] + | add PC, PC, #4 + | movn TMP0, #~LJ_TSTR + |.if FFI + | asr ITYPE, CARG1, #47 + |.endif + | add RB, PC, RB, lsl #2 + | add CARG2, CARG2, TMP0, lsl #47 + | sub RB, RB, #0x20000 + |.if FFI + | cmn ITYPE, #-LJ_TCDATA + | beq ->vmeta_equal_cd + |.endif + | cmp CARG1, CARG2 + if (vk) { + | csel PC, RB, PC, eq + } else { + | csel PC, RB, PC, ne + } + | ins_next + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | // RA = src, RC = num_const (~), JMP with RC = target + | ldr CARG1, [BASE, RA, lsl #3] + | add RC, KBASE, RC, lsl #3 + | ldrh RBw, [PC, #2] + | ldr CARG3, [RC] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | sub RB, RB, #0x20000 + if (vk) { + |->BC_ISEQN_Z: + } else { + |->BC_ISNEN_Z: + } + | checkint CARG1, >4 + | checkint CARG3, >6 + | cmp CARG1w, CARG3w + |1: + if (vk) { + | csel PC, RB, PC, eq + |2: + } else { + |2: + | csel PC, RB, PC, ne + } + |3: + | ins_next + | + |4: // RA not int. + |.if FFI + | blo >7 + |.else + | blo <2 + |.endif + | ldr FARG1, [BASE, RA, lsl #3] + | ldr FARG2, [RC] + | cmp TISNUMhi, CARG3, lsr #32 + | bne >5 + | // RA number, RC int. + | scvtf FARG2, CARG3w + |5: + | // RA number, RC number. 
+ | fcmp FARG1, FARG2 + | b <1 + | + |6: // RA int, RC number + | ldr FARG2, [RC] + | scvtf FARG1, CARG1w + | fcmp FARG1, FARG2 + | b <1 + | + |.if FFI + |7: + | asr ITYPE, CARG1, #47 + | cmn ITYPE, #-LJ_TCDATA + | bne <2 + | b ->vmeta_equal_cd + |.endif + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | // RA = src, RC = primitive_type (~), JMP with RC = target + | ldr TMP0, [BASE, RA, lsl #3] + | ldrh RBw, [PC, #2] + | add PC, PC, #4 + | add RC, RC, #1 + | add RB, PC, RB, lsl #2 + |.if FFI + | asr ITYPE, TMP0, #47 + | cmn ITYPE, #-LJ_TCDATA + | beq ->vmeta_equal_cd + | cmn RC, ITYPE + |.else + | cmn RC, TMP0, asr #47 + |.endif + | sub RB, RB, #0x20000 + if (vk) { + | csel PC, RB, PC, eq + } else { + | csel PC, RB, PC, ne + } + | ins_next + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | // RA = dst or unused, RC = src, JMP with RC = target + | ldrh RBw, [PC, #2] + | ldr TMP0, [BASE, RC, lsl #3] + | add PC, PC, #4 + | mov_false TMP1 + | add RB, PC, RB, lsl #2 + | cmp TMP0, TMP1 + | sub RB, RB, #0x20000 + if (op == BC_ISTC || op == BC_IST) { + if (op == BC_ISTC) { + | csel RA, RA, RC, lo + } + | csel PC, RB, PC, lo + } else { + if (op == BC_ISFC) { + | csel RA, RA, RC, hs + } + | csel PC, RB, PC, hs + } + if (op == BC_ISTC || op == BC_ISFC) { + | str TMP0, [BASE, RA, lsl #3] + } + | ins_next + break; + + case BC_ISTYPE: + | // RA = src, RC = -type + | ldr TMP0, [BASE, RA, lsl #3] + | cmn RC, TMP0, asr #47 + | bne ->vmeta_istype + | ins_next + break; + case BC_ISNUM: + | // RA = src, RC = -(TISNUM-1) + | ldr TMP0, [BASE, RA] + | checknum TMP0, ->vmeta_istype + | ins_next + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | // RA = dst, RC = src + | ldr TMP0, [BASE, RC, lsl #3] + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_NOT: + | // RA = dst, RC = src + | ldr TMP0, [BASE, RC, lsl #3] + | mov_false TMP1 + | mov_true TMP2 + | cmp TMP0, TMP1 + | csel TMP0, TMP1, TMP2, lo + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_UNM: + | // RA = dst, RC = src + | ldr TMP0, [BASE, RC, lsl #3] + | asr ITYPE, TMP0, #47 + | cmn ITYPE, #-LJ_TISNUM + | bhi ->vmeta_unm + | eor TMP0, TMP0, #U64x(80000000,00000000) + | bne >5 + | negs TMP0w, TMP0w + | movz CARG3, #0x41e0, lsl #48 // 2^31. + | add TMP0, TMP0, TISNUM + | csel TMP0, TMP0, CARG3, vc + |5: + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_LEN: + | // RA = dst, RC = src + | ldr CARG1, [BASE, RC, lsl #3] + | asr ITYPE, CARG1, #47 + | cmn ITYPE, #-LJ_TSTR + | and CARG1, CARG1, #LJ_GCVMASK + | bne >2 + | ldr CARG1w, STR:CARG1->len + |1: + | add CARG1, CARG1, TISNUM + | str CARG1, [BASE, RA, lsl #3] + | ins_next + | + |2: + | cmn ITYPE, #-LJ_TTAB + | bne ->vmeta_len +#if LJ_52 + | ldr TAB:CARG2, TAB:CARG1->metatable + | cbnz TAB:CARG2, >9 + |3: +#endif + |->BC_LEN_Z: + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). + | b <1 + | +#if LJ_52 + |9: + | ldrb TMP1w, TAB:CARG2->nomm + | tbnz TMP1w, #MM_len, <3 // 'no __len' flag set: done. 
+ | b ->vmeta_len +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithcheck_int, target + | checkint CARG1, target + | checkint CARG2, target + |.endmacro + | + |.macro ins_arithcheck_num, target + | checknum CARG1, target + | checknum CARG2, target + |.endmacro + | + |.macro ins_arithcheck_nzdiv, target + | cbz CARG2w, target + |.endmacro + | + |.macro ins_arithhead + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||if (vk == 1) { + | and RC, RC, #255 + | decode_RB RB, INS + ||} else { + | decode_RB RB, INS + | and RC, RC, #255 + ||} + |.endmacro + | + |.macro ins_arithload, reg1, reg2 + | // RA = dst, RB = src1, RC = src2 | num_const + ||switch (vk) { + ||case 0: + | ldr reg1, [BASE, RB, lsl #3] + | ldr reg2, [KBASE, RC, lsl #3] + || break; + ||case 1: + | ldr reg1, [KBASE, RC, lsl #3] + | ldr reg2, [BASE, RB, lsl #3] + || break; + ||default: + | ldr reg1, [BASE, RB, lsl #3] + | ldr reg2, [BASE, RC, lsl #3] + || break; + ||} + |.endmacro + | + |.macro ins_arithfallback, ins + ||switch (vk) { + ||case 0: + | ins ->vmeta_arith_vn + || break; + ||case 1: + | ins ->vmeta_arith_nv + || break; + ||default: + | ins ->vmeta_arith_vv + || break; + ||} + |.endmacro + | + |.macro ins_arithmod, res, reg1, reg2 + | fdiv d2, reg1, reg2 + | frintm d2, d2 + | fmsub res, d2, reg2, reg1 + |.endmacro + | + |.macro ins_arithdn, intins, fpins + | ins_arithhead + | ins_arithload CARG1, CARG2 + | ins_arithcheck_int >5 + |.if "intins" == "smull" + | smull CARG1, CARG1w, CARG2w + | cmp CARG1, CARG1, sxtw + | mov CARG1w, CARG1w + | ins_arithfallback bne + |.elif "intins" == "ins_arithmodi" + | ins_arithfallback ins_arithcheck_nzdiv + | bl ->vm_modi + |.else + | intins CARG1w, CARG1w, CARG2w + | ins_arithfallback bvs + |.endif + | add CARG1, CARG1, TISNUM + | str CARG1, [BASE, RA, lsl #3] + |4: + | ins_next + | + |5: // FP variant. + | ins_arithload FARG1, FARG2 + | ins_arithfallback ins_arithcheck_num + | fpins FARG1, FARG1, FARG2 + | str FARG1, [BASE, RA, lsl #3] + | b <4 + |.endmacro + | + |.macro ins_arithfp, fpins + | ins_arithhead + | ins_arithload CARG1, CARG2 + | ins_arithload FARG1, FARG2 + | ins_arithfallback ins_arithcheck_num + |.if "fpins" == "fpow" + | bl extern pow + |.else + | fpins FARG1, FARG1, FARG2 + |.endif + | str FARG1, [BASE, RA, lsl #3] + | ins_next + |.endmacro + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arithdn adds, fadd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arithdn subs, fsub + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arithdn smull, fmul + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arithfp fdiv + break; + case BC_MODVN: case BC_MODNV: case BC_MODVV: + | ins_arithdn ins_arithmodi, ins_arithmod + break; + case BC_POW: + | // NYI: (partial) integer arithmetic. + | ins_arithfp fpow + break; + + case BC_CAT: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = dst, RB = src_start, RC = src_end + | str BASE, L->base + | sub CARG3, RC, RB + | add CARG2, BASE, RC, lsl #3 + |->BC_CAT_Z: + | // RA = dst, CARG2 = top-1, CARG3 = left + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // Returns NULL (finished) or TValue * (metamethod). + | ldrb RBw, [PC, #-1] + | ldr BASE, L->base + | cbnz CRET1, ->vmeta_binop + | ldr TMP0, [BASE, RB, lsl #3] + | str TMP0, [BASE, RA, lsl #3] // Copy result to RA. 
+ | ins_next + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | // RA = dst, RC = str_const (~) + | mvn RC, RC + | ldr TMP0, [KBASE, RC, lsl #3] + | movn TMP1, #~LJ_TSTR + | add TMP0, TMP0, TMP1, lsl #47 + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_KCDATA: + |.if FFI + | // RA = dst, RC = cdata_const (~) + | mvn RC, RC + | ldr TMP0, [KBASE, RC, lsl #3] + | movn TMP1, #~LJ_TCDATA + | add TMP0, TMP0, TMP1, lsl #47 + | str TMP0, [BASE, RA, lsl #3] + | ins_next + |.endif + break; + case BC_KSHORT: + | // RA = dst, RC = int16_literal + | sxth RCw, RCw + | add TMP0, RC, TISNUM + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_KNUM: + | // RA = dst, RC = num_const + | ldr TMP0, [KBASE, RC, lsl #3] + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_KPRI: + | // RA = dst, RC = primitive_type (~) + | mvn TMP0, RC, lsl #47 + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_KNIL: + | // RA = base, RC = end + | add RA, BASE, RA, lsl #3 + | add RC, BASE, RC, lsl #3 + | str TISNIL, [RA], #8 + |1: + | cmp RA, RC + | str TISNIL, [RA], #8 + | blt <1 + | ins_next_ + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | // RA = dst, RC = uvnum + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | add RC, RC, #offsetof(GCfuncL, uvptr)/8 + | and LFUNC:CARG2, CARG2, #LJ_GCVMASK + | ldr UPVAL:CARG2, [LFUNC:CARG2, RC, lsl #3] + | ldr CARG2, UPVAL:CARG2->v + | ldr TMP0, [CARG2] + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + case BC_USETV: + | // RA = uvnum, RC = src + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | add RA, RA, #offsetof(GCfuncL, uvptr)/8 + | and LFUNC:CARG2, CARG2, #LJ_GCVMASK + | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3] + | ldr CARG3, [BASE, RC, lsl #3] + | ldr CARG2, UPVAL:CARG1->v + | ldrb TMP2w, UPVAL:CARG1->marked + | ldrb TMP0w, UPVAL:CARG1->closed + | asr ITYPE, CARG3, #47 + | str CARG3, [CARG2] + | add ITYPE, ITYPE, #-LJ_TISGCV + | tst TMP2w, #LJ_GC_BLACK // isblack(uv) + | ccmp TMP0w, #0, #4, ne // && uv->closed + | ccmn ITYPE, #-(LJ_TNUMX - LJ_TISGCV), #0, ne // && tvisgcv(v) + | bhi >2 + |1: + | ins_next + | + |2: // Check if new value is white. + | and GCOBJ:CARG3, CARG3, #LJ_GCVMASK + | ldrb TMP1w, GCOBJ:CARG3->gch.marked + | tst TMP1w, #LJ_GC_WHITES // iswhite(str) + | beq <1 + | // Crossed a write barrier. Move the barrier forward. + | mov CARG1, GL + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | b <1 + break; + case BC_USETS: + | // RA = uvnum, RC = str_const (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | add RA, RA, #offsetof(GCfuncL, uvptr)/8 + | mvn RC, RC + | and LFUNC:CARG2, CARG2, #LJ_GCVMASK + | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3] + | ldr STR:CARG3, [KBASE, RC, lsl #3] + | movn TMP0, #~LJ_TSTR + | ldr CARG2, UPVAL:CARG1->v + | ldrb TMP2w, UPVAL:CARG1->marked + | add TMP0, STR:CARG3, TMP0, lsl #47 + | ldrb TMP1w, STR:CARG3->marked + | str TMP0, [CARG2] + | tbnz TMP2w, #2, >2 // isblack(uv) + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | ldrb TMP0w, UPVAL:CARG1->closed + | tst TMP1w, #LJ_GC_WHITES // iswhite(str) + | ccmp TMP0w, #0, #0, ne + | beq <1 + | // Crossed a write barrier. Move the barrier forward. 
+ | mov CARG1, GL + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | b <1 + break; + case BC_USETN: + | // RA = uvnum, RC = num_const + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | add RA, RA, #offsetof(GCfuncL, uvptr)/8 + | and LFUNC:CARG2, CARG2, #LJ_GCVMASK + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3] + | ldr TMP0, [KBASE, RC, lsl #3] + | ldr CARG2, UPVAL:CARG2->v + | str TMP0, [CARG2] + | ins_next + break; + case BC_USETP: + | // RA = uvnum, RC = primitive_type (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | add RA, RA, #offsetof(GCfuncL, uvptr)/8 + | and LFUNC:CARG2, CARG2, #LJ_GCVMASK + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3] + | mvn TMP0, RC, lsl #47 + | ldr CARG2, UPVAL:CARG2->v + | str TMP0, [CARG2] + | ins_next + break; + + case BC_UCLO: + | // RA = level, RC = target + | ldr CARG3, L->openupval + | add RC, PC, RC, lsl #2 + | str BASE, L->base + | sub PC, RC, #0x20000 + | cbz CARG3, >1 + | mov CARG1, L + | add CARG2, BASE, RA, lsl #3 + | bl extern lj_func_closeuv // (lua_State *L, TValue *level) + | ldr BASE, L->base + |1: + | ins_next + break; + + case BC_FNEW: + | // RA = dst, RC = proto_const (~) (holding function prototype) + | mvn RC, RC + | str BASE, L->base + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | str PC, SAVE_PC + | ldr CARG2, [KBASE, RC, lsl #3] + | mov CARG1, L + | and LFUNC:CARG3, CARG3, #LJ_GCVMASK + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | bl extern lj_func_newL_gc + | // Returns GCfuncL *. + | ldr BASE, L->base + | movn TMP0, #~LJ_TFUNC + | add CRET1, CRET1, TMP0, lsl #47 + | str CRET1, [BASE, RA, lsl #3] + | ins_next + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + | // RA = dst, RC = (hbits|asize) | tab_const (~) + | ldp CARG3, CARG4, GL->gc.total // Assumes threshold follows total. + | str BASE, L->base + | str PC, SAVE_PC + | mov CARG1, L + | cmp CARG3, CARG4 + | bhs >5 + |1: + if (op == BC_TNEW) { + | and CARG2, RC, #0x7ff + | lsr CARG3, RC, #11 + | cmp CARG2, #0x7ff + | mov TMP0, #0x801 + | csel CARG2, CARG2, TMP0, ne + | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Returns GCtab *. + } else { + | mvn RC, RC + | ldr CARG2, [KBASE, RC, lsl #3] + | bl extern lj_tab_dup // (lua_State *L, Table *kt) + | // Returns GCtab *. + } + | ldr BASE, L->base + | movk CRET1, #(LJ_TTAB>>1)&0xffff, lsl #48 + | str CRET1, [BASE, RA, lsl #3] + | ins_next + | + |5: + | bl extern lj_gc_step_fixtop // (lua_State *L) + | mov CARG1, L + | b <1 + break; + + case BC_GGET: + | // RA = dst, RC = str_const (~) + case BC_GSET: + | // RA = dst, RC = str_const (~) + | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] + | mvn RC, RC + | and LFUNC:CARG1, CARG1, #LJ_GCVMASK + | ldr TAB:CARG2, LFUNC:CARG1->env + | ldr STR:RC, [KBASE, RC, lsl #3] + if (op == BC_GGET) { + | b ->BC_TGETS_Z + } else { + | b ->BC_TSETS_Z + } + break; + + case BC_TGETV: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = dst, RB = table, RC = key + | ldr CARG2, [BASE, RB, lsl #3] + | ldr TMP1, [BASE, RC, lsl #3] + | checktab CARG2, ->vmeta_tgetv + | checkint TMP1, >9 // Integer key? + | ldr CARG3, TAB:CARG2->array + | ldr CARG1w, TAB:CARG2->asize + | add CARG3, CARG3, TMP1, uxtw #3 + | cmp TMP1w, CARG1w // In array part? + | bhs ->vmeta_tgetv + | ldr TMP0, [CARG3] + | cmp TMP0, TISNIL + | beq >5 + |1: + | str TMP0, [BASE, RA, lsl #3] + | ins_next + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, <1 // No metatable: done. 
+ | ldrb TMP1w, TAB:CARG1->nomm + | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done. + | b ->vmeta_tgetv + | + |9: + | asr ITYPE, TMP1, #47 + | cmn ITYPE, #-LJ_TSTR // String key? + | bne ->vmeta_tgetv + | and STR:RC, TMP1, #LJ_GCVMASK + | b ->BC_TGETS_Z + break; + case BC_TGETS: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = dst, RB = table, RC = str_const (~) + | ldr CARG2, [BASE, RB, lsl #3] + | mvn RC, RC + | ldr STR:RC, [KBASE, RC, lsl #3] + | checktab CARG2, ->vmeta_tgets1 + |->BC_TGETS_Z: + | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = dst + | ldr TMP1w, TAB:CARG2->hmask + | ldr TMP2w, STR:RC->hash + | ldr NODE:CARG3, TAB:CARG2->node + | and TMP1w, TMP1w, TMP2w // idx = str->hash & tab->hmask + | add TMP1, TMP1, TMP1, lsl #1 + | movn CARG4, #~LJ_TSTR + | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8 + | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for. + |1: + | ldp TMP0, CARG1, NODE:CARG3->val + | ldr NODE:CARG3, NODE:CARG3->next + | cmp CARG1, CARG4 + | bne >4 + | cmp TMP0, TISNIL + | beq >5 + |3: + | str TMP0, [BASE, RA, lsl #3] + | ins_next + | + |4: // Follow hash chain. + | cbnz NODE:CARG3, <1 + | // End of hash chain: key not found, nil result. + | mov TMP0, TISNIL + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, <3 // No metatable: done. + | ldrb TMP1w, TAB:CARG1->nomm + | tbnz TMP1w, #MM_index, <3 // 'no __index' flag set: done. + | b ->vmeta_tgets + break; + case BC_TGETB: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = dst, RB = table, RC = index + | ldr CARG2, [BASE, RB, lsl #3] + | checktab CARG2, ->vmeta_tgetb + | ldr CARG3, TAB:CARG2->array + | ldr CARG1w, TAB:CARG2->asize + | add CARG3, CARG3, RC, lsl #3 + | cmp RCw, CARG1w // In array part? + | bhs ->vmeta_tgetb + | ldr TMP0, [CARG3] + | cmp TMP0, TISNIL + | beq >5 + |1: + | str TMP0, [BASE, RA, lsl #3] + | ins_next + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, <1 // No metatable: done. + | ldrb TMP1w, TAB:CARG1->nomm + | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done. + | b ->vmeta_tgetb + break; + case BC_TGETR: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = dst, RB = table, RC = key + | ldr CARG1, [BASE, RB, lsl #3] + | ldr TMP1, [BASE, RC, lsl #3] + | and TAB:CARG1, CARG1, #LJ_GCVMASK + | ldr CARG3, TAB:CARG1->array + | ldr TMP2w, TAB:CARG1->asize + | add CARG3, CARG3, TMP1w, uxtw #3 + | cmp TMP1w, TMP2w // In array part? + | bhs ->vmeta_tgetr + | ldr TMP0, [CARG3] + |->BC_TGETR_Z: + | str TMP0, [BASE, RA, lsl #3] + | ins_next + break; + + case BC_TSETV: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = src, RB = table, RC = key + | ldr CARG2, [BASE, RB, lsl #3] + | ldr TMP1, [BASE, RC, lsl #3] + | checktab CARG2, ->vmeta_tsetv + | checkint TMP1, >9 // Integer key? + | ldr CARG3, TAB:CARG2->array + | ldr CARG1w, TAB:CARG2->asize + | add CARG3, CARG3, TMP1, uxtw #3 + | cmp TMP1w, CARG1w // In array part? + | bhs ->vmeta_tsetv + | ldr TMP1, [CARG3] + | ldr TMP0, [BASE, RA, lsl #3] + | ldrb TMP2w, TAB:CARG2->marked + | cmp TMP1, TISNIL // Previous value is nil? + | beq >5 + |1: + | str TMP0, [CARG3] + | tbnz TMP2w, #2, >7 // isblack(table) + |2: + | ins_next + | + |5: // Check for __newindex if previous value is nil. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, <1 // No metatable: done. + | ldrb TMP1w, TAB:CARG1->nomm + | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done. 
+ | b ->vmeta_tsetv + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG2, TMP2w, TMP1 + | b <2 + | + |9: + | asr ITYPE, TMP1, #47 + | cmn ITYPE, #-LJ_TSTR // String key? + | bne ->vmeta_tsetv + | and STR:RC, TMP1, #LJ_GCVMASK + | b ->BC_TSETS_Z + break; + case BC_TSETS: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = dst, RB = table, RC = str_const (~) + | ldr CARG2, [BASE, RB, lsl #3] + | mvn RC, RC + | ldr STR:RC, [KBASE, RC, lsl #3] + | checktab CARG2, ->vmeta_tsets1 + |->BC_TSETS_Z: + | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = src + | ldr TMP1w, TAB:CARG2->hmask + | ldr TMP2w, STR:RC->hash + | ldr NODE:CARG3, TAB:CARG2->node + | and TMP1w, TMP1w, TMP2w // idx = str->hash & tab->hmask + | add TMP1, TMP1, TMP1, lsl #1 + | movn CARG4, #~LJ_TSTR + | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8 + | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for. + | strb wzr, TAB:CARG2->nomm // Clear metamethod cache. + |1: + | ldp TMP1, CARG1, NODE:CARG3->val + | ldr NODE:TMP3, NODE:CARG3->next + | ldrb TMP2w, TAB:CARG2->marked + | cmp CARG1, CARG4 + | bne >5 + | ldr TMP0, [BASE, RA, lsl #3] + | cmp TMP1, TISNIL // Previous value is nil? + | beq >4 + |2: + | str TMP0, NODE:CARG3->val + | tbnz TMP2w, #2, >7 // isblack(table) + |3: + | ins_next + | + |4: // Check for __newindex if previous value is nil. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, <2 // No metatable: done. + | ldrb TMP1w, TAB:CARG1->nomm + | tbnz TMP1w, #MM_newindex, <2 // 'no __newindex' flag set: done. + | b ->vmeta_tsets + | + |5: // Follow hash chain. + | mov NODE:CARG3, NODE:TMP3 + | cbnz NODE:TMP3, <1 + | // End of hash chain: key not found, add a new one. + | + | // But check for __newindex first. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, >6 // No metatable: continue. + | ldrb TMP1w, TAB:CARG1->nomm + | // 'no __newindex' flag NOT set: check. + | tbz TMP1w, #MM_newindex, ->vmeta_tsets + |6: + | movn TMP1, #~LJ_TSTR + | str PC, SAVE_PC + | add TMP0, STR:RC, TMP1, lsl #47 + | str BASE, L->base + | mov CARG1, L + | str TMP0, TMPD + | add CARG3, sp, TMPDofs + | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) + | // Returns TValue *. + | ldr BASE, L->base + | ldr TMP0, [BASE, RA, lsl #3] + | str TMP0, [CRET1] + | b <3 // No 2nd write barrier needed. + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG2, TMP2w, TMP1 + | b <3 + break; + case BC_TSETB: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = src, RB = table, RC = index + | ldr CARG2, [BASE, RB, lsl #3] + | checktab CARG2, ->vmeta_tsetb + | ldr CARG3, TAB:CARG2->array + | ldr CARG1w, TAB:CARG2->asize + | add CARG3, CARG3, RC, lsl #3 + | cmp RCw, CARG1w // In array part? + | bhs ->vmeta_tsetb + | ldr TMP1, [CARG3] + | ldr TMP0, [BASE, RA, lsl #3] + | ldrb TMP2w, TAB:CARG2->marked + | cmp TMP1, TISNIL // Previous value is nil? + | beq >5 + |1: + | str TMP0, [CARG3] + | tbnz TMP2w, #2, >7 // isblack(table) + |2: + | ins_next + | + |5: // Check for __newindex if previous value is nil. + | ldr TAB:CARG1, TAB:CARG2->metatable + | cbz TAB:CARG1, <1 // No metatable: done. + | ldrb TMP1w, TAB:CARG1->nomm + | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done. + | b ->vmeta_tsetb + | + |7: // Possible table write barrier for the value. Skip valiswhite check. 
+ | barrierback TAB:CARG2, TMP2w, TMP1 + | b <2 + break; + case BC_TSETR: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = src, RB = table, RC = key + | ldr CARG2, [BASE, RB, lsl #3] + | ldr TMP1, [BASE, RC, lsl #3] + | and TAB:CARG2, CARG2, #LJ_GCVMASK + | ldr CARG1, TAB:CARG2->array + | ldrb TMP2w, TAB:CARG2->marked + | ldr CARG4w, TAB:CARG2->asize + | add CARG1, CARG1, TMP1, uxtw #3 + | tbnz TMP2w, #2, >7 // isblack(table) + |2: + | cmp TMP1w, CARG4w // In array part? + | bhs ->vmeta_tsetr + |->BC_TSETR_Z: + | ldr TMP0, [BASE, RA, lsl #3] + | str TMP0, [CARG1] + | ins_next + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG2, TMP2w, TMP0 + | b <2 + break; + + case BC_TSETM: + | // RA = base (table at base-1), RC = num_const (start index) + | add RA, BASE, RA, lsl #3 + |1: + | ldr RBw, SAVE_MULTRES + | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table. + | ldr TMP1, [KBASE, RC, lsl #3] // Integer constant is in lo-word. + | sub RB, RB, #8 + | cbz RB, >4 // Nothing to copy? + | and TAB:CARG2, CARG2, #LJ_GCVMASK + | ldr CARG1w, TAB:CARG2->asize + | add CARG3w, TMP1w, RBw, lsr #3 + | ldr CARG4, TAB:CARG2->array + | cmp CARG3, CARG1 + | add RB, RA, RB + | bhi >5 + | add TMP1, CARG4, TMP1w, uxtw #3 + | ldrb TMP2w, TAB:CARG2->marked + |3: // Copy result slots to table. + | ldr TMP0, [RA], #8 + | str TMP0, [TMP1], #8 + | cmp RA, RB + | blo <3 + | tbnz TMP2w, #2, >7 // isblack(table) + |4: + | ins_next + | + |5: // Need to resize array part. + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) + | // Must not reallocate the stack. + | b <1 + | + |7: // Possible table write barrier for any value. Skip valiswhite check. + | barrierback TAB:CARG2, TMP2w, TMP1 + | b <4 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALLM: + | // RA = base, (RB = nresults+1,) RC = extra_nargs + | ldr TMP0w, SAVE_MULTRES + | decode_RC8RD NARGS8:RC, RC + | add NARGS8:RC, NARGS8:RC, TMP0 + | b ->BC_CALL_Z + break; + case BC_CALL: + | decode_RC8RD NARGS8:RC, RC + | // RA = base, (RB = nresults+1,) RC = (nargs+1)*8 + |->BC_CALL_Z: + | mov RB, BASE // Save old BASE for vmeta_call. + | add BASE, BASE, RA, lsl #3 + | ldr CARG3, [BASE] + | sub NARGS8:RC, NARGS8:RC, #8 + | add BASE, BASE, #16 + | checkfunc CARG3, ->vmeta_call + | ins_call + break; + + case BC_CALLMT: + | // RA = base, (RB = 0,) RC = extra_nargs + | ldr TMP0w, SAVE_MULTRES + | add NARGS8:RC, TMP0, RC, lsl #3 + | b ->BC_CALLT1_Z + break; + case BC_CALLT: + | lsl NARGS8:RC, RC, #3 + | // RA = base, (RB = 0,) RC = (nargs+1)*8 + |->BC_CALLT1_Z: + | add RA, BASE, RA, lsl #3 + | ldr TMP1, [RA] + | sub NARGS8:RC, NARGS8:RC, #8 + | add RA, RA, #16 + | checktp CARG3, TMP1, LJ_TFUNC, ->vmeta_callt + | ldr PC, [BASE, FRAME_PC] + |->BC_CALLT2_Z: + | mov RB, #0 + | ldrb TMP2w, LFUNC:CARG3->ffid + | tst PC, #FRAME_TYPE + | bne >7 + |1: + | str TMP1, [BASE, FRAME_FUNC] // Copy function down, but keep PC. + | cbz NARGS8:RC, >3 + |2: + | ldr TMP0, [RA, RB] + | add TMP1, RB, #8 + | cmp TMP1, NARGS8:RC + | str TMP0, [BASE, RB] + | mov RB, TMP1 + | bne <2 + |3: + | cmp TMP2, #1 // (> FF_C) Calling a fast function? + | bhi >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function with a Lua frame below. 
+ | ldrb RAw, [PC, #-3] + | sub CARG1, BASE, RA, lsl #3 + | ldr LFUNC:CARG1, [CARG1, #-32] + | and LFUNC:CARG1, CARG1, #LJ_GCVMASK + | ldr CARG1, LFUNC:CARG1->pc + | ldr KBASE, [CARG1, #PC2PROTO(k)] + | b <4 + | + |7: // Tailcall from a vararg function. + | eor PC, PC, #FRAME_VARG + | tst PC, #FRAME_TYPEP // Vararg frame below? + | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below. + | bne <1 + | sub BASE, BASE, PC + | ldr PC, [BASE, FRAME_PC] + | tst PC, #FRAME_TYPE + | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below. + | b <1 + break; + + case BC_ITERC: + | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) + | add RA, BASE, RA, lsl #3 + | ldr CARG3, [RA, #-24] + | mov RB, BASE // Save old BASE for vmeta_call. + | ldp CARG1, CARG2, [RA, #-16] + | add BASE, RA, #16 + | mov NARGS8:RC, #16 // Iterators get 2 arguments. + | str CARG3, [RA] // Copy callable. + | stp CARG1, CARG2, [RA, #16] // Copy state and control var. + | checkfunc CARG3, ->vmeta_call + | ins_call + break; + + case BC_ITERN: + | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) + |.if JIT + | // NYI: add hotloop, record BC_ITERN. + |.endif + | add RA, BASE, RA, lsl #3 + | ldr TAB:RB, [RA, #-16] + | ldrh TMP3w, [PC, #2] + | ldr CARG1w, [RA, #-8] // Get index from control var. + | add PC, PC, #4 + | add TMP3, PC, TMP3, lsl #2 + | and TAB:RB, RB, #LJ_GCVMASK + | sub TMP3, TMP3, #0x20000 + | ldr TMP1w, TAB:RB->asize + | ldr CARG2, TAB:RB->array + |1: // Traverse array part. + | subs RC, CARG1, TMP1 + | add CARG3, CARG2, CARG1, lsl #3 + | bhs >5 // Index points after array part? + | ldr TMP0, [CARG3] + | cmp TMP0, TISNIL + | cinc CARG1, CARG1, eq // Skip holes in array part. + | beq <1 + | add CARG1, CARG1, TISNUM + | stp CARG1, TMP0, [RA] + | add CARG1, CARG1, #1 + |3: + | str CARG1w, [RA, #-8] // Update control var. + | mov PC, TMP3 + |4: + | ins_next + | + |5: // Traverse hash part. + | ldr TMP2w, TAB:RB->hmask + | ldr NODE:RB, TAB:RB->node + |6: + | add CARG1, RC, RC, lsl #1 + | cmp RC, TMP2 // End of iteration? Branch to ITERN+1. + | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8 + | bhi <4 + | ldp TMP0, CARG1, NODE:CARG3->val + | cmp TMP0, TISNIL + | add RC, RC, #1 + | beq <6 // Skip holes in hash part. + | stp CARG1, TMP0, [RA] + | add CARG1, RC, TMP1 + | b <3 + break; + + case BC_ISNEXT: + | // RA = base, RC = target (points to ITERN) + | add RA, BASE, RA, lsl #3 + | ldr CFUNC:CARG1, [RA, #-24] + | add RC, PC, RC, lsl #2 + | ldp TAB:CARG3, CARG4, [RA, #-16] + | sub RC, RC, #0x20000 + | checkfunc CFUNC:CARG1, >5 + | asr TMP0, TAB:CARG3, #47 + | ldrb TMP1w, CFUNC:CARG1->ffid + | cmn TMP0, #-LJ_TTAB + | ccmp CARG4, TISNIL, #0, eq + | ccmp TMP1w, #FF_next_N, #0, eq + | bne >5 + | mov TMP0w, #0xfffe7fff + | lsl TMP0, TMP0, #32 + | str TMP0, [RA, #-8] // Initialize control var. + |1: + | mov PC, RC + | ins_next + | + |5: // Despecialize bytecode if any of the checks fail. + | mov TMP0, #BC_JMP + | mov TMP1, #BC_ITERC + | strb TMP0w, [PC, #-4] + | strb TMP1w, [RC] + | b <1 + break; + + case BC_VARG: + | decode_RB RB, INS + | and RC, RC, #255 + | // RA = base, RB = (nresults+1), RC = numparams + | ldr TMP1, [BASE, FRAME_PC] + | add RC, BASE, RC, lsl #3 + | add RA, BASE, RA, lsl #3 + | add RC, RC, #FRAME_VARG + | add TMP2, RA, RB, lsl #3 + | sub RC, RC, TMP1 // RC = vbase + | // Note: RC may now be even _above_ BASE if nargs was < numparams. + | sub TMP3, BASE, #16 // TMP3 = vtop + | cbz RB, >5 + | sub TMP2, TMP2, #16 + |1: // Copy vararg slots to destination slots. 
+ | cmp RC, TMP3 + | ldr TMP0, [RC], #8 + | csel TMP0, TMP0, TISNIL, lo + | cmp RA, TMP2 + | str TMP0, [RA], #8 + | blo <1 + |2: + | ins_next + | + |5: // Copy all varargs. + | ldr TMP0, L->maxstack + | subs TMP2, TMP3, RC + | csel RB, xzr, TMP2, le // MULTRES = (max(vtop-vbase,0)+1)*8 + | add RB, RB, #8 + | add TMP1, RA, TMP2 + | str RBw, SAVE_MULTRES + | ble <2 // Nothing to copy. + | cmp TMP1, TMP0 + | bhi >7 + |6: + | ldr TMP0, [RC], #8 + | str TMP0, [RA], #8 + | cmp RC, TMP3 + | blo <6 + | b <2 + | + |7: // Grow stack for varargs. + | lsr CARG2, TMP2, #3 + | stp BASE, RA, L->base + | mov CARG1, L + | sub RC, RC, BASE // Need delta, because BASE may change. + | str PC, SAVE_PC + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldp BASE, RA, L->base + | add RC, BASE, RC + | sub TMP3, BASE, #16 + | b <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | // RA = results, RC = extra results + | ldr TMP0w, SAVE_MULTRES + | ldr PC, [BASE, FRAME_PC] + | add RA, BASE, RA, lsl #3 + | add RC, TMP0, RC, lsl #3 + | b ->BC_RETM_Z + break; + + case BC_RET: + | // RA = results, RC = nresults+1 + | ldr PC, [BASE, FRAME_PC] + | lsl RC, RC, #3 + | add RA, BASE, RA, lsl #3 + |->BC_RETM_Z: + | str RCw, SAVE_MULTRES + |1: + | ands CARG1, PC, #FRAME_TYPE + | eor CARG2, PC, #FRAME_VARG + | bne ->BC_RETV2_Z + | + |->BC_RET_Z: + | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return + | ldr INSw, [PC, #-4] + | subs TMP1, RC, #8 + | sub CARG3, BASE, #16 + | beq >3 + |2: + | ldr TMP0, [RA], #8 + | add BASE, BASE, #8 + | sub TMP1, TMP1, #8 + | str TMP0, [BASE, #-24] + | cbnz TMP1, <2 + |3: + | decode_RA RA, INS + | sub CARG4, CARG3, RA, lsl #3 + | decode_RB RB, INS + | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC] + |5: + | cmp RC, RB, lsl #3 // More results expected? + | blo >6 + | and LFUNC:CARG1, CARG1, #LJ_GCVMASK + | mov BASE, CARG4 + | ldr CARG2, LFUNC:CARG1->pc + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | ins_next + | + |6: // Fill up results with nil. + | add BASE, BASE, #8 + | add RC, RC, #8 + | str TISNIL, [BASE, #-24] + | b <5 + | + |->BC_RETV1_Z: // Non-standard return case. + | add RA, BASE, RA, lsl #3 + |->BC_RETV2_Z: + | tst CARG2, #FRAME_TYPEP + | bne ->vm_return + | // Return from vararg function: relocate BASE down. + | sub BASE, BASE, CARG2 + | ldr PC, [BASE, FRAME_PC] + | b <1 + break; + + case BC_RET0: case BC_RET1: + | // RA = results, RC = nresults+1 + | ldr PC, [BASE, FRAME_PC] + | lsl RC, RC, #3 + | str RCw, SAVE_MULTRES + | ands CARG1, PC, #FRAME_TYPE + | eor CARG2, PC, #FRAME_VARG + | bne ->BC_RETV1_Z + | ldr INSw, [PC, #-4] + if (op == BC_RET1) { + | ldr TMP0, [BASE, RA, lsl #3] + } + | sub CARG4, BASE, #16 + | decode_RA RA, INS + | sub BASE, CARG4, RA, lsl #3 + if (op == BC_RET1) { + | str TMP0, [CARG4], #8 + } + | decode_RB RB, INS + | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] + |5: + | cmp RC, RB, lsl #3 + | blo >6 + | and LFUNC:CARG1, CARG1, #LJ_GCVMASK + | ldr CARG2, LFUNC:CARG1->pc + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | ins_next + | + |6: // Fill up results with nil. + | add RC, RC, #8 + | str TISNIL, [CARG4], #8 + | b <5 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4] + |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12] + |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20] + |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28] + + case BC_FORL: + |.if JIT + | hotloop + |.endif + | // Fall through. 
Assumes BC_IFORL follows. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + | // RA = base, RC = target (after end of loop or start of loop) + vk = (op == BC_IFORL || op == BC_JFORL); + | add RA, BASE, RA, lsl #3 + | ldp CARG1, CARG2, FOR_IDX // CARG1 = IDX, CARG2 = STOP + | ldr CARG3, FOR_STEP // CARG3 = STEP + if (op != BC_JFORL) { + | add RC, PC, RC, lsl #2 + | sub RC, RC, #0x20000 + } + | checkint CARG1, >5 + if (!vk) { + | checkint CARG2, ->vmeta_for + | checkint CARG3, ->vmeta_for + | tbnz CARG3w, #31, >4 + | cmp CARG1w, CARG2w + } else { + | adds CARG1w, CARG1w, CARG3w + | bvs >2 + | add TMP0, CARG1, TISNUM + | tbnz CARG3w, #31, >4 + | cmp CARG1w, CARG2w + } + |1: + if (op == BC_FORI) { + | csel PC, RC, PC, gt + } else if (op == BC_JFORI) { + | ldrh RCw, [RC, #-2] + } else if (op == BC_IFORL) { + | csel PC, RC, PC, le + } + if (vk) { + | str TMP0, FOR_IDX + | str TMP0, FOR_EXT + } else { + | str CARG1, FOR_EXT + } + if (op == BC_JFORI || op == BC_JFORL) { + | ble =>BC_JLOOP + } + |2: + | ins_next + | + |4: // Invert check for negative step. + | cmp CARG2w, CARG1w + | b <1 + | + |5: // FP loop. + | ldp d0, d1, FOR_IDX + | blo ->vmeta_for + if (!vk) { + | checknum CARG2, ->vmeta_for + | checknum CARG3, ->vmeta_for + | str d0, FOR_EXT + } else { + | ldr d2, FOR_STEP + | fadd d0, d0, d2 + } + | tbnz CARG3, #63, >7 + | fcmp d0, d1 + |6: + if (vk) { + | str d0, FOR_IDX + | str d0, FOR_EXT + } + if (op == BC_FORI) { + | csel PC, RC, PC, hi + } else if (op == BC_JFORI) { + | ldrh RCw, [RC, #-2] + | bls =>BC_JLOOP + } else if (op == BC_IFORL) { + | csel PC, RC, PC, ls + } else { + | bls =>BC_JLOOP + } + | b <2 + | + |7: // Invert check for negative step. + | fcmp d1, d0 + | b <6 + break; + + case BC_ITERL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IITERL follows. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | // RA = base, RC = target + | ldr CARG1, [BASE, RA, lsl #3] + | add TMP1, BASE, RA, lsl #3 + | cmp CARG1, TISNIL + | beq >1 // Stop if iterator returned nil. + if (op == BC_JITERL) { + | str CARG1, [TMP1, #-8] + | b =>BC_JLOOP + } else { + | add TMP0, PC, RC, lsl #2 // Otherwise save control var + branch. + | sub PC, TMP0, #0x20000 + | str CARG1, [TMP1, #-8] + } + |1: + | ins_next + break; + + case BC_LOOP: + | // RA = base, RC = target (loop extent) + | // Note: RA/RC is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_ILOOP follows. + break; + + case BC_ILOOP: + | // RA = base, RC = target (loop extent) + | ins_next + break; + + case BC_JLOOP: + |.if JIT + | NYI + |.endif + break; + + case BC_JMP: + | // RA = base (only used by trace recorder), RC = target + | add RC, PC, RC, lsl #2 + | sub PC, RC, #0x20000 + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: + |.if JIT + | hotcall + |.endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. 
+ break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 + | ldr CARG1, L->maxstack + | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)] + | ldr KBASE, [PC, #-4+PC2PROTO(k)] + | cmp RA, CARG1 + | bhi ->vm_growstack_l + |2: + | cmp NARGS8:RC, TMP1, lsl #3 // Check for missing parameters. + | blo >3 + if (op == BC_JFUNCF) { + | decode_RD RC, INS + | b =>BC_JLOOP + } else { + | ins_next + } + | + |3: // Clear missing parameters. + | str TISNIL, [BASE, NARGS8:RC] + | add NARGS8:RC, NARGS8:RC, #8 + | b <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | NYI // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 + | ldr CARG1, L->maxstack + | add TMP2, BASE, RC + | add RA, RA, RC + | add TMP0, RC, #16+FRAME_VARG + | str LFUNC:CARG3, [TMP2], #8 // Store (untagged) copy of LFUNC. + | ldr KBASE, [PC, #-4+PC2PROTO(k)] + | cmp RA, CARG1 + | str TMP0, [TMP2], #8 // Store delta + FRAME_VARG. + | bhs ->vm_growstack_l + | sub RC, TMP2, #16 + | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)] + | mov RA, BASE + | mov BASE, TMP2 + | cbz TMP1, >2 + |1: + | cmp RA, RC // Less args than parameters? + | bhs >3 + | ldr TMP0, [RA] + | sub TMP1, TMP1, #1 + | str TISNIL, [RA], #8 // Clear old fixarg slot (help the GC). + | str TMP0, [TMP2], #8 + | cbnz TMP1, <1 + |2: + | ins_next + | + |3: + | sub TMP1, TMP1, #1 + | str TISNIL, [TMP2], #8 + | cbz TMP1, <2 + | b <3 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8 + if (op == BC_FUNCC) { + | ldr CARG4, CFUNC:CARG3->f + } else { + | ldr CARG4, GL->wrapf + } + | add CARG2, RA, NARGS8:RC + | ldr CARG1, L->maxstack + | add RC, BASE, NARGS8:RC + | cmp CARG2, CARG1 + | stp BASE, RC, L->base + if (op == BC_FUNCCW) { + | ldr CARG2, CFUNC:CARG3->f + } + | mv_vmstate TMP0w, C + | mov CARG1, L + | bhi ->vm_growstack_c // Need to grow stack. + | st_vmstate TMP0w + | blr CARG4 // (lua_State *L [, lua_CFunction f]) + | // Returns nresults. + | ldp BASE, TMP1, L->base + | str L, GL->cur_L + | sbfiz RC, CRET1, #3, #32 + | st_vmstate ST_INTERP + | ldr PC, [BASE, FRAME_PC] + | sub RA, TMP1, RC // RA = L->top - nresults*8 + | b ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. */ +static void emit_asm_debug(BuildCtx *ctx) +{ + int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); + int i, cf = CFRAME_SIZE >> 3; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -8\n" + "\t.byte 30\n" /* Return address is in lr. 
*/ + "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */ + "\t.align 3\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.quad .Lbegin\n" + "\t.quad %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x9d\n\t.uleb128 %d\n" /* offset fp */ + "\t.byte 0x9e\n\t.uleb128 %d\n", /* offset lr */ + fcofs, CFRAME_SIZE, cf, cf-1); + for (i = 19; i <= 28; i++) /* offset x19-x28 */ + fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, cf-i+17); + for (i = 8; i <= 15; i++) /* offset d8-d15 */ + fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n", + 64+i, cf-i-4); + fprintf(ctx->fp, + "\t.align 3\n" + ".LEFDE0:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .Lframe0\n" + "\t.quad lj_vm_ffi_call\n" + "\t.quad %d\n" + "\t.byte 0xe\n\t.uleb128 32\n" /* def_cfa_offset */ + "\t.byte 0x9d\n\t.uleb128 4\n" /* offset fp */ + "\t.byte 0x9e\n\t.uleb128 3\n" /* offset lr */ + "\t.byte 0x93\n\t.uleb128 2\n" /* offset x19 */ + "\t.align 3\n" + ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); +#endif + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",%%progbits\n"); + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -8\n" + "\t.byte 30\n" /* Return address is in lr. */ + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */ + "\t.align 3\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE2:\n" + "\t.long .LEFDE2-.LASFDE2\n" + ".LASFDE2:\n" + "\t.long .LASFDE2-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x9d\n\t.uleb128 %d\n" /* offset fp */ + "\t.byte 0x9e\n\t.uleb128 %d\n", /* offset lr */ + fcofs, CFRAME_SIZE, cf, cf-1); + for (i = 19; i <= 28; i++) /* offset x19-x28 */ + fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, cf-i+17); + for (i = 8; i <= 15; i++) /* offset d8-d15 */ + fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n", + 64+i, cf-i-4); + fprintf(ctx->fp, + "\t.align 3\n" + ".LEFDE2:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".Lframe2:\n" + "\t.long .LECIE2-.LSCIE2\n" + ".LSCIE2:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -8\n" + "\t.byte 30\n" /* Return address is in lr. 
*/ + "\t.uleb128 1\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */ + "\t.align 3\n" + ".LECIE2:\n\n"); + fprintf(ctx->fp, + ".LSFDE3:\n" + "\t.long .LEFDE3-.LASFDE3\n" + ".LASFDE3:\n" + "\t.long .LASFDE3-.Lframe2\n" + "\t.long lj_vm_ffi_call-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 32\n" /* def_cfa_offset */ + "\t.byte 0x9d\n\t.uleb128 4\n" /* offset fp */ + "\t.byte 0x9e\n\t.uleb128 3\n" /* offset lr */ + "\t.byte 0x93\n\t.uleb128 2\n" /* offset x19 */ + "\t.align 3\n" + ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); +#endif + break; + default: + break; + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/vm_mips.dasc b/Build/source/libs/luajit/LuaJIT-src/src/vm_mips.dasc new file mode 100644 index 00000000000..7cfdf4b18b5 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/vm_mips.dasc @@ -0,0 +1,4355 @@ +|// Low-level VM code for MIPS CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +| +|.arch mips +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|// Note: The ragged indentation of the instructions is intentional. +|// The starting columns indicate data dependencies. +| +|//----------------------------------------------------------------------- +| +|// Fixed register assignments for the interpreter. +|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra +| +|// The following must be C callee-save (but BASE is often refetched). +|.define BASE, r16 // Base of current Lua stack frame. +|.define KBASE, r17 // Constants of current Lua function. +|.define PC, r18 // Next PC. +|.define DISPATCH, r19 // Opcode dispatch table. +|.define LREG, r20 // Register holding lua_State (also in SAVE_L). +|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8. +|// NYI: r22 currently unused. +| +|.define JGL, r30 // On-trace: global_State + 32768. +| +|// Constants for type-comparisons, stores and conversions. C callee-save. +|.define TISNIL, r30 +|.define TOBIT, f30 // 2^52 + 2^51. +| +|// The following temporaries are not saved across C calls, except for RA. +|.define RA, r23 // Callee-save. +|.define RB, r8 +|.define RC, r9 +|.define RD, r10 +|.define INS, r11 +| +|.define AT, r1 // Assembler temporary. +|.define TMP0, r12 +|.define TMP1, r13 +|.define TMP2, r14 +|.define TMP3, r15 +| +|// Calling conventions. +|.define CFUNCADDR, r25 +|.define CARG1, r4 +|.define CARG2, r5 +|.define CARG3, r6 +|.define CARG4, r7 +| +|.define CRET1, r2 +|.define CRET2, r3 +| +|.define FARG1, f12 +|.define FARG2, f14 +| +|.define FRET1, f0 +|.define FRET2, f2 +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|.define CFRAME_SPACE, 112 // Delta for sp. +| +|.define SAVE_ERRF, 124(sp) // 32 bit C frame info. +|.define SAVE_NRES, 120(sp) +|.define SAVE_CFRAME, 116(sp) +|.define SAVE_L, 112(sp) +|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter. +|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves. +|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves. +|.define SAVE_PC, 20(sp) +|.define ARG5, 16(sp) +|.define CSAVE_4, 12(sp) +|.define CSAVE_3, 8(sp) +|.define CSAVE_2, 4(sp) +|.define CSAVE_1, 0(sp) +|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by callee. 
+| +|.define ARG5_OFS, 16 +|.define SAVE_MULTRES, ARG5 +| +|.macro saveregs +| addiu sp, sp, -CFRAME_SPACE +| sw ra, SAVE_GPR_+9*4(sp) +| sw r30, SAVE_GPR_+8*4(sp) +| sdc1 f30, SAVE_FPR_+5*8(sp) +| sw r23, SAVE_GPR_+7*4(sp) +| sw r22, SAVE_GPR_+6*4(sp) +| sdc1 f28, SAVE_FPR_+4*8(sp) +| sw r21, SAVE_GPR_+5*4(sp) +| sw r20, SAVE_GPR_+4*4(sp) +| sdc1 f26, SAVE_FPR_+3*8(sp) +| sw r19, SAVE_GPR_+3*4(sp) +| sw r18, SAVE_GPR_+2*4(sp) +| sdc1 f24, SAVE_FPR_+2*8(sp) +| sw r17, SAVE_GPR_+1*4(sp) +| sw r16, SAVE_GPR_+0*4(sp) +| sdc1 f22, SAVE_FPR_+1*8(sp) +| sdc1 f20, SAVE_FPR_+0*8(sp) +|.endmacro +| +|.macro restoreregs_ret +| lw ra, SAVE_GPR_+9*4(sp) +| lw r30, SAVE_GPR_+8*4(sp) +| ldc1 f30, SAVE_FPR_+5*8(sp) +| lw r23, SAVE_GPR_+7*4(sp) +| lw r22, SAVE_GPR_+6*4(sp) +| ldc1 f28, SAVE_FPR_+4*8(sp) +| lw r21, SAVE_GPR_+5*4(sp) +| lw r20, SAVE_GPR_+4*4(sp) +| ldc1 f26, SAVE_FPR_+3*8(sp) +| lw r19, SAVE_GPR_+3*4(sp) +| lw r18, SAVE_GPR_+2*4(sp) +| ldc1 f24, SAVE_FPR_+2*8(sp) +| lw r17, SAVE_GPR_+1*4(sp) +| lw r16, SAVE_GPR_+0*4(sp) +| ldc1 f22, SAVE_FPR_+1*8(sp) +| ldc1 f20, SAVE_FPR_+0*8(sp) +| jr ra +| addiu sp, sp, CFRAME_SPACE +|.endmacro +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State, LREG +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS8, int +|.type TRACE, GCtrace +|.type SBUF, SBuf +| +|//----------------------------------------------------------------------- +| +|// Trap for not-yet-implemented parts. +|.macro NYI; .long 0xf0f0f0f0; .endmacro +| +|// Macros to mark delay slots. +|.macro ., a; a; .endmacro +|.macro ., a,b; a,b; .endmacro +|.macro ., a,b,c; a,b,c; .endmacro +| +|//----------------------------------------------------------------------- +| +|// Endian-specific defines. +|.define FRAME_PC, LJ_ENDIAN_SELECT(-4,-8) +|.define FRAME_FUNC, LJ_ENDIAN_SELECT(-8,-4) +|.define HI, LJ_ENDIAN_SELECT(4,0) +|.define LO, LJ_ENDIAN_SELECT(0,4) +|.define OFS_RD, LJ_ENDIAN_SELECT(2,0) +|.define OFS_RA, LJ_ENDIAN_SELECT(1,2) +|.define OFS_OP, LJ_ENDIAN_SELECT(0,3) +| +|// Instruction decode. +|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro +|.macro decode_OP4a, dst, ins; andi dst, ins, 0xff; .endmacro +|.macro decode_OP4b, dst; sll dst, dst, 2; .endmacro +|.macro decode_RC4a, dst, ins; srl dst, ins, 14; .endmacro +|.macro decode_RC4b, dst; andi dst, dst, 0x3fc; .endmacro +|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro +|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro +|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro +|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro +|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro +|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro +|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro +|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro +| +|// Instruction fetch. +|.macro ins_NEXT1 +| lw INS, 0(PC) +| addiu PC, PC, 4 +|.endmacro +|// Instruction decode+dispatch. +|.macro ins_NEXT2 +| decode_OP4a TMP1, INS +| decode_OP4b TMP1 +| addu TMP0, DISPATCH, TMP1 +| decode_RD8a RD, INS +| lw AT, 0(TMP0) +| decode_RA8a RA, INS +| decode_RD8b RD +| jr AT +| decode_RA8b RA +|.endmacro +|.macro ins_NEXT +| ins_NEXT1 +| ins_NEXT2 +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. 
+| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +| .define ins_next1, ins_NEXT1 +| .define ins_next2, ins_NEXT2 +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| .macro ins_next +| b ->ins_next +| .endmacro +| .macro ins_next1 +| .endmacro +| .macro ins_next2 +| b ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC +| lw PC, LFUNC:RB->pc +| lw INS, 0(PC) +| addiu PC, PC, 4 +| decode_OP4a TMP1, INS +| decode_RA8a RA, INS +| decode_OP4b TMP1 +| decode_RA8b RA +| addu TMP0, DISPATCH, TMP1 +| lw TMP0, 0(TMP0) +| jr TMP0 +| addu RA, RA, BASE +|.endmacro +| +|.macro ins_call +| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC +| sw PC, FRAME_PC(BASE) +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|.macro branch_RD +| srl TMP0, RD, 1 +| lui AT, (-(BCBIAS_J*4 >> 16) & 65535) +| addu TMP0, TMP0, AT +| addu PC, PC, TMP0 +|.endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch)) +#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|.macro load_got, func +| lw CFUNCADDR, DISPATCH_GOT(func)(DISPATCH) +|.endmacro +|// Much faster. Sadly, there's no easy way to force the required code layout. +|// .macro call_intern, func; bal extern func; .endmacro +|.macro call_intern, func; jalr CFUNCADDR; .endmacro +|.macro call_extern; jalr CFUNCADDR; .endmacro +|.macro jmp_extern; jr CFUNCADDR; .endmacro +| +|.macro hotcheck, delta, target +| srl TMP1, PC, 1 +| andi TMP1, TMP1, 126 +| addu TMP1, TMP1, DISPATCH +| lhu TMP2, GG_DISP2HOT(TMP1) +| addiu TMP2, TMP2, -delta +| bltz TMP2, target +|. sh TMP2, GG_DISP2HOT(TMP1) +|.endmacro +| +|.macro hotloop +| hotcheck HOTCOUNT_LOOP, ->vm_hotloop +|.endmacro +| +|.macro hotcall +| hotcheck HOTCOUNT_CALL, ->vm_hotcall +|.endmacro +| +|// Set current VM state. Uses TMP0. +|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro +|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro +| +|// Move table write barrier back. Overwrites mark and tmp. +|.macro barrierback, tab, mark, tmp, target +| lw tmp, DISPATCH_GL(gc.grayagain)(DISPATCH) +| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab) +| sw tab, DISPATCH_GL(gc.grayagain)(DISPATCH) +| sb mark, tab->marked +| b target +|. sw tmp, tab->gclist +|.endmacro +| +|//----------------------------------------------------------------------- + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | // See vm_return. Also: TMP2 = previous base. + | andi AT, PC, FRAME_P + | beqz AT, ->cont_dispatch + |. 
li TMP1, LJ_TTRUE + | + | // Return from pcall or xpcall fast func. + | lw PC, FRAME_PC(TMP2) // Fetch PC of previous frame. + | move BASE, TMP2 // Restore caller base. + | // Prepending may overwrite the pcall frame, so do it at the end. + | sw TMP1, FRAME_PC(RA) // Prepend true to results. + | addiu RA, RA, -8 + | + |->vm_returnc: + | addiu RD, RD, 8 // RD = (nresults+1)*8. + | andi TMP0, PC, FRAME_TYPE + | beqz RD, ->vm_unwind_c_eh + |. li CRET1, LUA_YIELD + | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua. + |. move MULTRES, RD + | + |->vm_return: + | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return + | // TMP0 = PC & FRAME_TYPE + | li TMP2, -8 + | xori AT, TMP0, FRAME_C + | and TMP2, PC, TMP2 + | bnez AT, ->vm_returnp + | subu TMP2, BASE, TMP2 // TMP2 = previous base. + | + | addiu TMP1, RD, -8 + | sw TMP2, L->base + | li_vmstate C + | lw TMP2, SAVE_NRES + | addiu BASE, BASE, -8 + | st_vmstate + | beqz TMP1, >2 + |. sll TMP2, TMP2, 3 + |1: + | addiu TMP1, TMP1, -8 + | ldc1 f0, 0(RA) + | addiu RA, RA, 8 + | sdc1 f0, 0(BASE) + | bnez TMP1, <1 + |. addiu BASE, BASE, 8 + | + |2: + | bne TMP2, RD, >6 + |3: + |. sw BASE, L->top // Store new top. + | + |->vm_leave_cp: + | lw TMP0, SAVE_CFRAME // Restore previous C frame. + | move CRET1, r0 // Ok return status for vm_pcall. + | sw TMP0, L->cframe + | + |->vm_leave_unw: + | restoreregs_ret + | + |6: + | lw TMP1, L->maxstack + | slt AT, TMP2, RD + | bnez AT, >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + |. slt AT, BASE, TMP1 + | beqz AT, >8 + |. nop + | sw TISNIL, HI(BASE) + | addiu RD, RD, 8 + | b <2 + |. addiu BASE, BASE, 8 + | + |7: // Less results wanted. + | subu TMP0, RD, TMP2 + | subu TMP0, BASE, TMP0 // Either keep top or shrink it. + | b <3 + |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case? + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | load_got lj_state_growstack + | move MULTRES, RD + | srl CARG2, TMP2, 3 + | call_intern lj_state_growstack // (lua_State *L, int n) + |. move CARG1, L + | lw TMP2, SAVE_NRES + | lw BASE, L->top // Need the (realloced) L->top in BASE. + | move RD, MULTRES + | b <2 + |. sll TMP2, TMP2, 3 + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + | move sp, CARG1 + | move CRET1, CARG2 + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | lw L, SAVE_L + | li TMP0, ~LJ_VMST_C + | lw GL:TMP1, L->glref + | b ->vm_leave_unw + |. sw TMP0, GL:TMP1->vmstate + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + | li AT, -4 + | and sp, CARG1, AT + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | lw L, SAVE_L + | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | li TISNIL, LJ_TNIL + | lw BASE, L->base + | lw DISPATCH, L->glref // Setup pointer to dispatch table. + | mtc1 TMP3, TOBIT + | li TMP1, LJ_TFALSE + | li_vmstate INTERP + | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame. + | cvt.d.s TOBIT, TOBIT + | addiu RA, BASE, -8 // Results start at BASE-8. + | addiu DISPATCH, DISPATCH, GG_G2DISP + | sw TMP1, HI(RA) // Prepend false to error message. + | st_vmstate + | b ->vm_returnc + |. li RD, 16 // 2 results: false + error message. 
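(Editorial aside, not part of the patch.) The unwind and entry paths above rebuild TOBIT with "lui TMP3, 0x59c0", "mtc1 TMP3, TOBIT", "cvt.d.s TOBIT, TOBIT", i.e. they widen the single-precision constant 2^52+2^51 to a double, as the "// TOBIT = 2^52 + 2^51 (float)" comments say. The standalone C sketch below only illustrates that bias trick -- adding 2^52+2^51 to a double in int32 range parks the two's-complement integer in the low 32 bits of the IEEE-754 pattern, giving a cheap number-to-bits conversion; tobit() is a made-up helper name, not a LuaJIT function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int32_t tobit(double d)                /* hypothetical helper, illustration only */
{
  double biased = d + 6755399441055744.0;     /* 2^52 + 2^51 */
  uint64_t bits;
  memcpy(&bits, &biased, sizeof(bits));       /* same bit pattern, integer view */
  return (int32_t)(uint32_t)bits;             /* low mantissa word = int32(d) mod 2^32,
                                                 relying on the usual two's-complement wrap */
}

int main(void)
{
  union { uint32_t u; float f; } c = { 0x59c00000u };   /* the lui/mtc1 constant */
  printf("%.1f\n", (double)c.f);              /* 6755399441055744.0 = 2^52 + 2^51 */
  printf("%d %d %d\n", tobit(42.0), tobit(-7.0), tobit(3.7));  /* 42 -7 4 */
  return 0;
}

Keeping the constant resident in a callee-save FPR (f30) is what the "Constants for type-comparisons, stores and conversions" register assignment earlier in this file is about: the bias add stays on the FPU and only the low word ever needs to be moved to an integer register.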
+ | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | b >2 + |. li CARG2, LUA_MINSTACK + | + |->vm_growstack_l: // Grow stack for Lua function. + | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC + | addu RC, BASE, RC + | subu RA, RA, BASE + | sw BASE, L->base + | addiu PC, PC, 4 // Must point after first instruction. + | sw RC, L->top + | srl CARG2, RA, 3 + |2: + | // L->base = new base, L->top = top + | load_got lj_state_growstack + | sw PC, SAVE_PC + | call_intern lj_state_growstack // (lua_State *L, int n) + |. move CARG1, L + | lw BASE, L->base + | lw RC, L->top + | lw LFUNC:RB, FRAME_FUNC(BASE) + | subu RC, RC, BASE + | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | move L, CARG1 + | lw DISPATCH, L->glref // Setup pointer to dispatch table. + | move BASE, CARG2 + | lbu TMP1, L->status + | sw L, SAVE_L + | li PC, FRAME_CP + | addiu TMP0, sp, CFRAME_RESUME + | addiu DISPATCH, DISPATCH, GG_G2DISP + | sw r0, SAVE_NRES + | sw r0, SAVE_ERRF + | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | sw r0, SAVE_CFRAME + | beqz TMP1, >3 + |. sw TMP0, L->cframe + | + | // Resume after yield (like a return). + | sw L, DISPATCH_GL(cur_L)(DISPATCH) + | move RA, BASE + | lw BASE, L->base + | lw TMP1, L->top + | lw PC, FRAME_PC(BASE) + | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | subu RD, TMP1, BASE + | mtc1 TMP3, TOBIT + | sb r0, L->status + | cvt.d.s TOBIT, TOBIT + | li_vmstate INTERP + | addiu RD, RD, 8 + | st_vmstate + | move MULTRES, RD + | andi TMP0, PC, FRAME_TYPE + | beqz TMP0, ->BC_RET_Z + |. li TISNIL, LJ_TNIL + | b ->vm_return + |. nop + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | sw CARG4, SAVE_ERRF + | b >1 + |. li PC, FRAME_CP + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | li PC, FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + | lw TMP1, L:CARG1->cframe + | move L, CARG1 + | sw CARG3, SAVE_NRES + | lw DISPATCH, L->glref // Setup pointer to dispatch table. + | sw CARG1, SAVE_L + | move BASE, CARG2 + | addiu DISPATCH, DISPATCH, GG_G2DISP + | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | sw TMP1, SAVE_CFRAME + | sw sp, L->cframe // Add our C frame to cframe chain. + | + |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). + | sw L, DISPATCH_GL(cur_L)(DISPATCH) + | lw TMP2, L->base // TMP2 = old base (used in vmeta_call). + | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 
+ | lw TMP1, L->top + | mtc1 TMP3, TOBIT + | addu PC, PC, BASE + | subu NARGS8:RC, TMP1, BASE + | subu PC, PC, TMP2 // PC = frame delta + frame type + | cvt.d.s TOBIT, TOBIT + | li_vmstate INTERP + | li TISNIL, LJ_TNIL + | st_vmstate + | + |->vm_call_dispatch: + | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC + | lw TMP0, FRAME_PC(BASE) + | li AT, LJ_TFUNC + | bne TMP0, AT, ->vmeta_call + |. lw LFUNC:RB, FRAME_FUNC(BASE) + | + |->vm_call_dispatch_f: + | ins_call + | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | move L, CARG1 + | lw TMP0, L:CARG1->stack + | sw CARG1, SAVE_L + | lw TMP1, L->top + | lw DISPATCH, L->glref // Setup pointer to dispatch table. + | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). + | lw TMP1, L->cframe + | addiu DISPATCH, DISPATCH, GG_G2DISP + | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. + | sw r0, SAVE_ERRF // No error function. + | sw TMP1, SAVE_CFRAME + | sw sp, L->cframe // Add our C frame to cframe chain. + | sw L, DISPATCH_GL(cur_L)(DISPATCH) + | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud) + |. move CFUNCADDR, CARG4 + | move BASE, CRET1 + | bnez CRET1, <3 // Else continue with the call. + |. li PC, FRAME_CP + | b ->vm_leave_cp // No base? Just remove C frame. + |. nop + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the + |// stack, so BASE doesn't need to be reloaded across these calls. + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8 + | lw TMP0, -16+LO(BASE) // Continuation. + | move RB, BASE + | move BASE, TMP2 // Restore caller BASE. + | lw LFUNC:TMP1, FRAME_FUNC(TMP2) + |.if FFI + | sltiu AT, TMP0, 2 + |.endif + | lw PC, -16+HI(RB) // Restore PC from [cont|PC]. + | addu TMP2, RA, RD + | lw TMP1, LFUNC:TMP1->pc + |.if FFI + | bnez AT, >1 + |.endif + |. sw TISNIL, -8+HI(TMP2) // Ensure one valid arg. + | // BASE = base, RA = resultptr, RB = meta base + | jr TMP0 // Jump to continuation. + |. lw KBASE, PC2PROTO(k)(TMP1) + | + |.if FFI + |1: + | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback. + | // cont = 0: tailcall from C function. + |. addiu TMP1, RB, -16 + | b ->vm_call_tail + |. subu RC, TMP1, BASE + |.endif + | + |->cont_cat: // RA = resultptr, RB = meta base + | lw INS, -4(PC) + | addiu CARG2, RB, -16 + | ldc1 f0, 0(RA) + | decode_RB8a MULTRES, INS + | decode_RA8a RA, INS + | decode_RB8b MULTRES + | decode_RA8b RA + | addu TMP1, BASE, MULTRES + | sw BASE, L->base + | subu CARG3, CARG2, TMP1 + | bne TMP1, CARG2, ->BC_CAT_Z + |. sdc1 f0, 0(CARG2) + | addu RA, BASE, RA + | b ->cont_nop + |. sdc1 f0, 0(RA) + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets1: + | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) + | li TMP0, LJ_TSTR + | sw STR:RC, LO(CARG3) + | b >1 + |. 
sw TMP0, HI(CARG3) + | + |->vmeta_tgets: + | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv) + | li TMP0, LJ_TTAB + | sw TAB:RB, LO(CARG2) + | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2) + | sw TMP0, HI(CARG2) + | li TMP1, LJ_TSTR + | sw STR:RC, LO(CARG3) + | b >1 + |. sw TMP1, HI(CARG3) + | + |->vmeta_tgetb: // TMP0 = index + | mtc1 TMP0, f0 + | cvt.d.w f0, f0 + | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) + | sdc1 f0, 0(CARG3) + | + |->vmeta_tgetv: + |1: + | load_got lj_meta_tget + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + |. move CARG1, L + | // Returns TValue * (finished) or NULL (metamethod). + | beqz CRET1, >3 + |. addiu TMP1, BASE, -FRAME_CONT + | ldc1 f0, 0(CRET1) + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | lw BASE, L->top + | sw PC, -16+HI(BASE) // [cont|PC] + | subu PC, BASE, TMP1 + | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | b ->vm_call_dispatch_f + |. li NARGS8:RC, 16 // 2 args for func(t, k). + | + |->vmeta_tgetr: + | load_got lj_tab_getinth + | call_intern lj_tab_getinth // (GCtab *t, int32_t key) + |. nop + | // Returns cTValue * or NULL. + | beqz CRET1, >1 + |. nop + | b ->BC_TGETR_Z + |. ldc1 f0, 0(CRET1) + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets1: + | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) + | li TMP0, LJ_TSTR + | sw STR:RC, LO(CARG3) + | b >1 + |. sw TMP0, HI(CARG3) + | + |->vmeta_tsets: + | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv) + | li TMP0, LJ_TTAB + | sw TAB:RB, LO(CARG2) + | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2) + | sw TMP0, HI(CARG2) + | li TMP1, LJ_TSTR + | sw STR:RC, LO(CARG3) + | b >1 + |. sw TMP1, HI(CARG3) + | + |->vmeta_tsetb: // TMP0 = index + | mtc1 TMP0, f0 + | cvt.d.w f0, f0 + | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) + | sdc1 f0, 0(CARG3) + | + |->vmeta_tsetv: + |1: + | load_got lj_meta_tset + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + |. move CARG1, L + | // Returns TValue * (finished) or NULL (metamethod). + | beqz CRET1, >3 + |. ldc1 f0, 0(RA) + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | ins_next1 + | sdc1 f0, 0(CRET1) + | ins_next2 + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | addiu TMP1, BASE, -FRAME_CONT + | lw BASE, L->top + | sw PC, -16+HI(BASE) // [cont|PC] + | subu PC, BASE, TMP1 + | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | sdc1 f0, 16(BASE) // Copy value to third argument. + | b ->vm_call_dispatch_f + |. li NARGS8:RC, 24 // 3 args for func(t, k, v) + | + |->vmeta_tsetr: + | load_got lj_tab_setinth + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) + |. move CARG1, L + | // Returns TValue *. + | b ->BC_TSETR_Z + |. nop + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | // CARG2, CARG3 are already set by BC_ISLT/BC_ISGE/BC_ISLE/BC_ISGT. + | load_got lj_meta_comp + | addiu PC, PC, -4 + | sw BASE, L->base + | sw PC, SAVE_PC + | decode_OP1 CARG4, INS + | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + |. move CARG1, L + | // Returns 0/1 or TValue * (metamethod). 
+ |3: + | sltiu AT, CRET1, 2 + | beqz AT, ->vmeta_binop + | negu TMP2, CRET1 + |4: + | lhu RD, OFS_RD(PC) + | addiu PC, PC, 4 + | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535) + | sll RD, RD, 2 + | addu RD, RD, TMP1 + | and RD, RD, TMP2 + | addu PC, PC, RD + |->cont_nop: + | ins_next + | + |->cont_ra: // RA = resultptr + | lbu TMP1, -4+OFS_RA(PC) + | ldc1 f0, 0(RA) + | sll TMP1, TMP1, 3 + | addu TMP1, BASE, TMP1 + | b ->cont_nop + |. sdc1 f0, 0(TMP1) + | + |->cont_condt: // RA = resultptr + | lw TMP0, HI(RA) + | sltiu AT, TMP0, LJ_TISTRUECOND + | b <4 + |. negu TMP2, AT // Branch if result is true. + | + |->cont_condf: // RA = resultptr + | lw TMP0, HI(RA) + | sltiu AT, TMP0, LJ_TISTRUECOND + | b <4 + |. addiu TMP2, AT, -1 // Branch if result is false. + | + |->vmeta_equal: + | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. + | load_got lj_meta_equal + | addiu PC, PC, -4 + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + |. move CARG1, L + | // Returns 0/1 or TValue * (metamethod). + | b <3 + |. nop + | + |->vmeta_equal_cd: + |.if FFI + | load_got lj_meta_equal_cd + | move CARG2, INS + | addiu PC, PC, -4 + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op) + |. move CARG1, L + | // Returns 0/1 or TValue * (metamethod). + | b <3 + |. nop + |.endif + | + |->vmeta_istype: + | load_got lj_meta_istype + | addiu PC, PC, -4 + | sw BASE, L->base + | srl CARG2, RA, 3 + | srl CARG3, RD, 3 + | sw PC, SAVE_PC + | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) + |. move CARG1, L + | b ->cont_nop + |. nop + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_unm: + | move CARG4, CARG3 + | + |->vmeta_arith: + | load_got lj_meta_arith + | decode_OP1 TMP0, INS + | sw BASE, L->base + | sw PC, SAVE_PC + | move CARG2, RA + | sw TMP0, ARG5 + | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + |. move CARG1, L + | // Returns NULL (finished) or TValue * (metamethod). + | beqz CRET1, ->cont_nop + |. nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 + | subu TMP1, CRET1, BASE + | sw PC, -16+HI(CRET1) // [cont|PC] + | move TMP2, BASE + | addiu PC, TMP1, FRAME_CONT + | move BASE, CRET1 + | b ->vm_call_dispatch + |. li NARGS8:RC, 16 // 2 args for func(o1, o2). + | + |->vmeta_len: + | // CARG2 already set by BC_LEN. +#if LJ_52 + | move MULTRES, CARG1 +#endif + | load_got lj_meta_len + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_meta_len // (lua_State *L, TValue *o) + |. move CARG1, L + | // Returns NULL (retry) or TValue * (metamethod base). +#if LJ_52 + | bnez CRET1, ->vmeta_binop // Binop call for compatibility. + |. nop + | b ->BC_LEN_Z + |. move CARG1, MULTRES +#else + | b ->vmeta_binop // Binop call for compatibility. + |. nop +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call: // Resolve and call __call metamethod. + | // TMP2 = old base, BASE = new base, RC = nargs*8 + | load_got lj_meta_call + | sw TMP2, L->base // This is the callers base! + | addiu CARG2, BASE, -8 + | sw PC, SAVE_PC + | addu CARG3, BASE, RC + | move MULTRES, NARGS8:RC + | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + |. move CARG1, L + | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | addiu NARGS8:RC, MULTRES, 8 // Got one more argument now. 
+ | ins_call + | + |->vmeta_callt: // Resolve __call for BC_CALLT. + | // BASE = old base, RA = new base, RC = nargs*8 + | load_got lj_meta_call + | sw BASE, L->base + | addiu CARG2, RA, -8 + | sw PC, SAVE_PC + | addu CARG3, RA, RC + | move MULTRES, NARGS8:RC + | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + |. move CARG1, L + | lw TMP1, FRAME_PC(BASE) + | lw LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here. + | b ->BC_CALLT_Z + |. addiu NARGS8:RC, MULTRES, 8 // Got one more argument now. + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | load_got lj_meta_for + | sw BASE, L->base + | move CARG2, RA + | sw PC, SAVE_PC + | move MULTRES, INS + | call_intern lj_meta_for // (lua_State *L, TValue *base) + |. move CARG1, L + |.if JIT + | decode_OP1 TMP0, MULTRES + | li AT, BC_JFORI + |.endif + | decode_RA8a RA, MULTRES + | decode_RD8a RD, MULTRES + | decode_RA8b RA + |.if JIT + | beq TMP0, AT, =>BC_JFORI + |. decode_RD8b RD + | b =>BC_FORI + |. nop + |.else + | b =>BC_FORI + |. decode_RD8b RD + |.endif + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | beqz NARGS8:RC, ->fff_fallback + |. lw CARG3, HI(BASE) + | lw CARG1, LO(BASE) + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | sltiu AT, NARGS8:RC, 16 + | lw CARG3, HI(BASE) + | bnez AT, ->fff_fallback + |. lw CARG4, 8+HI(BASE) + | lw CARG1, LO(BASE) + | lw CARG2, 8+LO(BASE) + |.endmacro + | + |.macro .ffunc_n, name // Caveat: has delay slot! + |->ff_ .. name: + | lw CARG3, HI(BASE) + | beqz NARGS8:RC, ->fff_fallback + |. ldc1 FARG1, 0(BASE) + | sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |.endmacro + | + |.macro .ffunc_nn, name // Caveat: has delay slot! + |->ff_ .. name: + | sltiu AT, NARGS8:RC, 16 + | lw CARG3, HI(BASE) + | bnez AT, ->fff_fallback + |. lw CARG4, 8+HI(BASE) + | ldc1 FARG1, 0(BASE) + | ldc1 FARG2, 8(BASE) + | sltiu TMP0, CARG3, LJ_TISNUM + | sltiu TMP1, CARG4, LJ_TISNUM + | and TMP0, TMP0, TMP1 + | beqz TMP0, ->fff_fallback + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot! + |.macro ffgccheck + | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH) + | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) + | subu AT, TMP0, TMP1 + | bgezal AT, ->fff_gcstep + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | sltiu AT, CARG3, LJ_TISTRUECOND + | beqz AT, ->fff_fallback + |. addiu RA, BASE, -8 + | lw PC, FRAME_PC(BASE) + | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8. + | addu TMP2, RA, NARGS8:RC + | sw CARG3, HI(RA) + | addiu TMP1, BASE, 8 + | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument. + |. sw CARG1, LO(RA) + |1: + | ldc1 f0, 0(TMP1) + | sdc1 f0, -8(TMP1) + | bne TMP1, TMP2, <1 + |. addiu TMP1, TMP1, 8 + | b ->fff_res + |. nop + | + |.ffunc type + | lw CARG3, HI(BASE) + | li TMP1, LJ_TISNUM + | beqz NARGS8:RC, ->fff_fallback + |. sltiu TMP0, CARG3, LJ_TISNUM + | movz TMP1, CARG3, TMP0 + | not TMP1, TMP1 + | sll TMP1, TMP1, 3 + | addu TMP1, CFUNC:RB, TMP1 + | b ->fff_resn + |. 
ldc1 FRET1, CFUNC:TMP1->upvalue + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | li AT, LJ_TTAB + | bne CARG3, AT, >6 + |. li AT, LJ_TUDATA + |1: // Field metatable must be at same offset for GCtab and GCudata! + | lw TAB:CARG1, TAB:CARG1->metatable + |2: + | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH) + | beqz TAB:CARG1, ->fff_restv + |. li CARG3, LJ_TNIL + | lw TMP0, TAB:CARG1->hmask + | li CARG3, LJ_TTAB // Use metatable as default result. + | lw TMP1, STR:RC->hash + | lw NODE:TMP2, TAB:CARG1->node + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | sll TMP0, TMP1, 5 + | sll TMP1, TMP1, 3 + | subu TMP1, TMP0, TMP1 + | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + | li AT, LJ_TSTR + |3: // Rearranged logic, because we expect _not_ to find the key. + | lw CARG4, offsetof(Node, key)+HI(NODE:TMP2) + | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) + | lw NODE:TMP3, NODE:TMP2->next + | bne CARG4, AT, >4 + |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2) + | beq TMP0, STR:RC, >5 + |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2) + |4: + | beqz NODE:TMP3, ->fff_restv // Not found, keep default result. + |. move NODE:TMP2, NODE:TMP3 + | b <3 + |. nop + |5: + | beq CARG2, TISNIL, ->fff_restv // Ditto for nil value. + |. nop + | move CARG3, CARG2 // Return value of mt.__metatable. + | b ->fff_restv + |. move CARG1, TMP1 + | + |6: + | beq CARG3, AT, <1 + |. sltiu TMP0, CARG3, LJ_TISNUM + | li TMP1, LJ_TISNUM + | movz TMP1, CARG3, TMP0 + | not TMP1, TMP1 + | sll TMP1, TMP1, 2 + | addu TMP1, DISPATCH, TMP1 + | b <2 + |. lw TAB:CARG1, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1) + | + |.ffunc_2 setmetatable + | // Fast path: no mt for table yet and not clearing the mt. + | li AT, LJ_TTAB + | bne CARG3, AT, ->fff_fallback + |. addiu CARG4, CARG4, -LJ_TTAB + | lw TAB:TMP1, TAB:CARG1->metatable + | lbu TMP3, TAB:CARG1->marked + | or AT, CARG4, TAB:TMP1 + | bnez AT, ->fff_fallback + |. andi AT, TMP3, LJ_GC_BLACK // isblack(table) + | beqz AT, ->fff_restv + |. sw TAB:CARG2, TAB:CARG1->metatable + | barrierback TAB:CARG1, TMP3, TMP0, ->fff_restv + | + |.ffunc rawget + | lw CARG4, HI(BASE) + | sltiu AT, NARGS8:RC, 16 + | lw TAB:CARG2, LO(BASE) + | load_got lj_tab_get + | addiu CARG4, CARG4, -LJ_TTAB + | or AT, AT, CARG4 + | bnez AT, ->fff_fallback + | addiu CARG3, BASE, 8 + | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + |. move CARG1, L + | // Returns cTValue *. + | b ->fff_resn + |. ldc1 FRET1, 0(CRET1) + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | lw CARG1, HI(BASE) + | xori AT, NARGS8:RC, 8 + | sltiu CARG1, CARG1, LJ_TISNUM + | movn CARG1, r0, AT + | beqz CARG1, ->fff_fallback // Exactly one number argument. + |. ldc1 FRET1, 0(BASE) + | b ->fff_resn + |. nop + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | li AT, LJ_TSTR + | // A __tostring method in the string base metatable is ignored. + | beq CARG3, AT, ->fff_restv // String key? + | // Handle numbers inline, unless a number base metatable is present. + |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH) + | sltiu TMP0, CARG3, LJ_TISNUM + | sltiu TMP1, TMP1, 1 + | and TMP0, TMP0, TMP1 + | beqz TMP0, ->fff_fallback + |. sw BASE, L->base // Add frame since C call can throw. + | ffgccheck + |. sw PC, SAVE_PC // Redundant (but a defined value). 
+ | load_got lj_strfmt_num + | move CARG1, L + | call_intern lj_strfmt_num // (lua_State *L, lua_Number *np) + |. move CARG2, BASE + | // Returns GCstr *. + | li CARG3, LJ_TSTR + | b ->fff_restv + |. move CARG1, CRET1 + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc next + | lw CARG1, HI(BASE) + | lw TAB:CARG2, LO(BASE) + | beqz NARGS8:RC, ->fff_fallback + |. addu TMP2, BASE, NARGS8:RC + | li AT, LJ_TTAB + | sw TISNIL, HI(TMP2) // Set missing 2nd arg to nil. + | bne CARG1, AT, ->fff_fallback + |. lw PC, FRAME_PC(BASE) + | load_got lj_tab_next + | sw BASE, L->base // Add frame since C call can throw. + | sw BASE, L->top // Dummy frame length is ok. + | addiu CARG3, BASE, 8 + | sw PC, SAVE_PC + | call_intern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + |. move CARG1, L + | // Returns 0 at end of traversal. + | beqz CRET1, ->fff_restv // End of traversal: return nil. + |. li CARG3, LJ_TNIL + | ldc1 f0, 8(BASE) // Copy key and value to results. + | addiu RA, BASE, -8 + | ldc1 f2, 16(BASE) + | li RD, (2+1)*8 + | sdc1 f0, 0(RA) + | b ->fff_res + |. sdc1 f2, 8(RA) + | + |.ffunc_1 pairs + | li AT, LJ_TTAB + | bne CARG3, AT, ->fff_fallback + |. lw PC, FRAME_PC(BASE) +#if LJ_52 + | lw TAB:TMP2, TAB:CARG1->metatable + | ldc1 f0, CFUNC:RB->upvalue[0] + | bnez TAB:TMP2, ->fff_fallback +#else + | ldc1 f0, CFUNC:RB->upvalue[0] +#endif + |. addiu RA, BASE, -8 + | sw TISNIL, 8+HI(BASE) + | li RD, (3+1)*8 + | b ->fff_res + |. sdc1 f0, 0(RA) + | + |.ffunc ipairs_aux + | sltiu AT, NARGS8:RC, 16 + | lw CARG3, HI(BASE) + | lw TAB:CARG1, LO(BASE) + | lw CARG4, 8+HI(BASE) + | bnez AT, ->fff_fallback + |. ldc1 FARG2, 8(BASE) + | addiu CARG3, CARG3, -LJ_TTAB + | sltiu AT, CARG4, LJ_TISNUM + | li TMP0, 1 + | movn AT, r0, CARG3 + | mtc1 TMP0, FARG1 + | beqz AT, ->fff_fallback + |. lw PC, FRAME_PC(BASE) + | trunc.w.d FRET1, FARG2 + | cvt.d.w FARG1, FARG1 + | lw TMP0, TAB:CARG1->asize + | lw TMP1, TAB:CARG1->array + | mfc1 TMP2, FRET1 + | addiu RA, BASE, -8 + | add.d FARG2, FARG2, FARG1 + | addiu TMP2, TMP2, 1 + | sltu AT, TMP2, TMP0 + | sll TMP3, TMP2, 3 + | addu TMP3, TMP1, TMP3 + | beqz AT, >2 // Not in array part? + |. sdc1 FARG2, 0(RA) + | lw TMP2, HI(TMP3) + | ldc1 f0, 0(TMP3) + |1: + | beq TMP2, TISNIL, ->fff_res // End of iteration, return 0 results. + |. li RD, (0+1)*8 + | li RD, (2+1)*8 + | b ->fff_res + |. sdc1 f0, 8(RA) + |2: // Check for empty hash part first. Otherwise call C function. + | lw TMP0, TAB:CARG1->hmask + | load_got lj_tab_getinth + | beqz TMP0, ->fff_res + |. li RD, (0+1)*8 + | call_intern lj_tab_getinth // (GCtab *t, int32_t key) + |. move CARG2, TMP2 + | // Returns cTValue * or NULL. + | beqz CRET1, ->fff_res + |. li RD, (0+1)*8 + | lw TMP2, HI(CRET1) + | b <1 + |. ldc1 f0, 0(CRET1) + | + |.ffunc_1 ipairs + | li AT, LJ_TTAB + | bne CARG3, AT, ->fff_fallback + |. lw PC, FRAME_PC(BASE) +#if LJ_52 + | lw TAB:TMP2, TAB:CARG1->metatable + | ldc1 f0, CFUNC:RB->upvalue[0] + | bnez TAB:TMP2, ->fff_fallback +#else + | ldc1 f0, CFUNC:RB->upvalue[0] +#endif + |. addiu RA, BASE, -8 + | sw r0, 8+HI(BASE) + | sw r0, 8+LO(BASE) + | li RD, (3+1)*8 + | b ->fff_res + |. sdc1 f0, 0(RA) + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall + | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | beqz NARGS8:RC, ->fff_fallback + | move TMP2, BASE + | addiu BASE, BASE, 8 + | // Remember active hook before pcall. 
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT + | andi TMP3, TMP3, 1 + | addiu PC, TMP3, 8+FRAME_PCALL + | b ->vm_call_dispatch + |. addiu NARGS8:RC, NARGS8:RC, -8 + | + |.ffunc xpcall + | sltiu AT, NARGS8:RC, 16 + | lw CARG4, 8+HI(BASE) + | bnez AT, ->fff_fallback + |. ldc1 FARG2, 8(BASE) + | ldc1 FARG1, 0(BASE) + | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH) + | li AT, LJ_TFUNC + | move TMP2, BASE + | bne CARG4, AT, ->fff_fallback // Traceback must be a function. + | addiu BASE, BASE, 16 + | // Remember active hook before pcall. + | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT + | sdc1 FARG2, 0(TMP2) // Swap function and traceback. + | andi TMP3, TMP3, 1 + | sdc1 FARG1, 8(TMP2) + | addiu PC, TMP3, 16+FRAME_PCALL + | b ->vm_call_dispatch + |. addiu NARGS8:RC, NARGS8:RC, -16 + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | li AT, LJ_TTHREAD + | bne CARG3, AT, ->fff_fallback + |.else + |.ffunc coroutine_wrap_aux + | lw L:CARG1, CFUNC:RB->upvalue[0].gcr + |.endif + | lbu TMP0, L:CARG1->status + | lw TMP1, L:CARG1->cframe + | lw CARG2, L:CARG1->top + | lw TMP2, L:CARG1->base + | addiu TMP3, TMP0, -LUA_YIELD + | bgtz TMP3, ->fff_fallback // st > LUA_YIELD? + |. xor TMP2, TMP2, CARG2 + | bnez TMP1, ->fff_fallback // cframe != 0? + |. or AT, TMP2, TMP0 + | lw TMP0, L:CARG1->maxstack + | beqz AT, ->fff_fallback // base == top && st == 0? + |. lw PC, FRAME_PC(BASE) + | addu TMP2, CARG2, NARGS8:RC + | sltu AT, TMP0, TMP2 + | bnez AT, ->fff_fallback // Stack overflow? + |. sw PC, SAVE_PC + | sw BASE, L->base + |1: + |.if resume + | addiu BASE, BASE, 8 // Keep resumed thread in stack for GC. + | addiu NARGS8:RC, NARGS8:RC, -8 + | addiu TMP2, TMP2, -8 + |.endif + | sw TMP2, L:CARG1->top + | addu TMP1, BASE, NARGS8:RC + | move CARG3, CARG2 + | sw BASE, L->top + |2: // Move args to coroutine. + | ldc1 f0, 0(BASE) + | sltu AT, BASE, TMP1 + | beqz AT, >3 + |. addiu BASE, BASE, 8 + | sdc1 f0, 0(CARG3) + | b <2 + |. addiu CARG3, CARG3, 8 + |3: + | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0) + |. move L:RA, L:CARG1 + | // Returns thread status. + |4: + | lw TMP2, L:RA->base + | sltiu AT, CRET1, LUA_YIELD+1 + | lw TMP3, L:RA->top + | li_vmstate INTERP + | lw BASE, L->base + | sw L, DISPATCH_GL(cur_L)(DISPATCH) + | st_vmstate + | beqz AT, >8 + |. subu RD, TMP3, TMP2 + | lw TMP0, L->maxstack + | beqz RD, >6 // No results? + |. addu TMP1, BASE, RD + | sltu AT, TMP0, TMP1 + | bnez AT, >9 // Need to grow stack? + |. addu TMP3, TMP2, RD + | sw TMP2, L:RA->top // Clear coroutine stack. + | move TMP1, BASE + |5: // Move results from coroutine. + | ldc1 f0, 0(TMP2) + | addiu TMP2, TMP2, 8 + | sltu AT, TMP2, TMP3 + | sdc1 f0, 0(TMP1) + | bnez AT, <5 + |. addiu TMP1, TMP1, 8 + |6: + | andi TMP0, PC, FRAME_TYPE + |.if resume + | li TMP1, LJ_TTRUE + | addiu RA, BASE, -8 + | sw TMP1, -8+HI(BASE) // Prepend true to results. + | addiu RD, RD, 16 + |.else + | move RA, BASE + | addiu RD, RD, 8 + |.endif + |7: + | sw PC, SAVE_PC + | beqz TMP0, ->BC_RET_Z + |. move MULTRES, RD + | b ->vm_return + |. nop + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | addiu TMP3, TMP3, -8 + | li TMP1, LJ_TFALSE + | ldc1 f0, 0(TMP3) + | sw TMP3, L:RA->top // Remove error from coroutine stack. + | li RD, (2+1)*8 + | sw TMP1, -8+HI(BASE) // Prepend false to results. + | addiu RA, BASE, -8 + | sdc1 f0, 0(BASE) // Copy error message. + | b <7 + |. 
andi TMP0, PC, FRAME_TYPE + |.else + | load_got lj_ffh_coroutine_wrap_err + | move CARG2, L:RA + | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + |. move CARG1, L + |.endif + | + |9: // Handle stack expansion on return from yield. + | load_got lj_state_growstack + | srl CARG2, RD, 3 + | call_intern lj_state_growstack // (lua_State *L, int n) + |. move CARG1, L + | b <4 + |. li CRET1, 0 + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | lw TMP0, L->cframe + | addu TMP1, BASE, NARGS8:RC + | sw BASE, L->base + | andi TMP0, TMP0, CFRAME_RESUME + | sw TMP1, L->top + | beqz TMP0, ->fff_fallback + |. li CRET1, LUA_YIELD + | sw r0, L->cframe + | b ->vm_leave_unw + |. sb CRET1, L->status + | + |//-- Math library ------------------------------------------------------- + | + |.ffunc_n math_abs + |. abs.d FRET1, FARG1 + |->fff_resn: + | lw PC, FRAME_PC(BASE) + | addiu RA, BASE, -8 + | b ->fff_res1 + |. sdc1 FRET1, -8(BASE) + | + |->fff_restv: + | // CARG3/CARG1 = TValue result. + | lw PC, FRAME_PC(BASE) + | sw CARG3, -8+HI(BASE) + | addiu RA, BASE, -8 + | sw CARG1, -8+LO(BASE) + |->fff_res1: + | // RA = results, PC = return. + | li RD, (1+1)*8 + |->fff_res: + | // RA = results, RD = (nresults+1)*8, PC = return. + | andi TMP0, PC, FRAME_TYPE + | bnez TMP0, ->vm_return + |. move MULTRES, RD + | lw INS, -4(PC) + | decode_RB8a RB, INS + | decode_RB8b RB + |5: + | sltu AT, RD, RB + | bnez AT, >6 // More results expected? + |. decode_RA8a TMP0, INS + | decode_RA8b TMP0 + | ins_next1 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | subu BASE, RA, TMP0 + | ins_next2 + | + |6: // Fill up results with nil. + | addu TMP1, RA, RD + | addiu RD, RD, 8 + | b <5 + |. sw TISNIL, -8+HI(TMP1) + | + |.macro math_extern, func + |->ff_math_ .. func: + | lw CARG3, HI(BASE) + | beqz NARGS8:RC, ->fff_fallback + |. load_got func + | sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |. nop + | call_extern + |. ldc1 FARG1, 0(BASE) + | b ->fff_resn + |. nop + |.endmacro + | + |.macro math_extern2, func + | .ffunc_nn math_ .. func + |. load_got func + | call_extern + |. nop + | b ->fff_resn + |. nop + |.endmacro + | + |.macro math_round, func + | .ffunc_n math_ .. func + |. nop + | bal ->vm_ .. func + |. nop + | b ->fff_resn + |. nop + |.endmacro + | + | math_round floor + | math_round ceil + | + |.ffunc math_log + | lw CARG3, HI(BASE) + | li AT, 8 + | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument. + |. load_got log + | sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |. nop + | call_extern + |. ldc1 FARG1, 0(BASE) + | b ->fff_resn + |. nop + | + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |.ffunc_n math_sqrt + |. sqrt.d FRET1, FARG1 + | b ->fff_resn + |. nop + | + |.ffunc_nn math_ldexp + | trunc.w.d FARG2, FARG2 + | load_got ldexp + | mfc1 CARG3, FARG2 + | call_extern + |. nop + | b ->fff_resn + |. nop + | + |.ffunc_n math_frexp + | load_got frexp + | lw PC, FRAME_PC(BASE) + | call_extern + |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) + | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH) + | addiu RA, BASE, -8 + | mtc1 TMP1, FARG2 + | sdc1 FRET1, 0(RA) + | cvt.d.w FARG2, FARG2 + | sdc1 FARG2, 8(RA) + | b ->fff_res + |. 
li RD, (2+1)*8 + | + |.ffunc_n math_modf + | load_got modf + | lw PC, FRAME_PC(BASE) + | call_extern + |. addiu CARG3, BASE, -8 + | addiu RA, BASE, -8 + | sdc1 FRET1, 0(BASE) + | b ->fff_res + |. li RD, (2+1)*8 + | + |.macro math_minmax, name, ismax + |->ff_ .. name: + | lw CARG3, HI(BASE) + | beqz NARGS8:RC, ->fff_fallback + |. ldc1 FRET1, 0(BASE) + | sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |. addu TMP2, BASE, NARGS8:RC + | addiu TMP1, BASE, 8 + | beq TMP1, TMP2, ->fff_resn + |1: + |. lw CARG3, HI(TMP1) + | ldc1 FARG1, 0(TMP1) + | addiu TMP1, TMP1, 8 + | sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |.if ismax + |. c.olt.d FARG1, FRET1 + |.else + |. c.olt.d FRET1, FARG1 + |.endif + | bne TMP1, TMP2, <1 + |. movf.d FRET1, FARG1 + | b ->fff_resn + |. nop + |.endmacro + | + | math_minmax math_min, 0 + | math_minmax math_max, 1 + | + |//-- String library ----------------------------------------------------- + | + |.ffunc string_byte // Only handle the 1-arg case here. + | lw CARG3, HI(BASE) + | lw STR:CARG1, LO(BASE) + | xori AT, NARGS8:RC, 8 + | addiu CARG3, CARG3, -LJ_TSTR + | or AT, AT, CARG3 + | bnez AT, ->fff_fallback // Need exactly 1 string argument. + |. nop + | lw TMP0, STR:CARG1->len + | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end). + | addiu RA, BASE, -8 + | sltu RD, r0, TMP0 + | mtc1 TMP1, f0 + | addiu RD, RD, 1 + | cvt.d.w f0, f0 + | lw PC, FRAME_PC(BASE) + | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8 + | b ->fff_res + |. sdc1 f0, 0(RA) + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | lw CARG3, HI(BASE) + | ldc1 FARG1, 0(BASE) + | li AT, 8 + | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument. + |. sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |. li CARG3, 1 + | trunc.w.d FARG1, FARG1 + | addiu CARG2, sp, ARG5_OFS + | sltiu AT, TMP0, 256 + | mfc1 TMP0, FARG1 + | beqz AT, ->fff_fallback + |. sw TMP0, ARG5 + |->fff_newstr: + | load_got lj_str_new + | sw BASE, L->base + | sw PC, SAVE_PC + | call_intern lj_str_new // (lua_State *L, char *str, size_t l) + |. move CARG1, L + | // Returns GCstr *. + | lw BASE, L->base + |->fff_resstr: + | move CARG1, CRET1 + | b ->fff_restv + |. li CARG3, LJ_TSTR + | + |.ffunc string_sub + | ffgccheck + | addiu AT, NARGS8:RC, -16 + | lw CARG3, 16+HI(BASE) + | ldc1 f0, 16(BASE) + | lw TMP0, HI(BASE) + | lw STR:CARG1, LO(BASE) + | bltz AT, ->fff_fallback + | lw CARG2, 8+HI(BASE) + | ldc1 f2, 8(BASE) + | beqz AT, >1 + |. li CARG4, -1 + | trunc.w.d f0, f0 + | sltiu AT, CARG3, LJ_TISNUM + | beqz AT, ->fff_fallback + |. mfc1 CARG4, f0 + |1: + | sltiu AT, CARG2, LJ_TISNUM + | beqz AT, ->fff_fallback + |. li AT, LJ_TSTR + | trunc.w.d f2, f2 + | bne TMP0, AT, ->fff_fallback + |. lw CARG2, STR:CARG1->len + | mfc1 CARG3, f2 + | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end + | slt AT, CARG4, r0 + | addiu TMP0, CARG2, 1 + | addu TMP1, CARG4, TMP0 + | slt TMP3, CARG3, r0 + | movn CARG4, TMP1, AT // if (end < 0) end += len+1 + | addu TMP1, CARG3, TMP0 + | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1 + | li TMP2, 1 + | slt AT, CARG4, r0 + | slt TMP3, r0, CARG3 + | movn CARG4, r0, AT // if (end < 0) end = 0 + | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1 + | slt AT, CARG2, CARG4 + | movn CARG4, CARG2, AT // if (end > len) end = len + | addu CARG2, STR:CARG1, CARG3 + | subu CARG3, CARG4, CARG3 // len = end - start + | addiu CARG2, CARG2, sizeof(GCstr)-1 + | bgez CARG3, ->fff_newstr + |. 
addiu CARG3, CARG3, 1 // len++ + |->fff_emptystr: // Return empty string. + | addiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty) + | b ->fff_restv + |. li CARG3, LJ_TSTR + | + |.macro ffstring_op, name + | .ffunc string_ .. name + | ffgccheck + | lw CARG3, HI(BASE) + | lw STR:CARG2, LO(BASE) + | beqz NARGS8:RC, ->fff_fallback + |. li AT, LJ_TSTR + | bne CARG3, AT, ->fff_fallback + |. addiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf) + | load_got lj_buf_putstr_ .. name + | lw TMP0, SBUF:CARG1->b + | sw L, SBUF:CARG1->L + | sw BASE, L->base + | sw TMP0, SBUF:CARG1->p + | call_intern extern lj_buf_putstr_ .. name + |. sw PC, SAVE_PC + | load_got lj_buf_tostr + | call_intern lj_buf_tostr + |. move SBUF:CARG1, SBUF:CRET1 + | b ->fff_resstr + |. lw BASE, L->base + |.endmacro + | + |ffstring_op reverse + |ffstring_op lower + |ffstring_op upper + | + |//-- Bit library -------------------------------------------------------- + | + |.macro .ffunc_bit, name + | .ffunc_n bit_..name + |. add.d FARG1, FARG1, TOBIT + | mfc1 CRET1, FARG1 + |.endmacro + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name + | addiu TMP1, BASE, 8 + | addu TMP2, BASE, NARGS8:RC + |1: + | lw CARG4, HI(TMP1) + | beq TMP1, TMP2, ->fff_resi + |. ldc1 FARG1, 0(TMP1) + | sltiu AT, CARG4, LJ_TISNUM + | beqz AT, ->fff_fallback + | add.d FARG1, FARG1, TOBIT + | mfc1 CARG2, FARG1 + | ins CRET1, CRET1, CARG2 + | b <1 + |. addiu TMP1, TMP1, 8 + |.endmacro + | + |.ffunc_bit_op band, and + |.ffunc_bit_op bor, or + |.ffunc_bit_op bxor, xor + | + |.ffunc_bit bswap + | srl TMP0, CRET1, 24 + | srl TMP2, CRET1, 8 + | sll TMP1, CRET1, 24 + | andi TMP2, TMP2, 0xff00 + | or TMP0, TMP0, TMP1 + | andi CRET1, CRET1, 0xff00 + | or TMP0, TMP0, TMP2 + | sll CRET1, CRET1, 8 + | b ->fff_resi + |. or CRET1, TMP0, CRET1 + | + |.ffunc_bit bnot + | b ->fff_resi + |. not CRET1, CRET1 + | + |.macro .ffunc_bit_sh, name, ins, shmod + | .ffunc_nn bit_..name + |. add.d FARG1, FARG1, TOBIT + | add.d FARG2, FARG2, TOBIT + | mfc1 CARG1, FARG1 + | mfc1 CARG2, FARG2 + |.if shmod == 1 + | li AT, 32 + | subu TMP0, AT, CARG2 + | sllv CARG2, CARG1, CARG2 + | srlv CARG1, CARG1, TMP0 + |.elif shmod == 2 + | li AT, 32 + | subu TMP0, AT, CARG2 + | srlv CARG2, CARG1, CARG2 + | sllv CARG1, CARG1, TMP0 + |.endif + | b ->fff_resi + |. ins CRET1, CARG1, CARG2 + |.endmacro + | + |.ffunc_bit_sh lshift, sllv, 0 + |.ffunc_bit_sh rshift, srlv, 0 + |.ffunc_bit_sh arshift, srav, 0 + |// Can't use rotrv, since it's only in MIPS32R2. + |.ffunc_bit_sh rol, or, 1 + |.ffunc_bit_sh ror, or, 2 + | + |.ffunc_bit tobit + |->fff_resi: + | mtc1 CRET1, FRET1 + | b ->fff_resn + |. cvt.d.w FRET1, FRET1 + | + |//----------------------------------------------------------------------- + | + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RB = CFUNC, RC = nargs*8 + | lw TMP3, CFUNC:RB->f + | addu TMP1, BASE, NARGS8:RC + | lw PC, FRAME_PC(BASE) // Fallback may overwrite PC. + | addiu TMP0, TMP1, 8*LUA_MINSTACK + | lw TMP2, L->maxstack + | sw PC, SAVE_PC // Redundant (but a defined value). + | sltu AT, TMP2, TMP0 + | sw BASE, L->base + | sw TMP1, L->top + | bnez AT, >5 // Need to grow stack. + |. move CFUNCADDR, TMP3 + | jalr TMP3 // (lua_State *L) + |. move CARG1, L + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | lw BASE, L->base + | sll RD, CRET1, 3 + | bgtz CRET1, ->fff_res // Returned nresults+1? + |. addiu RA, BASE, -8 + |1: // Returned 0 or -1: retry fast path. 
+ | lw TMP0, L->top + | lw LFUNC:RB, FRAME_FUNC(BASE) + | bnez CRET1, ->vm_call_tail // Returned -1? + |. subu NARGS8:RC, TMP0, BASE + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | andi TMP0, PC, FRAME_TYPE + | li AT, -4 + | bnez TMP0, >3 + |. and TMP1, PC, AT + | lbu TMP1, OFS_RA(PC) + | sll TMP1, TMP1, 3 + | addiu TMP1, TMP1, 8 + |3: + | b ->vm_call_dispatch // Resolve again for tailcall. + |. subu TMP2, BASE, TMP1 + | + |5: // Grow stack for fallback handler. + | load_got lj_state_growstack + | li CARG2, LUA_MINSTACK + | call_intern lj_state_growstack // (lua_State *L, int n) + |. move CARG1, L + | lw BASE, L->base + | b <1 + |. li CRET1, 0 // Force retry. + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RC = nargs*8 + | move MULTRES, ra + | load_got lj_gc_step + | sw BASE, L->base + | addu TMP0, BASE, NARGS8:RC + | sw PC, SAVE_PC // Redundant (but a defined value). + | sw TMP0, L->top + | call_intern lj_gc_step // (lua_State *L) + |. move CARG1, L + | lw BASE, L->base + | move ra, MULTRES + | lw TMP0, L->top + | lw CFUNC:RB, FRAME_FUNC(BASE) + | jr ra + |. subu NARGS8:RC, TMP0, BASE + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. + |.if JIT + | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent. + | bnez AT, >5 + | // Decrement the hookcount for consistency, but always do the call. + |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | andi AT, TMP3, HOOK_ACTIVE + | bnez AT, >1 + |. addiu TMP2, TMP2, -1 + | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT + | beqz AT, >1 + |. nop + | b >1 + |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + |.endif + | + |->vm_rethook: // Dispatch target for return hooks. + | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | andi AT, TMP3, HOOK_ACTIVE // Hook already active? + | beqz AT, >1 + |5: // Re-dispatch to static ins. + |. lw AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4. + | jr AT + |. nop + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | andi AT, TMP3, HOOK_ACTIVE // Hook already active? + | bnez AT, <5 + |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT + | beqz AT, <5 + |. addiu TMP2, TMP2, -1 + | beqz TMP2, >1 + |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | andi AT, TMP3, LUA_MASKLINE + | beqz AT, <5 + |1: + |. load_got lj_dispatch_ins + | sw MULTRES, SAVE_MULTRES + | move CARG2, PC + | sw BASE, L->base + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |. move CARG1, L + |3: + | lw BASE, L->base + |4: // Re-dispatch to static ins. + | lw INS, -4(PC) + | decode_OP4a TMP1, INS + | decode_OP4b TMP1 + | addu TMP0, DISPATCH, TMP1 + | decode_RD8a RD, INS + | lw AT, GG_DISP2STATIC(TMP0) + | decode_RA8a RA, INS + | decode_RD8b RD + | jr AT + | decode_RA8b RA + | + |->cont_hook: // Continue from hook yield. + | addiu PC, PC, 4 + | b <4 + |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins. + | + |->vm_hotloop: // Hot loop counter underflow. 
+ |.if JIT + | lw LFUNC:TMP1, FRAME_FUNC(BASE) + | addiu CARG1, DISPATCH, GG_DISP2J + | sw PC, SAVE_PC + | lw TMP1, LFUNC:TMP1->pc + | move CARG2, PC + | sw L, DISPATCH_J(L)(DISPATCH) + | lbu TMP1, PC2PROTO(framesize)(TMP1) + | load_got lj_trace_hot + | sw BASE, L->base + | sll TMP1, TMP1, 3 + | addu TMP1, BASE, TMP1 + | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc) + |. sw TMP1, L->top + | b <3 + |. nop + |.endif + | + |->vm_callhook: // Dispatch target for call hooks. + |.if JIT + | b >1 + |.endif + |. move CARG2, PC + | + |->vm_hotcall: // Hot call counter underflow. + |.if JIT + | ori CARG2, PC, 1 + |1: + |.endif + | load_got lj_dispatch_call + | addu TMP0, BASE, RC + | sw PC, SAVE_PC + | sw BASE, L->base + | subu RA, RA, BASE + | sw TMP0, L->top + | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc) + |. move CARG1, L + | // Returns ASMFunction. + | lw BASE, L->base + | lw TMP0, L->top + | sw r0, SAVE_PC // Invalidate for subsequent line hook. + | subu NARGS8:RC, TMP0, BASE + | addu RA, BASE, RA + | lw LFUNC:RB, FRAME_FUNC(BASE) + | jr CRET1 + |. lw INS, -4(PC) + | + |->cont_stitch: // Trace stitching. + |.if JIT + | // RA = resultptr, RB = meta base + | lw INS, -4(PC) + | lw TMP3, -24+LO(RB) // Save previous trace number. + | decode_RA8a RC, INS + | addiu AT, MULTRES, -8 + | decode_RA8b RC + | beqz AT, >2 + |. addu RC, BASE, RC // Call base. + |1: // Move results down. + | ldc1 f0, 0(RA) + | addiu AT, AT, -8 + | addiu RA, RA, 8 + | sdc1 f0, 0(RC) + | bnez AT, <1 + |. addiu RC, RC, 8 + |2: + | decode_RA8a RA, INS + | decode_RB8a RB, INS + | decode_RA8b RA + | decode_RB8b RB + | addu RA, RA, RB + | lw TMP1, DISPATCH_J(trace)(DISPATCH) + | addu RA, BASE, RA + |3: + | sltu AT, RC, RA + | bnez AT, >9 // More results wanted? + |. sll TMP2, TMP3, 2 + | + | addu TMP2, TMP1, TMP2 + | lw TRACE:TMP2, 0(TMP2) + | beqz TRACE:TMP2, ->cont_nop + |. nop + | lhu RD, TRACE:TMP2->link + | beq RD, TMP3, ->cont_nop // Blacklisted. + |. load_got lj_dispatch_stitch + | bnez RD, =>BC_JLOOP // Jump to stitched trace. + |. sll RD, RD, 3 + | + | // Stitch a new trace to the previous trace. + | sw TMP3, DISPATCH_J(exitno)(DISPATCH) + | sw L, DISPATCH_J(L)(DISPATCH) + | sw BASE, L->base + | addiu CARG1, DISPATCH, GG_DISP2J + | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) + |. move CARG2, PC + | b ->cont_nop + |. lw BASE, L->base + | + |9: + | sw TISNIL, HI(RC) + | b <3 + |. addiu RC, RC, 8 + |.endif + | + |->vm_profhook: // Dispatch target for profiler hook. +#if LJ_HASPROFILE + | load_got lj_dispatch_profile + | sw MULTRES, SAVE_MULTRES + | move CARG2, PC + | sw BASE, L->base + | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc) + |. move CARG1, L + | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. + | addiu PC, PC, -4 + | b ->cont_nop + |. 
lw BASE, L->base +#endif + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro savex_, a, b + | sdc1 f..a, 16+a*8(sp) + | sw r..a, 16+32*8+a*4(sp) + | sw r..b, 16+32*8+b*4(sp) + |.endmacro + | + |->vm_exit_handler: + |.if JIT + | addiu sp, sp, -(16+32*8+32*4) + | savex_ 0, 1 + | savex_ 2, 3 + | savex_ 4, 5 + | savex_ 6, 7 + | savex_ 8, 9 + | savex_ 10, 11 + | savex_ 12, 13 + | savex_ 14, 15 + | savex_ 16, 17 + | savex_ 18, 19 + | savex_ 20, 21 + | savex_ 22, 23 + | savex_ 24, 25 + | savex_ 26, 27 + | sdc1 f28, 16+28*8(sp) + | sw r28, 16+32*8+28*4(sp) + | sdc1 f30, 16+30*8(sp) + | sw r30, 16+32*8+30*4(sp) + | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP. + | li_vmstate EXIT + | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp. + | addiu DISPATCH, JGL, -GG_DISP2G-32768 + | lw TMP1, 0(TMP2) // Load exit number. + | st_vmstate + | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP. + | lw L, DISPATCH_GL(cur_L)(DISPATCH) + | lw BASE, DISPATCH_GL(jit_base)(DISPATCH) + | load_got lj_trace_exit + | sw L, DISPATCH_J(L)(DISPATCH) + | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number. + | sw BASE, L->base + | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number. + | addiu CARG1, DISPATCH, GG_DISP2J + | sw r0, DISPATCH_GL(jit_base)(DISPATCH) + | call_intern lj_trace_exit // (jit_State *J, ExitState *ex) + |. addiu CARG2, sp, 16 + | // Returns MULTRES (unscaled) or negated error code. + | lw TMP1, L->cframe + | li AT, -4 + | lw BASE, L->base + | and sp, TMP1, AT + | lw PC, SAVE_PC // Get SAVE_PC. + | b >1 + |. sw L, SAVE_L // Set SAVE_L (on-trace resume/yield). + |.endif + |->vm_exit_interp: + |.if JIT + | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set. + | lw L, SAVE_L + | addiu DISPATCH, JGL, -GG_DISP2G-32768 + | sw BASE, L->base + |1: + | bltz CRET1, >9 // Check for error from exit. + |. lw LFUNC:RB, FRAME_FUNC(BASE) + | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | sll MULTRES, CRET1, 3 + | li TISNIL, LJ_TNIL + | sw MULTRES, SAVE_MULTRES + | mtc1 TMP3, TOBIT + | lw TMP1, LFUNC:RB->pc + | sw r0, DISPATCH_GL(jit_base)(DISPATCH) + | lw KBASE, PC2PROTO(k)(TMP1) + | cvt.d.s TOBIT, TOBIT + | // Modified copy of ins_next which handles function header dispatch, too. + | lw INS, 0(PC) + | addiu PC, PC, 4 + | // Assumes TISNIL == ~LJ_VMST_INTERP == -1 + | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH) + | decode_OP4a TMP1, INS + | decode_OP4b TMP1 + | sltiu TMP2, TMP1, BC_FUNCF*4 // Function header? + | addu TMP0, DISPATCH, TMP1 + | decode_RD8a RD, INS + | lw AT, 0(TMP0) + | decode_RA8a RA, INS + | beqz TMP2, >2 + |. decode_RA8b RA + | jr AT + |. decode_RD8b RD + |2: + | sltiu TMP2, TMP1, (BC_FUNCC+2)*4 // Fast function? + | bnez TMP2, >3 + |. lw TMP1, FRAME_PC(BASE) + | // Check frame below fast function. + | andi TMP0, TMP1, FRAME_TYPE + | bnez TMP0, >3 // Trace stitching continuation? + |. nop + | // Otherwise set KBASE for Lua function below fast function. + | lw TMP2, -4(TMP1) + | decode_RA8a TMP0, TMP2 + | decode_RA8b TMP0 + | subu TMP1, BASE, TMP0 + | lw LFUNC:TMP2, -8+FRAME_FUNC(TMP1) + | lw TMP1, LFUNC:TMP2->pc + | lw KBASE, PC2PROTO(k)(TMP1) + |3: + | addiu RC, MULTRES, -8 + | jr AT + |. addu RA, RA, BASE + | + |9: // Rethrow error from the right C frame. + | load_got lj_err_throw + | negu CARG2, CRET1 + | call_intern lj_err_throw // (lua_State *L, int errcode) + |. 
move CARG1, L + |.endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1. + |.macro vm_round, func + | lui TMP0, 0x4330 // Hiword of 2^52 (double). + | mtc1 r0, f4 + | mtc1 TMP0, f5 + | abs.d FRET2, FARG1 // |x| + | mfc1 AT, f13 + | c.olt.d 0, FRET2, f4 + | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52 + | bc1f 0, >1 // Truncate only if |x| < 2^52. + |. sub.d FRET1, FRET1, f4 + | slt AT, AT, r0 + |.if "func" == "ceil" + | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0. + |.else + | lui TMP0, 0x3ff0 // Hiword of +1 (double). + |.endif + |.if "func" == "trunc" + | mtc1 TMP0, f5 + | c.olt.d 0, FRET2, FRET1 // |x| < result? + | sub.d FRET2, FRET1, f4 + | movt.d FRET1, FRET2, 0 // If yes, subtract +1. + | neg.d FRET2, FRET1 + | jr ra + |. movn.d FRET1, FRET2, AT // Merge sign bit back in. + |.else + | neg.d FRET2, FRET1 + | mtc1 TMP0, f5 + | movn.d FRET1, FRET2, AT // Merge sign bit back in. + |.if "func" == "ceil" + | c.olt.d 0, FRET1, FARG1 // x > result? + |.else + | c.olt.d 0, FARG1, FRET1 // x < result? + |.endif + | sub.d FRET2, FRET1, f4 // If yes, subtract +-1. + | jr ra + |. movt.d FRET1, FRET2, 0 + |.endif + |1: + | jr ra + |. mov.d FRET1, FARG1 + |.endmacro + | + |->vm_floor: + | vm_round floor + |->vm_ceil: + | vm_round ceil + |->vm_trunc: + |.if JIT + | vm_round trunc + |.endif + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Handler for callback functions. Callback slot number in r1, g in r2. + |->vm_ffi_callback: + |.if FFI + |.type CTSTATE, CTState, PC + | saveregs + | lw CTSTATE, GL:r2->ctype_state + | addiu DISPATCH, r2, GG_G2DISP + | load_got lj_ccallback_enter + | sw r1, CTSTATE->cb.slot + | sw CARG1, CTSTATE->cb.gpr[0] + | sw CARG2, CTSTATE->cb.gpr[1] + | sdc1 FARG1, CTSTATE->cb.fpr[0] + | sw CARG3, CTSTATE->cb.gpr[2] + | sw CARG4, CTSTATE->cb.gpr[3] + | sdc1 FARG2, CTSTATE->cb.fpr[1] + | addiu TMP0, sp, CFRAME_SPACE+16 + | sw TMP0, CTSTATE->cb.stack + | sw r0, SAVE_PC // Any value outside of bytecode is ok. + | move CARG2, sp + | call_intern lj_ccallback_enter // (CTState *cts, void *cf) + |. move CARG1, CTSTATE + | // Returns lua_State *. + | lw BASE, L:CRET1->base + | lw RC, L:CRET1->top + | move L, CRET1 + | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | lw LFUNC:RB, FRAME_FUNC(BASE) + | mtc1 TMP3, TOBIT + | li_vmstate INTERP + | li TISNIL, LJ_TNIL + | subu RC, RC, BASE + | st_vmstate + | cvt.d.s TOBIT, TOBIT + | ins_callt + |.endif + | + |->cont_ffi_callback: // Return from FFI callback. + |.if FFI + | load_got lj_ccallback_leave + | lw CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH) + | sw BASE, L->base + | sw RB, L->top + | sw L, CTSTATE->L + | move CARG2, RA + | call_intern lj_ccallback_leave // (CTState *cts, TValue *o) + |. move CARG1, CTSTATE + | lw CRET1, CTSTATE->cb.gpr[0] + | ldc1 FRET1, CTSTATE->cb.fpr[0] + | lw CRET2, CTSTATE->cb.gpr[1] + | b ->vm_leave_unw + |. 
ldc1 FRET2, CTSTATE->cb.fpr[1] + |.endif + | + |->vm_ffi_call: // Call C function via FFI. + | // Caveat: needs special frame unwinding, see below. + |.if FFI + | .type CCSTATE, CCallState, CARG1 + | lw TMP1, CCSTATE->spadj + | lbu CARG2, CCSTATE->nsp + | move TMP2, sp + | subu sp, sp, TMP1 + | sw ra, -4(TMP2) + | sll CARG2, CARG2, 2 + | sw r16, -8(TMP2) + | sw CCSTATE, -12(TMP2) + | move r16, TMP2 + | addiu TMP1, CCSTATE, offsetof(CCallState, stack) + | addiu TMP2, sp, 16 + | beqz CARG2, >2 + |. addu TMP3, TMP1, CARG2 + |1: + | lw TMP0, 0(TMP1) + | addiu TMP1, TMP1, 4 + | sltu AT, TMP1, TMP3 + | sw TMP0, 0(TMP2) + | bnez AT, <1 + |. addiu TMP2, TMP2, 4 + |2: + | lw CFUNCADDR, CCSTATE->func + | lw CARG2, CCSTATE->gpr[1] + | lw CARG3, CCSTATE->gpr[2] + | lw CARG4, CCSTATE->gpr[3] + | ldc1 FARG1, CCSTATE->fpr[0] + | ldc1 FARG2, CCSTATE->fpr[1] + | jalr CFUNCADDR + |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1. + | lw CCSTATE:TMP1, -12(r16) + | lw TMP2, -8(r16) + | lw ra, -4(r16) + | sw CRET1, CCSTATE:TMP1->gpr[0] + | sw CRET2, CCSTATE:TMP1->gpr[1] + | sdc1 FRET1, CCSTATE:TMP1->fpr[0] + | sdc1 FRET2, CCSTATE:TMP1->fpr[1] + | move sp, r16 + | jr ra + |. move r16, TMP2 + |.endif + |// Note: vm_ffi_call must be the last function in this object file! + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1*8, RD = src2*8, JMP with RD = target + | addu CARG2, BASE, RA + | addu CARG3, BASE, RD + | lw TMP0, HI(CARG2) + | lw TMP1, HI(CARG3) + | ldc1 f0, 0(CARG2) + | ldc1 f2, 0(CARG3) + | sltiu TMP0, TMP0, LJ_TISNUM + | sltiu TMP1, TMP1, LJ_TISNUM + | lhu TMP2, OFS_RD(PC) + | and TMP0, TMP0, TMP1 + | addiu PC, PC, 4 + | beqz TMP0, ->vmeta_comp + |. lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535) + | decode_RD4b TMP2 + | addu TMP2, TMP2, TMP1 + if (op == BC_ISLT || op == BC_ISGE) { + | c.olt.d f0, f2 + } else { + | c.ole.d f0, f2 + } + if (op == BC_ISLT || op == BC_ISLE) { + | movf TMP2, r0 + } else { + | movt TMP2, r0 + } + | addu PC, PC, TMP2 + |1: + | ins_next + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | // RA = src1*8, RD = src2*8, JMP with RD = target + | addu RA, BASE, RA + | addiu PC, PC, 4 + | lw TMP0, HI(RA) + | ldc1 f0, 0(RA) + | addu RD, BASE, RD + | lhu TMP2, -4+OFS_RD(PC) + | lw TMP1, HI(RD) + | ldc1 f2, 0(RD) + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | sltiu AT, TMP0, LJ_TISNUM + | sltiu CARG1, TMP1, LJ_TISNUM + | decode_RD4b TMP2 + | and AT, AT, CARG1 + | beqz AT, >5 + |. addu TMP2, TMP2, TMP3 + | c.eq.d f0, f2 + if (vk) { + | movf TMP2, r0 + } else { + | movt TMP2, r0 + } + |1: + | addu PC, PC, TMP2 + | ins_next + |5: // Either or both types are not numbers. + | lw CARG2, LO(RA) + | lw CARG3, LO(RD) + |.if FFI + | li TMP3, LJ_TCDATA + | beq TMP0, TMP3, ->vmeta_equal_cd + |.endif + |. sltiu AT, TMP0, LJ_TISPRI // Not a primitive? + |.if FFI + | beq TMP1, TMP3, ->vmeta_equal_cd + |.endif + |. xor TMP3, CARG2, CARG3 // Same tv? + | xor TMP1, TMP1, TMP0 // Same type? + | sltiu CARG1, TMP0, LJ_TISTABUD+1 // Table or userdata? + | movz TMP3, r0, AT // Ignore tv if primitive. + | movn CARG1, r0, TMP1 // Tab/ud and same type? 
+ | or AT, TMP1, TMP3 // Same type && (pri||same tv). + | movz CARG1, r0, AT + | beqz CARG1, <1 // Done if not tab/ud or not same type or same tv. + if (vk) { + |. movn TMP2, r0, AT + } else { + |. movz TMP2, r0, AT + } + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | lw TAB:TMP1, TAB:CARG2->metatable + | beqz TAB:TMP1, <1 // No metatable? + |. nop + | lbu TMP1, TAB:TMP1->nomm + | andi TMP1, TMP1, 1<<MM_eq + | bnez TMP1, <1 // Or 'no __eq' flag set? + |. nop + | b ->vmeta_equal // Handle __eq metamethod. + |. li CARG4, 1-vk // ne = 0 or 1. + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | // RA = src*8, RD = str_const*8 (~), JMP with RD = target + | addu RA, BASE, RA + | addiu PC, PC, 4 + | lw TMP0, HI(RA) + | srl RD, RD, 1 + | lw STR:TMP3, LO(RA) + | subu RD, KBASE, RD + | lhu TMP2, -4+OFS_RD(PC) + |.if FFI + | li AT, LJ_TCDATA + | beq TMP0, AT, ->vmeta_equal_cd + |.endif + |. lw STR:TMP1, -4(RD) // KBASE-4-str_const*4 + | addiu TMP0, TMP0, -LJ_TSTR + | decode_RD4b TMP2 + | xor TMP1, STR:TMP1, STR:TMP3 + | or TMP0, TMP0, TMP1 + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | addu TMP2, TMP2, TMP3 + if (vk) { + | movn TMP2, r0, TMP0 + } else { + | movz TMP2, r0, TMP0 + } + | addu PC, PC, TMP2 + | ins_next + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | // RA = src*8, RD = num_const*8, JMP with RD = target + | addu RA, BASE, RA + | addiu PC, PC, 4 + | lw TMP0, HI(RA) + | ldc1 f0, 0(RA) + | addu RD, KBASE, RD + | lhu TMP2, -4+OFS_RD(PC) + | ldc1 f2, 0(RD) + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | sltiu AT, TMP0, LJ_TISNUM + | decode_RD4b TMP2 + |.if FFI + | beqz AT, >5 + |.else + | beqz AT, >1 + |.endif + |. addu TMP2, TMP2, TMP3 + | c.eq.d f0, f2 + if (vk) { + | movf TMP2, r0 + | addu PC, PC, TMP2 + |1: + } else { + | movt TMP2, r0 + |1: + | addu PC, PC, TMP2 + } + | ins_next + |.if FFI + |5: + | li AT, LJ_TCDATA + | beq TMP0, AT, ->vmeta_equal_cd + |. nop + | b <1 + |. nop + |.endif + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target + | addu RA, BASE, RA + | srl TMP1, RD, 3 + | lw TMP0, HI(RA) + | lhu TMP2, OFS_RD(PC) + | not TMP1, TMP1 + | addiu PC, PC, 4 + |.if FFI + | li AT, LJ_TCDATA + | beq TMP0, AT, ->vmeta_equal_cd + |.endif + |. xor TMP0, TMP0, TMP1 + | decode_RD4b TMP2 + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | addu TMP2, TMP2, TMP3 + if (vk) { + | movn TMP2, r0, TMP0 + } else { + | movz TMP2, r0, TMP0 + } + | addu PC, PC, TMP2 + | ins_next + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | // RA = dst*8 or unused, RD = src*8, JMP with RD = target + | addu RD, BASE, RD + | lhu TMP2, OFS_RD(PC) + | lw TMP0, HI(RD) + | addiu PC, PC, 4 + if (op == BC_IST || op == BC_ISF) { + | sltiu TMP0, TMP0, LJ_TISTRUECOND + | decode_RD4b TMP2 + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | addu TMP2, TMP2, TMP3 + if (op == BC_IST) { + | movz TMP2, r0, TMP0 + } else { + | movn TMP2, r0, TMP0 + } + | addu PC, PC, TMP2 + } else { + | sltiu TMP0, TMP0, LJ_TISTRUECOND + | ldc1 f0, 0(RD) + if (op == BC_ISTC) { + | beqz TMP0, >1 + } else { + | bnez TMP0, >1 + } + |. 
addu RA, BASE, RA + | decode_RD4b TMP2 + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | addu TMP2, TMP2, TMP3 + | sdc1 f0, 0(RA) + | addu PC, PC, TMP2 + |1: + } + | ins_next + break; + + case BC_ISTYPE: + | // RA = src*8, RD = -type*8 + | addu TMP2, BASE, RA + | srl TMP1, RD, 3 + | lw TMP0, HI(TMP2) + | ins_next1 + | addu AT, TMP0, TMP1 + | bnez AT, ->vmeta_istype + |. ins_next2 + break; + case BC_ISNUM: + | // RA = src*8, RD = -(TISNUM-1)*8 + | addu TMP2, BASE, RA + | lw TMP0, HI(TMP2) + | ins_next1 + | sltiu AT, TMP0, LJ_TISNUM + | beqz AT, ->vmeta_istype + |. ins_next2 + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | // RA = dst*8, RD = src*8 + | addu RD, BASE, RD + | addu RA, BASE, RA + | ldc1 f0, 0(RD) + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + break; + case BC_NOT: + | // RA = dst*8, RD = src*8 + | addu RD, BASE, RD + | addu RA, BASE, RA + | lw TMP0, HI(RD) + | li TMP1, LJ_TFALSE + | sltiu TMP0, TMP0, LJ_TISTRUECOND + | addiu TMP1, TMP0, LJ_TTRUE + | ins_next1 + | sw TMP1, HI(RA) + | ins_next2 + break; + case BC_UNM: + | // RA = dst*8, RD = src*8 + | addu CARG3, BASE, RD + | addu RA, BASE, RA + | lw TMP0, HI(CARG3) + | ldc1 f0, 0(CARG3) + | sltiu AT, TMP0, LJ_TISNUM + | beqz AT, ->vmeta_unm + |. neg.d f0, f0 + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + break; + case BC_LEN: + | // RA = dst*8, RD = src*8 + | addu CARG2, BASE, RD + | addu RA, BASE, RA + | lw TMP0, HI(CARG2) + | lw CARG1, LO(CARG2) + | li AT, LJ_TSTR + | bne TMP0, AT, >2 + |. li AT, LJ_TTAB + | lw CRET1, STR:CARG1->len + |1: + | mtc1 CRET1, f0 + | cvt.d.w f0, f0 + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + |2: + | bne TMP0, AT, ->vmeta_len + |. nop +#if LJ_52 + | lw TAB:TMP2, TAB:CARG1->metatable + | bnez TAB:TMP2, >9 + |. nop + |3: +#endif + |->BC_LEN_Z: + | load_got lj_tab_len + | call_intern lj_tab_len // (GCtab *t) + |. nop + | // Returns uint32_t (but less than 2^31). + | b <1 + |. nop +#if LJ_52 + |9: + | lbu TMP0, TAB:TMP2->nomm + | andi TMP0, TMP0, 1<<MM_len + | bnez TMP0, <3 // 'no __len' flag set: check len. + |. nop + | b ->vmeta_len + |. nop +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithpre + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + | decode_RB8a RB, INS + | decode_RB8b RB + | decode_RDtoRC8 RC, RD + | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 + ||switch (vk) { + ||case 0: + | addu CARG3, BASE, RB + | addu CARG4, KBASE, RC + | lw TMP1, HI(CARG3) + | ldc1 f20, 0(CARG3) + | ldc1 f22, 0(CARG4) + | sltiu AT, TMP1, LJ_TISNUM + || break; + ||case 1: + | addu CARG4, BASE, RB + | addu CARG3, KBASE, RC + | lw TMP1, HI(CARG4) + | ldc1 f22, 0(CARG4) + | ldc1 f20, 0(CARG3) + | sltiu AT, TMP1, LJ_TISNUM + || break; + ||default: + | addu CARG3, BASE, RB + | addu CARG4, BASE, RC + | lw TMP1, HI(CARG3) + | lw TMP2, HI(CARG4) + | ldc1 f20, 0(CARG3) + | ldc1 f22, 0(CARG4) + | sltiu AT, TMP1, LJ_TISNUM + | sltiu TMP0, TMP2, LJ_TISNUM + | and AT, AT, TMP0 + || break; + ||} + | beqz AT, ->vmeta_arith + |. addu RA, BASE, RA + |.endmacro + | + |.macro fpmod, a, b, c + |->BC_MODVN_Z: + | bal ->vm_floor // floor(b/c) + |. div.d FARG1, b, c + | mul.d a, FRET1, c + | sub.d a, b, a // b - floor(b/c)*c + |.endmacro + | + |.macro ins_arith, ins + | ins_arithpre + |.if "ins" == "fpmod_" + | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. + |. 
nop + |.else + | ins f0, f20, f22 + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + |.endif + |.endmacro + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arith add.d + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arith sub.d + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arith mul.d + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arith div.d + break; + case BC_MODVN: + | ins_arith fpmod + break; + case BC_MODNV: case BC_MODVV: + | ins_arith fpmod_ + break; + case BC_POW: + | decode_RB8a RB, INS + | decode_RB8b RB + | decode_RDtoRC8 RC, RD + | addu CARG3, BASE, RB + | addu CARG4, BASE, RC + | lw TMP1, HI(CARG3) + | lw TMP2, HI(CARG4) + | ldc1 FARG1, 0(CARG3) + | ldc1 FARG2, 0(CARG4) + | sltiu AT, TMP1, LJ_TISNUM + | sltiu TMP0, TMP2, LJ_TISNUM + | and AT, AT, TMP0 + | load_got pow + | beqz AT, ->vmeta_arith + |. addu RA, BASE, RA + | call_extern + |. nop + | ins_next1 + | sdc1 FRET1, 0(RA) + | ins_next2 + break; + + case BC_CAT: + | // RA = dst*8, RB = src_start*8, RC = src_end*8 + | decode_RB8a RB, INS + | decode_RB8b RB + | decode_RDtoRC8 RC, RD + | subu CARG3, RC, RB + | sw BASE, L->base + | addu CARG2, BASE, RC + | move MULTRES, RB + |->BC_CAT_Z: + | load_got lj_meta_cat + | srl CARG3, CARG3, 3 + | sw PC, SAVE_PC + | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left) + |. move CARG1, L + | // Returns NULL (finished) or TValue * (metamethod). + | bnez CRET1, ->vmeta_binop + |. lw BASE, L->base + | addu RB, BASE, MULTRES + | ldc1 f0, 0(RB) + | addu RA, BASE, RA + | ins_next1 + | sdc1 f0, 0(RA) // Copy result from RB to RA. + | ins_next2 + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | // RA = dst*8, RD = str_const*8 (~) + | srl TMP1, RD, 1 + | subu TMP1, KBASE, TMP1 + | ins_next1 + | lw TMP0, -4(TMP1) // KBASE-4-str_const*4 + | addu RA, BASE, RA + | li TMP2, LJ_TSTR + | sw TMP0, LO(RA) + | sw TMP2, HI(RA) + | ins_next2 + break; + case BC_KCDATA: + |.if FFI + | // RA = dst*8, RD = cdata_const*8 (~) + | srl TMP1, RD, 1 + | subu TMP1, KBASE, TMP1 + | ins_next1 + | lw TMP0, -4(TMP1) // KBASE-4-cdata_const*4 + | addu RA, BASE, RA + | li TMP2, LJ_TCDATA + | sw TMP0, LO(RA) + | sw TMP2, HI(RA) + | ins_next2 + |.endif + break; + case BC_KSHORT: + | // RA = dst*8, RD = int16_literal*8 + | sra RD, INS, 16 + | mtc1 RD, f0 + | addu RA, BASE, RA + | cvt.d.w f0, f0 + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + break; + case BC_KNUM: + | // RA = dst*8, RD = num_const*8 + | addu RD, KBASE, RD + | addu RA, BASE, RA + | ldc1 f0, 0(RD) + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + break; + case BC_KPRI: + | // RA = dst*8, RD = primitive_type*8 (~) + | srl TMP1, RD, 3 + | addu RA, BASE, RA + | not TMP0, TMP1 + | ins_next1 + | sw TMP0, HI(RA) + | ins_next2 + break; + case BC_KNIL: + | // RA = base*8, RD = end*8 + | addu RA, BASE, RA + | sw TISNIL, HI(RA) + | addiu RA, RA, 8 + | addu RD, BASE, RD + |1: + | sw TISNIL, HI(RA) + | slt AT, RA, RD + | bnez AT, <1 + |. 
addiu RA, RA, 8 + | ins_next_ + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | // RA = dst*8, RD = uvnum*8 + | lw LFUNC:RB, FRAME_FUNC(BASE) + | srl RD, RD, 1 + | addu RD, RD, LFUNC:RB + | lw UPVAL:RB, LFUNC:RD->uvptr + | ins_next1 + | lw TMP1, UPVAL:RB->v + | ldc1 f0, 0(TMP1) + | addu RA, BASE, RA + | sdc1 f0, 0(RA) + | ins_next2 + break; + case BC_USETV: + | // RA = uvnum*8, RD = src*8 + | lw LFUNC:RB, FRAME_FUNC(BASE) + | srl RA, RA, 1 + | addu RD, BASE, RD + | addu RA, RA, LFUNC:RB + | ldc1 f0, 0(RD) + | lw UPVAL:RB, LFUNC:RA->uvptr + | lbu TMP3, UPVAL:RB->marked + | lw CARG2, UPVAL:RB->v + | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv) + | lbu TMP0, UPVAL:RB->closed + | lw TMP2, HI(RD) + | sdc1 f0, 0(CARG2) + | li AT, LJ_GC_BLACK|1 + | or TMP3, TMP3, TMP0 + | beq TMP3, AT, >2 // Upvalue is closed and black? + |. addiu TMP2, TMP2, -(LJ_TNUMX+1) + |1: + | ins_next + | + |2: // Check if new value is collectable. + | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1) + | beqz AT, <1 // tvisgcv(v) + |. lw TMP1, LO(RD) + | lbu TMP3, GCOBJ:TMP1->gch.marked + | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v) + | beqz TMP3, <1 + |. load_got lj_gc_barrieruv + | // Crossed a write barrier. Move the barrier forward. + | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv) + |. addiu CARG1, DISPATCH, GG_DISP2G + | b <1 + |. nop + break; + case BC_USETS: + | // RA = uvnum*8, RD = str_const*8 (~) + | lw LFUNC:RB, FRAME_FUNC(BASE) + | srl RA, RA, 1 + | srl TMP1, RD, 1 + | addu RA, RA, LFUNC:RB + | subu TMP1, KBASE, TMP1 + | lw UPVAL:RB, LFUNC:RA->uvptr + | lw STR:TMP1, -4(TMP1) // KBASE-4-str_const*4 + | lbu TMP2, UPVAL:RB->marked + | lw CARG2, UPVAL:RB->v + | lbu TMP3, STR:TMP1->marked + | andi AT, TMP2, LJ_GC_BLACK // isblack(uv) + | lbu TMP2, UPVAL:RB->closed + | li TMP0, LJ_TSTR + | sw STR:TMP1, LO(CARG2) + | bnez AT, >2 + |. sw TMP0, HI(CARG2) + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | beqz TMP2, <1 + |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str) + | beqz AT, <1 + |. load_got lj_gc_barrieruv + | // Crossed a write barrier. Move the barrier forward. + | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv) + |. addiu CARG1, DISPATCH, GG_DISP2G + | b <1 + |. nop + break; + case BC_USETN: + | // RA = uvnum*8, RD = num_const*8 + | lw LFUNC:RB, FRAME_FUNC(BASE) + | srl RA, RA, 1 + | addu RD, KBASE, RD + | addu RA, RA, LFUNC:RB + | ldc1 f0, 0(RD) + | lw UPVAL:RB, LFUNC:RA->uvptr + | ins_next1 + | lw TMP1, UPVAL:RB->v + | sdc1 f0, 0(TMP1) + | ins_next2 + break; + case BC_USETP: + | // RA = uvnum*8, RD = primitive_type*8 (~) + | lw LFUNC:RB, FRAME_FUNC(BASE) + | srl RA, RA, 1 + | srl TMP0, RD, 3 + | addu RA, RA, LFUNC:RB + | not TMP0, TMP0 + | lw UPVAL:RB, LFUNC:RA->uvptr + | ins_next1 + | lw TMP1, UPVAL:RB->v + | sw TMP0, HI(TMP1) + | ins_next2 + break; + + case BC_UCLO: + | // RA = level*8, RD = target + | lw TMP2, L->openupval + | branch_RD // Do this first since RD is not saved. + | load_got lj_func_closeuv + | sw BASE, L->base + | beqz TMP2, >1 + |. move CARG1, L + | call_intern lj_func_closeuv // (lua_State *L, TValue *level) + |. 
addu CARG2, BASE, RA + | lw BASE, L->base + |1: + | ins_next + break; + + case BC_FNEW: + | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype) + | srl TMP1, RD, 1 + | load_got lj_func_newL_gc + | subu TMP1, KBASE, TMP1 + | lw CARG3, FRAME_FUNC(BASE) + | lw CARG2, -4(TMP1) // KBASE-4-tab_const*4 + | sw BASE, L->base + | sw PC, SAVE_PC + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | call_intern lj_func_newL_gc + |. move CARG1, L + | // Returns GCfuncL *. + | lw BASE, L->base + | li TMP0, LJ_TFUNC + | ins_next1 + | addu RA, BASE, RA + | sw TMP0, HI(RA) + | sw LFUNC:CRET1, LO(RA) + | ins_next2 + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~) + | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH) + | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) + | sw BASE, L->base + | sw PC, SAVE_PC + | sltu AT, TMP0, TMP1 + | beqz AT, >5 + |1: + if (op == BC_TNEW) { + | load_got lj_tab_new + | srl CARG2, RD, 3 + | andi CARG2, CARG2, 0x7ff + | li TMP0, 0x801 + | addiu AT, CARG2, -0x7ff + | srl CARG3, RD, 14 + | movz CARG2, TMP0, AT + | // (lua_State *L, int32_t asize, uint32_t hbits) + | call_intern lj_tab_new + |. move CARG1, L + | // Returns Table *. + } else { + | load_got lj_tab_dup + | srl TMP1, RD, 1 + | subu TMP1, KBASE, TMP1 + | move CARG1, L + | call_intern lj_tab_dup // (lua_State *L, Table *kt) + |. lw CARG2, -4(TMP1) // KBASE-4-str_const*4 + | // Returns Table *. + } + | lw BASE, L->base + | ins_next1 + | addu RA, BASE, RA + | li TMP0, LJ_TTAB + | sw TAB:CRET1, LO(RA) + | sw TMP0, HI(RA) + | ins_next2 + |5: + | load_got lj_gc_step_fixtop + | move MULTRES, RD + | call_intern lj_gc_step_fixtop // (lua_State *L) + |. move CARG1, L + | b <1 + |. move RD, MULTRES + break; + + case BC_GGET: + | // RA = dst*8, RD = str_const*8 (~) + case BC_GSET: + | // RA = src*8, RD = str_const*8 (~) + | lw LFUNC:TMP2, FRAME_FUNC(BASE) + | srl TMP1, RD, 1 + | subu TMP1, KBASE, TMP1 + | lw TAB:RB, LFUNC:TMP2->env + | lw STR:RC, -4(TMP1) // KBASE-4-str_const*4 + if (op == BC_GGET) { + | b ->BC_TGETS_Z + } else { + | b ->BC_TSETS_Z + } + |. addu RA, BASE, RA + break; + + case BC_TGETV: + | // RA = dst*8, RB = table*8, RC = key*8 + | decode_RB8a RB, INS + | decode_RB8b RB + | decode_RDtoRC8 RC, RD + | addu CARG2, BASE, RB + | addu CARG3, BASE, RC + | lw TMP1, HI(CARG2) + | lw TMP2, HI(CARG3) + | lw TAB:RB, LO(CARG2) + | li AT, LJ_TTAB + | ldc1 f0, 0(CARG3) + | bne TMP1, AT, ->vmeta_tgetv + |. addu RA, BASE, RA + | sltiu AT, TMP2, LJ_TISNUM + | beqz AT, >5 + |. li AT, LJ_TSTR + | + | // Convert number key to integer, check for integerness and range. + | cvt.w.d f2, f0 + | lw TMP0, TAB:RB->asize + | mfc1 TMP2, f2 + | cvt.d.w f4, f2 + | lw TMP1, TAB:RB->array + | c.eq.d f0, f4 + | sltu AT, TMP2, TMP0 + | movf AT, r0 + | sll TMP2, TMP2, 3 + | beqz AT, ->vmeta_tgetv // Integer key and in array part? + |. addu TMP2, TMP1, TMP2 + | lw TMP0, HI(TMP2) + | beq TMP0, TISNIL, >2 + |. ldc1 f0, 0(TMP2) + |1: + | ins_next1 + | sdc1 f0, 0(RA) + | ins_next2 + | + |2: // Check for __index if table value is nil. + | lw TAB:TMP2, TAB:RB->metatable + | beqz TAB:TMP2, <1 // No metatable: done. + |. nop + | lbu TMP0, TAB:TMP2->nomm + | andi TMP0, TMP0, 1<vmeta_tgetv + |. nop + | + |5: + | bne TMP2, AT, ->vmeta_tgetv + |. lw STR:RC, LO(CARG3) + | b ->BC_TGETS_Z // String key? + |. 
nop
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*4 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | subu CARG3, KBASE, RC
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tgets1
+ |. addu RA, BASE, RA
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP1, NODE:TMP2->next
+ | lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | addiu CARG1, CARG1, -LJ_TSTR
+ | xor TMP0, TMP0, STR:RC
+ | or AT, CARG1, TMP0
+ | bnez AT, >4
+ |. lw TAB:TMP3, TAB:RB->metatable
+ | beq CARG2, TISNIL, >5 // Key found, but nil value?
+ |. lw CARG1, offsetof(Node, val)+LO(NODE:TMP2)
+ |3:
+ | ins_next1
+ | sw CARG2, HI(RA)
+ | sw CARG1, LO(RA)
+ | ins_next2
+ |
+ |4: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | beqz TAB:TMP3, <3 // No metatable: done.
+ |. li CARG2, LJ_TNIL
+ | lbu TMP0, TAB:TMP3->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <3 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgets
+ |. nop
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tgetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tgetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | beq TMP1, TISNIL, >5
+ |. ldc1 f0, 0(RC)
+ |1:
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_index
+ | bnez TMP1, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ |. nop
+ break;
+ case BC_TGETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TAB:CARG1, LO(CARG2)
+ | ldc1 f0, 0(CARG3)
+ | trunc.w.d f2, f0
+ | lw TMP0, TAB:CARG1->asize
+ | mfc1 CARG2, f2
+ | lw TMP1, TAB:CARG1->array
+ | sltu AT, CARG2, TMP0
+ | sll TMP2, CARG2, 3
+ | beqz AT, ->vmeta_tgetr // In array part?
+ |. addu TMP2, TMP1, TMP2
+ | ldc1 f0, 0(TMP2)
+ |->BC_TGETR_Z:
+ | addu RA, BASE, RA
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | ldc1 f0, 0(CARG3)
+ | bne TMP1, AT, ->vmeta_tsetv
+ |. addu RA, BASE, RA
+ | sltiu AT, TMP2, LJ_TISNUM
+ | beqz AT, >5
+ |. li AT, LJ_TSTR
+ |
+ | // Convert number key to integer, check for integerness and range.
+ | cvt.w.d f2, f0
+ | lw TMP0, TAB:RB->asize
+ | mfc1 TMP2, f2
+ | cvt.d.w f4, f2
+ | lw TMP1, TAB:RB->array
+ | c.eq.d f0, f4
+ | sltu AT, TMP2, TMP0
+ | movf AT, r0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tsetv // Integer key and in array part?
+ |. addu TMP1, TMP1, TMP2
+ | lbu TMP3, TAB:RB->marked
+ | lw TMP0, HI(TMP1)
+ | beq TMP0, TISNIL, >3
+ |. ldc1 f0, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f0, 0(TMP1)
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP2, TAB:TMP2->nomm
+ | andi TMP2, TMP2, 1<<MM_newindex
+ | bnez TMP2, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetv
+ |. nop
+ |
+ |5:
+ | bne TMP2, AT, ->vmeta_tsetv
+ |. lw STR:RC, LO(CARG3)
+ | b ->BC_TSETS_Z // String key?
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | subu CARG3, KBASE, RC
+ | lw TAB:RB, LO(CARG2)
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tsets1
+ |. addu RA, BASE, RA
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:RB->node
+ | sb r0, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | ldc1 f20, 0(RA)
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | li AT, LJ_TSTR
+ | lw NODE:TMP1, NODE:TMP2->next
+ | bne CARG1, AT, >5
+ |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | bne TMP0, STR:RC, >5
+ |. lbu TMP3, TAB:RB->marked
+ | beq CARG2, TISNIL, >4 // Key found, but nil value?
+ |. lw TAB:TMP0, TAB:RB->metatable
+ |2:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f20, NODE:TMP2->val
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | beqz TAB:TMP0, <2 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP0->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | bnez TMP0, <2 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsets
+ |. nop
+ |
+ |5: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, add a new one
+ |
+ | // But check for __newindex first.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, >6 // No metatable: continue.
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |. li AT, LJ_TSTR
+ |6:
+ | load_got lj_tab_newkey
+ | sw STR:RC, LO(CARG3)
+ | sw AT, HI(CARG3)
+ | sw BASE, L->base
+ | move CARG2, TAB:RB
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | lw BASE, L->base
+ | b <3 // No 2nd write barrier needed.
+ |. sdc1 f20, 0(CRET1)
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tsetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tsetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP1, TISNIL, >5
+ |. ldc1 f0, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f0, 0(RC)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_newindex
+ | bnez TMP1, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG1, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TAB:CARG2, LO(CARG1)
+ | ldc1 f0, 0(CARG3)
+ | trunc.w.d f2, f0
+ | lbu TMP3, TAB:CARG2->marked
+ | lw TMP0, TAB:CARG2->asize
+ | mfc1 CARG3, f2
+ | lw TMP1, TAB:CARG2->array
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. addu RA, BASE, RA
+ |2:
+ | sltu AT, CARG3, TMP0
+ | sll TMP2, CARG3, 3
+ | beqz AT, ->vmeta_tsetr // In array part?
+ |. ldc1 f20, 0(RA)
+ | addu CRET1, TMP1, TMP2
+ |->BC_TSETR_Z:
+ | ins_next1
+ | sdc1 f20, 0(CRET1)
+ | ins_next2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | addu RA, BASE, RA
+ |1:
+ | addu TMP3, KBASE, RD
+ | lw TAB:CARG2, -8+LO(RA) // Guaranteed to be a table.
+ | addiu TMP0, MULTRES, -8
+ | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
+ | beqz TMP0, >4 // Nothing to copy?
+ |. srl CARG3, TMP0, 3
+ | addu CARG3, CARG3, TMP3
+ | lw TMP2, TAB:CARG2->asize
+ | sll TMP1, TMP3, 3
+ | lbu TMP3, TAB:CARG2->marked
+ | lw CARG1, TAB:CARG2->array
+ | sltu AT, TMP2, CARG3
+ | bnez AT, >5
+ |. addu TMP2, RA, TMP0
+ | addu TMP1, TMP1, CARG1
+ | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | sltu AT, RA, TMP2
+ | sdc1 f0, 0(TMP1)
+ | bnez AT, <3
+ |. addiu TMP1, TMP1, 8
+ | bnez TMP0, >7
+ |. nop
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | load_got lj_tab_reasize
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | move BASE, RD
+ | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ |. move CARG1, L
+ | // Must not reallocate the stack.
+ | move RD, BASE
+ | b <1
+ |. lw BASE, L->base // Reload BASE for lack of a saved register.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0, <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ | b ->BC_CALL_Z
+ |.
addu NARGS8:RC, NARGS8:RC, MULTRES + break; + case BC_CALL: + | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8 + | decode_RDtoRC8 NARGS8:RC, RD + |->BC_CALL_Z: + | move TMP2, BASE + | addu BASE, BASE, RA + | li AT, LJ_TFUNC + | lw TMP0, HI(BASE) + | lw LFUNC:RB, LO(BASE) + | addiu BASE, BASE, 8 + | bne TMP0, AT, ->vmeta_call + |. addiu NARGS8:RC, NARGS8:RC, -8 + | ins_call + break; + + case BC_CALLMT: + | // RA = base*8, (RB = 0,) RC = extra_nargs*8 + | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD. + | // Fall through. Assumes BC_CALLT follows. + break; + case BC_CALLT: + | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 + | addu RA, BASE, RA + | li AT, LJ_TFUNC + | lw TMP0, HI(RA) + | lw LFUNC:RB, LO(RA) + | move NARGS8:RC, RD + | lw TMP1, FRAME_PC(BASE) + | addiu RA, RA, 8 + | bne TMP0, AT, ->vmeta_callt + |. addiu NARGS8:RC, NARGS8:RC, -8 + |->BC_CALLT_Z: + | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'. + | lbu TMP3, LFUNC:RB->ffid + | bnez TMP0, >7 + |. xori TMP2, TMP1, FRAME_VARG + |1: + | sw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC. + | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function? + | move TMP2, BASE + | beqz NARGS8:RC, >3 + |. move TMP3, NARGS8:RC + |2: + | ldc1 f0, 0(RA) + | addiu RA, RA, 8 + | addiu TMP3, TMP3, -8 + | sdc1 f0, 0(TMP2) + | bnez TMP3, <2 + |. addiu TMP2, TMP2, 8 + |3: + | or TMP0, TMP0, AT + | beqz TMP0, >5 + |. nop + |4: + | ins_callt + | + |5: // Tailcall to a fast function with a Lua frame below. + | lw INS, -4(TMP1) + | decode_RA8a RA, INS + | decode_RA8b RA + | subu TMP1, BASE, RA + | lw LFUNC:TMP1, -8+FRAME_FUNC(TMP1) + | lw TMP1, LFUNC:TMP1->pc + | b <4 + |. lw KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE. + | + |7: // Tailcall from a vararg function. + | andi AT, TMP2, FRAME_TYPEP + | bnez AT, <1 // Vararg frame below? + |. subu TMP2, BASE, TMP2 // Relocate BASE down. + | move BASE, TMP2 + | lw TMP1, FRAME_PC(TMP2) + | b <1 + |. andi TMP0, TMP1, FRAME_TYPE + break; + + case BC_ITERC: + | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8)) + | move TMP2, BASE + | addu BASE, BASE, RA + | li AT, LJ_TFUNC + | lw TMP1, -24+HI(BASE) + | lw LFUNC:RB, -24+LO(BASE) + | ldc1 f2, -8(BASE) + | ldc1 f0, -16(BASE) + | sw TMP1, HI(BASE) // Copy callable. + | sw LFUNC:RB, LO(BASE) + | sdc1 f2, 16(BASE) // Copy control var. + | sdc1 f0, 8(BASE) // Copy state. + | addiu BASE, BASE, 8 + | bne TMP1, AT, ->vmeta_call + |. li NARGS8:RC, 16 // Iterators get 2 arguments. + | ins_call + break; + + case BC_ITERN: + | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8) + |.if JIT + | // NYI: add hotloop, record BC_ITERN. + |.endif + | addu RA, BASE, RA + | lw TAB:RB, -16+LO(RA) + | lw RC, -8+LO(RA) // Get index from control var. + | lw TMP0, TAB:RB->asize + | lw TMP1, TAB:RB->array + | addiu PC, PC, 4 + |1: // Traverse array part. + | sltu AT, RC, TMP0 + | beqz AT, >5 // Index points after array part? + |. sll TMP3, RC, 3 + | addu TMP3, TMP1, TMP3 + | lw TMP2, HI(TMP3) + | ldc1 f0, 0(TMP3) + | mtc1 RC, f2 + | lhu RD, -4+OFS_RD(PC) + | beq TMP2, TISNIL, <1 // Skip holes in array part. + |. addiu RC, RC, 1 + | cvt.d.w f2, f2 + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | sdc1 f0, 8(RA) + | decode_RD4b RD + | addu RD, RD, TMP3 + | sw RC, -8+LO(RA) // Update control var. + | addu PC, PC, RD + | sdc1 f2, 0(RA) + |3: + | ins_next + | + |5: // Traverse hash part. + | lw TMP1, TAB:RB->hmask + | subu RC, RC, TMP0 + | lw TMP2, TAB:RB->node + |6: + | sltu AT, TMP1, RC // End of iteration? 
Branch to ITERL+1. + | bnez AT, <3 + |. sll TMP3, RC, 5 + | sll RB, RC, 3 + | subu TMP3, TMP3, RB + | addu NODE:TMP3, TMP3, TMP2 + | lw RB, HI(NODE:TMP3) + | ldc1 f0, 0(NODE:TMP3) + | lhu RD, -4+OFS_RD(PC) + | beq RB, TISNIL, <6 // Skip holes in hash part. + |. addiu RC, RC, 1 + | ldc1 f2, NODE:TMP3->key + | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) + | sdc1 f0, 8(RA) + | addu RC, RC, TMP0 + | decode_RD4b RD + | addu RD, RD, TMP3 + | sdc1 f2, 0(RA) + | addu PC, PC, RD + | b <3 + |. sw RC, -8+LO(RA) // Update control var. + break; + + case BC_ISNEXT: + | // RA = base*8, RD = target (points to ITERN) + | addu RA, BASE, RA + | lw TMP0, -24+HI(RA) + | lw CFUNC:TMP1, -24+LO(RA) + | lw TMP2, -16+HI(RA) + | lw TMP3, -8+HI(RA) + | li AT, LJ_TFUNC + | bne TMP0, AT, >5 + |. addiu TMP2, TMP2, -LJ_TTAB + | lbu TMP1, CFUNC:TMP1->ffid + | addiu TMP3, TMP3, -LJ_TNIL + | srl TMP0, RD, 1 + | or TMP2, TMP2, TMP3 + | addiu TMP1, TMP1, -FF_next_N + | addu TMP0, PC, TMP0 + | or TMP1, TMP1, TMP2 + | bnez TMP1, >5 + |. lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535) + | addu PC, TMP0, TMP2 + | lui TMP1, 0xfffe + | ori TMP1, TMP1, 0x7fff + | sw r0, -8+LO(RA) // Initialize control var. + | sw TMP1, -8+HI(RA) + |1: + | ins_next + |5: // Despecialize bytecode if any of the checks fail. + | li TMP3, BC_JMP + | li TMP1, BC_ITERC + | sb TMP3, -4+OFS_OP(PC) + | addu PC, TMP0, TMP2 + | b <1 + |. sb TMP1, OFS_OP(PC) + break; + + case BC_VARG: + | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 + | lw TMP0, FRAME_PC(BASE) + | decode_RDtoRC8 RC, RD + | decode_RB8a RB, INS + | addu RC, BASE, RC + | decode_RB8b RB + | addu RA, BASE, RA + | addiu RC, RC, FRAME_VARG + | addu TMP2, RA, RB + | addiu TMP3, BASE, -8 // TMP3 = vtop + | subu RC, RC, TMP0 // RC = vbase + | // Note: RC may now be even _above_ BASE if nargs was < numparams. + | beqz RB, >5 // Copy all varargs? + |. subu TMP1, TMP3, RC + | addiu TMP2, TMP2, -16 + |1: // Copy vararg slots to destination slots. + | lw CARG1, HI(RC) + | sltu AT, RC, TMP3 + | lw CARG2, LO(RC) + | addiu RC, RC, 8 + | movz CARG1, TISNIL, AT + | sw CARG1, HI(RA) + | sw CARG2, LO(RA) + | sltu AT, RA, TMP2 + | bnez AT, <1 + |. addiu RA, RA, 8 + |3: + | ins_next + | + |5: // Copy all varargs. + | lw TMP0, L->maxstack + | blez TMP1, <3 // No vararg slots? + |. li MULTRES, 8 // MULTRES = (0+1)*8 + | addu TMP2, RA, TMP1 + | sltu AT, TMP0, TMP2 + | bnez AT, >7 + |. addiu MULTRES, TMP1, 8 + |6: + | ldc1 f0, 0(RC) + | addiu RC, RC, 8 + | sdc1 f0, 0(RA) + | sltu AT, RC, TMP3 + | bnez AT, <6 // More vararg slots? + |. addiu RA, RA, 8 + | b <3 + |. nop + | + |7: // Grow stack for varargs. + | load_got lj_state_growstack + | sw RA, L->top + | subu RA, RA, BASE + | sw BASE, L->base + | subu BASE, RC, BASE // Need delta, because BASE may change. + | sw PC, SAVE_PC + | srl CARG2, TMP1, 3 + | call_intern lj_state_growstack // (lua_State *L, int n) + |. move CARG1, L + | move RC, BASE + | lw BASE, L->base + | addu RA, BASE, RA + | addu RC, BASE, RC + | b <6 + |. addiu TMP3, BASE, -8 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | // RA = results*8, RD = extra_nresults*8 + | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8. + | // Fall through. Assumes BC_RET follows. + break; + + case BC_RET: + | // RA = results*8, RD = (nresults+1)*8 + | lw PC, FRAME_PC(BASE) + | addu RA, BASE, RA + | move MULTRES, RD + |1: + | andi TMP0, PC, FRAME_TYPE + | bnez TMP0, ->BC_RETV_Z + |. 
xori TMP1, PC, FRAME_VARG + | + |->BC_RET_Z: + | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return + | lw INS, -4(PC) + | addiu TMP2, BASE, -8 + | addiu RC, RD, -8 + | decode_RA8a TMP0, INS + | decode_RB8a RB, INS + | decode_RA8b TMP0 + | decode_RB8b RB + | addu TMP3, TMP2, RB + | beqz RC, >3 + |. subu BASE, TMP2, TMP0 + |2: + | ldc1 f0, 0(RA) + | addiu RA, RA, 8 + | addiu RC, RC, -8 + | sdc1 f0, 0(TMP2) + | bnez RC, <2 + |. addiu TMP2, TMP2, 8 + |3: + | addiu TMP3, TMP3, -8 + |5: + | sltu AT, TMP2, TMP3 + | bnez AT, >6 + |. lw LFUNC:TMP1, FRAME_FUNC(BASE) + | ins_next1 + | lw TMP1, LFUNC:TMP1->pc + | lw KBASE, PC2PROTO(k)(TMP1) + | ins_next2 + | + |6: // Fill up results with nil. + | sw TISNIL, HI(TMP2) + | b <5 + |. addiu TMP2, TMP2, 8 + | + |->BC_RETV_Z: // Non-standard return case. + | andi TMP2, TMP1, FRAME_TYPEP + | bnez TMP2, ->vm_return + |. nop + | // Return from vararg function: relocate BASE down. + | subu BASE, BASE, TMP1 + | b <1 + |. lw PC, FRAME_PC(BASE) + break; + + case BC_RET0: case BC_RET1: + | // RA = results*8, RD = (nresults+1)*8 + | lw PC, FRAME_PC(BASE) + | addu RA, BASE, RA + | move MULTRES, RD + | andi TMP0, PC, FRAME_TYPE + | bnez TMP0, ->BC_RETV_Z + |. xori TMP1, PC, FRAME_VARG + | + | lw INS, -4(PC) + | addiu TMP2, BASE, -8 + if (op == BC_RET1) { + | ldc1 f0, 0(RA) + } + | decode_RB8a RB, INS + | decode_RA8a RA, INS + | decode_RB8b RB + | decode_RA8b RA + if (op == BC_RET1) { + | sdc1 f0, 0(TMP2) + } + | subu BASE, TMP2, RA + |5: + | sltu AT, RD, RB + | bnez AT, >6 + |. lw LFUNC:TMP1, FRAME_FUNC(BASE) + | ins_next1 + | lw TMP1, LFUNC:TMP1->pc + | lw KBASE, PC2PROTO(k)(TMP1) + | ins_next2 + | + |6: // Fill up results with nil. + | addiu TMP2, TMP2, 8 + | addiu RD, RD, 8 + | b <5 + if (op == BC_RET1) { + |. sw TISNIL, HI(TMP2) + } else { + |. sw TISNIL, -8+HI(TMP2) + } + break; + + /* -- Loops and branches ------------------------------------------------ */ + + case BC_FORL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IFORL follows. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + | // RA = base*8, RD = target (after end of loop or start of loop) + vk = (op == BC_IFORL || op == BC_JFORL); + | addu RA, BASE, RA + if (vk) { + | ldc1 f0, FORL_IDX*8(RA) + | ldc1 f4, FORL_STEP*8(RA) + | ldc1 f2, FORL_STOP*8(RA) + | lw TMP3, FORL_STEP*8+HI(RA) + | add.d f0, f0, f4 + | sdc1 f0, FORL_IDX*8(RA) + } else { + | lw TMP1, FORL_IDX*8+HI(RA) + | lw TMP3, FORL_STEP*8+HI(RA) + | lw TMP2, FORL_STOP*8+HI(RA) + | sltiu TMP1, TMP1, LJ_TISNUM + | sltiu TMP0, TMP3, LJ_TISNUM + | sltiu TMP2, TMP2, LJ_TISNUM + | and TMP1, TMP1, TMP0 + | and TMP1, TMP1, TMP2 + | ldc1 f0, FORL_IDX*8(RA) + | beqz TMP1, ->vmeta_for + |. ldc1 f2, FORL_STOP*8(RA) + } + if (op != BC_JFORL) { + | srl RD, RD, 1 + | lui TMP0, (-(BCBIAS_J*4 >> 16) & 65535) + } + | c.le.d 0, f0, f2 + | c.le.d 1, f2, f0 + | sdc1 f0, FORL_EXT*8(RA) + if (op == BC_JFORI) { + | li TMP1, 1 + | li TMP2, 1 + | addu TMP0, RD, TMP0 + | slt TMP3, TMP3, r0 + | movf TMP1, r0, 0 + | addu PC, PC, TMP0 + | movf TMP2, r0, 1 + | lhu RD, -4+OFS_RD(PC) + | movn TMP1, TMP2, TMP3 + | bnez TMP1, =>BC_JLOOP + |. decode_RD8b RD + } else if (op == BC_JFORL) { + | li TMP1, 1 + | li TMP2, 1 + | slt TMP3, TMP3, r0 + | movf TMP1, r0, 0 + | movf TMP2, r0, 1 + | movn TMP1, TMP2, TMP3 + | bnez TMP1, =>BC_JLOOP + |. 
nop + } else { + | addu TMP1, RD, TMP0 + | slt TMP3, TMP3, r0 + | move TMP2, TMP1 + if (op == BC_FORI) { + | movt TMP1, r0, 0 + | movt TMP2, r0, 1 + } else { + | movf TMP1, r0, 0 + | movf TMP2, r0, 1 + } + | movn TMP1, TMP2, TMP3 + | addu PC, PC, TMP1 + } + | ins_next + break; + + case BC_ITERL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IITERL follows. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | // RA = base*8, RD = target + | addu RA, BASE, RA + | lw TMP1, HI(RA) + | beq TMP1, TISNIL, >1 // Stop if iterator returned nil. + |. lw TMP2, LO(RA) + if (op == BC_JITERL) { + | sw TMP1, -8+HI(RA) + | b =>BC_JLOOP + |. sw TMP2, -8+LO(RA) + } else { + | branch_RD // Otherwise save control var + branch. + | sw TMP1, -8+HI(RA) + | sw TMP2, -8+LO(RA) + } + |1: + | ins_next + break; + + case BC_LOOP: + | // RA = base*8, RD = target (loop extent) + | // Note: RA/RD is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_ILOOP follows. + break; + + case BC_ILOOP: + | // RA = base*8, RD = target (loop extent) + | ins_next + break; + + case BC_JLOOP: + |.if JIT + | // RA = base*8 (ignored), RD = traceno*8 + | lw TMP1, DISPATCH_J(trace)(DISPATCH) + | srl RD, RD, 1 + | li AT, 0 + | addu TMP1, TMP1, RD + | // Traces on MIPS don't store the trace number, so use 0. + | sw AT, DISPATCH_GL(vmstate)(DISPATCH) + | lw TRACE:TMP2, 0(TMP1) + | sw BASE, DISPATCH_GL(jit_base)(DISPATCH) + | lw TMP2, TRACE:TMP2->mcode + | sw L, DISPATCH_GL(tmpbuf.L)(DISPATCH) + | jr TMP2 + |. addiu JGL, DISPATCH, GG_DISP2G+32768 + |.endif + break; + + case BC_JMP: + | // RA = base*8 (only used by trace recorder), RD = target + | branch_RD + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: + |.if JIT + | hotcall + |.endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 + | lw TMP2, L->maxstack + | lbu TMP1, -4+PC2PROTO(numparams)(PC) + | lw KBASE, -4+PC2PROTO(k)(PC) + | sltu AT, TMP2, RA + | bnez AT, ->vm_growstack_l + |. sll TMP1, TMP1, 3 + if (op != BC_JFUNCF) { + | ins_next1 + } + |2: + | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters. + | bnez AT, >3 + |. addu AT, BASE, NARGS8:RC + if (op == BC_JFUNCF) { + | decode_RD8a RD, INS + | b =>BC_JLOOP + |. decode_RD8b RD + } else { + | ins_next2 + } + | + |3: // Clear missing parameters. + | sw TISNIL, HI(AT) + | b <2 + |. addiu NARGS8:RC, NARGS8:RC, 8 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | NYI // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 + | addu TMP1, BASE, RC + | lw TMP2, L->maxstack + | addu TMP0, RA, RC + | sw LFUNC:RB, LO(TMP1) // Store copy of LFUNC. + | addiu TMP3, RC, 8+FRAME_VARG + | sltu AT, TMP0, TMP2 + | lw KBASE, -4+PC2PROTO(k)(PC) + | beqz AT, ->vm_growstack_l + |. sw TMP3, HI(TMP1) // Store delta + FRAME_VARG. + | lbu TMP2, -4+PC2PROTO(numparams)(PC) + | move RA, BASE + | move RC, TMP1 + | ins_next1 + | beqz TMP2, >3 + |. 
addiu BASE, TMP1, 8 + |1: + | lw TMP0, HI(RA) + | lw TMP3, LO(RA) + | sltu AT, RA, RC // Less args than parameters? + | move CARG1, TMP0 + | movz TMP0, TISNIL, AT // Clear missing parameters. + | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC). + | sw TMP3, 8+LO(TMP1) + | addiu TMP2, TMP2, -1 + | sw TMP0, 8+HI(TMP1) + | addiu TMP1, TMP1, 8 + | sw CARG1, HI(RA) + | bnez TMP2, <1 + |. addiu RA, RA, 8 + |3: + | ins_next2 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8 + if (op == BC_FUNCC) { + | lw CFUNCADDR, CFUNC:RB->f + } else { + | lw CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH) + } + | addu TMP1, RA, NARGS8:RC + | lw TMP2, L->maxstack + | addu RC, BASE, NARGS8:RC + | sw BASE, L->base + | sltu AT, TMP2, TMP1 + | sw RC, L->top + | li_vmstate C + if (op == BC_FUNCCW) { + | lw CARG2, CFUNC:RB->f + } + | bnez AT, ->vm_growstack_c // Need to grow stack. + |. move CARG1, L + | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f]) + |. st_vmstate + | // Returns nresults. + | lw BASE, L->base + | sll RD, CRET1, 3 + | lw TMP1, L->top + | li_vmstate INTERP + | lw PC, FRAME_PC(BASE) // Fetch PC of caller. + | subu RA, TMP1, RD // RA = L->top - nresults*8 + | sw L, DISPATCH_GL(cur_L)(DISPATCH) + | b ->vm_returnc + |. st_vmstate + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ + int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.4byte .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.4byte 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 31\n" + "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.4byte .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.4byte .Lframe0\n" + "\t.4byte .Lbegin\n" + "\t.4byte %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x9f\n\t.sleb128 1\n" + "\t.byte 0x9e\n\t.sleb128 2\n", + fcofs, CFRAME_SIZE); + for (i = 23; i >= 16; i--) + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i); + for (i = 30; i >= 20; i -= 2) + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i); + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.4byte .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.4byte .Lframe0\n" + "\t.4byte lj_vm_ffi_call\n" + "\t.4byte %d\n" + "\t.byte 0x9f\n\t.uleb128 1\n" + "\t.byte 0x90\n\t.uleb128 2\n" + "\t.byte 0xd\n\t.uleb128 0x10\n" + "\t.align 2\n" + ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); +#endif + fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); + fprintf(ctx->fp, + "\t.globl lj_err_unwind_dwarf\n" + ".Lframe1:\n" + "\t.4byte .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.4byte 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 31\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0\n" + "\t.4byte lj_err_unwind_dwarf\n" + "\t.byte 0\n" + "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE2:\n" + "\t.4byte .LEFDE2-.LASFDE2\n" + ".LASFDE2:\n" + "\t.4byte .LASFDE2-.Lframe1\n" + "\t.4byte .Lbegin\n" + "\t.4byte %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x9f\n\t.sleb128 1\n" + "\t.byte 0x9e\n\t.sleb128 2\n", + fcofs, CFRAME_SIZE); + for (i = 23; i >= 16; i--) + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i); + for (i = 30; i >= 20; i -= 2) + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i); + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE2:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".Lframe2:\n" + "\t.4byte .LECIE2-.LSCIE2\n" + ".LSCIE2:\n" + "\t.4byte 0\n" + "\t.byte 0x1\n" + "\t.string \"zR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 31\n" + "\t.uleb128 1\n" /* augmentation length */ + "\t.byte 0\n" + "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE2:\n\n"); + fprintf(ctx->fp, + ".LSFDE3:\n" + "\t.4byte .LEFDE3-.LASFDE3\n" + ".LASFDE3:\n" + "\t.4byte .LASFDE3-.Lframe2\n" + "\t.4byte lj_vm_ffi_call\n" + "\t.4byte %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0x9f\n\t.uleb128 1\n" + "\t.byte 0x90\n\t.uleb128 2\n" + "\t.byte 0xd\n\t.uleb128 0x10\n" + "\t.align 2\n" + ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); +#endif + break; + default: + break; + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/vm_ppc.dasc b/Build/source/libs/luajit/LuaJIT-src/src/vm_ppc.dasc new file mode 100644 index 00000000000..9299c554e2b --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/vm_ppc.dasc @@ -0,0 +1,5252 @@ +|// Low-level VM code for PowerPC 32 bit or 32on64 bit mode. 
+|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +| +|.arch ppc +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|// Note: The ragged indentation of the instructions is intentional. +|// The starting columns indicate data dependencies. +| +|//----------------------------------------------------------------------- +| +|// DynASM defines used by the PPC port: +|// +|// P64 64 bit pointers (only for GPR64 testing). +|// Note: see vm_ppc64.dasc for a full PPC64 _LP64 port. +|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3). +|// Affects reg saves, stack layout, carry/overflow/dot flags etc. +|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360). +|// TOC Need table of contents (64 bit or 32 bit variant, e.g. PS3). +|// Function pointers are really a struct: code, TOC, env (optional). +|// TOCENV Function pointers have an environment pointer, too (not on PS3). +|// PPE Power Processor Element of Cell (PS3) or Xenon (Xbox 360). +|// Must avoid (slow) micro-coded instructions. +| +|.if P64 +|.define TOC, 1 +|.define TOCENV, 1 +|.macro lpx, a, b, c; ldx a, b, c; .endmacro +|.macro lp, a, b; ld a, b; .endmacro +|.macro stp, a, b; std a, b; .endmacro +|.define decode_OPP, decode_OP8 +|.if FFI +|// Missing: Calling conventions, 64 bit regs, TOC. +|.error lib_ffi not yet implemented for PPC64 +|.endif +|.else +|.macro lpx, a, b, c; lwzx a, b, c; .endmacro +|.macro lp, a, b; lwz a, b; .endmacro +|.macro stp, a, b; stw a, b; .endmacro +|.define decode_OPP, decode_OP4 +|.endif +| +|// Convenience macros for TOC handling. +|.if TOC +|// Linker needs a TOC patch area for every external call relocation. +|.macro blex, target; bl extern target@plt; nop; .endmacro +|.macro .toc, a, b; a, b; .endmacro +|.if P64 +|.define TOC_OFS, 8 +|.define ENV_OFS, 16 +|.else +|.define TOC_OFS, 4 +|.define ENV_OFS, 8 +|.endif +|.else // No TOC. +|.macro blex, target; bl extern target@plt; .endmacro +|.macro .toc, a, b; .endmacro +|.endif +|.macro .tocenv, a, b; .if TOCENV; a, b; .endif; .endmacro +| +|.macro .gpr64, a, b; .if GPR64; a, b; .endif; .endmacro +| +|.macro andix., y, a, i +|.if PPE +| rlwinm y, a, 0, 31-lj_fls(i), 31-lj_ffs(i) +| cmpwi y, 0 +|.else +| andi. y, a, i +|.endif +|.endmacro +| +|.macro clrso, reg +|.if PPE +| li reg, 0 +| mtxer reg +|.else +| mcrxr cr0 +|.endif +|.endmacro +| +|.macro checkov, reg, noov +|.if PPE +| mfxer reg +| add reg, reg, reg +| cmpwi reg, 0 +| li reg, 0 +| mtxer reg +| bgey noov +|.else +| mcrxr cr0 +| bley noov +|.endif +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Fixed register assignments for the interpreter. +|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA) +| +|// The following must be C callee-save (but BASE is often refetched). +|.define BASE, r14 // Base of current Lua stack frame. +|.define KBASE, r15 // Constants of current Lua function. +|.define PC, r16 // Next PC. +|.define DISPATCH, r17 // Opcode dispatch table. +|.define LREG, r18 // Register holding lua_State (also in SAVE_L). +|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8. +|.define JGL, r31 // On-trace: global_State + 32768. +| +|// Constants for type-comparisons, stores and conversions. C callee-save. +|.define TISNUM, r22 +|.define TISNIL, r23 +|.define ZERO, r24 +|.define TOBIT, f30 // 2^52 + 2^51. 
+|.define TONUM, f31 // 2^52 + 2^51 + 2^31. +| +|// The following temporaries are not saved across C calls, except for RA. +|.define RA, r20 // Callee-save. +|.define RB, r10 +|.define RC, r11 +|.define RD, r12 +|.define INS, r7 // Overlaps CARG5. +| +|.define TMP0, r0 +|.define TMP1, r8 +|.define TMP2, r9 +|.define TMP3, r6 // Overlaps CARG4. +| +|// Saved temporaries. +|.define SAVE0, r21 +| +|// Calling conventions. +|.define CARG1, r3 +|.define CARG2, r4 +|.define CARG3, r5 +|.define CARG4, r6 // Overlaps TMP3. +|.define CARG5, r7 // Overlaps INS. +| +|.define FARG1, f1 +|.define FARG2, f2 +| +|.define CRET1, r3 +|.define CRET2, r4 +| +|.define TOCREG, r2 // TOC register (only used by C code). +|.define ENVREG, r11 // Environment pointer (nested C functions). +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|.if GPR64 +|.if FRAME32 +| +|// 456(sp) // \ 32/64 bit C frame info +|.define TONUM_LO, 452(sp) // | +|.define TONUM_HI, 448(sp) // | +|.define TMPD_LO, 444(sp) // | +|.define TMPD_HI, 440(sp) // | +|.define SAVE_CR, 432(sp) // | 64 bit CR save. +|.define SAVE_ERRF, 424(sp) // > Parameter save area. +|.define SAVE_NRES, 420(sp) // | +|.define SAVE_L, 416(sp) // | +|.define SAVE_PC, 412(sp) // | +|.define SAVE_MULTRES, 408(sp) // | +|.define SAVE_CFRAME, 400(sp) // / 64 bit C frame chain. +|// 392(sp) // Reserved. +|.define CFRAME_SPACE, 384 // Delta for sp. +|// Back chain for sp: 384(sp) <-- sp entering interpreter +|.define SAVE_LR, 376(sp) // 32 bit LR stored in hi-part. +|.define SAVE_GPR_, 232 // .. 232+18*8: 64 bit GPR saves. +|.define SAVE_FPR_, 88 // .. 88+18*8: 64 bit FPR saves. +|// 80(sp) // Needed for 16 byte stack frame alignment. +|// 16(sp) // Callee parameter save area (ABI mandated). +|// 8(sp) // Reserved +|// Back chain for sp: 0(sp) <-- sp while in interpreter +|// 32 bit sp stored in hi-part of 0(sp). +| +|.define TMPD_BLO, 447(sp) +|.define TMPD, TMPD_HI +|.define TONUM_D, TONUM_HI +| +|.else +| +|// 508(sp) // \ 32 bit C frame info. +|.define SAVE_ERRF, 472(sp) // | +|.define SAVE_NRES, 468(sp) // | +|.define SAVE_L, 464(sp) // > Parameter save area. +|.define SAVE_PC, 460(sp) // | +|.define SAVE_MULTRES, 456(sp) // | +|.define SAVE_CFRAME, 448(sp) // / 64 bit C frame chain. +|.define SAVE_LR, 416(sp) +|.define CFRAME_SPACE, 400 // Delta for sp. +|// Back chain for sp: 400(sp) <-- sp entering interpreter +|.define SAVE_FPR_, 256 // .. 256+18*8: 64 bit FPR saves. +|.define SAVE_GPR_, 112 // .. 112+18*8: 64 bit GPR saves. +|// 48(sp) // Callee parameter save area (ABI mandated). +|.define SAVE_TOC, 40(sp) // TOC save area. +|.define TMPD_LO, 36(sp) // \ Link editor temp (ABI mandated). +|.define TMPD_HI, 32(sp) // / +|.define TONUM_LO, 28(sp) // \ Compiler temp (ABI mandated). +|.define TONUM_HI, 24(sp) // / +|// Next frame lr: 16(sp) +|.define SAVE_CR, 8(sp) // 64 bit CR save. +|// Back chain for sp: 0(sp) <-- sp while in interpreter +| +|.define TMPD_BLO, 39(sp) +|.define TMPD, TMPD_HI +|.define TONUM_D, TONUM_HI +| +|.endif +|.else +| +|.define SAVE_LR, 276(sp) +|.define CFRAME_SPACE, 272 // Delta for sp. +|// Back chain for sp: 272(sp) <-- sp entering interpreter +|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves. +|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves. +|.define SAVE_CR, 52(sp) // 32 bit CR save. +|.define SAVE_ERRF, 48(sp) // 32 bit C frame info. 
+|.define SAVE_NRES, 44(sp) +|.define SAVE_CFRAME, 40(sp) +|.define SAVE_L, 36(sp) +|.define SAVE_PC, 32(sp) +|.define SAVE_MULTRES, 28(sp) +|.define UNUSED1, 24(sp) +|.define TMPD_LO, 20(sp) +|.define TMPD_HI, 16(sp) +|.define TONUM_LO, 12(sp) +|.define TONUM_HI, 8(sp) +|// Next frame lr: 4(sp) +|// Back chain for sp: 0(sp) <-- sp while in interpreter +| +|.define TMPD_BLO, 23(sp) +|.define TMPD, TMPD_HI +|.define TONUM_D, TONUM_HI +| +|.endif +| +|.macro save_, reg +|.if GPR64 +| std r..reg, SAVE_GPR_+(reg-14)*8(sp) +|.else +| stw r..reg, SAVE_GPR_+(reg-14)*4(sp) +|.endif +| stfd f..reg, SAVE_FPR_+(reg-14)*8(sp) +|.endmacro +|.macro rest_, reg +|.if GPR64 +| ld r..reg, SAVE_GPR_+(reg-14)*8(sp) +|.else +| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp) +|.endif +| lfd f..reg, SAVE_FPR_+(reg-14)*8(sp) +|.endmacro +| +|.macro saveregs +|.if GPR64 and not FRAME32 +| stdu sp, -CFRAME_SPACE(sp) +|.else +| stwu sp, -CFRAME_SPACE(sp) +|.endif +| save_ 14; save_ 15; save_ 16 +| mflr r0 +| save_ 17; save_ 18; save_ 19; save_ 20; save_ 21; save_ 22 +|.if GPR64 and not FRAME32 +| std r0, SAVE_LR +|.else +| stw r0, SAVE_LR +|.endif +| save_ 23; save_ 24; save_ 25 +| mfcr r0 +| save_ 26; save_ 27; save_ 28; save_ 29; save_ 30; save_ 31 +|.if GPR64 +| std r0, SAVE_CR +|.else +| stw r0, SAVE_CR +|.endif +| .toc std TOCREG, SAVE_TOC +|.endmacro +| +|.macro restoreregs +|.if GPR64 and not FRAME32 +| ld r0, SAVE_LR +|.else +| lwz r0, SAVE_LR +|.endif +|.if GPR64 +| ld r12, SAVE_CR +|.else +| lwz r12, SAVE_CR +|.endif +| rest_ 14; rest_ 15; rest_ 16; rest_ 17; rest_ 18; rest_ 19 +| mtlr r0; +|.if PPE; mtocrf 0x20, r12; .else; mtcrf 0x38, r12; .endif +| rest_ 20; rest_ 21; rest_ 22; rest_ 23; rest_ 24; rest_ 25 +|.if PPE; mtocrf 0x10, r12; .endif +| rest_ 26; rest_ 27; rest_ 28; rest_ 29; rest_ 30; rest_ 31 +|.if PPE; mtocrf 0x08, r12; .endif +| addi sp, sp, CFRAME_SPACE +|.endmacro +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State, LREG +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS8, int +|.type TRACE, GCtrace +|.type SBUF, SBuf +| +|//----------------------------------------------------------------------- +| +|// Trap for not-yet-implemented parts. +|.macro NYI; tw 4, sp, sp; .endmacro +| +|// int/FP conversions. +|.macro tonum_i, freg, reg +| xoris reg, reg, 0x8000 +| stw reg, TONUM_LO +| lfd freg, TONUM_D +| fsub freg, freg, TONUM +|.endmacro +| +|.macro tonum_u, freg, reg +| stw reg, TONUM_LO +| lfd freg, TONUM_D +| fsub freg, freg, TOBIT +|.endmacro +| +|.macro toint, reg, freg, tmpfreg +| fctiwz tmpfreg, freg +| stfd tmpfreg, TMPD +| lwz reg, TMPD_LO +|.endmacro +| +|.macro toint, reg, freg +| toint reg, freg, freg +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Access to frame relative to BASE. +|.define FRAME_PC, -8 +|.define FRAME_FUNC, -4 +| +|// Instruction decode. 
+|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro +|.macro decode_OP8, dst, ins; rlwinm dst, ins, 3, 21, 28; .endmacro +|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro +|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro +|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro +|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro +| +|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro +|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro +| +|// Instruction fetch. +|.macro ins_NEXT1 +| lwz INS, 0(PC) +| addi PC, PC, 4 +|.endmacro +|// Instruction decode+dispatch. Note: optimized for e300! +|.macro ins_NEXT2 +| decode_OPP TMP1, INS +| lpx TMP0, DISPATCH, TMP1 +| mtctr TMP0 +| decode_RB8 RB, INS +| decode_RD8 RD, INS +| decode_RA8 RA, INS +| decode_RC8 RC, INS +| bctr +|.endmacro +|.macro ins_NEXT +| ins_NEXT1 +| ins_NEXT2 +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +| .define ins_next1, ins_NEXT1 +| .define ins_next2, ins_NEXT2 +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| .macro ins_next +| b ->ins_next +| .endmacro +| .macro ins_next1 +| .endmacro +| .macro ins_next2 +| b ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC +| lwz PC, LFUNC:RB->pc +| lwz INS, 0(PC) +| addi PC, PC, 4 +| decode_OPP TMP1, INS +| decode_RA8 RA, INS +| lpx TMP0, DISPATCH, TMP1 +| add RA, RA, BASE +| mtctr TMP0 +| bctr +|.endmacro +| +|.macro ins_call +| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC +| stw PC, FRAME_PC(BASE) +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to test operand types. +|.macro checknum, reg; cmplw reg, TISNUM; .endmacro +|.macro checknum, cr, reg; cmplw cr, reg, TISNUM; .endmacro +|.macro checkstr, reg; cmpwi reg, LJ_TSTR; .endmacro +|.macro checktab, reg; cmpwi reg, LJ_TTAB; .endmacro +|.macro checkfunc, reg; cmpwi reg, LJ_TFUNC; .endmacro +|.macro checknil, reg; cmpwi reg, LJ_TNIL; .endmacro +| +|.macro branch_RD +| srwi TMP0, RD, 1 +| addis PC, PC, -(BCBIAS_J*4 >> 16) +| add PC, PC, TMP0 +|.endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|.macro hotcheck, delta, target +| rlwinm TMP1, PC, 31, 25, 30 +| addi TMP1, TMP1, GG_DISP2HOT +| lhzx TMP2, DISPATCH, TMP1 +| addic. TMP2, TMP2, -delta +| sthx TMP2, DISPATCH, TMP1 +| blt target +|.endmacro +| +|.macro hotloop +| hotcheck HOTCOUNT_LOOP, ->vm_hotloop +|.endmacro +| +|.macro hotcall +| hotcheck HOTCOUNT_CALL, ->vm_hotcall +|.endmacro +| +|// Set current VM state. Uses TMP0. +|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro +|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro +| +|// Move table write barrier back. Overwrites mark and tmp. 
+|.macro barrierback, tab, mark, tmp +| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH) +| // Assumes LJ_GC_BLACK is 0x04. +| rlwinm mark, mark, 0, 30, 28 // black2gray(tab) +| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH) +| stb mark, tab->marked +| stw tmp, tab->gclist +|.endmacro +| +|//----------------------------------------------------------------------- + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | // See vm_return. Also: TMP2 = previous base. + | andix. TMP0, PC, FRAME_P + | li TMP1, LJ_TTRUE + | beq ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame. + | mr BASE, TMP2 // Restore caller base. + | // Prepending may overwrite the pcall frame, so do it at the end. + | stwu TMP1, FRAME_PC(RA) // Prepend true to results. + | + |->vm_returnc: + | addi RD, RD, 8 // RD = (nresults+1)*8. + | andix. TMP0, PC, FRAME_TYPE + | cmpwi cr1, RD, 0 + | li CRET1, LUA_YIELD + | beq cr1, ->vm_unwind_c_eh + | mr MULTRES, RD + | beq ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return + | // TMP0 = PC & FRAME_TYPE + | cmpwi TMP0, FRAME_C + | rlwinm TMP2, PC, 0, 0, 28 + | li_vmstate C + | sub TMP2, BASE, TMP2 // TMP2 = previous base. + | bney ->vm_returnp + | + | addic. TMP1, RD, -8 + | stp TMP2, L->base + | lwz TMP2, SAVE_NRES + | subi BASE, BASE, 8 + | st_vmstate + | slwi TMP2, TMP2, 3 + | beq >2 + |1: + | addic. TMP1, TMP1, -8 + | lfd f0, 0(RA) + | addi RA, RA, 8 + | stfd f0, 0(BASE) + | addi BASE, BASE, 8 + | bney <1 + | + |2: + | cmpw TMP2, RD // More/less results wanted? + | bne >6 + |3: + | stp BASE, L->top // Store new top. + | + |->vm_leave_cp: + | lp TMP0, SAVE_CFRAME // Restore previous C frame. + | li CRET1, 0 // Ok return status for vm_pcall. + | stp TMP0, L->cframe + | + |->vm_leave_unw: + | restoreregs + | blr + | + |6: + | ble >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | lwz TMP1, L->maxstack + | cmplw BASE, TMP1 + | bge >8 + | stw TISNIL, 0(BASE) + | addi RD, RD, 8 + | addi BASE, BASE, 8 + | b <2 + | + |7: // Less results wanted. + | subfic TMP3, TMP2, 0 // LUA_MULTRET+1 case? + | sub TMP0, RD, TMP2 + | subfe TMP1, TMP1, TMP1 // TMP1 = TMP2 == 0 ? 0 : -1 + | and TMP0, TMP0, TMP1 + | sub BASE, BASE, TMP0 // Either keep top or shrink it. + | b <3 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | stp BASE, L->top // Save current top held in BASE (yes). + | mr SAVE0, RD + | srwi CARG2, TMP2, 3 + | mr CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | lwz TMP2, SAVE_NRES + | mr RD, SAVE0 + | slwi TMP2, TMP2, 3 + | lp BASE, L->top // Need the (realloced) L->top in BASE. + | b <2 + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. 
+ | // (void *cframe, int errcode) + | mr sp, CARG1 + | mr CRET1, CARG2 + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | lwz L, SAVE_L + | .toc ld TOCREG, SAVE_TOC + | li TMP0, ~LJ_VMST_C + | lwz GL:TMP1, L->glref + | stw TMP0, GL:TMP1->vmstate + | b ->vm_leave_unw + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + |.if GPR64 + | rldicr sp, CARG1, 0, 61 + |.else + | rlwinm sp, CARG1, 0, 0, 29 + |.endif + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | lwz L, SAVE_L + | .toc ld TOCREG, SAVE_TOC + | li TISNUM, LJ_TISNUM // Setup type comparison constants. + | lp BASE, L->base + | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | li ZERO, 0 + | stw TMP3, TMPD + | li TMP1, LJ_TFALSE + | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). + | li TISNIL, LJ_TNIL + | li_vmstate INTERP + | lfs TOBIT, TMPD + | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame. + | la RA, -8(BASE) // Results start at BASE-8. + | stw TMP3, TMPD + | addi DISPATCH, DISPATCH, GG_G2DISP + | stw TMP1, 0(RA) // Prepend false to error message. + | li RD, 16 // 2 results: false + error message. + | st_vmstate + | lfs TONUM, TMPD + | b ->vm_returnc + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | li CARG2, LUA_MINSTACK + | b >2 + | + |->vm_growstack_l: // Grow stack for Lua function. + | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC + | add RC, BASE, RC + | sub RA, RA, BASE + | stp BASE, L->base + | addi PC, PC, 4 // Must point after first instruction. + | stp RC, L->top + | srwi CARG2, RA, 3 + |2: + | // L->base = new base, L->top = top + | stw PC, SAVE_PC + | mr CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | lp BASE, L->base + | lp RC, L->top + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | sub RC, RC, BASE + | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | mr L, CARG1 + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | mr BASE, CARG2 + | lbz TMP1, L->status + | stw L, SAVE_L + | li PC, FRAME_CP + | addi TMP0, sp, CFRAME_RESUME + | addi DISPATCH, DISPATCH, GG_G2DISP + | stw CARG3, SAVE_NRES + | cmplwi TMP1, 0 + | stw CARG3, SAVE_ERRF + | stp CARG3, SAVE_CFRAME + | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | stp TMP0, L->cframe + | beq >3 + | + | // Resume after yield (like a return). + | stw L, DISPATCH_GL(cur_L)(DISPATCH) + | mr RA, BASE + | lp BASE, L->base + | li TISNUM, LJ_TISNUM // Setup type comparison constants. + | lp TMP1, L->top + | lwz PC, FRAME_PC(BASE) + | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | stb CARG3, L->status + | stw TMP3, TMPD + | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 
+ | lfs TOBIT, TMPD + | sub RD, TMP1, BASE + | stw TMP3, TMPD + | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) + | addi RD, RD, 8 + | stw TMP0, TONUM_HI + | li_vmstate INTERP + | li ZERO, 0 + | st_vmstate + | andix. TMP0, PC, FRAME_TYPE + | mr MULTRES, RD + | lfs TONUM, TMPD + | li TISNIL, LJ_TNIL + | beq ->BC_RET_Z + | b ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | li PC, FRAME_CP + | stw CARG4, SAVE_ERRF + | b >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | li PC, FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + | lp TMP1, L:CARG1->cframe + | mr L, CARG1 + | stw CARG3, SAVE_NRES + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | stw CARG1, SAVE_L + | mr BASE, CARG2 + | addi DISPATCH, DISPATCH, GG_G2DISP + | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | stp TMP1, SAVE_CFRAME + | stp sp, L->cframe // Add our C frame to cframe chain. + | + |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). + | stw L, DISPATCH_GL(cur_L)(DISPATCH) + | lp TMP2, L->base // TMP2 = old base (used in vmeta_call). + | li TISNUM, LJ_TISNUM // Setup type comparison constants. + | lp TMP1, L->top + | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | add PC, PC, BASE + | stw TMP3, TMPD + | li ZERO, 0 + | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). + | lfs TOBIT, TMPD + | sub PC, PC, TMP2 // PC = frame delta + frame type + | stw TMP3, TMPD + | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) + | sub NARGS8:RC, TMP1, BASE + | stw TMP0, TONUM_HI + | li_vmstate INTERP + | lfs TONUM, TMPD + | li TISNIL, LJ_TNIL + | st_vmstate + | + |->vm_call_dispatch: + | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC + | lwz TMP0, FRAME_PC(BASE) + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | checkfunc TMP0; bne ->vmeta_call + | + |->vm_call_dispatch_f: + | ins_call + | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | mr L, CARG1 + | lwz TMP0, L:CARG1->stack + | stw CARG1, SAVE_L + | lp TMP1, L->top + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). + | lp TMP1, L->cframe + | addi DISPATCH, DISPATCH, GG_G2DISP + | .toc lp CARG4, 0(CARG4) + | li TMP2, 0 + | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. + | stw TMP2, SAVE_ERRF // No error function. + | stp TMP1, SAVE_CFRAME + | stp sp, L->cframe // Add our C frame to cframe chain. + | stw L, DISPATCH_GL(cur_L)(DISPATCH) + | mtctr CARG4 + | bctrl // (lua_State *L, lua_CFunction func, void *ud) + |.if PPE + | mr BASE, CRET1 + | cmpwi CRET1, 0 + |.else + | mr. BASE, CRET1 + |.endif + | li PC, FRAME_CP + | bne <3 // Else continue with the call. + | b ->vm_leave_cp // No base? Just remove C frame. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the + |// stack, so BASE doesn't need to be reloaded across these calls. 
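+ |// Continuation frames keep the saved PC and the continuation in the two
+ |// slots below the metamethod base ([cont|PC] at -16/-12 of the meta base);
+ |// ->cont_dispatch below reloads both once the metamethod returns.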
+ | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8 + | lwz TMP0, -12(BASE) // Continuation. + | mr RB, BASE + | mr BASE, TMP2 // Restore caller BASE. + | lwz LFUNC:TMP1, FRAME_FUNC(TMP2) + |.if FFI + | cmplwi TMP0, 1 + |.endif + | lwz PC, -16(RB) // Restore PC from [cont|PC]. + | subi TMP2, RD, 8 + | lwz TMP1, LFUNC:TMP1->pc + | stwx TISNIL, RA, TMP2 // Ensure one valid arg. + |.if FFI + | ble >1 + |.endif + | lwz KBASE, PC2PROTO(k)(TMP1) + | // BASE = base, RA = resultptr, RB = meta base + | mtctr TMP0 + | bctr // Jump to continuation. + | + |.if FFI + |1: + | beq ->cont_ffi_callback // cont = 1: return from FFI callback. + | // cont = 0: tailcall from C function. + | subi TMP1, RB, 16 + | sub RC, TMP1, BASE + | b ->vm_call_tail + |.endif + | + |->cont_cat: // RA = resultptr, RB = meta base + | lwz INS, -4(PC) + | subi CARG2, RB, 16 + | decode_RB8 SAVE0, INS + | lfd f0, 0(RA) + | add TMP1, BASE, SAVE0 + | stp BASE, L->base + | cmplw TMP1, CARG2 + | sub CARG3, CARG2, TMP1 + | decode_RA8 RA, INS + | stfd f0, 0(CARG2) + | bney ->BC_CAT_Z + | stfdx f0, BASE, RA + | b ->cont_nop + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets1: + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | li TMP0, LJ_TSTR + | decode_RB8 RB, INS + | stw STR:RC, 4(CARG3) + | add CARG2, BASE, RB + | stw TMP0, 0(CARG3) + | b >1 + | + |->vmeta_tgets: + | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) + | li TMP0, LJ_TTAB + | stw TAB:RB, 4(CARG2) + | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH) + | stw TMP0, 0(CARG2) + | li TMP1, LJ_TSTR + | stw STR:RC, 4(CARG3) + | stw TMP1, 0(CARG3) + | b >1 + | + |->vmeta_tgetb: // TMP0 = index + |.if not DUALNUM + | tonum_u f0, TMP0 + |.endif + | decode_RB8 RB, INS + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | add CARG2, BASE, RB + |.if DUALNUM + | stw TISNUM, 0(CARG3) + | stw TMP0, 4(CARG3) + |.else + | stfd f0, 0(CARG3) + |.endif + | b >1 + | + |->vmeta_tgetv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | cmplwi CRET1, 0 + | beq >3 + | lfd f0, 0(CRET1) + | ins_next1 + | stfdx f0, BASE, RA + | ins_next2 + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | subfic TMP1, BASE, FRAME_CONT + | lp BASE, L->top + | stw PC, -16(BASE) // [cont|PC] + | add PC, TMP1, BASE + | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | li NARGS8:RC, 16 // 2 args for func(t, k). + | b ->vm_call_dispatch_f + | + |->vmeta_tgetr: + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. 
+ | cmplwi CRET1, 0 + | beq >1 + | lfd f14, 0(CRET1) + | b ->BC_TGETR_Z + |1: + | stwx TISNIL, BASE, RA + | b ->cont_nop + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets1: + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | li TMP0, LJ_TSTR + | decode_RB8 RB, INS + | stw STR:RC, 4(CARG3) + | add CARG2, BASE, RB + | stw TMP0, 0(CARG3) + | b >1 + | + |->vmeta_tsets: + | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) + | li TMP0, LJ_TTAB + | stw TAB:RB, 4(CARG2) + | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH) + | stw TMP0, 0(CARG2) + | li TMP1, LJ_TSTR + | stw STR:RC, 4(CARG3) + | stw TMP1, 0(CARG3) + | b >1 + | + |->vmeta_tsetb: // TMP0 = index + |.if not DUALNUM + | tonum_u f0, TMP0 + |.endif + | decode_RB8 RB, INS + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | add CARG2, BASE, RB + |.if DUALNUM + | stw TISNUM, 0(CARG3) + | stw TMP0, 4(CARG3) + |.else + | stfd f0, 0(CARG3) + |.endif + | b >1 + | + |->vmeta_tsetv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | cmplwi CRET1, 0 + | lfdx f0, BASE, RA + | beq >3 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | ins_next1 + | stfd f0, 0(CRET1) + | ins_next2 + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | subfic TMP1, BASE, FRAME_CONT + | lp BASE, L->top + | stw PC, -16(BASE) // [cont|PC] + | add PC, TMP1, BASE + | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | li NARGS8:RC, 24 // 3 args for func(t, k, v) + | stfd f0, 16(BASE) // Copy value to third argument. + | b ->vm_call_dispatch_f + | + |->vmeta_tsetr: + | stp BASE, L->base + | stw PC, SAVE_PC + | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) + | // Returns TValue *. + | stfd f14, 0(CRET1) + | b ->cont_nop + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | mr CARG1, L + | subi PC, PC, 4 + |.if DUALNUM + | mr CARG2, RA + |.else + | add CARG2, BASE, RA + |.endif + | stw PC, SAVE_PC + |.if DUALNUM + | mr CARG3, RD + |.else + | add CARG3, BASE, RD + |.endif + | stp BASE, L->base + | decode_OP1 CARG4, INS + | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // Returns 0/1 or TValue * (metamethod). + |3: + | cmplwi CRET1, 1 + | bgt ->vmeta_binop + | subfic CRET1, CRET1, 0 + |4: + | lwz INS, 0(PC) + | addi PC, PC, 4 + | decode_RD4 TMP2, INS + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | and TMP2, TMP2, CRET1 + | add PC, PC, TMP2 + |->cont_nop: + | ins_next + | + |->cont_ra: // RA = resultptr + | lwz INS, -4(PC) + | lfd f0, 0(RA) + | decode_RA8 TMP1, INS + | stfdx f0, BASE, TMP1 + | b ->cont_nop + | + |->cont_condt: // RA = resultptr + | lwz TMP0, 0(RA) + | .gpr64 extsw TMP0, TMP0 + | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is true. + | subfe CRET1, CRET1, CRET1 + | not CRET1, CRET1 + | b <4 + | + |->cont_condf: // RA = resultptr + | lwz TMP0, 0(RA) + | .gpr64 extsw TMP0, TMP0 + | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is false. + | subfe CRET1, CRET1, CRET1 + | b <4 + | + |->vmeta_equal: + | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. 
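+ | // lj_meta_comp and lj_meta_equal return 0/1 for a resolved comparison or
+ | // a new base pointer (> 1) for a metamethod call; the shared code at <3
+ | // above tells the two apart with the cmplwi CRET1, 1 / bgt sequence.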
+ | subi PC, PC, 4 + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + | + |->vmeta_equal_cd: + |.if FFI + | mr CARG2, INS + | subi PC, PC, 4 + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + |.endif + | + |->vmeta_istype: + | subi PC, PC, 4 + | stp BASE, L->base + | srwi CARG2, RA, 3 + | mr CARG1, L + | srwi CARG3, RD, 3 + | stw PC, SAVE_PC + | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) + | b ->cont_nop + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_nv: + | add CARG3, KBASE, RC + | add CARG4, BASE, RB + | b >1 + |->vmeta_arith_nv2: + |.if DUALNUM + | mr CARG3, RC + | mr CARG4, RB + | b >1 + |.endif + | + |->vmeta_unm: + | mr CARG3, RD + | mr CARG4, RD + | b >1 + | + |->vmeta_arith_vn: + | add CARG3, BASE, RB + | add CARG4, KBASE, RC + | b >1 + | + |->vmeta_arith_vv: + | add CARG3, BASE, RB + | add CARG4, BASE, RC + |.if DUALNUM + | b >1 + |.endif + |->vmeta_arith_vn2: + |->vmeta_arith_vv2: + |.if DUALNUM + | mr CARG3, RB + | mr CARG4, RC + |.endif + |1: + | add CARG2, BASE, RA + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS. + | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // Returns NULL (finished) or TValue * (metamethod). + | cmplwi CRET1, 0 + | beq ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 + | sub TMP1, CRET1, BASE + | stw PC, -16(CRET1) // [cont|PC] + | mr TMP2, BASE + | addi PC, TMP1, FRAME_CONT + | mr BASE, CRET1 + | li NARGS8:RC, 16 // 2 args for func(o1, o2). + | b ->vm_call_dispatch + | + |->vmeta_len: +#if LJ_52 + | mr SAVE0, CARG1 +#endif + | mr CARG2, RD + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_len // (lua_State *L, TValue *o) + | // Returns NULL (retry) or TValue * (metamethod base). +#if LJ_52 + | cmplwi CRET1, 0 + | bne ->vmeta_binop // Binop call for compatibility. + | mr CARG1, SAVE0 + | b ->BC_LEN_Z +#else + | b ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call: // Resolve and call __call metamethod. + | // TMP2 = old base, BASE = new base, RC = nargs*8 + | mr CARG1, L + | stp TMP2, L->base // This is the callers base! + | subi CARG2, BASE, 8 + | stw PC, SAVE_PC + | add CARG3, BASE, RC + | mr SAVE0, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | addi NARGS8:RC, SAVE0, 8 // Got one more argument now. + | ins_call + | + |->vmeta_callt: // Resolve __call for BC_CALLT. + | // BASE = old base, RA = new base, RC = nargs*8 + | mr CARG1, L + | stp BASE, L->base + | subi CARG2, RA, 8 + | stw PC, SAVE_PC + | add CARG3, RA, RC + | mr SAVE0, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | lwz TMP1, FRAME_PC(BASE) + | addi NARGS8:RC, SAVE0, 8 // Got one more argument now. + | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here. 
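+ | // lj_meta_call has shifted the arguments up and prepended the resolved
+ | // __call handler, so the tail call is retried with one extra argument.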
+ | b ->BC_CALLT_Z + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mr CARG1, L + | stp BASE, L->base + | mr CARG2, RA + | stw PC, SAVE_PC + | mr SAVE0, INS + | bl extern lj_meta_for // (lua_State *L, TValue *base) + |.if JIT + | decode_OP1 TMP0, SAVE0 + |.endif + | decode_RA8 RA, SAVE0 + |.if JIT + | cmpwi TMP0, BC_JFORI + |.endif + | decode_RD8 RD, SAVE0 + |.if JIT + | beqy =>BC_JFORI + |.endif + | b =>BC_FORI + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | cmplwi NARGS8:RC, 8 + | lwz CARG3, 0(BASE) + | lwz CARG1, 4(BASE) + | blt ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | cmplwi NARGS8:RC, 16 + | lwz CARG3, 0(BASE) + | lwz CARG4, 8(BASE) + | lwz CARG1, 4(BASE) + | lwz CARG2, 12(BASE) + | blt ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name + |->ff_ .. name: + | cmplwi NARGS8:RC, 8 + | lwz CARG3, 0(BASE) + | lfd FARG1, 0(BASE) + | blt ->fff_fallback + | checknum CARG3; bge ->fff_fallback + |.endmacro + | + |.macro .ffunc_nn, name + |->ff_ .. name: + | cmplwi NARGS8:RC, 16 + | lwz CARG3, 0(BASE) + | lfd FARG1, 0(BASE) + | lwz CARG4, 8(BASE) + | lfd FARG2, 8(BASE) + | blt ->fff_fallback + | checknum CARG3; bge ->fff_fallback + | checknum CARG4; bge ->fff_fallback + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1. + |.macro ffgccheck + | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH) + | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) + | cmplw TMP0, TMP1 + | bgel ->fff_gcstep + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | li TMP1, LJ_TFALSE + | la RA, -8(BASE) + | cmplw cr1, CARG3, TMP1 + | lwz PC, FRAME_PC(BASE) + | bge cr1, ->fff_fallback + | stw CARG3, 0(RA) + | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8. + | stw CARG1, 4(RA) + | beq ->fff_res // Done if exactly 1 argument. + | li TMP1, 8 + | subi RC, RC, 8 + |1: + | cmplw TMP1, RC + | lfdx f0, BASE, TMP1 + | stfdx f0, RA, TMP1 + | addi TMP1, TMP1, 8 + | bney <1 + | b ->fff_res + | + |.ffunc type + | cmplwi NARGS8:RC, 8 + | lwz CARG1, 0(BASE) + | blt ->fff_fallback + | .gpr64 extsw CARG1, CARG1 + | subfc TMP0, TISNUM, CARG1 + | subfe TMP2, CARG1, CARG1 + | orc TMP1, TMP2, TMP0 + | addi TMP1, TMP1, ~LJ_TISNUM+1 + | slwi TMP1, TMP1, 3 + | la TMP2, CFUNC:RB->upvalue + | lfdx FARG1, TMP2, TMP1 + | b ->fff_resn + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | checktab CARG3; bne >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | lwz TAB:CARG1, TAB:CARG1->metatable + |2: + | li CARG3, LJ_TNIL + | cmplwi TAB:CARG1, 0 + | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH) + | beq ->fff_restv + | lwz TMP0, TAB:CARG1->hmask + | li CARG3, LJ_TTAB // Use metatable as default result. + | lwz TMP1, STR:RC->hash + | lwz NODE:TMP2, TAB:CARG1->node + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | slwi TMP0, TMP1, 5 + | slwi TMP1, TMP1, 3 + | sub TMP1, TMP0, TMP1 + | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + |3: // Rearranged logic, because we expect _not_ to find the key. 
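+ | // (idx*32-idx*8 equals idx*24, the 32-bit Node size, so NODE:TMP2 points
+ | // at the hash slot for "__metatable"; the loop below walks the chain.)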
+ | lwz CARG4, NODE:TMP2->key + | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2) + | lwz CARG2, NODE:TMP2->val + | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2) + | checkstr CARG4; bne >4 + | cmpw TMP0, STR:RC; beq >5 + |4: + | lwz NODE:TMP2, NODE:TMP2->next + | cmplwi NODE:TMP2, 0 + | beq ->fff_restv // Not found, keep default result. + | b <3 + |5: + | checknil CARG2 + | beq ->fff_restv // Ditto for nil value. + | mr CARG3, CARG2 // Return value of mt.__metatable. + | mr CARG1, TMP1 + | b ->fff_restv + | + |6: + | cmpwi CARG3, LJ_TUDATA; beq <1 + | .gpr64 extsw CARG3, CARG3 + | subfc TMP0, TISNUM, CARG3 + | subfe TMP2, CARG3, CARG3 + | orc TMP1, TMP2, TMP0 + | addi TMP1, TMP1, ~LJ_TISNUM+1 + | slwi TMP1, TMP1, 2 + | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH) + | lwzx TAB:CARG1, TMP2, TMP1 + | b <2 + | + |.ffunc_2 setmetatable + | // Fast path: no mt for table yet and not clearing the mt. + | checktab CARG3; bne ->fff_fallback + | lwz TAB:TMP1, TAB:CARG1->metatable + | checktab CARG4; bne ->fff_fallback + | cmplwi TAB:TMP1, 0 + | lbz TMP3, TAB:CARG1->marked + | bne ->fff_fallback + | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + | stw TAB:CARG2, TAB:CARG1->metatable + | beq ->fff_restv + | barrierback TAB:CARG1, TMP3, TMP0 + | b ->fff_restv + | + |.ffunc rawget + | cmplwi NARGS8:RC, 16 + | lwz CARG4, 0(BASE) + | lwz TAB:CARG2, 4(BASE) + | blt ->fff_fallback + | checktab CARG4; bne ->fff_fallback + | la CARG3, 8(BASE) + | mr CARG1, L + | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // Returns cTValue *. + | lfd FARG1, 0(CRET1) + | b ->fff_resn + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | cmplwi NARGS8:RC, 8 + | lwz CARG1, 0(BASE) + | lfd FARG1, 0(BASE) + | bne ->fff_fallback // Exactly one argument. + | checknum CARG1; bgt ->fff_fallback + | b ->fff_resn + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | checkstr CARG3 + | // A __tostring method in the string base metatable is ignored. + | beq ->fff_restv // String key? + | // Handle numbers inline, unless a number base metatable is present. + | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH) + | checknum CARG3 + | cmplwi cr1, TMP0, 0 + | stp BASE, L->base // Add frame since C call can throw. + | crorc 4*cr0+eq, 4*cr0+gt, 4*cr1+eq + | stw PC, SAVE_PC // Redundant (but a defined value). + | beq ->fff_fallback + | ffgccheck + | mr CARG1, L + | mr CARG2, BASE + |.if DUALNUM + | bl extern lj_strfmt_number // (lua_State *L, cTValue *o) + |.else + | bl extern lj_strfmt_num // (lua_State *L, lua_Number *np) + |.endif + | // Returns GCstr *. + | li CARG3, LJ_TSTR + | b ->fff_restv + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc next + | cmplwi NARGS8:RC, 8 + | lwz CARG1, 0(BASE) + | lwz TAB:CARG2, 4(BASE) + | blt ->fff_fallback + | stwx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil. + | checktab CARG1 + | lwz PC, FRAME_PC(BASE) + | bne ->fff_fallback + | stp BASE, L->base // Add frame since C call can throw. + | mr CARG1, L + | stp BASE, L->top // Dummy frame length is ok. + | la CARG3, 8(BASE) + | stw PC, SAVE_PC + | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Returns 0 at end of traversal. + | cmplwi CRET1, 0 + | li CARG3, LJ_TNIL + | beq ->fff_restv // End of traversal: return nil. + | lfd f0, 8(BASE) // Copy key and value to results. 
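+ | // lj_tab_next left the next key at BASE+8 and its value at BASE+16; both
+ | // are shifted down one slot so they become the two results at RA/RA+8.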
+ | la RA, -8(BASE) + | lfd f1, 16(BASE) + | stfd f0, 0(RA) + | li RD, (2+1)*8 + | stfd f1, 8(RA) + | b ->fff_res + | + |.ffunc_1 pairs + | checktab CARG3 + | lwz PC, FRAME_PC(BASE) + | bne ->fff_fallback +#if LJ_52 + | lwz TAB:TMP2, TAB:CARG1->metatable + | lfd f0, CFUNC:RB->upvalue[0] + | cmplwi TAB:TMP2, 0 + | la RA, -8(BASE) + | bne ->fff_fallback +#else + | lfd f0, CFUNC:RB->upvalue[0] + | la RA, -8(BASE) +#endif + | stw TISNIL, 8(BASE) + | li RD, (3+1)*8 + | stfd f0, 0(RA) + | b ->fff_res + | + |.ffunc ipairs_aux + | cmplwi NARGS8:RC, 16 + | lwz CARG3, 0(BASE) + | lwz TAB:CARG1, 4(BASE) + | lwz CARG4, 8(BASE) + |.if DUALNUM + | lwz TMP2, 12(BASE) + |.else + | lfd FARG2, 8(BASE) + |.endif + | blt ->fff_fallback + | checktab CARG3 + | checknum cr1, CARG4 + | lwz PC, FRAME_PC(BASE) + |.if DUALNUM + | bne ->fff_fallback + | bne cr1, ->fff_fallback + |.else + | lus TMP0, 0x3ff0 + | stw ZERO, TMPD_LO + | bne ->fff_fallback + | stw TMP0, TMPD_HI + | bge cr1, ->fff_fallback + | lfd FARG1, TMPD + | toint TMP2, FARG2, f0 + |.endif + | lwz TMP0, TAB:CARG1->asize + | lwz TMP1, TAB:CARG1->array + |.if not DUALNUM + | fadd FARG2, FARG2, FARG1 + |.endif + | addi TMP2, TMP2, 1 + | la RA, -8(BASE) + | cmplw TMP0, TMP2 + |.if DUALNUM + | stw TISNUM, 0(RA) + | slwi TMP3, TMP2, 3 + | stw TMP2, 4(RA) + |.else + | slwi TMP3, TMP2, 3 + | stfd FARG2, 0(RA) + |.endif + | ble >2 // Not in array part? + | lwzx TMP2, TMP1, TMP3 + | lfdx f0, TMP1, TMP3 + |1: + | checknil TMP2 + | li RD, (0+1)*8 + | beq ->fff_res // End of iteration, return 0 results. + | li RD, (2+1)*8 + | stfd f0, 8(RA) + | b ->fff_res + |2: // Check for empty hash part first. Otherwise call C function. + | lwz TMP0, TAB:CARG1->hmask + | cmplwi TMP0, 0 + | li RD, (0+1)*8 + | beq ->fff_res + | mr CARG2, TMP2 + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. + | cmplwi CRET1, 0 + | li RD, (0+1)*8 + | beq ->fff_res + | lwz TMP2, 0(CRET1) + | lfd f0, 0(CRET1) + | b <1 + | + |.ffunc_1 ipairs + | checktab CARG3 + | lwz PC, FRAME_PC(BASE) + | bne ->fff_fallback +#if LJ_52 + | lwz TAB:TMP2, TAB:CARG1->metatable + | lfd f0, CFUNC:RB->upvalue[0] + | cmplwi TAB:TMP2, 0 + | la RA, -8(BASE) + | bne ->fff_fallback +#else + | lfd f0, CFUNC:RB->upvalue[0] + | la RA, -8(BASE) +#endif + |.if DUALNUM + | stw TISNUM, 8(BASE) + |.else + | stw ZERO, 8(BASE) + |.endif + | stw ZERO, 12(BASE) + | li RD, (3+1)*8 + | stfd f0, 0(RA) + | b ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall + | cmplwi NARGS8:RC, 8 + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | blt ->fff_fallback + | mr TMP2, BASE + | la BASE, 8(BASE) + | // Remember active hook before pcall. + | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31 + | subi NARGS8:RC, NARGS8:RC, 8 + | addi PC, TMP3, 8+FRAME_PCALL + | b ->vm_call_dispatch + | + |.ffunc xpcall + | cmplwi NARGS8:RC, 16 + | lwz CARG4, 8(BASE) + | lfd FARG2, 8(BASE) + | lfd FARG1, 0(BASE) + | blt ->fff_fallback + | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH) + | mr TMP2, BASE + | checkfunc CARG4; bne ->fff_fallback // Traceback must be a function. + | la BASE, 16(BASE) + | // Remember active hook before pcall. + | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31 + | stfd FARG2, 0(TMP2) // Swap function and traceback. 
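+ | // xpcall(f, handler, ...): function and handler are swapped so that the
+ | // handler ends up below the new frame where the error path expects it.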
+ | subi NARGS8:RC, NARGS8:RC, 16 + | stfd FARG1, 8(TMP2) + | addi PC, TMP1, 16+FRAME_PCALL + | b ->vm_call_dispatch + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | cmpwi CARG3, LJ_TTHREAD; bne ->fff_fallback + |.else + |.ffunc coroutine_wrap_aux + | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr + |.endif + | lbz TMP0, L:CARG1->status + | lp TMP1, L:CARG1->cframe + | lp CARG2, L:CARG1->top + | cmplwi cr0, TMP0, LUA_YIELD + | lp TMP2, L:CARG1->base + | cmplwi cr1, TMP1, 0 + | lwz TMP0, L:CARG1->maxstack + | cmplw cr7, CARG2, TMP2 + | lwz PC, FRAME_PC(BASE) + | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0 + | add TMP2, CARG2, NARGS8:RC + | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD + | cmplw cr1, TMP2, TMP0 + | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt + | stw PC, SAVE_PC + | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov + | stp BASE, L->base + | blt cr6, ->fff_fallback + |1: + |.if resume + | addi BASE, BASE, 8 // Keep resumed thread in stack for GC. + | subi NARGS8:RC, NARGS8:RC, 8 + | subi TMP2, TMP2, 8 + |.endif + | stp TMP2, L:CARG1->top + | li TMP1, 0 + | stp BASE, L->top + |2: // Move args to coroutine. + | cmpw TMP1, NARGS8:RC + | lfdx f0, BASE, TMP1 + | beq >3 + | stfdx f0, CARG2, TMP1 + | addi TMP1, TMP1, 8 + | b <2 + |3: + | li CARG3, 0 + | mr L:SAVE0, L:CARG1 + | li CARG4, 0 + | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | // Returns thread status. + |4: + | lp TMP2, L:SAVE0->base + | cmplwi CRET1, LUA_YIELD + | lp TMP3, L:SAVE0->top + | li_vmstate INTERP + | lp BASE, L->base + | stw L, DISPATCH_GL(cur_L)(DISPATCH) + | st_vmstate + | bgt >8 + | sub RD, TMP3, TMP2 + | lwz TMP0, L->maxstack + | cmplwi RD, 0 + | add TMP1, BASE, RD + | beq >6 // No results? + | cmplw TMP1, TMP0 + | li TMP1, 0 + | bgt >9 // Need to grow stack? + | + | subi TMP3, RD, 8 + | stp TMP2, L:SAVE0->top // Clear coroutine stack. + |5: // Move results from coroutine. + | cmplw TMP1, TMP3 + | lfdx f0, TMP2, TMP1 + | stfdx f0, BASE, TMP1 + | addi TMP1, TMP1, 8 + | bne <5 + |6: + | andix. TMP0, PC, FRAME_TYPE + |.if resume + | li TMP1, LJ_TTRUE + | la RA, -8(BASE) + | stw TMP1, -8(BASE) // Prepend true to results. + | addi RD, RD, 16 + |.else + | mr RA, BASE + | addi RD, RD, 8 + |.endif + |7: + | stw PC, SAVE_PC + | mr MULTRES, RD + | beq ->BC_RET_Z + | b ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | andix. TMP0, PC, FRAME_TYPE + | la TMP3, -8(TMP3) + | li TMP1, LJ_TFALSE + | lfd f0, 0(TMP3) + | stp TMP3, L:SAVE0->top // Remove error from coroutine stack. + | li RD, (2+1)*8 + | stw TMP1, -8(BASE) // Prepend false to results. + | la RA, -8(BASE) + | stfd f0, 0(BASE) // Copy error message. + | b <7 + |.else + | mr CARG1, L + | mr CARG2, L:SAVE0 + | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + |.endif + | + |9: // Handle stack expansion on return from yield. + | mr CARG1, L + | srwi CARG2, RD, 3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | li CRET1, 0 + | b <4 + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | lp TMP0, L->cframe + | add TMP1, BASE, NARGS8:RC + | stp BASE, L->base + | andix. 
TMP0, TMP0, CFRAME_RESUME + | stp TMP1, L->top + | li CRET1, LUA_YIELD + | beq ->fff_fallback + | stp ZERO, L->cframe + | stb CRET1, L->status + | b ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + |.ffunc_1 math_abs + | checknum CARG3 + |.if DUALNUM + | bne >2 + | srawi TMP1, CARG1, 31 + | xor TMP2, TMP1, CARG1 + |.if GPR64 + | lus TMP0, 0x8000 + | sub CARG1, TMP2, TMP1 + | cmplw CARG1, TMP0 + | beq >1 + |.else + | sub. CARG1, TMP2, TMP1 + | blt >1 + |.endif + |->fff_resi: + | lwz PC, FRAME_PC(BASE) + | la RA, -8(BASE) + | stw TISNUM, -8(BASE) + | stw CRET1, -4(BASE) + | b ->fff_res1 + |1: + | lus CARG3, 0x41e0 // 2^31. + | li CARG1, 0 + | b ->fff_restv + |2: + |.endif + | bge ->fff_fallback + | rlwinm CARG3, CARG3, 0, 1, 31 + | // Fallthrough. + | + |->fff_restv: + | // CARG3/CARG1 = TValue result. + | lwz PC, FRAME_PC(BASE) + | stw CARG3, -8(BASE) + | la RA, -8(BASE) + | stw CARG1, -4(BASE) + |->fff_res1: + | // RA = results, PC = return. + | li RD, (1+1)*8 + |->fff_res: + | // RA = results, RD = (nresults+1)*8, PC = return. + | andix. TMP0, PC, FRAME_TYPE + | mr MULTRES, RD + | bney ->vm_return + | lwz INS, -4(PC) + | decode_RB8 RB, INS + |5: + | cmplw RB, RD // More results expected? + | decode_RA8 TMP0, INS + | bgt >6 + | ins_next1 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | sub BASE, RA, TMP0 + | ins_next2 + | + |6: // Fill up results with nil. + | subi TMP1, RD, 8 + | addi RD, RD, 8 + | stwx TISNIL, RA, TMP1 + | b <5 + | + |.macro math_extern, func + | .ffunc_n math_ .. func + | blex func + | b ->fff_resn + |.endmacro + | + |.macro math_extern2, func + | .ffunc_nn math_ .. func + | blex func + | b ->fff_resn + |.endmacro + | + |.macro math_round, func + | .ffunc_1 math_ .. func + | checknum CARG3; beqy ->fff_restv + | rlwinm TMP2, CARG3, 12, 21, 31 + | bge ->fff_fallback + | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023 + | cmplwi cr1, TMP2, 31 // 0 <= exp < 31? + | subfic TMP0, TMP2, 31 + | blt >3 + | slwi TMP1, CARG3, 11 + | srwi TMP3, CARG1, 21 + | oris TMP1, TMP1, 0x8000 + | addi TMP2, TMP2, 1 + | or TMP1, TMP1, TMP3 + | slwi CARG2, CARG1, 11 + | bge cr1, >4 + | slw TMP3, TMP1, TMP2 + | srw RD, TMP1, TMP0 + | or TMP3, TMP3, CARG2 + | srawi TMP2, CARG3, 31 + |.if "func" == "floor" + | and TMP1, TMP3, TMP2 + | addic TMP0, TMP1, -1 + | subfe TMP1, TMP0, TMP1 + | add CARG1, RD, TMP1 + | xor CARG1, CARG1, TMP2 + | sub CARG1, CARG1, TMP2 + | b ->fff_resi + |.else + | andc TMP1, TMP3, TMP2 + | addic TMP0, TMP1, -1 + | subfe TMP1, TMP0, TMP1 + | add CARG1, RD, TMP1 + | cmpw CARG1, RD + | xor CARG1, CARG1, TMP2 + | sub CARG1, CARG1, TMP2 + | bge ->fff_resi + | // Overflow to 2^31. + | lus CARG3, 0x41e0 // 2^31. + | li CARG1, 0 + | b ->fff_restv + |.endif + |3: // |x| < 1 + | slwi TMP2, CARG3, 1 + | srawi TMP1, CARG3, 31 + | or TMP2, CARG1, TMP2 // ztest = (hi+hi) | lo + |.if "func" == "floor" + | and TMP1, TMP2, TMP1 // (ztest & sign) == 0 ? 0 : -1 + | subfic TMP2, TMP1, 0 + | subfe CARG1, CARG1, CARG1 + |.else + | andc TMP1, TMP2, TMP1 // (ztest & ~sign) == 0 ? 0 : 1 + | addic TMP2, TMP1, -1 + | subfe CARG1, TMP2, TMP1 + |.endif + | b ->fff_resi + |4: // exp >= 31. Check for -(2^31). + | xoris TMP1, TMP1, 0x8000 + | srawi TMP2, CARG3, 31 + |.if "func" == "floor" + | or TMP1, TMP1, CARG2 + |.endif + |.if PPE + | orc TMP1, TMP1, TMP2 + | cmpwi TMP1, 0 + |.else + | orc. TMP1, TMP1, TMP2 + |.endif + | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq + | lus CARG1, 0x8000 // -(2^31). 
+ | beqy ->fff_resi + |5: + | lfd FARG1, 0(BASE) + | blex func + | b ->fff_resn + |.endmacro + | + |.if DUALNUM + | math_round floor + | math_round ceil + |.else + | // NYI: use internal implementation. + | math_extern floor + | math_extern ceil + |.endif + | + |.if SQRT + |.ffunc_n math_sqrt + | fsqrt FARG1, FARG1 + | b ->fff_resn + |.else + | math_extern sqrt + |.endif + | + |.ffunc math_log + | cmplwi NARGS8:RC, 8 + | lwz CARG3, 0(BASE) + | lfd FARG1, 0(BASE) + | bne ->fff_fallback // Need exactly 1 argument. + | checknum CARG3; bge ->fff_fallback + | blex log + | b ->fff_resn + | + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |.if DUALNUM + |.ffunc math_ldexp + | cmplwi NARGS8:RC, 16 + | lwz CARG3, 0(BASE) + | lfd FARG1, 0(BASE) + | lwz CARG4, 8(BASE) + |.if GPR64 + | lwz CARG2, 12(BASE) + |.else + | lwz CARG1, 12(BASE) + |.endif + | blt ->fff_fallback + | checknum CARG3; bge ->fff_fallback + | checknum CARG4; bne ->fff_fallback + |.else + |.ffunc_nn math_ldexp + |.if GPR64 + | toint CARG2, FARG2 + |.else + | toint CARG1, FARG2 + |.endif + |.endif + | blex ldexp + | b ->fff_resn + | + |.ffunc_n math_frexp + |.if GPR64 + | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) + |.else + | la CARG1, DISPATCH_GL(tmptv)(DISPATCH) + |.endif + | lwz PC, FRAME_PC(BASE) + | blex frexp + | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH) + | la RA, -8(BASE) + |.if not DUALNUM + | tonum_i FARG2, TMP1 + |.endif + | stfd FARG1, 0(RA) + | li RD, (2+1)*8 + |.if DUALNUM + | stw TISNUM, 8(RA) + | stw TMP1, 12(RA) + |.else + | stfd FARG2, 8(RA) + |.endif + | b ->fff_res + | + |.ffunc_n math_modf + |.if GPR64 + | la CARG2, -8(BASE) + |.else + | la CARG1, -8(BASE) + |.endif + | lwz PC, FRAME_PC(BASE) + | blex modf + | la RA, -8(BASE) + | stfd FARG1, 0(BASE) + | li RD, (2+1)*8 + | b ->fff_res + | + |.macro math_minmax, name, ismax + |.if DUALNUM + | .ffunc_1 name + | checknum CARG3 + | addi TMP1, BASE, 8 + | add TMP2, BASE, NARGS8:RC + | bne >4 + |1: // Handle integers. + | lwz CARG4, 0(TMP1) + | cmplw cr1, TMP1, TMP2 + | lwz CARG2, 4(TMP1) + | bge cr1, ->fff_resi + | checknum CARG4 + | xoris TMP0, CARG1, 0x8000 + | xoris TMP3, CARG2, 0x8000 + | bne >3 + | subfc TMP3, TMP3, TMP0 + | subfe TMP0, TMP0, TMP0 + |.if ismax + | andc TMP3, TMP3, TMP0 + |.else + | and TMP3, TMP3, TMP0 + |.endif + | add CARG1, TMP3, CARG2 + |.if GPR64 + | rldicl CARG1, CARG1, 0, 32 + |.endif + | addi TMP1, TMP1, 8 + | b <1 + |3: + | bge ->fff_fallback + | // Convert intermediate result to number and continue below. + | tonum_i FARG1, CARG1 + | lfd FARG2, 0(TMP1) + | b >6 + |4: + | lfd FARG1, 0(BASE) + | bge ->fff_fallback + |5: // Handle numbers. + | lwz CARG4, 0(TMP1) + | cmplw cr1, TMP1, TMP2 + | lfd FARG2, 0(TMP1) + | bge cr1, ->fff_resn + | checknum CARG4; bge >7 + |6: + | fsub f0, FARG1, FARG2 + | addi TMP1, TMP1, 8 + |.if ismax + | fsel FARG1, f0, FARG1, FARG2 + |.else + | fsel FARG1, f0, FARG2, FARG1 + |.endif + | b <5 + |7: // Convert integer to number and continue above. 
+ | lwz CARG2, 4(TMP1) + | bne ->fff_fallback + | tonum_i FARG2, CARG2 + | b <6 + |.else + | .ffunc_n name + | li TMP1, 8 + |1: + | lwzx CARG2, BASE, TMP1 + | lfdx FARG2, BASE, TMP1 + | cmplw cr1, TMP1, NARGS8:RC + | checknum CARG2 + | bge cr1, ->fff_resn + | bge ->fff_fallback + | fsub f0, FARG1, FARG2 + | addi TMP1, TMP1, 8 + |.if ismax + | fsel FARG1, f0, FARG1, FARG2 + |.else + | fsel FARG1, f0, FARG2, FARG1 + |.endif + | b <1 + |.endif + |.endmacro + | + | math_minmax math_min, 0 + | math_minmax math_max, 1 + | + |//-- String library ----------------------------------------------------- + | + |.ffunc string_byte // Only handle the 1-arg case here. + | cmplwi NARGS8:RC, 8 + | lwz CARG3, 0(BASE) + | lwz STR:CARG1, 4(BASE) + | bne ->fff_fallback // Need exactly 1 argument. + | checkstr CARG3 + | bne ->fff_fallback + | lwz TMP0, STR:CARG1->len + |.if DUALNUM + | lbz CARG1, STR:CARG1[1] // Access is always ok (NUL at end). + | li RD, (0+1)*8 + | lwz PC, FRAME_PC(BASE) + | cmplwi TMP0, 0 + | la RA, -8(BASE) + | beqy ->fff_res + | b ->fff_resi + |.else + | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end). + | addic TMP3, TMP0, -1 // RD = ((str->len != 0)+1)*8 + | subfe RD, TMP3, TMP0 + | stw TMP1, TONUM_LO // Inlined tonum_u f0, TMP1. + | addi RD, RD, 1 + | lfd f0, TONUM_D + | la RA, -8(BASE) + | lwz PC, FRAME_PC(BASE) + | fsub f0, f0, TOBIT + | slwi RD, RD, 3 + | stfd f0, 0(RA) + | b ->fff_res + |.endif + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | cmplwi NARGS8:RC, 8 + | lwz CARG3, 0(BASE) + |.if DUALNUM + | lwz TMP0, 4(BASE) + | bne ->fff_fallback // Exactly 1 argument. + | checknum CARG3; bne ->fff_fallback + | la CARG2, 7(BASE) + |.else + | lfd FARG1, 0(BASE) + | bne ->fff_fallback // Exactly 1 argument. + | checknum CARG3; bge ->fff_fallback + | toint TMP0, FARG1 + | la CARG2, TMPD_BLO + |.endif + | li CARG3, 1 + | cmplwi TMP0, 255; bgt ->fff_fallback + |->fff_newstr: + | mr CARG1, L + | stp BASE, L->base + | stw PC, SAVE_PC + | bl extern lj_str_new // (lua_State *L, char *str, size_t l) + |->fff_resstr: + | // Returns GCstr *. + | lp BASE, L->base + | li CARG3, LJ_TSTR + | b ->fff_restv + | + |.ffunc string_sub + | ffgccheck + | cmplwi NARGS8:RC, 16 + | lwz CARG3, 16(BASE) + |.if not DUALNUM + | lfd f0, 16(BASE) + |.endif + | lwz TMP0, 0(BASE) + | lwz STR:CARG1, 4(BASE) + | blt ->fff_fallback + | lwz CARG2, 8(BASE) + |.if DUALNUM + | lwz TMP1, 12(BASE) + |.else + | lfd f1, 8(BASE) + |.endif + | li TMP2, -1 + | beq >1 + |.if DUALNUM + | checknum CARG3 + | lwz TMP2, 20(BASE) + | bne ->fff_fallback + |1: + | checknum CARG2; bne ->fff_fallback + |.else + | checknum CARG3; bge ->fff_fallback + | toint TMP2, f0 + |1: + | checknum CARG2; bge ->fff_fallback + |.endif + | checkstr TMP0; bne ->fff_fallback + |.if not DUALNUM + | toint TMP1, f1 + |.endif + | lwz TMP0, STR:CARG1->len + | cmplw TMP0, TMP2 // len < end? (unsigned compare) + | addi TMP3, TMP2, 1 + | blt >5 + |2: + | cmpwi TMP1, 0 // start <= 0? + | add TMP3, TMP1, TMP0 + | ble >7 + |3: + | sub CARG3, TMP2, TMP1 + | addi CARG2, STR:CARG1, #STR-1 + | srawi TMP0, CARG3, 31 + | addi CARG3, CARG3, 1 + | add CARG2, CARG2, TMP1 + | andc CARG3, CARG3, TMP0 + |.if GPR64 + | rldicl CARG2, CARG2, 0, 32 + | rldicl CARG3, CARG3, 0, 32 + |.endif + | b ->fff_newstr + | + |5: // Negative end or overflow. + | cmpw TMP0, TMP2 // len >= end? (signed compare) + | add TMP2, TMP0, TMP3 // Negative end: end = end+len+1. + | bge <2 + | mr TMP2, TMP0 // Overflow: end = len. 
+ | b <2 + | + |7: // Negative start or underflow. + | .gpr64 extsw TMP1, TMP1 + | addic CARG3, TMP1, -1 + | subfe CARG3, CARG3, CARG3 + | srawi CARG2, TMP3, 31 // Note: modifies carry. + | andc TMP3, TMP3, CARG3 + | andc TMP1, TMP3, CARG2 + | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0) + | b <3 + | + |.macro ffstring_op, name + | .ffunc string_ .. name + | ffgccheck + | cmplwi NARGS8:RC, 8 + | lwz CARG3, 0(BASE) + | lwz STR:CARG2, 4(BASE) + | blt ->fff_fallback + | checkstr CARG3 + | la SBUF:CARG1, DISPATCH_GL(tmpbuf)(DISPATCH) + | bne ->fff_fallback + | lwz TMP0, SBUF:CARG1->b + | stw L, SBUF:CARG1->L + | stp BASE, L->base + | stw PC, SAVE_PC + | stw TMP0, SBUF:CARG1->p + | bl extern lj_buf_putstr_ .. name + | bl extern lj_buf_tostr + | b ->fff_resstr + |.endmacro + | + |ffstring_op reverse + |ffstring_op lower + |ffstring_op upper + | + |//-- Bit library -------------------------------------------------------- + | + |.macro .ffunc_bit, name + |.if DUALNUM + | .ffunc_1 bit_..name + | checknum CARG3; bnel ->fff_tobit_fb + |.else + | .ffunc_n bit_..name + | fadd FARG1, FARG1, TOBIT + | stfd FARG1, TMPD + | lwz CARG1, TMPD_LO + |.endif + |.endmacro + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name + | addi TMP1, BASE, 8 + | add TMP2, BASE, NARGS8:RC + |1: + | lwz CARG4, 0(TMP1) + | cmplw cr1, TMP1, TMP2 + |.if DUALNUM + | lwz CARG2, 4(TMP1) + |.else + | lfd FARG1, 0(TMP1) + |.endif + | bgey cr1, ->fff_resi + | checknum CARG4 + |.if DUALNUM + | bnel ->fff_bitop_fb + |.else + | fadd FARG1, FARG1, TOBIT + | bge ->fff_fallback + | stfd FARG1, TMPD + | lwz CARG2, TMPD_LO + |.endif + | ins CARG1, CARG1, CARG2 + | addi TMP1, TMP1, 8 + | b <1 + |.endmacro + | + |.ffunc_bit_op band, and + |.ffunc_bit_op bor, or + |.ffunc_bit_op bxor, xor + | + |.ffunc_bit bswap + | rotlwi TMP0, CARG1, 8 + | rlwimi TMP0, CARG1, 24, 0, 7 + | rlwimi TMP0, CARG1, 24, 16, 23 + | mr CRET1, TMP0 + | b ->fff_resi + | + |.ffunc_bit bnot + | not CRET1, CARG1 + | b ->fff_resi + | + |.macro .ffunc_bit_sh, name, ins, shmod + |.if DUALNUM + | .ffunc_2 bit_..name + | checknum CARG3; bnel ->fff_tobit_fb + | // Note: no inline conversion from number for 2nd argument! + | checknum CARG4; bne ->fff_fallback + |.else + | .ffunc_nn bit_..name + | fadd FARG1, FARG1, TOBIT + | fadd FARG2, FARG2, TOBIT + | stfd FARG1, TMPD + | lwz CARG1, TMPD_LO + | stfd FARG2, TMPD + | lwz CARG2, TMPD_LO + |.endif + |.if shmod == 1 + | rlwinm CARG2, CARG2, 0, 27, 31 + |.elif shmod == 2 + | neg CARG2, CARG2 + |.endif + | ins CRET1, CARG1, CARG2 + | b ->fff_resi + |.endmacro + | + |.ffunc_bit_sh lshift, slw, 1 + |.ffunc_bit_sh rshift, srw, 1 + |.ffunc_bit_sh arshift, sraw, 1 + |.ffunc_bit_sh rol, rotlw, 0 + |.ffunc_bit_sh ror, rotlw, 2 + | + |.ffunc_bit tobit + |.if DUALNUM + | b ->fff_resi + |.else + |->fff_resi: + | tonum_i FARG1, CRET1 + |.endif + |->fff_resn: + | lwz PC, FRAME_PC(BASE) + | la RA, -8(BASE) + | stfd FARG1, -8(BASE) + | b ->fff_res1 + | + |// Fallback FP number to bit conversion. + |->fff_tobit_fb: + |.if DUALNUM + | lfd FARG1, 0(BASE) + | bgt ->fff_fallback + | fadd FARG1, FARG1, TOBIT + | stfd FARG1, TMPD + | lwz CARG1, TMPD_LO + | blr + |.endif + |->fff_bitop_fb: + |.if DUALNUM + | lfd FARG1, 0(TMP1) + | bgt ->fff_fallback + | fadd FARG1, FARG1, TOBIT + | stfd FARG1, TMPD + | lwz CARG2, TMPD_LO + | blr + |.endif + | + |//----------------------------------------------------------------------- + | + |->fff_fallback: // Call fast function fallback handler. 
+ | // BASE = new base, RB = CFUNC, RC = nargs*8 + | lp TMP3, CFUNC:RB->f + | add TMP1, BASE, NARGS8:RC + | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC. + | addi TMP0, TMP1, 8*LUA_MINSTACK + | lwz TMP2, L->maxstack + | stw PC, SAVE_PC // Redundant (but a defined value). + | .toc lp TMP3, 0(TMP3) + | cmplw TMP0, TMP2 + | stp BASE, L->base + | stp TMP1, L->top + | mr CARG1, L + | bgt >5 // Need to grow stack. + | mtctr TMP3 + | bctrl // (lua_State *L) + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | lp BASE, L->base + | cmpwi CRET1, 0 + | slwi RD, CRET1, 3 + | la RA, -8(BASE) + | bgt ->fff_res // Returned nresults+1? + |1: // Returned 0 or -1: retry fast path. + | lp TMP0, L->top + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | sub NARGS8:RC, TMP0, BASE + | bne ->vm_call_tail // Returned -1? + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | andix. TMP0, PC, FRAME_TYPE + | rlwinm TMP1, PC, 0, 0, 28 + | bne >3 + | lwz INS, -4(PC) + | decode_RA8 TMP1, INS + | addi TMP1, TMP1, 8 + |3: + | sub TMP2, BASE, TMP1 + | b ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | li CARG2, LUA_MINSTACK + | bl extern lj_state_growstack // (lua_State *L, int n) + | lp BASE, L->base + | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry. + | b <1 + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RC = nargs*8 + | mflr SAVE0 + | stp BASE, L->base + | add TMP0, BASE, NARGS8:RC + | stw PC, SAVE_PC // Redundant (but a defined value). + | stp TMP0, L->top + | mr CARG1, L + | bl extern lj_gc_step // (lua_State *L) + | lp BASE, L->base + | mtlr SAVE0 + | lp TMP0, L->top + | sub NARGS8:RC, TMP0, BASE + | lwz CFUNC:RB, FRAME_FUNC(BASE) + | blr + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. + |.if JIT + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | andix. TMP0, TMP3, HOOK_VMEVENT // No recording while in vmevent. + | bne >5 + | // Decrement the hookcount for consistency, but always do the call. + | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | andix. TMP0, TMP3, HOOK_ACTIVE + | bne >1 + | subi TMP2, TMP2, 1 + | andi. TMP0, TMP3, LUA_MASKLINE|LUA_MASKCOUNT + | beqy >1 + | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | b >1 + |.endif + | + |->vm_rethook: // Dispatch target for return hooks. + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active? + | beq >1 + |5: // Re-dispatch to static ins. + | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OPP TMP1, INS. + | lpx TMP0, DISPATCH, TMP1 + | mtctr TMP0 + | bctr + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active? + | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0 + | bne <5 + | + | cmpwi cr1, TMP0, 0 + | addic. TMP2, TMP2, -1 + | beq cr1, <5 + | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | beq >1 + | bge cr1, <5 + |1: + | mr CARG1, L + | stw MULTRES, SAVE_MULTRES + | mr CARG2, PC + | stp BASE, L->base + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. 
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |3: + | lp BASE, L->base + |4: // Re-dispatch to static ins. + | lwz INS, -4(PC) + | decode_OPP TMP1, INS + | decode_RB8 RB, INS + | addi TMP1, TMP1, GG_DISP2STATIC + | decode_RD8 RD, INS + | lpx TMP0, DISPATCH, TMP1 + | decode_RA8 RA, INS + | decode_RC8 RC, INS + | mtctr TMP0 + | bctr + | + |->cont_hook: // Continue from hook yield. + | addi PC, PC, 4 + | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins. + | b <4 + | + |->vm_hotloop: // Hot loop counter underflow. + |.if JIT + | lwz LFUNC:TMP1, FRAME_FUNC(BASE) + | addi CARG1, DISPATCH, GG_DISP2J + | stw PC, SAVE_PC + | lwz TMP1, LFUNC:TMP1->pc + | mr CARG2, PC + | stw L, DISPATCH_J(L)(DISPATCH) + | lbz TMP1, PC2PROTO(framesize)(TMP1) + | stp BASE, L->base + | slwi TMP1, TMP1, 3 + | add TMP1, BASE, TMP1 + | stp TMP1, L->top + | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc) + | b <3 + |.endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mr CARG2, PC + |.if JIT + | b >1 + |.endif + | + |->vm_hotcall: // Hot call counter underflow. + |.if JIT + | ori CARG2, PC, 1 + |1: + |.endif + | add TMP0, BASE, RC + | stw PC, SAVE_PC + | mr CARG1, L + | stp BASE, L->base + | sub RA, RA, BASE + | stp TMP0, L->top + | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) + | // Returns ASMFunction. + | lp BASE, L->base + | lp TMP0, L->top + | stw ZERO, SAVE_PC // Invalidate for subsequent line hook. + | sub NARGS8:RC, TMP0, BASE + | add RA, BASE, RA + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | lwz INS, -4(PC) + | mtctr CRET1 + | bctr + | + |->cont_stitch: // Trace stitching. + |.if JIT + | // RA = resultptr, RB = meta base + | lwz INS, -4(PC) + | lwz TMP3, -20(RB) // Save previous trace number. + | addic. TMP1, MULTRES, -8 + | decode_RA8 RC, INS // Call base. + | beq >2 + |1: // Move results down. + | lfd f0, 0(RA) + | addic. TMP1, TMP1, -8 + | addi RA, RA, 8 + | stfdx f0, BASE, RC + | addi RC, RC, 8 + | bne <1 + |2: + | decode_RA8 RA, INS + | decode_RB8 RB, INS + | add RA, RA, RB + | lwz TMP1, DISPATCH_J(trace)(DISPATCH) + |3: + | cmplw RA, RC + | bgt >9 // More results wanted? + | + | slwi TMP2, TMP3, 2 + | lwzx TRACE:TMP2, TMP1, TMP2 + | cmpwi TRACE:TMP2, 0 + | beq ->cont_nop + | lhz RD, TRACE:TMP2->link + | cmpw RD, TMP3 + | cmpwi cr1, RD, 0 + | beq ->cont_nop // Blacklisted. + | slwi RD, RD, 3 + | bne cr1, =>BC_JLOOP // Jump to stitched trace. + | + | // Stitch a new trace to the previous trace. + | stw TMP3, DISPATCH_J(exitno)(DISPATCH) + | stp L, DISPATCH_J(L)(DISPATCH) + | stp BASE, L->base + | addi CARG1, DISPATCH, GG_DISP2J + | mr CARG2, PC + | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) + | lp BASE, L->base + | b ->cont_nop + | + |9: + | stwx TISNIL, BASE, RC + | addi RC, RC, 8 + | b <3 + |.endif + | + |->vm_profhook: // Dispatch target for profiler hook. +#if LJ_HASPROFILE + | mr CARG1, L + | stw MULTRES, SAVE_MULTRES + | mr CARG2, PC + | stp BASE, L->base + | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) + | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. 
+ | lp BASE, L->base + | subi PC, PC, 4 + | b ->cont_nop +#endif + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro savex_, a, b, c, d + | stfd f..a, 16+a*8(sp) + | stfd f..b, 16+b*8(sp) + | stfd f..c, 16+c*8(sp) + | stfd f..d, 16+d*8(sp) + |.endmacro + | + |->vm_exit_handler: + |.if JIT + | addi sp, sp, -(16+32*8+32*4) + | stmw r2, 16+32*8+2*4(sp) + | addi DISPATCH, JGL, -GG_DISP2G-32768 + | li CARG2, ~LJ_VMST_EXIT + | lwz CARG1, 16+32*8+32*4(sp) // Get stack chain. + | stw CARG2, DISPATCH_GL(vmstate)(DISPATCH) + | savex_ 0,1,2,3 + | stw CARG1, 0(sp) // Store extended stack chain. + | clrso TMP1 + | savex_ 4,5,6,7 + | addi CARG2, sp, 16+32*8+32*4 // Recompute original value of sp. + | savex_ 8,9,10,11 + | stw CARG2, 16+32*8+1*4(sp) // Store sp in RID_SP. + | savex_ 12,13,14,15 + | mflr CARG3 + | li TMP1, 0 + | savex_ 16,17,18,19 + | stw TMP1, 16+32*8+0*4(sp) // Clear RID_TMP. + | savex_ 20,21,22,23 + | lhz CARG4, 2(CARG3) // Load trace number. + | savex_ 24,25,26,27 + | lwz L, DISPATCH_GL(cur_L)(DISPATCH) + | savex_ 28,29,30,31 + | sub CARG3, TMP0, CARG3 // Compute exit number. + | lp BASE, DISPATCH_GL(jit_base)(DISPATCH) + | srwi CARG3, CARG3, 2 + | stp L, DISPATCH_J(L)(DISPATCH) + | subi CARG3, CARG3, 2 + | stp BASE, L->base + | stw CARG4, DISPATCH_J(parent)(DISPATCH) + | stw TMP1, DISPATCH_GL(jit_base)(DISPATCH) + | addi CARG1, DISPATCH, GG_DISP2J + | stw CARG3, DISPATCH_J(exitno)(DISPATCH) + | addi CARG2, sp, 16 + | bl extern lj_trace_exit // (jit_State *J, ExitState *ex) + | // Returns MULTRES (unscaled) or negated error code. + | lp TMP1, L->cframe + | lwz TMP2, 0(sp) + | lp BASE, L->base + |.if GPR64 + | rldicr sp, TMP1, 0, 61 + |.else + | rlwinm sp, TMP1, 0, 0, 29 + |.endif + | lwz PC, SAVE_PC // Get SAVE_PC. + | stw TMP2, 0(sp) + | stw L, SAVE_L // Set SAVE_L (on-trace resume/yield). + | b >1 + |.endif + |->vm_exit_interp: + |.if JIT + | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set. + | lwz L, SAVE_L + | addi DISPATCH, JGL, -GG_DISP2G-32768 + | stp BASE, L->base + |1: + | cmpwi CARG1, 0 + | blt >9 // Check for error from exit. + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | slwi MULTRES, CARG1, 3 + | li TMP2, 0 + | stw MULTRES, SAVE_MULTRES + | lwz TMP1, LFUNC:RB->pc + | stw TMP2, DISPATCH_GL(jit_base)(DISPATCH) + | lwz KBASE, PC2PROTO(k)(TMP1) + | // Setup type comparison constants. + | li TISNUM, LJ_TISNUM + | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). + | stw TMP3, TMPD + | li ZERO, 0 + | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). + | lfs TOBIT, TMPD + | stw TMP3, TMPD + | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) + | li TISNIL, LJ_TNIL + | stw TMP0, TONUM_HI + | lfs TONUM, TMPD + | // Modified copy of ins_next which handles function header dispatch, too. + | lwz INS, 0(PC) + | addi PC, PC, 4 + | // Assumes TISNIL == ~LJ_VMST_INTERP == -1. + | stw TISNIL, DISPATCH_GL(vmstate)(DISPATCH) + | decode_OPP TMP1, INS + | decode_RA8 RA, INS + | lpx TMP0, DISPATCH, TMP1 + | mtctr TMP0 + | cmplwi TMP1, BC_FUNCF*4 // Function header? + | bge >2 + | decode_RB8 RB, INS + | decode_RD8 RD, INS + | decode_RC8 RC, INS + | bctr + |2: + | cmplwi TMP1, (BC_FUNCC+2)*4 // Fast function? + | blt >3 + | // Check frame below fast function. + | lwz TMP1, FRAME_PC(BASE) + | andix. TMP0, TMP1, FRAME_TYPE + | bney >3 // Trace stitching continuation? 
+ | // Otherwise set KBASE for Lua function below fast function. + | lwz TMP2, -4(TMP1) + | decode_RA8 TMP0, TMP2 + | sub TMP1, BASE, TMP0 + | lwz LFUNC:TMP2, -12(TMP1) + | lwz TMP1, LFUNC:TMP2->pc + | lwz KBASE, PC2PROTO(k)(TMP1) + |3: + | subi RC, MULTRES, 8 + | add RA, RA, BASE + | bctr + | + |9: // Rethrow error from the right C frame. + | neg CARG2, CARG1 + | mr CARG1, L + | bl extern lj_err_throw // (lua_State *L, int errcode) + |.endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// NYI: Use internal implementations of floor, ceil, trunc. + | + |->vm_modi: + | divwo. TMP0, CARG1, CARG2 + | bso >1 + |.if GPR64 + | xor CARG3, CARG1, CARG2 + | cmpwi CARG3, 0 + |.else + | xor. CARG3, CARG1, CARG2 + |.endif + | mullw TMP0, TMP0, CARG2 + | sub CARG1, CARG1, TMP0 + | bgelr + | cmpwi CARG1, 0; beqlr + | add CARG1, CARG1, CARG2 + | blr + |1: + | cmpwi CARG2, 0 + | li CARG1, 0 + | beqlr + | clrso TMP0 // Clear SO for -2147483648 % -1 and return 0. + | blr + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |// void lj_vm_cachesync(void *start, void *end) + |// Flush D-Cache and invalidate I-Cache. Assumes 32 byte cache line size. + |// This is a good lower bound, except for very ancient PPC models. + |->vm_cachesync: + |.if JIT or FFI + | // Compute start of first cache line and number of cache lines. + | rlwinm CARG1, CARG1, 0, 0, 26 + | sub CARG2, CARG2, CARG1 + | addi CARG2, CARG2, 31 + | rlwinm. CARG2, CARG2, 27, 5, 31 + | beqlr + | mtctr CARG2 + | mr CARG3, CARG1 + |1: // Flush D-Cache. + | dcbst r0, CARG1 + | addi CARG1, CARG1, 32 + | bdnz <1 + | sync + | mtctr CARG2 + |1: // Invalidate I-Cache. + | icbi r0, CARG3 + | addi CARG3, CARG3, 32 + | bdnz <1 + | isync + | blr + |.endif + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Handler for callback functions. Callback slot number in r11, g in r12. + |->vm_ffi_callback: + |.if FFI + |.type CTSTATE, CTState, PC + | saveregs + | lwz CTSTATE, GL:r12->ctype_state + | addi DISPATCH, r12, GG_G2DISP + | stw r11, CTSTATE->cb.slot + | stw r3, CTSTATE->cb.gpr[0] + | stfd f1, CTSTATE->cb.fpr[0] + | stw r4, CTSTATE->cb.gpr[1] + | stfd f2, CTSTATE->cb.fpr[1] + | stw r5, CTSTATE->cb.gpr[2] + | stfd f3, CTSTATE->cb.fpr[2] + | stw r6, CTSTATE->cb.gpr[3] + | stfd f4, CTSTATE->cb.fpr[3] + | stw r7, CTSTATE->cb.gpr[4] + | stfd f5, CTSTATE->cb.fpr[4] + | stw r8, CTSTATE->cb.gpr[5] + | stfd f6, CTSTATE->cb.fpr[5] + | stw r9, CTSTATE->cb.gpr[6] + | stfd f7, CTSTATE->cb.fpr[6] + | stw r10, CTSTATE->cb.gpr[7] + | stfd f8, CTSTATE->cb.fpr[7] + | addi TMP0, sp, CFRAME_SPACE+8 + | stw TMP0, CTSTATE->cb.stack + | mr CARG1, CTSTATE + | stw CTSTATE, SAVE_PC // Any value outside of bytecode is ok. + | mr CARG2, sp + | bl extern lj_ccallback_enter // (CTState *cts, void *cf) + | // Returns lua_State *. + | lp BASE, L:CRET1->base + | li TISNUM, LJ_TISNUM // Setup type comparison constants. + | lp RC, L:CRET1->top + | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 
+ | li ZERO, 0 + | mr L, CRET1 + | stw TMP3, TMPD + | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). + | stw TMP0, TONUM_HI + | li TISNIL, LJ_TNIL + | li_vmstate INTERP + | lfs TOBIT, TMPD + | stw TMP3, TMPD + | sub RC, RC, BASE + | st_vmstate + | lfs TONUM, TMPD + | ins_callt + |.endif + | + |->cont_ffi_callback: // Return from FFI callback. + |.if FFI + | lwz CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH) + | stp BASE, L->base + | stp RB, L->top + | stp L, CTSTATE->L + | mr CARG1, CTSTATE + | mr CARG2, RA + | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) + | lwz CRET1, CTSTATE->cb.gpr[0] + | lfd FARG1, CTSTATE->cb.fpr[0] + | lwz CRET2, CTSTATE->cb.gpr[1] + | b ->vm_leave_unw + |.endif + | + |->vm_ffi_call: // Call C function via FFI. + | // Caveat: needs special frame unwinding, see below. + |.if FFI + | .type CCSTATE, CCallState, CARG1 + | lwz TMP1, CCSTATE->spadj + | mflr TMP0 + | lbz CARG2, CCSTATE->nsp + | lbz CARG3, CCSTATE->nfpr + | neg TMP1, TMP1 + | stw TMP0, 4(sp) + | cmpwi cr1, CARG3, 0 + | mr TMP2, sp + | addic. CARG2, CARG2, -1 + | stwux sp, sp, TMP1 + | crnot 4*cr1+eq, 4*cr1+eq // For vararg calls. + | stw r14, -4(TMP2) + | stw CCSTATE, -8(TMP2) + | mr r14, TMP2 + | la TMP1, CCSTATE->stack + | slwi CARG2, CARG2, 2 + | blty >2 + | la TMP2, 8(sp) + |1: + | lwzx TMP0, TMP1, CARG2 + | stwx TMP0, TMP2, CARG2 + | addic. CARG2, CARG2, -4 + | bge <1 + |2: + | bney cr1, >3 + | lfd f1, CCSTATE->fpr[0] + | lfd f2, CCSTATE->fpr[1] + | lfd f3, CCSTATE->fpr[2] + | lfd f4, CCSTATE->fpr[3] + | lfd f5, CCSTATE->fpr[4] + | lfd f6, CCSTATE->fpr[5] + | lfd f7, CCSTATE->fpr[6] + | lfd f8, CCSTATE->fpr[7] + |3: + | lp TMP0, CCSTATE->func + | lwz CARG2, CCSTATE->gpr[1] + | lwz CARG3, CCSTATE->gpr[2] + | lwz CARG4, CCSTATE->gpr[3] + | lwz CARG5, CCSTATE->gpr[4] + | mtctr TMP0 + | lwz r8, CCSTATE->gpr[5] + | lwz r9, CCSTATE->gpr[6] + | lwz r10, CCSTATE->gpr[7] + | lwz CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1. + | bctrl + | lwz CCSTATE:TMP1, -8(r14) + | lwz TMP2, -4(r14) + | lwz TMP0, 4(r14) + | stw CARG1, CCSTATE:TMP1->gpr[0] + | stfd FARG1, CCSTATE:TMP1->fpr[0] + | stw CARG2, CCSTATE:TMP1->gpr[1] + | mtlr TMP0 + | stw CARG3, CCSTATE:TMP1->gpr[2] + | mr sp, r14 + | stw CARG4, CCSTATE:TMP1->gpr[3] + | mr r14, TMP2 + | blr + |.endif + |// Note: vm_ffi_call must be the last function in this object file! + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1*8, RD = src2*8, JMP with RD = target + |.if DUALNUM + | lwzux TMP0, RA, BASE + | addi PC, PC, 4 + | lwz CARG2, 4(RA) + | lwzux TMP1, RD, BASE + | lwz TMP2, -4(PC) + | checknum cr0, TMP0 + | lwz CARG3, 4(RD) + | decode_RD4 TMP2, TMP2 + | checknum cr1, TMP1 + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | bne cr0, >7 + | bne cr1, >8 + | cmpw CARG2, CARG3 + if (op == BC_ISLT) { + | bge >2 + } else if (op == BC_ISGE) { + | blt >2 + } else if (op == BC_ISLE) { + | bgt >2 + } else { + | ble >2 + } + |1: + | add PC, PC, TMP2 + |2: + | ins_next + | + |7: // RA is not an integer. 
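+ | // Mixed operands: the integer side is converted with tonum_i and the
+ | // comparison is redone as an FP compare (cr0/cr1 still hold the two
+ | // type checks from above).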
+ | bgt cr0, ->vmeta_comp + | // RA is a number. + | lfd f0, 0(RA) + | bgt cr1, ->vmeta_comp + | blt cr1, >4 + | // RA is a number, RD is an integer. + | tonum_i f1, CARG3 + | b >5 + | + |8: // RA is an integer, RD is not an integer. + | bgt cr1, ->vmeta_comp + | // RA is an integer, RD is a number. + | tonum_i f0, CARG2 + |4: + | lfd f1, 0(RD) + |5: + | fcmpu cr0, f0, f1 + if (op == BC_ISLT) { + | bge <2 + } else if (op == BC_ISGE) { + | blt <2 + } else if (op == BC_ISLE) { + | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq + | bge <2 + } else { + | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq + | blt <2 + } + | b <1 + |.else + | lwzx TMP0, BASE, RA + | addi PC, PC, 4 + | lfdx f0, BASE, RA + | lwzx TMP1, BASE, RD + | checknum cr0, TMP0 + | lwz TMP2, -4(PC) + | lfdx f1, BASE, RD + | checknum cr1, TMP1 + | decode_RD4 TMP2, TMP2 + | bge cr0, ->vmeta_comp + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | bge cr1, ->vmeta_comp + | fcmpu cr0, f0, f1 + if (op == BC_ISLT) { + | bge >1 + } else if (op == BC_ISGE) { + | blt >1 + } else if (op == BC_ISLE) { + | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq + | bge >1 + } else { + | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq + | blt >1 + } + | add PC, PC, TMP2 + |1: + | ins_next + |.endif + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | // RA = src1*8, RD = src2*8, JMP with RD = target + |.if DUALNUM + | lwzux TMP0, RA, BASE + | addi PC, PC, 4 + | lwz CARG2, 4(RA) + | lwzux TMP1, RD, BASE + | checknum cr0, TMP0 + | lwz TMP2, -4(PC) + | checknum cr1, TMP1 + | decode_RD4 TMP2, TMP2 + | lwz CARG3, 4(RD) + | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + if (vk) { + | ble cr7, ->BC_ISEQN_Z + } else { + | ble cr7, ->BC_ISNEN_Z + } + |.else + | lwzux TMP0, RA, BASE + | lwz TMP2, 0(PC) + | lfd f0, 0(RA) + | addi PC, PC, 4 + | lwzux TMP1, RD, BASE + | checknum cr0, TMP0 + | decode_RD4 TMP2, TMP2 + | lfd f1, 0(RD) + | checknum cr1, TMP1 + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | bge cr0, >5 + | bge cr1, >5 + | fcmpu cr0, f0, f1 + if (vk) { + | bne >1 + | add PC, PC, TMP2 + } else { + | beq >1 + | add PC, PC, TMP2 + } + |1: + | ins_next + |.endif + |5: // Either or both types are not numbers. + |.if not DUALNUM + | lwz CARG2, 4(RA) + | lwz CARG3, 4(RD) + |.endif + |.if FFI + | cmpwi cr7, TMP0, LJ_TCDATA + | cmpwi cr5, TMP1, LJ_TCDATA + |.endif + | not TMP3, TMP0 + | cmplw TMP0, TMP1 + | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive? + |.if FFI + | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq + |.endif + | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata? + |.if FFI + | beq cr7, ->vmeta_equal_cd + |.endif + | cmplw cr5, CARG2, CARG3 + | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive. + | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type. + | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv. + | mr SAVE0, PC + | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2. + | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2. + if (vk) { + | bne cr0, >6 + | add PC, PC, TMP2 + |6: + } else { + | beq cr0, >6 + | add PC, PC, TMP2 + |6: + } + |.if DUALNUM + | bge cr0, >2 // Done if 1 or 2. + |1: + | ins_next + |2: + |.else + | blt cr0, <1 // Done if 1 or 2. + |.endif + | blt cr6, <1 // Done if not tab/ud. + | + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | lwz TAB:TMP2, TAB:CARG2->metatable + | li CARG4, 1-vk // ne = 0 or 1. + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable? + | lbz TMP2, TAB:TMP2->nomm + | andix. 
TMP2, TMP2, 1<vmeta_equal // Handle __eq metamethod. + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | // RA = src*8, RD = str_const*8 (~), JMP with RD = target + | lwzux TMP0, RA, BASE + | srwi RD, RD, 1 + | lwz STR:TMP3, 4(RA) + | lwz TMP2, 0(PC) + | subfic RD, RD, -4 + | addi PC, PC, 4 + |.if FFI + | cmpwi TMP0, LJ_TCDATA + |.endif + | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4 + | .gpr64 extsw TMP0, TMP0 + | subfic TMP0, TMP0, LJ_TSTR + |.if FFI + | beq ->vmeta_equal_cd + |.endif + | sub TMP1, STR:TMP1, STR:TMP3 + | or TMP0, TMP0, TMP1 + | decode_RD4 TMP2, TMP2 + | subfic TMP0, TMP0, 0 + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | subfe TMP1, TMP1, TMP1 + if (vk) { + | andc TMP2, TMP2, TMP1 + } else { + | and TMP2, TMP2, TMP1 + } + | add PC, PC, TMP2 + | ins_next + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | // RA = src*8, RD = num_const*8, JMP with RD = target + |.if DUALNUM + | lwzux TMP0, RA, BASE + | addi PC, PC, 4 + | lwz CARG2, 4(RA) + | lwzux TMP1, RD, KBASE + | checknum cr0, TMP0 + | lwz TMP2, -4(PC) + | checknum cr1, TMP1 + | decode_RD4 TMP2, TMP2 + | lwz CARG3, 4(RD) + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + if (vk) { + |->BC_ISEQN_Z: + } else { + |->BC_ISNEN_Z: + } + | bne cr0, >7 + | bne cr1, >8 + | cmpw CARG2, CARG3 + |4: + |.else + if (vk) { + |->BC_ISEQN_Z: // Dummy label. + } else { + |->BC_ISNEN_Z: // Dummy label. + } + | lwzx TMP0, BASE, RA + | addi PC, PC, 4 + | lfdx f0, BASE, RA + | lwz TMP2, -4(PC) + | lfdx f1, KBASE, RD + | decode_RD4 TMP2, TMP2 + | checknum TMP0 + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | bge >3 + | fcmpu cr0, f0, f1 + |.endif + if (vk) { + | bne >1 + | add PC, PC, TMP2 + |1: + |.if not FFI + |3: + |.endif + } else { + | beq >2 + |1: + |.if not FFI + |3: + |.endif + | add PC, PC, TMP2 + |2: + } + | ins_next + |.if FFI + |3: + | cmpwi TMP0, LJ_TCDATA + | beq ->vmeta_equal_cd + | b <1 + |.endif + |.if DUALNUM + |7: // RA is not an integer. + | bge cr0, <3 + | // RA is a number. + | lfd f0, 0(RA) + | blt cr1, >1 + | // RA is a number, RD is an integer. + | tonum_i f1, CARG3 + | b >2 + | + |8: // RA is an integer, RD is a number. 
+ | tonum_i f0, CARG2 + |1: + | lfd f1, 0(RD) + |2: + | fcmpu cr0, f0, f1 + | b <4 + |.endif + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target + | lwzx TMP0, BASE, RA + | srwi TMP1, RD, 3 + | lwz TMP2, 0(PC) + | not TMP1, TMP1 + | addi PC, PC, 4 + |.if FFI + | cmpwi TMP0, LJ_TCDATA + |.endif + | sub TMP0, TMP0, TMP1 + |.if FFI + | beq ->vmeta_equal_cd + |.endif + | decode_RD4 TMP2, TMP2 + | .gpr64 extsw TMP0, TMP0 + | addic TMP0, TMP0, -1 + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + | subfe TMP1, TMP1, TMP1 + if (vk) { + | and TMP2, TMP2, TMP1 + } else { + | andc TMP2, TMP2, TMP1 + } + | add PC, PC, TMP2 + | ins_next + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | // RA = dst*8 or unused, RD = src*8, JMP with RD = target + | lwzx TMP0, BASE, RD + | lwz INS, 0(PC) + | addi PC, PC, 4 + if (op == BC_IST || op == BC_ISF) { + | .gpr64 extsw TMP0, TMP0 + | subfic TMP0, TMP0, LJ_TTRUE + | decode_RD4 TMP2, INS + | subfe TMP1, TMP1, TMP1 + | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) + if (op == BC_IST) { + | andc TMP2, TMP2, TMP1 + } else { + | and TMP2, TMP2, TMP1 + } + | add PC, PC, TMP2 + } else { + | li TMP1, LJ_TFALSE + | lfdx f0, BASE, RD + | cmplw TMP0, TMP1 + if (op == BC_ISTC) { + | bge >1 + } else { + | blt >1 + } + | addis PC, PC, -(BCBIAS_J*4 >> 16) + | decode_RD4 TMP2, INS + | stfdx f0, BASE, RA + | add PC, PC, TMP2 + |1: + } + | ins_next + break; + + case BC_ISTYPE: + | // RA = src*8, RD = -type*8 + | lwzx TMP0, BASE, RA + | srwi TMP1, RD, 3 + | ins_next1 + |.if not PPE and not GPR64 + | add. TMP0, TMP0, TMP1 + |.else + | neg TMP1, TMP1 + | cmpw TMP0, TMP1 + |.endif + | bne ->vmeta_istype + | ins_next2 + break; + case BC_ISNUM: + | // RA = src*8, RD = -(TISNUM-1)*8 + | lwzx TMP0, BASE, RA + | ins_next1 + | checknum TMP0 + | bge ->vmeta_istype + | ins_next2 + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | // RA = dst*8, RD = src*8 + | ins_next1 + | lfdx f0, BASE, RD + | stfdx f0, BASE, RA + | ins_next2 + break; + case BC_NOT: + | // RA = dst*8, RD = src*8 + | ins_next1 + | lwzx TMP0, BASE, RD + | .gpr64 extsw TMP0, TMP0 + | subfic TMP1, TMP0, LJ_TTRUE + | adde TMP0, TMP0, TMP1 + | stwx TMP0, BASE, RA + | ins_next2 + break; + case BC_UNM: + | // RA = dst*8, RD = src*8 + | lwzux TMP1, RD, BASE + | lwz TMP0, 4(RD) + | checknum TMP1 + |.if DUALNUM + | bne >5 + |.if GPR64 + | lus TMP2, 0x8000 + | neg TMP0, TMP0 + | cmplw TMP0, TMP2 + | beq >4 + |.else + | nego. TMP0, TMP0 + | bso >4 + |1: + |.endif + | ins_next1 + | stwux TISNUM, RA, BASE + | stw TMP0, 4(RA) + |3: + | ins_next2 + |4: + |.if not GPR64 + | // Potential overflow. + | checkov TMP1, <1 // Ignore unrelated overflow. + |.endif + | lus TMP1, 0x41e0 // 2^31. + | li TMP0, 0 + | b >7 + |.endif + |5: + | bge ->vmeta_unm + | xoris TMP1, TMP1, 0x8000 + |7: + | ins_next1 + | stwux TMP1, RA, BASE + | stw TMP0, 4(RA) + |.if DUALNUM + | b <3 + |.else + | ins_next2 + |.endif + break; + case BC_LEN: + | // RA = dst*8, RD = src*8 + | lwzux TMP0, RD, BASE + | lwz CARG1, 4(RD) + | checkstr TMP0; bne >2 + | lwz CRET1, STR:CARG1->len + |1: + |.if DUALNUM + | ins_next1 + | stwux TISNUM, RA, BASE + | stw CRET1, 4(RA) + |.else + | tonum_u f0, CRET1 // Result is a non-negative integer. 
+ | ins_next1 + | stfdx f0, BASE, RA + |.endif + | ins_next2 + |2: + | checktab TMP0; bne ->vmeta_len +#if LJ_52 + | lwz TAB:TMP2, TAB:CARG1->metatable + | cmplwi TAB:TMP2, 0 + | bne >9 + |3: +#endif + |->BC_LEN_Z: + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). + | b <1 +#if LJ_52 + |9: + | lbz TMP0, TAB:TMP2->nomm + | andix. TMP0, TMP0, 1<vmeta_len +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithpre + | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | lwzx TMP1, BASE, RB + | .if DUALNUM + | lwzx TMP2, KBASE, RC + | .endif + | lfdx f14, BASE, RB + | lfdx f15, KBASE, RC + | .if DUALNUM + | checknum cr0, TMP1 + | checknum cr1, TMP2 + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | bge ->vmeta_arith_vn + | .else + | checknum TMP1; bge ->vmeta_arith_vn + | .endif + || break; + ||case 1: + | lwzx TMP1, BASE, RB + | .if DUALNUM + | lwzx TMP2, KBASE, RC + | .endif + | lfdx f15, BASE, RB + | lfdx f14, KBASE, RC + | .if DUALNUM + | checknum cr0, TMP1 + | checknum cr1, TMP2 + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | bge ->vmeta_arith_nv + | .else + | checknum TMP1; bge ->vmeta_arith_nv + | .endif + || break; + ||default: + | lwzx TMP1, BASE, RB + | lwzx TMP2, BASE, RC + | lfdx f14, BASE, RB + | lfdx f15, BASE, RC + | checknum cr0, TMP1 + | checknum cr1, TMP2 + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | bge ->vmeta_arith_vv + || break; + ||} + |.endmacro + | + |.macro ins_arithfallback, ins + ||switch (vk) { + ||case 0: + | ins ->vmeta_arith_vn2 + || break; + ||case 1: + | ins ->vmeta_arith_nv2 + || break; + ||default: + | ins ->vmeta_arith_vv2 + || break; + ||} + |.endmacro + | + |.macro intmod, a, b, c + | bl ->vm_modi + |.endmacro + | + |.macro fpmod, a, b, c + |->BC_MODVN_Z: + | fdiv FARG1, b, c + | // NYI: Use internal implementation of floor. + | blex floor // floor(b/c) + | fmul a, FARG1, c + | fsub a, b, a // b - floor(b/c)*c + |.endmacro + | + |.macro ins_arithfp, fpins + | ins_arithpre + |.if "fpins" == "fpmod_" + | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. + |.else + | fpins f0, f14, f15 + | ins_next1 + | stfdx f0, BASE, RA + | ins_next2 + |.endif + |.endmacro + | + |.macro ins_arithdn, intins, fpins + | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | lwzux TMP1, RB, BASE + | lwzux TMP2, RC, KBASE + | lwz CARG1, 4(RB) + | checknum cr0, TMP1 + | lwz CARG2, 4(RC) + || break; + ||case 1: + | lwzux TMP1, RB, BASE + | lwzux TMP2, RC, KBASE + | lwz CARG2, 4(RB) + | checknum cr0, TMP1 + | lwz CARG1, 4(RC) + || break; + ||default: + | lwzux TMP1, RB, BASE + | lwzux TMP2, RC, BASE + | lwz CARG1, 4(RB) + | checknum cr0, TMP1 + | lwz CARG2, 4(RC) + || break; + ||} + | checknum cr1, TMP2 + | bne >5 + | bne cr1, >5 + | intins CARG1, CARG1, CARG2 + | bso >4 + |1: + | ins_next1 + | stwux TISNUM, RA, BASE + | stw CARG1, 4(RA) + |2: + | ins_next2 + |4: // Overflow. + | checkov TMP0, <1 // Ignore unrelated overflow. + | ins_arithfallback b + |5: // FP variant. + ||if (vk == 1) { + | lfd f15, 0(RB) + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | lfd f14, 0(RC) + ||} else { + | lfd f14, 0(RB) + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | lfd f15, 0(RC) + ||} + | ins_arithfallback bge + |.if "fpins" == "fpmod_" + | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 
+ |.else + | fpins f0, f14, f15 + | ins_next1 + | stfdx f0, BASE, RA + | b <2 + |.endif + |.endmacro + | + |.macro ins_arith, intins, fpins + |.if DUALNUM + | ins_arithdn intins, fpins + |.else + | ins_arithfp fpins + |.endif + |.endmacro + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + |.if GPR64 + |.macro addo32., y, a, b + | // Need to check overflow for (a<<32) + (b<<32). + | rldicr TMP0, a, 32, 31 + | rldicr TMP3, b, 32, 31 + | addo. TMP0, TMP0, TMP3 + | add y, a, b + |.endmacro + | ins_arith addo32., fadd + |.else + | ins_arith addo., fadd + |.endif + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + |.if GPR64 + |.macro subo32., y, a, b + | // Need to check overflow for (a<<32) - (b<<32). + | rldicr TMP0, a, 32, 31 + | rldicr TMP3, b, 32, 31 + | subo. TMP0, TMP0, TMP3 + | sub y, a, b + |.endmacro + | ins_arith subo32., fsub + |.else + | ins_arith subo., fsub + |.endif + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arith mullwo., fmul + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arithfp fdiv + break; + case BC_MODVN: + | ins_arith intmod, fpmod + break; + case BC_MODNV: case BC_MODVV: + | ins_arith intmod, fpmod_ + break; + case BC_POW: + | // NYI: (partial) integer arithmetic. + | lwzx TMP1, BASE, RB + | lfdx FARG1, BASE, RB + | lwzx TMP2, BASE, RC + | lfdx FARG2, BASE, RC + | checknum cr0, TMP1 + | checknum cr1, TMP2 + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | bge ->vmeta_arith_vv + | blex pow + | ins_next1 + | stfdx FARG1, BASE, RA + | ins_next2 + break; + + case BC_CAT: + | // RA = dst*8, RB = src_start*8, RC = src_end*8 + | sub CARG3, RC, RB + | stp BASE, L->base + | add CARG2, BASE, RC + | mr SAVE0, RB + |->BC_CAT_Z: + | stw PC, SAVE_PC + | mr CARG1, L + | srwi CARG3, CARG3, 3 + | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // Returns NULL (finished) or TValue * (metamethod). + | cmplwi CRET1, 0 + | lp BASE, L->base + | bne ->vmeta_binop + | ins_next1 + | lfdx f0, BASE, SAVE0 // Copy result from RB to RA. + | stfdx f0, BASE, RA + | ins_next2 + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | // RA = dst*8, RD = str_const*8 (~) + | srwi TMP1, RD, 1 + | subfic TMP1, TMP1, -4 + | ins_next1 + | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4 + | li TMP2, LJ_TSTR + | stwux TMP2, RA, BASE + | stw TMP0, 4(RA) + | ins_next2 + break; + case BC_KCDATA: + |.if FFI + | // RA = dst*8, RD = cdata_const*8 (~) + | srwi TMP1, RD, 1 + | subfic TMP1, TMP1, -4 + | ins_next1 + | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4 + | li TMP2, LJ_TCDATA + | stwux TMP2, RA, BASE + | stw TMP0, 4(RA) + | ins_next2 + |.endif + break; + case BC_KSHORT: + | // RA = dst*8, RD = int16_literal*8 + |.if DUALNUM + | slwi RD, RD, 13 + | srawi RD, RD, 16 + | ins_next1 + | stwux TISNUM, RA, BASE + | stw RD, 4(RA) + | ins_next2 + |.else + | // The soft-float approach is faster. + | slwi RD, RD, 13 + | srawi TMP1, RD, 31 + | xor TMP2, TMP1, RD + | sub TMP2, TMP2, TMP1 // TMP2 = abs(x) + | cntlzw TMP3, TMP2 + | subfic TMP1, TMP3, 0x40d // TMP1 = exponent-1 + | slw TMP2, TMP2, TMP3 // TMP2 = left aligned mantissa + | subfic TMP3, RD, 0 + | slwi TMP1, TMP1, 20 + | rlwimi RD, TMP2, 21, 1, 31 // hi = sign(x) | (mantissa>>11) + | subfe TMP0, TMP0, TMP0 + | add RD, RD, TMP1 // hi = hi + exponent-1 + | and RD, RD, TMP0 // hi = x == 0 ? 
0 : hi + | ins_next1 + | stwux RD, RA, BASE + | stw ZERO, 4(RA) + | ins_next2 + |.endif + break; + case BC_KNUM: + | // RA = dst*8, RD = num_const*8 + | ins_next1 + | lfdx f0, KBASE, RD + | stfdx f0, BASE, RA + | ins_next2 + break; + case BC_KPRI: + | // RA = dst*8, RD = primitive_type*8 (~) + | srwi TMP1, RD, 3 + | not TMP0, TMP1 + | ins_next1 + | stwx TMP0, BASE, RA + | ins_next2 + break; + case BC_KNIL: + | // RA = base*8, RD = end*8 + | stwx TISNIL, BASE, RA + | addi RA, RA, 8 + |1: + | stwx TISNIL, BASE, RA + | cmpw RA, RD + | addi RA, RA, 8 + | blt <1 + | ins_next_ + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | // RA = dst*8, RD = uvnum*8 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RD, RD, 1 + | addi RD, RD, offsetof(GCfuncL, uvptr) + | lwzx UPVAL:RB, LFUNC:RB, RD + | ins_next1 + | lwz TMP1, UPVAL:RB->v + | lfd f0, 0(TMP1) + | stfdx f0, BASE, RA + | ins_next2 + break; + case BC_USETV: + | // RA = uvnum*8, RD = src*8 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RA, RA, 1 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | lfdux f0, RD, BASE + | lwzx UPVAL:RB, LFUNC:RB, RA + | lbz TMP3, UPVAL:RB->marked + | lwz CARG2, UPVAL:RB->v + | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) + | lbz TMP0, UPVAL:RB->closed + | lwz TMP2, 0(RD) + | stfd f0, 0(CARG2) + | cmplwi cr1, TMP0, 0 + | lwz TMP1, 4(RD) + | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq + | subi TMP2, TMP2, (LJ_TNUMX+1) + | bne >2 // Upvalue is closed and black? + |1: + | ins_next + | + |2: // Check if new value is collectable. + | cmplwi TMP2, LJ_TISGCV - (LJ_TNUMX+1) + | bge <1 // tvisgcv(v) + | lbz TMP3, GCOBJ:TMP1->gch.marked + | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(v) + | la CARG1, GG_DISP2G(DISPATCH) + | // Crossed a write barrier. Move the barrier forward. + | beq <1 + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | b <1 + break; + case BC_USETS: + | // RA = uvnum*8, RD = str_const*8 (~) + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi TMP1, RD, 1 + | srwi RA, RA, 1 + | subfic TMP1, TMP1, -4 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4 + | lwzx UPVAL:RB, LFUNC:RB, RA + | lbz TMP3, UPVAL:RB->marked + | lwz CARG2, UPVAL:RB->v + | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) + | lbz TMP3, STR:TMP1->marked + | lbz TMP2, UPVAL:RB->closed + | li TMP0, LJ_TSTR + | stw STR:TMP1, 4(CARG2) + | stw TMP0, 0(CARG2) + | bne >2 + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(str) + | cmplwi cr1, TMP2, 0 + | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq + | la CARG1, GG_DISP2G(DISPATCH) + | // Crossed a write barrier. Move the barrier forward. 
+ | beq <1 + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | b <1 + break; + case BC_USETN: + | // RA = uvnum*8, RD = num_const*8 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RA, RA, 1 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | lfdx f0, KBASE, RD + | lwzx UPVAL:RB, LFUNC:RB, RA + | ins_next1 + | lwz TMP1, UPVAL:RB->v + | stfd f0, 0(TMP1) + | ins_next2 + break; + case BC_USETP: + | // RA = uvnum*8, RD = primitive_type*8 (~) + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RA, RA, 1 + | srwi TMP0, RD, 3 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | not TMP0, TMP0 + | lwzx UPVAL:RB, LFUNC:RB, RA + | ins_next1 + | lwz TMP1, UPVAL:RB->v + | stw TMP0, 0(TMP1) + | ins_next2 + break; + + case BC_UCLO: + | // RA = level*8, RD = target + | lwz TMP1, L->openupval + | branch_RD // Do this first since RD is not saved. + | stp BASE, L->base + | cmplwi TMP1, 0 + | mr CARG1, L + | beq >1 + | add CARG2, BASE, RA + | bl extern lj_func_closeuv // (lua_State *L, TValue *level) + | lp BASE, L->base + |1: + | ins_next + break; + + case BC_FNEW: + | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype) + | srwi TMP1, RD, 1 + | stp BASE, L->base + | subfic TMP1, TMP1, -4 + | stw PC, SAVE_PC + | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4 + | mr CARG1, L + | lwz CARG3, FRAME_FUNC(BASE) + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | bl extern lj_func_newL_gc + | // Returns GCfuncL *. + | lp BASE, L->base + | li TMP0, LJ_TFUNC + | stwux TMP0, RA, BASE + | stw LFUNC:CRET1, 4(RA) + | ins_next + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~) + | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH) + | mr CARG1, L + | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) + | stp BASE, L->base + | cmplw TMP0, TMP1 + | stw PC, SAVE_PC + | bge >5 + |1: + if (op == BC_TNEW) { + | rlwinm CARG2, RD, 29, 21, 31 + | rlwinm CARG3, RD, 18, 27, 31 + | cmpwi CARG2, 0x7ff; beq >3 + |2: + | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Returns Table *. + } else { + | srwi TMP1, RD, 1 + | subfic TMP1, TMP1, -4 + | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4 + | bl extern lj_tab_dup // (lua_State *L, Table *kt) + | // Returns Table *. + } + | lp BASE, L->base + | li TMP0, LJ_TTAB + | stwux TMP0, RA, BASE + | stw TAB:CRET1, 4(RA) + | ins_next + if (op == BC_TNEW) { + |3: + | li CARG2, 0x801 + | b <2 + } + |5: + | mr SAVE0, RD + | bl extern lj_gc_step_fixtop // (lua_State *L) + | mr RD, SAVE0 + | mr CARG1, L + | b <1 + break; + + case BC_GGET: + | // RA = dst*8, RD = str_const*8 (~) + case BC_GSET: + | // RA = src*8, RD = str_const*8 (~) + | lwz LFUNC:TMP2, FRAME_FUNC(BASE) + | srwi TMP1, RD, 1 + | lwz TAB:RB, LFUNC:TMP2->env + | subfic TMP1, TMP1, -4 + | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 + if (op == BC_GGET) { + | b ->BC_TGETS_Z + } else { + | b ->BC_TSETS_Z + } + break; + + case BC_TGETV: + | // RA = dst*8, RB = table*8, RC = key*8 + | lwzux CARG1, RB, BASE + | lwzux CARG2, RC, BASE + | lwz TAB:RB, 4(RB) + |.if DUALNUM + | lwz RC, 4(RC) + |.else + | lfd f0, 0(RC) + |.endif + | checktab CARG1 + | checknum cr1, CARG2 + | bne ->vmeta_tgetv + |.if DUALNUM + | lwz TMP0, TAB:RB->asize + | bne cr1, >5 + | lwz TMP1, TAB:RB->array + | cmplw TMP0, RC + | slwi TMP2, RC, 3 + |.else + | bge cr1, >5 + | // Convert number key to integer, check for integerness and range. 
+ | fctiwz f1, f0 + | fadd f2, f0, TOBIT + | stfd f1, TMPD + | lwz TMP0, TAB:RB->asize + | fsub f2, f2, TOBIT + | lwz TMP2, TMPD_LO + | lwz TMP1, TAB:RB->array + | fcmpu cr1, f0, f2 + | cmplw cr0, TMP0, TMP2 + | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq + | slwi TMP2, TMP2, 3 + |.endif + | ble ->vmeta_tgetv // Integer key and in array part? + | lwzx TMP0, TMP1, TMP2 + | lfdx f14, TMP1, TMP2 + | checknil TMP0; beq >2 + |1: + | ins_next1 + | stfdx f14, BASE, RA + | ins_next2 + | + |2: // Check for __index if table value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable: done. + | lbz TMP0, TAB:TMP2->nomm + | andix. TMP0, TMP0, 1<vmeta_tgetv + | + |5: + | checkstr CARG2; bne ->vmeta_tgetv + |.if not DUALNUM + | lwz STR:RC, 4(RC) + |.endif + | b ->BC_TGETS_Z // String key? + break; + case BC_TGETS: + | // RA = dst*8, RB = table*8, RC = str_const*8 (~) + | lwzux CARG1, RB, BASE + | srwi TMP1, RC, 1 + | lwz TAB:RB, 4(RB) + | subfic TMP1, TMP1, -4 + | checktab CARG1 + | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 + | bne ->vmeta_tgets1 + |->BC_TGETS_Z: + | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8 + | lwz TMP0, TAB:RB->hmask + | lwz TMP1, STR:RC->hash + | lwz NODE:TMP2, TAB:RB->node + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | slwi TMP0, TMP1, 5 + | slwi TMP1, TMP1, 3 + | sub TMP1, TMP0, TMP1 + | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + |1: + | lwz CARG1, NODE:TMP2->key + | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2) + | lwz CARG2, NODE:TMP2->val + | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2) + | checkstr CARG1; bne >4 + | cmpw TMP0, STR:RC; bne >4 + | checknil CARG2; beq >5 // Key found, but nil value? + |3: + | stwux CARG2, RA, BASE + | stw TMP1, 4(RA) + | ins_next + | + |4: // Follow hash chain. + | lwz NODE:TMP2, NODE:TMP2->next + | cmplwi NODE:TMP2, 0 + | bne <1 + | // End of hash chain: key not found, nil result. + | li CARG2, LJ_TNIL + | + |5: // Check for __index if table value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <3 // No metatable: done. + | lbz TMP0, TAB:TMP2->nomm + | andix. TMP0, TMP0, 1<vmeta_tgets + break; + case BC_TGETB: + | // RA = dst*8, RB = table*8, RC = index*8 + | lwzux CARG1, RB, BASE + | srwi TMP0, RC, 3 + | lwz TAB:RB, 4(RB) + | checktab CARG1; bne ->vmeta_tgetb + | lwz TMP1, TAB:RB->asize + | lwz TMP2, TAB:RB->array + | cmplw TMP0, TMP1; bge ->vmeta_tgetb + | lwzx TMP1, TMP2, RC + | lfdx f0, TMP2, RC + | checknil TMP1; beq >5 + |1: + | ins_next1 + | stfdx f0, BASE, RA + | ins_next2 + | + |5: // Check for __index if table value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable: done. + | lbz TMP2, TAB:TMP2->nomm + | andix. TMP2, TMP2, 1<vmeta_tgetb // Caveat: preserve TMP0! + break; + case BC_TGETR: + | // RA = dst*8, RB = table*8, RC = key*8 + | add RB, BASE, RB + | lwz TAB:CARG1, 4(RB) + |.if DUALNUM + | add RC, BASE, RC + | lwz TMP0, TAB:CARG1->asize + | lwz CARG2, 4(RC) + | lwz TMP1, TAB:CARG1->array + |.else + | lfdx f0, BASE, RC + | lwz TMP0, TAB:CARG1->asize + | toint CARG2, f0 + | lwz TMP1, TAB:CARG1->array + |.endif + | cmplw TMP0, CARG2 + | slwi TMP2, CARG2, 3 + | ble ->vmeta_tgetr // In array part? 
+ | lfdx f14, TMP1, TMP2 + |->BC_TGETR_Z: + | ins_next1 + | stfdx f14, BASE, RA + | ins_next2 + break; + + case BC_TSETV: + | // RA = src*8, RB = table*8, RC = key*8 + | lwzux CARG1, RB, BASE + | lwzux CARG2, RC, BASE + | lwz TAB:RB, 4(RB) + |.if DUALNUM + | lwz RC, 4(RC) + |.else + | lfd f0, 0(RC) + |.endif + | checktab CARG1 + | checknum cr1, CARG2 + | bne ->vmeta_tsetv + |.if DUALNUM + | lwz TMP0, TAB:RB->asize + | bne cr1, >5 + | lwz TMP1, TAB:RB->array + | cmplw TMP0, RC + | slwi TMP0, RC, 3 + |.else + | bge cr1, >5 + | // Convert number key to integer, check for integerness and range. + | fctiwz f1, f0 + | fadd f2, f0, TOBIT + | stfd f1, TMPD + | lwz TMP0, TAB:RB->asize + | fsub f2, f2, TOBIT + | lwz TMP2, TMPD_LO + | lwz TMP1, TAB:RB->array + | fcmpu cr1, f0, f2 + | cmplw cr0, TMP0, TMP2 + | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq + | slwi TMP0, TMP2, 3 + |.endif + | ble ->vmeta_tsetv // Integer key and in array part? + | lwzx TMP2, TMP1, TMP0 + | lbz TMP3, TAB:RB->marked + | lfdx f14, BASE, RA + | checknil TMP2; beq >3 + |1: + | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table) + | stfdx f14, TMP1, TMP0 + | bne >7 + |2: + | ins_next + | + |3: // Check for __newindex if previous value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable: done. + | lbz TMP2, TAB:TMP2->nomm + | andix. TMP2, TMP2, 1<vmeta_tsetv + | + |5: + | checkstr CARG2; bne ->vmeta_tsetv + |.if not DUALNUM + | lwz STR:RC, 4(RC) + |.endif + | b ->BC_TSETS_Z // String key? + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, TMP3, TMP0 + | b <2 + break; + case BC_TSETS: + | // RA = src*8, RB = table*8, RC = str_const*8 (~) + | lwzux CARG1, RB, BASE + | srwi TMP1, RC, 1 + | lwz TAB:RB, 4(RB) + | subfic TMP1, TMP1, -4 + | checktab CARG1 + | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 + | bne ->vmeta_tsets1 + |->BC_TSETS_Z: + | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8 + | lwz TMP0, TAB:RB->hmask + | lwz TMP1, STR:RC->hash + | lwz NODE:TMP2, TAB:RB->node + | stb ZERO, TAB:RB->nomm // Clear metamethod cache. + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | lfdx f14, BASE, RA + | slwi TMP0, TMP1, 5 + | slwi TMP1, TMP1, 3 + | sub TMP1, TMP0, TMP1 + | lbz TMP3, TAB:RB->marked + | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + |1: + | lwz CARG1, NODE:TMP2->key + | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2) + | lwz CARG2, NODE:TMP2->val + | lwz NODE:TMP1, NODE:TMP2->next + | checkstr CARG1; bne >5 + | cmpw TMP0, STR:RC; bne >5 + | checknil CARG2; beq >4 // Key found, but nil value? + |2: + | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + | stfd f14, NODE:TMP2->val + | bne >7 + |3: + | ins_next + | + |4: // Check for __newindex if previous value is nil. + | lwz TAB:TMP1, TAB:RB->metatable + | cmplwi TAB:TMP1, 0 + | beq <2 // No metatable: done. + | lbz TMP0, TAB:TMP1->nomm + | andix. TMP0, TMP0, 1<vmeta_tsets + | + |5: // Follow hash chain. + | cmplwi NODE:TMP1, 0 + | mr NODE:TMP2, NODE:TMP1 + | bne <1 + | // End of hash chain: key not found, add a new one. + | + | // But check for __newindex first. + | lwz TAB:TMP1, TAB:RB->metatable + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | stw PC, SAVE_PC + | mr CARG1, L + | cmplwi TAB:TMP1, 0 + | stp BASE, L->base + | beq >6 // No metatable: continue. + | lbz TMP0, TAB:TMP1->nomm + | andix. TMP0, TMP0, 1<vmeta_tsets // 'no __newindex' flag NOT set: check. 
+ |6: + | li TMP0, LJ_TSTR + | stw STR:RC, 4(CARG3) + | mr CARG2, TAB:RB + | stw TMP0, 0(CARG3) + | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) + | // Returns TValue *. + | lp BASE, L->base + | stfd f14, 0(CRET1) + | b <3 // No 2nd write barrier needed. + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, TMP3, TMP0 + | b <3 + break; + case BC_TSETB: + | // RA = src*8, RB = table*8, RC = index*8 + | lwzux CARG1, RB, BASE + | srwi TMP0, RC, 3 + | lwz TAB:RB, 4(RB) + | checktab CARG1; bne ->vmeta_tsetb + | lwz TMP1, TAB:RB->asize + | lwz TMP2, TAB:RB->array + | lbz TMP3, TAB:RB->marked + | cmplw TMP0, TMP1 + | lfdx f14, BASE, RA + | bge ->vmeta_tsetb + | lwzx TMP1, TMP2, RC + | checknil TMP1; beq >5 + |1: + | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + | stfdx f14, TMP2, RC + | bne >7 + |2: + | ins_next + | + |5: // Check for __newindex if previous value is nil. + | lwz TAB:TMP1, TAB:RB->metatable + | cmplwi TAB:TMP1, 0 + | beq <1 // No metatable: done. + | lbz TMP1, TAB:TMP1->nomm + | andix. TMP1, TMP1, 1<vmeta_tsetb // Caveat: preserve TMP0! + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, TMP3, TMP0 + | b <2 + break; + case BC_TSETR: + | // RA = dst*8, RB = table*8, RC = key*8 + | add RB, BASE, RB + | lwz TAB:CARG2, 4(RB) + |.if DUALNUM + | add RC, BASE, RC + | lbz TMP3, TAB:CARG2->marked + | lwz TMP0, TAB:CARG2->asize + | lwz CARG3, 4(RC) + | lwz TMP1, TAB:CARG2->array + |.else + | lfdx f0, BASE, RC + | lbz TMP3, TAB:CARG2->marked + | lwz TMP0, TAB:CARG2->asize + | toint CARG3, f0 + | lwz TMP1, TAB:CARG2->array + |.endif + | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table) + | bne >7 + |2: + | cmplw TMP0, CARG3 + | slwi TMP2, CARG3, 3 + | lfdx f14, BASE, RA + | ble ->vmeta_tsetr // In array part? + | ins_next1 + | stfdx f14, TMP1, TMP2 + | ins_next2 + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG2, TMP3, TMP2 + | b <2 + break; + + + case BC_TSETM: + | // RA = base*8 (table at base-1), RD = num_const*8 (start index) + | add RA, BASE, RA + |1: + | add TMP3, KBASE, RD + | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table. + | addic. TMP0, MULTRES, -8 + | lwz TMP3, 4(TMP3) // Integer constant is in lo-word. + | srwi CARG3, TMP0, 3 + | beq >4 // Nothing to copy? + | add CARG3, CARG3, TMP3 + | lwz TMP2, TAB:CARG2->asize + | slwi TMP1, TMP3, 3 + | lbz TMP3, TAB:CARG2->marked + | cmplw CARG3, TMP2 + | add TMP2, RA, TMP0 + | lwz TMP0, TAB:CARG2->array + | bgt >5 + | add TMP1, TMP1, TMP0 + | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + |3: // Copy result slots to table. + | lfd f0, 0(RA) + | addi RA, RA, 8 + | cmpw cr1, RA, TMP2 + | stfd f0, 0(TMP1) + | addi TMP1, TMP1, 8 + | blt cr1, <3 + | bne >7 + |4: + | ins_next + | + |5: // Need to resize array part. + | stp BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | mr SAVE0, RD + | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) + | // Must not reallocate the stack. + | mr RD, SAVE0 + | b <1 + | + |7: // Possible table write barrier for any value. Skip valiswhite check. + | barrierback TAB:CARG2, TMP3, TMP0 + | b <4 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALLM: + | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8 + | add NARGS8:RC, NARGS8:RC, MULTRES + | // Fall through. Assumes BC_CALL follows. 
+ break; + case BC_CALL: + | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8 + | mr TMP2, BASE + | lwzux TMP0, BASE, RA + | lwz LFUNC:RB, 4(BASE) + | subi NARGS8:RC, NARGS8:RC, 8 + | addi BASE, BASE, 8 + | checkfunc TMP0; bne ->vmeta_call + | ins_call + break; + + case BC_CALLMT: + | // RA = base*8, (RB = 0,) RC = extra_nargs*8 + | add NARGS8:RC, NARGS8:RC, MULTRES + | // Fall through. Assumes BC_CALLT follows. + break; + case BC_CALLT: + | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 + | lwzux TMP0, RA, BASE + | lwz LFUNC:RB, 4(RA) + | subi NARGS8:RC, NARGS8:RC, 8 + | lwz TMP1, FRAME_PC(BASE) + | checkfunc TMP0 + | addi RA, RA, 8 + | bne ->vmeta_callt + |->BC_CALLT_Z: + | andix. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand. + | lbz TMP3, LFUNC:RB->ffid + | xori TMP2, TMP1, FRAME_VARG + | cmplwi cr1, NARGS8:RC, 0 + | bne >7 + |1: + | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC. + | li TMP2, 0 + | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function? + | beq cr1, >3 + |2: + | addi TMP3, TMP2, 8 + | lfdx f0, RA, TMP2 + | cmplw cr1, TMP3, NARGS8:RC + | stfdx f0, BASE, TMP2 + | mr TMP2, TMP3 + | bne cr1, <2 + |3: + | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt + | beq >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function with a Lua frame below. + | lwz INS, -4(TMP1) + | decode_RA8 RA, INS + | sub TMP1, BASE, RA + | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1) + | lwz TMP1, LFUNC:TMP1->pc + | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE. + | b <4 + | + |7: // Tailcall from a vararg function. + | andix. TMP0, TMP2, FRAME_TYPEP + | bne <1 // Vararg frame below? + | sub BASE, BASE, TMP2 // Relocate BASE down. + | lwz TMP1, FRAME_PC(BASE) + | andix. TMP0, TMP1, FRAME_TYPE + | b <1 + break; + + case BC_ITERC: + | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8)) + | mr TMP2, BASE + | add BASE, BASE, RA + | lwz TMP1, -24(BASE) + | lwz LFUNC:RB, -20(BASE) + | lfd f1, -8(BASE) + | lfd f0, -16(BASE) + | stw TMP1, 0(BASE) // Copy callable. + | stw LFUNC:RB, 4(BASE) + | checkfunc TMP1 + | stfd f1, 16(BASE) // Copy control var. + | li NARGS8:RC, 16 // Iterators get 2 arguments. + | stfdu f0, 8(BASE) // Copy state. + | bne ->vmeta_call + | ins_call + break; + + case BC_ITERN: + | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8) + |.if JIT + | // NYI: add hotloop, record BC_ITERN. + |.endif + | add RA, BASE, RA + | lwz TAB:RB, -12(RA) + | lwz RC, -4(RA) // Get index from control var. + | lwz TMP0, TAB:RB->asize + | lwz TMP1, TAB:RB->array + | addi PC, PC, 4 + |1: // Traverse array part. + | cmplw RC, TMP0 + | slwi TMP3, RC, 3 + | bge >5 // Index points after array part? + | lwzx TMP2, TMP1, TMP3 + | lfdx f0, TMP1, TMP3 + | checknil TMP2 + | lwz INS, -4(PC) + | beq >4 + |.if DUALNUM + | stw RC, 4(RA) + | stw TISNUM, 0(RA) + |.else + | tonum_u f1, RC + |.endif + | addi RC, RC, 1 + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | stfd f0, 8(RA) + | decode_RD4 TMP1, INS + | stw RC, -4(RA) // Update control var. + | add PC, TMP1, TMP3 + |.if not DUALNUM + | stfd f1, 0(RA) + |.endif + |3: + | ins_next + | + |4: // Skip holes in array part. + | addi RC, RC, 1 + | b <1 + | + |5: // Traverse hash part. + | lwz TMP1, TAB:RB->hmask + | sub RC, RC, TMP0 + | lwz TMP2, TAB:RB->node + |6: + | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1. 
+ | slwi TMP3, RC, 5 + | bgty <3 + | slwi RB, RC, 3 + | sub TMP3, TMP3, RB + | lwzx RB, TMP2, TMP3 + | lfdx f0, TMP2, TMP3 + | add NODE:TMP3, TMP2, TMP3 + | checknil RB + | lwz INS, -4(PC) + | beq >7 + | lfd f1, NODE:TMP3->key + | addis TMP2, PC, -(BCBIAS_J*4 >> 16) + | stfd f0, 8(RA) + | add RC, RC, TMP0 + | decode_RD4 TMP1, INS + | stfd f1, 0(RA) + | addi RC, RC, 1 + | add PC, TMP1, TMP2 + | stw RC, -4(RA) // Update control var. + | b <3 + | + |7: // Skip holes in hash part. + | addi RC, RC, 1 + | b <6 + break; + + case BC_ISNEXT: + | // RA = base*8, RD = target (points to ITERN) + | add RA, BASE, RA + | lwz TMP0, -24(RA) + | lwz CFUNC:TMP1, -20(RA) + | lwz TMP2, -16(RA) + | lwz TMP3, -8(RA) + | cmpwi cr0, TMP2, LJ_TTAB + | cmpwi cr1, TMP0, LJ_TFUNC + | cmpwi cr6, TMP3, LJ_TNIL + | bne cr1, >5 + | lbz TMP1, CFUNC:TMP1->ffid + | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq + | cmpwi cr7, TMP1, FF_next_N + | srwi TMP0, RD, 1 + | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq + | add TMP3, PC, TMP0 + | bne cr0, >5 + | lus TMP1, 0xfffe + | ori TMP1, TMP1, 0x7fff + | stw ZERO, -4(RA) // Initialize control var. + | stw TMP1, -8(RA) + | addis PC, TMP3, -(BCBIAS_J*4 >> 16) + |1: + | ins_next + |5: // Despecialize bytecode if any of the checks fail. + | li TMP0, BC_JMP + | li TMP1, BC_ITERC + | stb TMP0, -1(PC) + | addis PC, TMP3, -(BCBIAS_J*4 >> 16) + | stb TMP1, 3(PC) + | b <1 + break; + + case BC_VARG: + | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 + | lwz TMP0, FRAME_PC(BASE) + | add RC, BASE, RC + | add RA, BASE, RA + | addi RC, RC, FRAME_VARG + | add TMP2, RA, RB + | subi TMP3, BASE, 8 // TMP3 = vtop + | sub RC, RC, TMP0 // RC = vbase + | // Note: RC may now be even _above_ BASE if nargs was < numparams. + | cmplwi cr1, RB, 0 + |.if PPE + | sub TMP1, TMP3, RC + | cmpwi TMP1, 0 + |.else + | sub. TMP1, TMP3, RC + |.endif + | beq cr1, >5 // Copy all varargs? + | subi TMP2, TMP2, 16 + | ble >2 // No vararg slots? + |1: // Copy vararg slots to destination slots. + | lfd f0, 0(RC) + | addi RC, RC, 8 + | stfd f0, 0(RA) + | cmplw RA, TMP2 + | cmplw cr1, RC, TMP3 + | bge >3 // All destination slots filled? + | addi RA, RA, 8 + | blt cr1, <1 // More vararg slots? + |2: // Fill up remainder with nil. + | stw TISNIL, 0(RA) + | cmplw RA, TMP2 + | addi RA, RA, 8 + | blt <2 + |3: + | ins_next + | + |5: // Copy all varargs. + | lwz TMP0, L->maxstack + | li MULTRES, 8 // MULTRES = (0+1)*8 + | bley <3 // No vararg slots? + | add TMP2, RA, TMP1 + | cmplw TMP2, TMP0 + | addi MULTRES, TMP1, 8 + | bgt >7 + |6: + | lfd f0, 0(RC) + | addi RC, RC, 8 + | stfd f0, 0(RA) + | cmplw RC, TMP3 + | addi RA, RA, 8 + | blt <6 // More vararg slots? + | b <3 + | + |7: // Grow stack for varargs. + | mr CARG1, L + | stp RA, L->top + | sub SAVE0, RC, BASE // Need delta, because BASE may change. + | stp BASE, L->base + | sub RA, RA, BASE + | stw PC, SAVE_PC + | srwi CARG2, TMP1, 3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | lp BASE, L->base + | add RA, BASE, RA + | add RC, BASE, SAVE0 + | subi TMP3, BASE, 8 + | b <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | // RA = results*8, RD = extra_nresults*8 + | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8. + | // Fall through. Assumes BC_RET follows. + break; + + case BC_RET: + | // RA = results*8, RD = (nresults+1)*8 + | lwz PC, FRAME_PC(BASE) + | add RA, BASE, RA + | mr MULTRES, RD + |1: + | andix. 
TMP0, PC, FRAME_TYPE + | xori TMP1, PC, FRAME_VARG + | bne ->BC_RETV_Z + | + |->BC_RET_Z: + | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return + | lwz INS, -4(PC) + | cmpwi RD, 8 + | subi TMP2, BASE, 8 + | subi RC, RD, 8 + | decode_RB8 RB, INS + | beq >3 + | li TMP1, 0 + |2: + | addi TMP3, TMP1, 8 + | lfdx f0, RA, TMP1 + | cmpw TMP3, RC + | stfdx f0, TMP2, TMP1 + | beq >3 + | addi TMP1, TMP3, 8 + | lfdx f1, RA, TMP3 + | cmpw TMP1, RC + | stfdx f1, TMP2, TMP3 + | bne <2 + |3: + |5: + | cmplw RB, RD + | decode_RA8 RA, INS + | bgt >6 + | sub BASE, TMP2, RA + | lwz LFUNC:TMP1, FRAME_FUNC(BASE) + | ins_next1 + | lwz TMP1, LFUNC:TMP1->pc + | lwz KBASE, PC2PROTO(k)(TMP1) + | ins_next2 + | + |6: // Fill up results with nil. + | subi TMP1, RD, 8 + | addi RD, RD, 8 + | stwx TISNIL, TMP2, TMP1 + | b <5 + | + |->BC_RETV_Z: // Non-standard return case. + | andix. TMP2, TMP1, FRAME_TYPEP + | bne ->vm_return + | // Return from vararg function: relocate BASE down. + | sub BASE, BASE, TMP1 + | lwz PC, FRAME_PC(BASE) + | b <1 + break; + + case BC_RET0: case BC_RET1: + | // RA = results*8, RD = (nresults+1)*8 + | lwz PC, FRAME_PC(BASE) + | add RA, BASE, RA + | mr MULTRES, RD + | andix. TMP0, PC, FRAME_TYPE + | xori TMP1, PC, FRAME_VARG + | bney ->BC_RETV_Z + | + | lwz INS, -4(PC) + | subi TMP2, BASE, 8 + | decode_RB8 RB, INS + if (op == BC_RET1) { + | lfd f0, 0(RA) + | stfd f0, 0(TMP2) + } + |5: + | cmplw RB, RD + | decode_RA8 RA, INS + | bgt >6 + | sub BASE, TMP2, RA + | lwz LFUNC:TMP1, FRAME_FUNC(BASE) + | ins_next1 + | lwz TMP1, LFUNC:TMP1->pc + | lwz KBASE, PC2PROTO(k)(TMP1) + | ins_next2 + | + |6: // Fill up results with nil. + | subi TMP1, RD, 8 + | addi RD, RD, 8 + | stwx TISNIL, TMP2, TMP1 + | b <5 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + case BC_FORL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IFORL follows. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + | // RA = base*8, RD = target (after end of loop or start of loop) + vk = (op == BC_IFORL || op == BC_JFORL); + |.if DUALNUM + | // Integer loop. + | lwzux TMP1, RA, BASE + | lwz CARG1, FORL_IDX*8+4(RA) + | cmplw cr0, TMP1, TISNUM + if (vk) { + | lwz CARG3, FORL_STEP*8+4(RA) + | bne >9 + |.if GPR64 + | // Need to check overflow for (a<<32) + (b<<32). + | rldicr TMP0, CARG1, 32, 31 + | rldicr TMP2, CARG3, 32, 31 + | add CARG1, CARG1, CARG3 + | addo. TMP0, TMP0, TMP2 + |.else + | addo. CARG1, CARG1, CARG3 + |.endif + | cmpwi cr6, CARG3, 0 + | lwz CARG2, FORL_STOP*8+4(RA) + | bso >6 + |4: + | stw CARG1, FORL_IDX*8+4(RA) + } else { + | lwz TMP3, FORL_STEP*8(RA) + | lwz CARG3, FORL_STEP*8+4(RA) + | lwz TMP2, FORL_STOP*8(RA) + | lwz CARG2, FORL_STOP*8+4(RA) + | cmplw cr7, TMP3, TISNUM + | cmplw cr1, TMP2, TISNUM + | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq + | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq + | cmpwi cr6, CARG3, 0 + | bne >9 + } + | blt cr6, >5 + | cmpw CARG1, CARG2 + |1: + | stw TISNUM, FORL_EXT*8(RA) + if (op != BC_JFORL) { + | srwi RD, RD, 1 + } + | stw CARG1, FORL_EXT*8+4(RA) + if (op != BC_JFORL) { + | add RD, PC, RD + } + if (op == BC_FORI) { + | bgt >3 // See FP loop below. + } else if (op == BC_JFORI) { + | addis PC, RD, -(BCBIAS_J*4 >> 16) + | bley >7 + } else if (op == BC_IFORL) { + | bgt >2 + | addis PC, RD, -(BCBIAS_J*4 >> 16) + } else { + | bley =>BC_JLOOP + } + |2: + | ins_next + |5: // Invert check for negative step. + | cmpw CARG2, CARG1 + | b <1 + if (vk) { + |6: // Potential overflow. 
+ | checkov TMP0, <4 // Ignore unrelated overflow. + | b <2 + } + |.endif + if (vk) { + |.if DUALNUM + |9: // FP loop. + | lfd f1, FORL_IDX*8(RA) + |.else + | lfdux f1, RA, BASE + |.endif + | lfd f3, FORL_STEP*8(RA) + | lfd f2, FORL_STOP*8(RA) + | lwz TMP3, FORL_STEP*8(RA) + | fadd f1, f1, f3 + | stfd f1, FORL_IDX*8(RA) + } else { + |.if DUALNUM + |9: // FP loop. + |.else + | lwzux TMP1, RA, BASE + | lwz TMP3, FORL_STEP*8(RA) + | lwz TMP2, FORL_STOP*8(RA) + | cmplw cr0, TMP1, TISNUM + | cmplw cr7, TMP3, TISNUM + | cmplw cr1, TMP2, TISNUM + |.endif + | lfd f1, FORL_IDX*8(RA) + | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt + | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | lfd f2, FORL_STOP*8(RA) + | bge ->vmeta_for + } + | cmpwi cr6, TMP3, 0 + if (op != BC_JFORL) { + | srwi RD, RD, 1 + } + | stfd f1, FORL_EXT*8(RA) + if (op != BC_JFORL) { + | add RD, PC, RD + } + | fcmpu cr0, f1, f2 + if (op == BC_JFORI) { + | addis PC, RD, -(BCBIAS_J*4 >> 16) + } + | blt cr6, >5 + if (op == BC_FORI) { + | bgt >3 + } else if (op == BC_IFORL) { + |.if DUALNUM + | bgty <2 + |.else + | bgt >2 + |.endif + |1: + | addis PC, RD, -(BCBIAS_J*4 >> 16) + } else if (op == BC_JFORI) { + | bley >7 + } else { + | bley =>BC_JLOOP + } + |.if DUALNUM + | b <2 + |.else + |2: + | ins_next + |.endif + |5: // Negative step. + if (op == BC_FORI) { + | bge <2 + |3: // Used by integer loop, too. + | addis PC, RD, -(BCBIAS_J*4 >> 16) + } else if (op == BC_IFORL) { + | bgey <1 + } else if (op == BC_JFORI) { + | bgey >7 + } else { + | bgey =>BC_JLOOP + } + | b <2 + if (op == BC_JFORI) { + |7: + | lwz INS, -4(PC) + | decode_RD8 RD, INS + | b =>BC_JLOOP + } + break; + + case BC_ITERL: + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_IITERL follows. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | // RA = base*8, RD = target + | lwzux TMP1, RA, BASE + | lwz TMP2, 4(RA) + | checknil TMP1; beq >1 // Stop if iterator returned nil. + if (op == BC_JITERL) { + | stw TMP1, -8(RA) + | stw TMP2, -4(RA) + | b =>BC_JLOOP + } else { + | branch_RD // Otherwise save control var + branch. + | stw TMP1, -8(RA) + | stw TMP2, -4(RA) + } + |1: + | ins_next + break; + + case BC_LOOP: + | // RA = base*8, RD = target (loop extent) + | // Note: RA/RD is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. + |.if JIT + | hotloop + |.endif + | // Fall through. Assumes BC_ILOOP follows. + break; + + case BC_ILOOP: + | // RA = base*8, RD = target (loop extent) + | ins_next + break; + + case BC_JLOOP: + |.if JIT + | // RA = base*8 (ignored), RD = traceno*8 + | lwz TMP1, DISPATCH_J(trace)(DISPATCH) + | srwi RD, RD, 1 + | // Traces on PPC don't store the trace number, so use 0. + | stw ZERO, DISPATCH_GL(vmstate)(DISPATCH) + | lwzx TRACE:TMP2, TMP1, RD + | clrso TMP1 + | lp TMP2, TRACE:TMP2->mcode + | stw BASE, DISPATCH_GL(jit_base)(DISPATCH) + | mtctr TMP2 + | addi JGL, DISPATCH, GG_DISP2G+32768 + | stw L, DISPATCH_GL(tmpbuf.L)(DISPATCH) + | bctr + |.endif + break; + + case BC_JMP: + | // RA = base*8 (only used by trace recorder), RD = target + | branch_RD + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: + |.if JIT + | hotcall + |.endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. 
+ break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 + | lwz TMP2, L->maxstack + | lbz TMP1, -4+PC2PROTO(numparams)(PC) + | lwz KBASE, -4+PC2PROTO(k)(PC) + | cmplw RA, TMP2 + | slwi TMP1, TMP1, 3 + | bgt ->vm_growstack_l + if (op != BC_JFUNCF) { + | ins_next1 + } + |2: + | cmplw NARGS8:RC, TMP1 // Check for missing parameters. + | blt >3 + if (op == BC_JFUNCF) { + | decode_RD8 RD, INS + | b =>BC_JLOOP + } else { + | ins_next2 + } + | + |3: // Clear missing parameters. + | stwx TISNIL, BASE, NARGS8:RC + | addi NARGS8:RC, NARGS8:RC, 8 + | b <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | NYI // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 + | lwz TMP2, L->maxstack + | add TMP1, BASE, RC + | add TMP0, RA, RC + | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC. + | addi TMP3, RC, 8+FRAME_VARG + | lwz KBASE, -4+PC2PROTO(k)(PC) + | cmplw TMP0, TMP2 + | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG. + | bge ->vm_growstack_l + | lbz TMP2, -4+PC2PROTO(numparams)(PC) + | mr RA, BASE + | mr RC, TMP1 + | ins_next1 + | cmpwi TMP2, 0 + | addi BASE, TMP1, 8 + | beq >3 + |1: + | cmplw RA, RC // Less args than parameters? + | lwz TMP0, 0(RA) + | lwz TMP3, 4(RA) + | bge >4 + | stw TISNIL, 0(RA) // Clear old fixarg slot (help the GC). + | addi RA, RA, 8 + |2: + | addic. TMP2, TMP2, -1 + | stw TMP0, 8(TMP1) + | stw TMP3, 12(TMP1) + | addi TMP1, TMP1, 8 + | bne <1 + |3: + | ins_next2 + | + |4: // Clear missing parameters. + | li TMP0, LJ_TNIL + | b <2 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8 + if (op == BC_FUNCC) { + | lp RD, CFUNC:RB->f + } else { + | lp RD, DISPATCH_GL(wrapf)(DISPATCH) + } + | add TMP1, RA, NARGS8:RC + | lwz TMP2, L->maxstack + | .toc lp TMP3, 0(RD) + | add RC, BASE, NARGS8:RC + | stp BASE, L->base + | cmplw TMP1, TMP2 + | stp RC, L->top + | li_vmstate C + |.if TOC + | mtctr TMP3 + |.else + | mtctr RD + |.endif + if (op == BC_FUNCCW) { + | lp CARG2, CFUNC:RB->f + } + | mr CARG1, L + | bgt ->vm_growstack_c // Need to grow stack. + | .toc lp TOCREG, TOC_OFS(RD) + | .tocenv lp ENVREG, ENV_OFS(RD) + | st_vmstate + | bctrl // (lua_State *L [, lua_CFunction f]) + | // Returns nresults. + | lp BASE, L->base + | .toc ld TOCREG, SAVE_TOC + | slwi RD, CRET1, 3 + | lp TMP1, L->top + | li_vmstate INTERP + | lwz PC, FRAME_PC(BASE) // Fetch PC of caller. + | stw L, DISPATCH_GL(cur_L)(DISPATCH) + | sub RA, TMP1, RD // RA = L->top - nresults*8 + | st_vmstate + | b ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ + int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" + "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n", + fcofs, CFRAME_SIZE); + for (i = 14; i <= 31; i++) + fprintf(ctx->fp, + "\t.byte %d\n\t.uleb128 %d\n" + "\t.byte %d\n\t.uleb128 %d\n", + 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i)); + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .Lframe0\n" +#if LJ_TARGET_PS3 + "\t.long .lj_vm_ffi_call\n" +#else + "\t.long lj_vm_ffi_call\n" +#endif + "\t.long %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" + "\t.byte 0x8e\n\t.uleb128 2\n" + "\t.byte 0xd\n\t.uleb128 0xe\n" + "\t.align 2\n" + ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); +#endif +#if !LJ_NO_UNWIND + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE2:\n" + "\t.long .LEFDE2-.LASFDE2\n" + ".LASFDE2:\n" + "\t.long .LASFDE2-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" + "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n", + fcofs, CFRAME_SIZE); + for (i = 14; i <= 31; i++) + fprintf(ctx->fp, + "\t.byte %d\n\t.uleb128 %d\n" + "\t.byte %d\n\t.uleb128 %d\n", + 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i)); + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE2:\n\n"); +#if LJ_HASFFI + fprintf(ctx->fp, + ".Lframe2:\n" + "\t.long .LECIE2-.LSCIE2\n" + ".LSCIE2:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.uleb128 1\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE2:\n\n"); + fprintf(ctx->fp, + ".LSFDE3:\n" + "\t.long .LEFDE3-.LASFDE3\n" + ".LASFDE3:\n" + "\t.long .LASFDE3-.Lframe2\n" + "\t.long lj_vm_ffi_call-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n" + "\t.byte 0x8e\n\t.uleb128 2\n" + "\t.byte 0xd\n\t.uleb128 0xe\n" + "\t.align 2\n" + ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); +#endif +#endif + break; + default: + break; + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/vm_x64.dasc b/Build/source/libs/luajit/LuaJIT-src/src/vm_x64.dasc new file mode 100644 index 00000000000..e7e990ae27f --- /dev/null +++ 
b/Build/source/libs/luajit/LuaJIT-src/src/vm_x64.dasc @@ -0,0 +1,4902 @@ +|// Low-level VM code for x64 CPUs in LJ_GC64 mode. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h +| +|.arch x64 +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|//----------------------------------------------------------------------- +| +|.if WIN +|.define X64WIN, 1 // Windows/x64 calling conventions. +|.endif +| +|// Fixed register assignments for the interpreter. +|// This is very fragile and has many dependencies. Caveat emptor. +|.define BASE, rdx // Not C callee-save, refetched anyway. +|.if X64WIN +|.define KBASE, rdi // Must be C callee-save. +|.define PC, rsi // Must be C callee-save. +|.define DISPATCH, rbx // Must be C callee-save. +|.define KBASEd, edi +|.define PCd, esi +|.define DISPATCHd, ebx +|.else +|.define KBASE, r15 // Must be C callee-save. +|.define PC, rbx // Must be C callee-save. +|.define DISPATCH, r14 // Must be C callee-save. +|.define KBASEd, r15d +|.define PCd, ebx +|.define DISPATCHd, r14d +|.endif +| +|.define RA, rcx +|.define RAd, ecx +|.define RAH, ch +|.define RAL, cl +|.define RB, rbp // Must be rbp (C callee-save). +|.define RBd, ebp +|.define RC, rax // Must be rax. +|.define RCd, eax +|.define RCW, ax +|.define RCH, ah +|.define RCL, al +|.define OP, RBd +|.define RD, RC +|.define RDd, RCd +|.define RDW, RCW +|.define RDL, RCL +|.define TMPR, r10 +|.define TMPRd, r10d +|.define ITYPE, r11 +|.define ITYPEd, r11d +| +|.if X64WIN +|.define CARG1, rcx // x64/WIN64 C call arguments. +|.define CARG2, rdx +|.define CARG3, r8 +|.define CARG4, r9 +|.define CARG1d, ecx +|.define CARG2d, edx +|.define CARG3d, r8d +|.define CARG4d, r9d +|.else +|.define CARG1, rdi // x64/POSIX C call arguments. +|.define CARG2, rsi +|.define CARG3, rdx +|.define CARG4, rcx +|.define CARG5, r8 +|.define CARG6, r9 +|.define CARG1d, edi +|.define CARG2d, esi +|.define CARG3d, edx +|.define CARG4d, ecx +|.define CARG5d, r8d +|.define CARG6d, r9d +|.endif +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS, int +|.type TRACE, GCtrace +|.type SBUF, SBuf +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|//----------------------------------------------------------------------- +|.if X64WIN // x64/Windows stack layout +| +|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). +|.macro saveregs_ +| push rdi; push rsi; push rbx +| sub rsp, CFRAME_SPACE +|.endmacro +|.macro saveregs +| push rbp; saveregs_ +|.endmacro +|.macro restoreregs +| add rsp, CFRAME_SPACE +| pop rbx; pop rsi; pop rdi; pop rbp +|.endmacro +| +|.define SAVE_CFRAME, aword [rsp+aword*13] +|.define SAVE_PC, aword [rsp+aword*12] +|.define SAVE_L, aword [rsp+aword*11] +|.define SAVE_ERRF, dword [rsp+dword*21] +|.define SAVE_NRES, dword [rsp+dword*20] +|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter +|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. 
+|.define SAVE_R4, aword [rsp+aword*8] +|.define SAVE_R3, aword [rsp+aword*7] +|.define SAVE_R2, aword [rsp+aword*6] +|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. +|.define ARG5, aword [rsp+aword*4] +|.define CSAVE_4, aword [rsp+aword*3] +|.define CSAVE_3, aword [rsp+aword*2] +|.define CSAVE_2, aword [rsp+aword*1] +|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter. +|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee +| +|.define ARG5d, dword [rsp+dword*8] +|.define TMP1, ARG5 // TMP1 overlaps ARG5 +|.define TMP1d, ARG5d +|.define TMP1hi, dword [rsp+dword*9] +|.define MULTRES, TMP1d // MULTRES overlaps TMP1d. +| +|//----------------------------------------------------------------------- +|.else // x64/POSIX stack layout +| +|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). +|.macro saveregs_ +| push rbx; push r15; push r14 +|.if NO_UNWIND +| push r13; push r12 +|.endif +| sub rsp, CFRAME_SPACE +|.endmacro +|.macro saveregs +| push rbp; saveregs_ +|.endmacro +|.macro restoreregs +| add rsp, CFRAME_SPACE +|.if NO_UNWIND +| pop r12; pop r13 +|.endif +| pop r14; pop r15; pop rbx; pop rbp +|.endmacro +| +|//----- 16 byte aligned, +|.if NO_UNWIND +|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter. +|.define SAVE_R4, aword [rsp+aword*10] +|.define SAVE_R3, aword [rsp+aword*9] +|.define SAVE_R2, aword [rsp+aword*8] +|.define SAVE_R1, aword [rsp+aword*7] +|.define SAVE_RU2, aword [rsp+aword*6] +|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves. +|.else +|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. +|.define SAVE_R4, aword [rsp+aword*8] +|.define SAVE_R3, aword [rsp+aword*7] +|.define SAVE_R2, aword [rsp+aword*6] +|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. +|.endif +|.define SAVE_CFRAME, aword [rsp+aword*4] +|.define SAVE_PC, aword [rsp+aword*3] +|.define SAVE_L, aword [rsp+aword*2] +|.define SAVE_ERRF, dword [rsp+dword*3] +|.define SAVE_NRES, dword [rsp+dword*2] +|.define TMP1, aword [rsp] //<-- rsp while in interpreter. +|//----- 16 byte aligned +| +|.define TMP1d, dword [rsp] +|.define TMP1hi, dword [rsp+dword*1] +|.define MULTRES, TMP1d // MULTRES overlaps TMP1d. +| +|.endif +| +|//----------------------------------------------------------------------- +| +|// Instruction headers. +|.macro ins_A; .endmacro +|.macro ins_AD; .endmacro +|.macro ins_AJ; .endmacro +|.macro ins_ABC; movzx RBd, RCH; movzx RCd, RCL; .endmacro +|.macro ins_AB_; movzx RBd, RCH; .endmacro +|.macro ins_A_C; movzx RCd, RCL; .endmacro +|.macro ins_AND; not RD; .endmacro +| +|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster). +|.macro ins_NEXT +| mov RCd, [PC] +| movzx RAd, RCH +| movzx OP, RCL +| add PC, 4 +| shr RCd, 16 +| jmp aword [DISPATCH+OP*8] +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| // Around 10%-30% slower on Core2, a lot more slower on P4. +| .macro ins_next +| jmp ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. 
+|.macro ins_callt +| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-8] = PC +| mov PC, LFUNC:RB->pc +| mov RAd, [PC] +| movzx OP, RAL +| movzx RAd, RAH +| add PC, 4 +| jmp aword [DISPATCH+OP*8] +|.endmacro +| +|.macro ins_call +| // BASE = new base, RB = LFUNC, RD = nargs+1 +| mov [BASE-8], PC +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to clear or set tags. +|.macro cleartp, reg; shl reg, 17; shr reg, 17; .endmacro +|.macro settp, reg, tp +| mov64 ITYPE, ((int64_t)tp<<47) +| or reg, ITYPE +|.endmacro +|.macro settp, dst, reg, tp +| mov64 dst, ((int64_t)tp<<47) +| or dst, reg +|.endmacro +|.macro setint, reg +| settp reg, LJ_TISNUM +|.endmacro +|.macro setint, dst, reg +| settp dst, reg, LJ_TISNUM +|.endmacro +| +|// Macros to test operand types. +|.macro checktp_nc, reg, tp, target +| mov ITYPE, reg +| sar ITYPE, 47 +| cmp ITYPEd, tp +| jne target +|.endmacro +|.macro checktp, reg, tp, target +| mov ITYPE, reg +| cleartp reg +| sar ITYPE, 47 +| cmp ITYPEd, tp +| jne target +|.endmacro +|.macro checktptp, src, tp, target +| mov ITYPE, src +| sar ITYPE, 47 +| cmp ITYPEd, tp +| jne target +|.endmacro +|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro +|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro +|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro +| +|.macro checknumx, reg, target, jump +| mov ITYPE, reg +| sar ITYPE, 47 +| cmp ITYPEd, LJ_TISNUM +| jump target +|.endmacro +|.macro checkint, reg, target; checknumx reg, target, jne; .endmacro +|.macro checkinttp, src, target; checknumx src, target, jne; .endmacro +|.macro checknum, reg, target; checknumx reg, target, jae; .endmacro +|.macro checknumtp, src, target; checknumx src, target, jae; .endmacro +|.macro checknumber, src, target; checknumx src, target, ja; .endmacro +| +|.macro mov_false, reg; mov64 reg, (int64_t)~((uint64_t)1<<47); .endmacro +|.macro mov_true, reg; mov64 reg, (int64_t)~((uint64_t)2<<47); .endmacro +| +|// These operands must be used with movzx. +|.define PC_OP, byte [PC-4] +|.define PC_RA, byte [PC-3] +|.define PC_RB, byte [PC-1] +|.define PC_RC, byte [PC-2] +|.define PC_RD, word [PC-2] +| +|.macro branchPC, reg +| lea PC, [PC+reg*4-BCBIAS_J*4] +|.endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|// Decrement hashed hotcount and trigger trace recorder if zero. +|.macro hotloop, reg +| mov reg, PCd +| shr reg, 1 +| and reg, HOTCOUNT_PCMASK +| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP +| jb ->vm_hotloop +|.endmacro +| +|.macro hotcall, reg +| mov reg, PCd +| shr reg, 1 +| and reg, HOTCOUNT_PCMASK +| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL +| jb ->vm_hotcall +|.endmacro +| +|// Set current VM state. +|.macro set_vmstate, st +| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st +|.endmacro +| +|.macro fpop1; fstp st1; .endmacro +| +|// Synthesize SSE FP constants. +|.macro sseconst_abs, reg, tmp // Synthesize abs mask. +| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp +|.endmacro +| +|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const. +| mov64 tmp, U64x(val,00000000); movd reg, tmp +|.endmacro +| +|.macro sseconst_sign, reg, tmp // Synthesize sign mask. 
+| sseconst_hi reg, tmp, 80000000 +|.endmacro +|.macro sseconst_1, reg, tmp // Synthesize 1.0. +| sseconst_hi reg, tmp, 3ff00000 +|.endmacro +|.macro sseconst_m1, reg, tmp // Synthesize -1.0. +| sseconst_hi reg, tmp, bff00000 +|.endmacro +|.macro sseconst_2p52, reg, tmp // Synthesize 2^52. +| sseconst_hi reg, tmp, 43300000 +|.endmacro +|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51. +| sseconst_hi reg, tmp, 43380000 +|.endmacro +| +|// Move table write barrier back. Overwrites reg. +|.macro barrierback, tab, reg +| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab) +| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)] +| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab +| mov tab->gclist, reg +|.endmacro +| +|//----------------------------------------------------------------------- + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | test PCd, FRAME_P + | jz ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | and PC, -8 + | sub BASE, PC // Restore caller base. + | lea RA, [RA+PC-8] // Rebase RA and prepend one result. + | mov PC, [BASE-8] // Fetch PC of previous frame. + | // Prepending may overwrite the pcall frame, so do it at the end. + | mov_true ITYPE + | mov aword [BASE+RA], ITYPE // Prepend true to results. + | + |->vm_returnc: + | add RDd, 1 // RD = nresults+1 + | jz ->vm_unwind_yield + | mov MULTRES, RDd + | test PC, FRAME_TYPE + | jz ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return + | xor PC, FRAME_C + | test PCd, FRAME_TYPE + | jnz ->vm_returnp + | + | // Return to C. + | set_vmstate C + | and PC, -8 + | sub PC, BASE + | neg PC // Previous base = BASE - delta. + | + | sub RDd, 1 + | jz >2 + |1: // Move results down. + | mov RB, [BASE+RA] + | mov [BASE-16], RB + | add BASE, 8 + | sub RDd, 1 + | jnz <1 + |2: + | mov L:RB, SAVE_L + | mov L:RB->base, PC + |3: + | mov RDd, MULTRES + | mov RAd, SAVE_NRES // RA = wanted nresults+1 + |4: + | cmp RAd, RDd + | jne >6 // More/less results wanted? + |5: + | sub BASE, 16 + | mov L:RB->top, BASE + | + |->vm_leave_cp: + | mov RA, SAVE_CFRAME // Restore previous C frame. + | mov L:RB->cframe, RA + | xor eax, eax // Ok return status for vm_pcall. + | + |->vm_leave_unw: + | restoreregs + | ret + | + |6: + | jb >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | cmp BASE, L:RB->maxstack + | ja >8 + | mov aword [BASE-16], LJ_TNIL + | add BASE, 8 + | add RDd, 1 + | jmp <4 + | + |7: // Less results wanted. + | test RAd, RAd + | jz <5 // But check for LUA_MULTRET+1. + | sub RA, RD // Negative result! + | lea BASE, [BASE+RA*8] // Correct top. + | jmp <5 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | mov L:RB->top, BASE // Save current top held in BASE (yes). + | mov MULTRES, RDd // Need to fill only remainder with nil. 
+ | mov CARG2d, RAd + | mov CARG1, L:RB + | call extern lj_state_growstack // (lua_State *L, int n) + | mov BASE, L:RB->top // Need the (realloced) L->top in BASE. + | jmp <3 + | + |->vm_unwind_yield: + | mov al, LUA_YIELD + | jmp ->vm_unwind_c_eh + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + | mov eax, CARG2d // Error return status for vm_pcall. + | mov rsp, CARG1 + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | mov L:RB, SAVE_L + | mov GL:RB, L:RB->glref + | mov dword GL:RB->vmstate, ~LJ_VMST_C + | jmp ->vm_leave_unw + | + |->vm_unwind_rethrow: + |.if not X64WIN + | mov CARG1, SAVE_L + | mov CARG2d, eax + | restoreregs + | jmp extern lj_err_throw // (lua_State *L, int errcode) + |.endif + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + | and CARG1, CFRAME_RAWMASK + | mov rsp, CARG1 + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | mov L:RB, SAVE_L + | mov RDd, 1+1 // Really 1+2 results, incr. later. + | mov BASE, L:RB->base + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | mov PC, [BASE-8] // Fetch PC of previous frame. + | mov_false RA + | mov RB, [BASE] + | mov [BASE-16], RA // Prepend false to error message. + | mov [BASE-8], RB + | mov RA, -16 // Results start at BASE+RA = BASE-16. + | set_vmstate INTERP + | jmp ->vm_returnc // Increments RD/MULTRES and returns. + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | mov CARG2d, LUA_MINSTACK + | jmp >2 + | + |->vm_growstack_v: // Grow stack for vararg Lua function. + | sub RD, 8 + | jmp >1 + | + |->vm_growstack_f: // Grow stack for fixarg Lua function. + | // BASE = new base, RD = nargs+1, RB = L, PC = first PC + | lea RD, [BASE+NARGS:RD*8-8] + |1: + | movzx RAd, byte [PC-4+PC2PROTO(framesize)] + | add PC, 4 // Must point after first instruction. + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov SAVE_PC, PC + | mov CARG2, RA + |2: + | // RB = L, L->base = new base, L->top = top + | mov CARG1, L:RB + | call extern lj_state_growstack // (lua_State *L, int n) + | mov BASE, L:RB->base + | mov RD, L:RB->top + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | sub RD, BASE + | shr RDd, 3 + | add NARGS:RDd, 1 + | // BASE = new base, RB = LFUNC, RD = nargs+1 + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | mov L:RB, CARG1 // Caveat: CARG1 may be RA. + | mov SAVE_L, CARG1 + | mov RA, CARG2 + | mov PCd, FRAME_CP + | xor RDd, RDd + | lea KBASE, [esp+CFRAME_RESUME] + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | mov SAVE_PC, RD // Any value outside of bytecode is ok. + | mov SAVE_CFRAME, RD + | mov SAVE_NRES, RDd + | mov SAVE_ERRF, RDd + | mov L:RB->cframe, KBASE + | cmp byte L:RB->status, RDL + | je >2 // Initial resume (like a call). + | + | // Resume after yield (like a return). 
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | mov byte L:RB->status, RDL + | mov BASE, L:RB->base + | mov RD, L:RB->top + | sub RD, RA + | shr RDd, 3 + | add RDd, 1 // RD = nresults+1 + | sub RA, BASE // RA = resultofs + | mov PC, [BASE-8] + | mov MULTRES, RDd + | test PCd, FRAME_TYPE + | jz ->BC_RET_Z + | jmp ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | mov PCd, FRAME_CP + | mov SAVE_ERRF, CARG4d + | jmp >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | mov PCd, FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + | mov SAVE_NRES, CARG3d + | mov L:RB, CARG1 // Caveat: CARG1 may be RA. + | mov SAVE_L, CARG1 + | mov RA, CARG2 + | + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASE + | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. + | add DISPATCH, GG_G2DISP + | mov L:RB->cframe, rsp + | + |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype). + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | mov BASE, L:RB->base // BASE = old base (used in vmeta_call). + | add PC, RA + | sub PC, BASE // PC = frame delta + frame type + | + | mov RD, L:RB->top + | sub RD, RA + | shr NARGS:RDd, 3 + | add NARGS:RDd, 1 // RD = nargs+1 + | + |->vm_call_dispatch: + | mov LFUNC:RB, [RA-16] + | checkfunc LFUNC:RB, ->vmeta_call // Ensure KBASE defined and != BASE. + | + |->vm_call_dispatch_f: + | mov BASE, RA + | ins_call + | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | mov L:RB, CARG1 // Caveat: CARG1 may be RA. + | mov SAVE_L, CARG1 + | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. + | + | mov KBASE, L:RB->stack // Compute -savestack(L, L->top). + | sub KBASE, L:RB->top + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | mov SAVE_ERRF, 0 // No error function. + | mov SAVE_NRES, KBASEd // Neg. delta means cframe w/o frame. + | add DISPATCH, GG_G2DISP + | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe). + | + | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASE + | mov L:RB->cframe, rsp + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | + | call CARG4 // (lua_State *L, lua_CFunction func, void *ud) + | // TValue * (new base) or NULL returned in eax (RC). + | test RC, RC + | jz ->vm_leave_cp // No base? Just remove C frame. + | mov RA, RC + | mov PCd, FRAME_CP + | jmp <2 // Else continue with the call. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES) + | add RA, BASE + | and PC, -8 + | mov RB, BASE + | sub BASE, PC // Restore caller BASE. + | mov aword [RA+RD*8-8], LJ_TNIL // Ensure one valid arg. + | mov RC, RA // ... in [RC] + | mov PC, [RB-24] // Restore PC from [cont|PC]. + | mov RA, qword [RB-32] // May be negative on WIN64 with debug. 
+ |.if FFI + | cmp RA, 1 + | jbe >1 + |.endif + | mov LFUNC:KBASE, [BASE-16] + | cleartp LFUNC:KBASE + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | // BASE = base, RC = result, RB = meta base + | jmp RA // Jump to continuation. + | + |.if FFI + |1: + | je ->cont_ffi_callback // cont = 1: return from FFI callback. + | // cont = 0: Tail call from C function. + | sub RB, BASE + | shr RBd, 3 + | lea RDd, [RBd-3] + | jmp ->vm_call_tail + |.endif + | + |->cont_cat: // BASE = base, RC = result, RB = mbase + | movzx RAd, PC_RB + | sub RB, 32 + | lea RA, [BASE+RA*8] + | sub RA, RB + | je ->cont_ra + | neg RA + | shr RAd, 3 + |.if X64WIN + | mov CARG3d, RAd + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE + | mov RC, [RC] + | mov [RB], RC + | mov CARG2, RB + |.else + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE + | mov CARG3d, RAd + | mov RA, [RC] + | mov [RB], RA + | mov CARG2, RB + |.endif + | jmp ->BC_CAT_Z + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets: + | settp STR:RC, LJ_TSTR // STR:RC = GCstr * + | mov TMP1, STR:RC + | lea RC, TMP1 + | cmp PC_OP, BC_GGET + | jne >1 + | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab * + | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. + | mov [RB], TAB:RA + | jmp >2 + | + |->vmeta_tgetb: + | movzx RCd, PC_RC + |.if DUALNUM + | setint RC + | mov TMP1, RC + |.else + | cvtsi2sd xmm0, RCd + | movsd TMP1, xmm0 + |.endif + | lea RC, TMP1 + | jmp >1 + | + |->vmeta_tgetv: + | movzx RCd, PC_RC // Reload TValue *k from RC. + | lea RC, [BASE+RC*8] + |1: + | movzx RBd, PC_RB // Reload TValue *t from RB. + | lea RB, [BASE+RB*8] + |2: + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE. + | mov CARG2, RB + | mov CARG3, RC + | mov L:RB, L:CARG1 + | mov SAVE_PC, PC + | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // TValue * (finished) or NULL (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz >3 + |->cont_ra: // BASE = base, RC = result + | movzx RAd, PC_RA + | mov RB, [RC] + | mov [BASE+RA*8], RB + | ins_next + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | mov RA, L:RB->top + | mov [RA-24], PC // [cont|PC] + | lea PC, [RA+FRAME_CONT] + | sub PC, BASE + | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here. + | mov NARGS:RDd, 2+1 // 2 args for func(t, k). + | cleartp LFUNC:RB + | jmp ->vm_call_dispatch_f + | + |->vmeta_tgetr: + | mov CARG1, TAB:RB + | mov RB, BASE // Save BASE. + | mov CARG2d, RCd // Caveat: CARG2 == BASE + | call extern lj_tab_getinth // (GCtab *t, int32_t key) + | // cTValue * or NULL returned in eax (RC). + | movzx RAd, PC_RA + | mov BASE, RB // Restore BASE. + | test RC, RC + | jnz ->BC_TGETR_Z + | mov ITYPE, LJ_TNIL + | jmp ->BC_TGETR2_Z + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets: + | settp STR:RC, LJ_TSTR // STR:RC = GCstr * + | mov TMP1, STR:RC + | lea RC, TMP1 + | cmp PC_OP, BC_GSET + | jne >1 + | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab * + | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. + | mov [RB], TAB:RA + | jmp >2 + | + |->vmeta_tsetb: + | movzx RCd, PC_RC + |.if DUALNUM + | setint RC + | mov TMP1, RC + |.else + | cvtsi2sd xmm0, RCd + | movsd TMP1, xmm0 + |.endif + | lea RC, TMP1 + | jmp >1 + | + |->vmeta_tsetv: + | movzx RCd, PC_RC // Reload TValue *k from RC. 
+ | lea RC, [BASE+RC*8] + |1: + | movzx RBd, PC_RB // Reload TValue *t from RB. + | lea RB, [BASE+RB*8] + |2: + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE. + | mov CARG2, RB + | mov CARG3, RC + | mov L:RB, L:CARG1 + | mov SAVE_PC, PC + | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // TValue * (finished) or NULL (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz >3 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | movzx RAd, PC_RA + | mov RB, [BASE+RA*8] + | mov [RC], RB + |->cont_nop: // BASE = base, (RC = result) + | ins_next + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | mov RA, L:RB->top + | mov [RA-24], PC // [cont|PC] + | movzx RCd, PC_RA + | // Copy value to third argument. + | mov RB, [BASE+RC*8] + | mov [RA+16], RB + | lea PC, [RA+FRAME_CONT] + | sub PC, BASE + | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here. + | mov NARGS:RDd, 3+1 // 3 args for func(t, k, v). + | cleartp LFUNC:RB + | jmp ->vm_call_dispatch_f + | + |->vmeta_tsetr: + |.if X64WIN + | mov L:CARG1, SAVE_L + | mov CARG3d, RCd + | mov L:CARG1->base, BASE + | xchg CARG2, TAB:RB // Caveat: CARG2 == BASE. + |.else + | mov L:CARG1, SAVE_L + | mov CARG2, TAB:RB + | mov L:CARG1->base, BASE + | mov RB, BASE // Save BASE. + | mov CARG3d, RCd // Caveat: CARG3 == BASE. + |.endif + | mov SAVE_PC, PC + | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) + | // TValue * returned in eax (RC). + | movzx RAd, PC_RA + | mov BASE, RB // Restore BASE. + | jmp ->BC_TSETR_Z + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | movzx RDd, PC_RD + | movzx RAd, PC_RA + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2/CARG3 == BASE. + |.if X64WIN + | lea CARG3, [BASE+RD*8] + | lea CARG2, [BASE+RA*8] + |.else + | lea CARG2, [BASE+RA*8] + | lea CARG3, [BASE+RD*8] + |.endif + | mov CARG1, L:RB // Caveat: CARG1/CARG4 == RA. + | movzx CARG4d, PC_OP + | mov SAVE_PC, PC + | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // 0/1 or TValue * (metamethod) returned in eax (RC). + |3: + | mov BASE, L:RB->base + | cmp RC, 1 + | ja ->vmeta_binop + |4: + | lea PC, [PC+4] + | jb >6 + |5: + | movzx RDd, PC_RD + | branchPC RD + |6: + | ins_next + | + |->cont_condt: // BASE = base, RC = result + | add PC, 4 + | mov ITYPE, [RC] + | sar ITYPE, 47 + | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is true. + | jb <5 + | jmp <6 + | + |->cont_condf: // BASE = base, RC = result + | mov ITYPE, [RC] + | sar ITYPE, 47 + | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is false. + | jmp <4 + | + |->vmeta_equal: + | cleartp TAB:RD + | sub PC, 4 + |.if X64WIN + | mov CARG3, RD + | mov CARG4d, RBd + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2 == BASE. + | mov CARG2, RA + | mov CARG1, L:RB // Caveat: CARG1 == RA. + |.else + | mov CARG2, RA + | mov CARG4d, RBd // Caveat: CARG4 == RA. + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG3 == BASE. + | mov CARG3, RD + | mov CARG1, L:RB + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // 0/1 or TValue * (metamethod) returned in eax (RC). 
+ | jmp <3 + | + |->vmeta_equal_cd: + |.if FFI + | sub PC, 4 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov CARG1, L:RB + | mov CARG2d, dword [PC-4] + | mov SAVE_PC, PC + | call extern lj_meta_equal_cd // (lua_State *L, BCIns ins) + | // 0/1 or TValue * (metamethod) returned in eax (RC). + | jmp <3 + |.endif + | + |->vmeta_istype: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE. + | mov CARG2d, RAd + | mov CARG3d, RDd + | mov L:CARG1, L:RB + | mov SAVE_PC, PC + | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) + | mov BASE, L:RB->base + | jmp <6 + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vno: + |.if DUALNUM + | movzx RBd, PC_RB + | movzx RCd, PC_RC + |.endif + |->vmeta_arith_vn: + | lea RC, [KBASE+RC*8] + | jmp >1 + | + |->vmeta_arith_nvo: + |.if DUALNUM + | movzx RBd, PC_RB + | movzx RCd, PC_RC + |.endif + |->vmeta_arith_nv: + | lea TMPR, [KBASE+RC*8] + | lea RC, [BASE+RB*8] + | mov RB, TMPR + | jmp >2 + | + |->vmeta_unm: + | lea RC, [BASE+RD*8] + | mov RB, RC + | jmp >2 + | + |->vmeta_arith_vvo: + |.if DUALNUM + | movzx RBd, PC_RB + | movzx RCd, PC_RC + |.endif + |->vmeta_arith_vv: + | lea RC, [BASE+RC*8] + |1: + | lea RB, [BASE+RB*8] + |2: + | lea RA, [BASE+RA*8] + |.if X64WIN + | mov CARG3, RB + | mov CARG4, RC + | movzx RCd, PC_OP + | mov ARG5d, RCd + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2 == BASE. + | mov CARG2, RA + | mov CARG1, L:RB // Caveat: CARG1 == RA. + |.else + | movzx CARG5d, PC_OP + | mov CARG2, RA + | mov CARG4, RC // Caveat: CARG4 == RA. + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE // Caveat: CARG3 == BASE. + | mov CARG3, RB + | mov L:RB, L:CARG1 + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // NULL (finished) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = base, RC = new base, stack = cont/func/o1/o2 + | mov RA, RC + | sub RC, BASE + | mov [RA-24], PC // [cont|PC] + | lea PC, [RC+FRAME_CONT] + | mov NARGS:RDd, 2+1 // 2 args for func(o1, o2). + | jmp ->vm_call_dispatch + | + |->vmeta_len: + | movzx RDd, PC_RD + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | lea CARG2, [BASE+RD*8] // Caveat: CARG2 == BASE + | mov L:CARG1, L:RB + | mov SAVE_PC, PC + | call extern lj_meta_len // (lua_State *L, TValue *o) + | // NULL (retry) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base +#if LJ_52 + | test RC, RC + | jne ->vmeta_binop // Binop call for compatibility. + | movzx RDd, PC_RD + | mov TAB:CARG1, [BASE+RD*8] + | cleartp TAB:CARG1 + | jmp ->BC_LEN_Z +#else + | jmp ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call_ra: + | lea RA, [BASE+RA*8+16] + |->vmeta_call: // Resolve and call __call metamethod. + | // BASE = old base, RA = new base, RC = nargs+1, PC = return + | mov TMP1d, NARGS:RDd // Save RA, RC for us. + | mov RB, RA + |.if X64WIN + | mov L:TMPR, SAVE_L + | mov L:TMPR->base, BASE // Caveat: CARG2 is BASE. + | lea CARG2, [RA-16] + | lea CARG3, [RA+NARGS:RD*8-8] + | mov CARG1, L:TMPR // Caveat: CARG1 is RA. + |.else + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE // Caveat: CARG3 is BASE. 
+ | lea CARG2, [RA-16] + | lea CARG3, [RA+NARGS:RD*8-8] + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | mov RA, RB + | mov L:RB, SAVE_L + | mov BASE, L:RB->base + | mov NARGS:RDd, TMP1d + | mov LFUNC:RB, [RA-16] + | cleartp LFUNC:RB + | add NARGS:RDd, 1 + | // This is fragile. L->base must not move, KBASE must always be defined. + | cmp KBASE, BASE // Continue with CALLT if flag set. + | je ->BC_CALLT_Z + | mov BASE, RA + | ins_call // Otherwise call resolved metamethod. + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov CARG2, RA // Caveat: CARG2 == BASE + | mov L:CARG1, L:RB // Caveat: CARG1 == RA + | mov SAVE_PC, PC + | call extern lj_meta_for // (lua_State *L, TValue *base) + | mov BASE, L:RB->base + | mov RCd, [PC-4] + | movzx RAd, RCH + | movzx OP, RCL + | shr RCd, 16 + | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI. + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | cmp NARGS:RDd, 1+1; jb ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | cmp NARGS:RDd, 2+1; jb ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name, op + | .ffunc_1 name + | checknumtp [BASE], ->fff_fallback + | op xmm0, qword [BASE] + |.endmacro + | + |.macro .ffunc_n, name + | .ffunc_n name, movsd + |.endmacro + | + |.macro .ffunc_nn, name + | .ffunc_2 name + | checknumtp [BASE], ->fff_fallback + | checknumtp [BASE+8], ->fff_fallback + | movsd xmm0, qword [BASE] + | movsd xmm1, qword [BASE+8] + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses label 1. + |.macro ffgccheck + | mov RB, [DISPATCH+DISPATCH_GL(gc.total)] + | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)] + | jb >1 + | call ->fff_gcstep + |1: + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | mov ITYPE, [BASE] + | mov RB, ITYPE + | sar ITYPE, 47 + | cmp ITYPEd, LJ_TISTRUECOND; jae ->fff_fallback + | mov PC, [BASE-8] + | mov MULTRES, RDd + | mov RB, [BASE] + | mov [BASE-16], RB + | sub RDd, 2 + | jz >2 + | mov RA, BASE + |1: + | add RA, 8 + | mov RB, [RA] + | mov [RA-16], RB + | sub RDd, 1 + | jnz <1 + |2: + | mov RDd, MULTRES + | jmp ->fff_res_ + | + |.ffunc_1 type + | mov RC, [BASE] + | sar RC, 47 + | mov RBd, LJ_TISNUM + | cmp RCd, RBd + | cmovb RCd, RBd + | not RCd + |2: + | mov CFUNC:RB, [BASE-16] + | cleartp CFUNC:RB + | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))] + | mov PC, [BASE-8] + | settp STR:RC, LJ_TSTR + | mov [BASE-16], STR:RC + | jmp ->fff_res1 + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | mov TAB:RB, [BASE] + | mov PC, [BASE-8] + | checktab TAB:RB, >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | mov TAB:RB, TAB:RB->metatable + |2: + | test TAB:RB, TAB:RB + | mov aword [BASE-16], LJ_TNIL + | jz ->fff_res1 + | settp TAB:RC, TAB:RB, LJ_TTAB + | mov [BASE-16], TAB:RC // Store metatable as default result. 
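/* A minimal C sketch of the inline hash-part lookup that follows: the
 * __metatable string key selects a chain via hash & hmask, and the chain is
 * walked through ->next until the key matches or runs out (the code is
 * arranged for the expected not-found case, which keeps the default result).
 * The Node layout is simplified here; the real one lives in lj_obj.h. */
#include <stdint.h>
#include <stddef.h>

typedef struct NodeSketch {
  uint64_t val, key;                      /* boxed TValues in GC64 mode */
  struct NodeSketch *next;                /* hash chain */
} NodeSketch;

static uint64_t *hash_lookup_sketch(NodeSketch *node, uint32_t hmask,
                                    uint32_t hash, uint64_t boxed_key)
{
  NodeSketch *n = &node[hash & hmask];    /* and RAd, hash; imul #NODE; add node */
  do {
    if (n->key == boxed_key)              /* cmp NODE:RA->key, STR:RC */
      return &n->val;
  } while ((n = n->next) != NULL);        /* mov NODE:RA, NODE:RA->next */
  return NULL;                            /* not found: keep default result */
}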
+ | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+8*(GCROOT_MMNAME+MM_metatable)] + | mov RAd, TAB:RB->hmask + | and RAd, STR:RC->hash + | settp STR:RC, LJ_TSTR + | imul RAd, #NODE + | add NODE:RA, TAB:RB->node + |3: // Rearranged logic, because we expect _not_ to find the key. + | cmp NODE:RA->key, STR:RC + | je >5 + |4: + | mov NODE:RA, NODE:RA->next + | test NODE:RA, NODE:RA + | jnz <3 + | jmp ->fff_res1 // Not found, keep default result. + |5: + | mov RB, NODE:RA->val + | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value. + | mov [BASE-16], RB // Return value of mt.__metatable. + | jmp ->fff_res1 + | + |6: + | cmp ITYPEd, LJ_TUDATA; je <1 + | cmp ITYPEd, LJ_TISNUM; ja >7 + | mov ITYPEd, LJ_TISNUM + |7: + | not ITYPEd + | mov TAB:RB, [DISPATCH+ITYPE*8+DISPATCH_GL(gcroot[GCROOT_BASEMT])] + | jmp <2 + | + |.ffunc_2 setmetatable + | mov TAB:RB, [BASE] + | mov TAB:TMPR, TAB:RB + | checktab TAB:RB, ->fff_fallback + | // Fast path: no mt for table yet and not clearing the mt. + | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback + | mov TAB:RA, [BASE+8] + | checktab TAB:RA, ->fff_fallback + | mov TAB:RB->metatable, TAB:RA + | mov PC, [BASE-8] + | mov [BASE-16], TAB:TMPR // Return original table. + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jz >1 + | // Possible write barrier. Table is black, but skip iswhite(mt) check. + | barrierback TAB:RB, RC + |1: + | jmp ->fff_res1 + | + |.ffunc_2 rawget + |.if X64WIN + | mov TAB:RA, [BASE] + | checktab TAB:RA, ->fff_fallback + | mov RB, BASE // Save BASE. + | lea CARG3, [BASE+8] + | mov CARG2, TAB:RA // Caveat: CARG2 == BASE. + | mov CARG1, SAVE_L + |.else + | mov TAB:CARG2, [BASE] + | checktab TAB:CARG2, ->fff_fallback + | mov RB, BASE // Save BASE. + | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE. + | mov CARG1, SAVE_L + |.endif + | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // cTValue * returned in eax (RD). + | mov BASE, RB // Restore BASE. + | // Copy table slot. + | mov RB, [RD] + | mov PC, [BASE-8] + | mov [BASE-16], RB + | jmp ->fff_res1 + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument. + | mov RB, [BASE] + | checknumber RB, ->fff_fallback + | mov PC, [BASE-8] + | mov [BASE-16], RB + | jmp ->fff_res1 + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | mov PC, [BASE-8] + | mov STR:RB, [BASE] + | checktp_nc STR:RB, LJ_TSTR, >3 + | // A __tostring method in the string base metatable is ignored. + |2: + | mov [BASE-16], STR:RB + | jmp ->fff_res1 + |3: // Handle numbers inline, unless a number base metatable is present. + | cmp ITYPEd, LJ_TISNUM; ja ->fff_fallback_1 + | cmp aword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0 + | jne ->fff_fallback + | ffgccheck // Caveat: uses label 1. + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Add frame since C call can throw. + | mov SAVE_PC, PC // Redundant (but a defined value). + |.if not X64WIN + | mov CARG2, BASE // Otherwise: CARG2 == BASE + |.endif + | mov L:CARG1, L:RB + |.if DUALNUM + | call extern lj_strfmt_number // (lua_State *L, cTValue *o) + |.else + | call extern lj_strfmt_num // (lua_State *L, lua_Number *np) + |.endif + | // GCstr returned in eax (RD). 
+ | mov BASE, L:RB->base + | settp STR:RB, RD, LJ_TSTR + | jmp <2 + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc_1 next + | je >2 // Missing 2nd arg? + |1: + |.if X64WIN + | mov RA, [BASE] + | checktab RA, ->fff_fallback + |.else + | mov CARG2, [BASE] + | checktab CARG2, ->fff_fallback + |.endif + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Add frame since C call can throw. + | mov L:RB->top, BASE // Dummy frame length is ok. + | mov PC, [BASE-8] + |.if X64WIN + | lea CARG3, [BASE+8] + | mov CARG2, RA // Caveat: CARG2 == BASE. + | mov CARG1, L:RB + |.else + | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE. + | mov CARG1, L:RB + |.endif + | mov SAVE_PC, PC // Needed for ITERN fallback. + | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Flag returned in eax (RD). + | mov BASE, L:RB->base + | test RDd, RDd; jz >3 // End of traversal? + | // Copy key and value to results. + | mov RB, [BASE+8] + | mov RD, [BASE+16] + | mov [BASE-16], RB + | mov [BASE-8], RD + |->fff_res2: + | mov RDd, 1+2 + | jmp ->fff_res + |2: // Set missing 2nd arg to nil. + | mov aword [BASE+8], LJ_TNIL + | jmp <1 + |3: // End of traversal: return nil. + | mov aword [BASE-16], LJ_TNIL + | jmp ->fff_res1 + | + |.ffunc_1 pairs + | mov TAB:RB, [BASE] + | mov TMPR, TAB:RB + | checktab TAB:RB, ->fff_fallback +#if LJ_52 + | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback +#endif + | mov CFUNC:RD, [BASE-16] + | cleartp CFUNC:RD + | mov CFUNC:RD, CFUNC:RD->upvalue[0] + | settp CFUNC:RD, LJ_TFUNC + | mov PC, [BASE-8] + | mov [BASE-16], CFUNC:RD + | mov [BASE-8], TMPR + | mov aword [BASE], LJ_TNIL + | mov RDd, 1+3 + | jmp ->fff_res + | + |.ffunc_2 ipairs_aux + | mov TAB:RB, [BASE] + | checktab TAB:RB, ->fff_fallback + |.if DUALNUM + | mov RA, [BASE+8] + | checkint RA, ->fff_fallback + |.else + | checknumtp [BASE+8], ->fff_fallback + | movsd xmm0, qword [BASE+8] + |.endif + | mov PC, [BASE-8] + |.if DUALNUM + | add RAd, 1 + | setint ITYPE, RA + | mov [BASE-16], ITYPE + |.else + | sseconst_1 xmm1, TMPR + | addsd xmm0, xmm1 + | cvttsd2si RAd, xmm0 + | movsd qword [BASE-16], xmm0 + |.endif + | cmp RAd, TAB:RB->asize; jae >2 // Not in array part? + | mov RD, TAB:RB->array + | lea RD, [RD+RA*8] + |1: + | cmp aword [RD], LJ_TNIL; je ->fff_res0 + | // Copy array slot. + | mov RB, [RD] + | mov [BASE-8], RB + | jmp ->fff_res2 + |2: // Check for empty hash part first. Otherwise call C function. + | cmp dword TAB:RB->hmask, 0; je ->fff_res0 + |.if X64WIN + | mov TMPR, BASE + | mov CARG2d, RAd + | mov CARG1, TAB:RB + | mov RB, TMPR + |.else + | mov CARG1, TAB:RB + | mov RB, BASE // Save BASE. + | mov CARG2d, RAd // Caveat: CARG2 == BASE + |.endif + | call extern lj_tab_getinth // (GCtab *t, int32_t key) + | // cTValue * or NULL returned in eax (RD). 
+ | mov BASE, RB + | test RD, RD + | jnz <1 + |->fff_res0: + | mov RDd, 1+0 + | jmp ->fff_res + | + |.ffunc_1 ipairs + | mov TAB:RB, [BASE] + | mov TMPR, TAB:RB + | checktab TAB:RB, ->fff_fallback +#if LJ_52 + | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback +#endif + | mov CFUNC:RD, [BASE-16] + | cleartp CFUNC:RD + | mov CFUNC:RD, CFUNC:RD->upvalue[0] + | settp CFUNC:RD, LJ_TFUNC + | mov PC, [BASE-8] + | mov [BASE-16], CFUNC:RD + | mov [BASE-8], TMPR + |.if DUALNUM + | mov64 RD, ((int64_t)LJ_TISNUM<<47) + | mov [BASE], RD + |.else + | mov qword [BASE], 0 + |.endif + | mov RDd, 1+3 + | jmp ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc_1 pcall + | lea RA, [BASE+16] + | sub NARGS:RDd, 1 + | mov PCd, 16+FRAME_PCALL + |1: + | movzx RBd, byte [DISPATCH+DISPATCH_GL(hookmask)] + | shr RB, HOOK_ACTIVE_SHIFT + | and RB, 1 + | add PC, RB // Remember active hook before pcall. + | // Note: this does a (harmless) copy of the function to the PC slot, too. + | mov KBASE, RD + |2: + | mov RB, [RA+KBASE*8-24] + | mov [RA+KBASE*8-16], RB + | sub KBASE, 1 + | ja <2 + | jmp ->vm_call_dispatch + | + |.ffunc_2 xpcall + | mov LFUNC:RA, [BASE+8] + | checktp_nc LFUNC:RA, LJ_TFUNC, ->fff_fallback + | mov LFUNC:RB, [BASE] // Swap function and traceback. + | mov [BASE], LFUNC:RA + | mov [BASE+8], LFUNC:RB + | lea RA, [BASE+24] + | sub NARGS:RDd, 2 + | mov PCd, 24+FRAME_PCALL + | jmp <1 + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | mov L:RB, [BASE] + | cleartp L:RB + |.else + |.ffunc coroutine_wrap_aux + | mov CFUNC:RB, [BASE-16] + | cleartp CFUNC:RB + | mov L:RB, CFUNC:RB->upvalue[0].gcr + | cleartp L:RB + |.endif + | mov PC, [BASE-8] + | mov SAVE_PC, PC + | mov TMP1, L:RB + |.if resume + | checktptp [BASE], LJ_TTHREAD, ->fff_fallback + |.endif + | cmp aword L:RB->cframe, 0; jne ->fff_fallback + | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback + | mov RA, L:RB->top + | je >1 // Status != LUA_YIELD (i.e. 0)? + | cmp RA, L:RB->base // Check for presence of initial func. + | je ->fff_fallback + | mov PC, [RA-8] // Move initial function up. + | mov [RA], PC + | add RA, 8 + |1: + |.if resume + | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread). + |.else + | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1). + |.endif + | cmp PC, L:RB->maxstack; ja ->fff_fallback + | mov L:RB->top, PC + | + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + |.if resume + | add BASE, 8 // Keep resumed thread in stack for GC. + |.endif + | mov L:RB->top, BASE + |.if resume + | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move. + |.else + | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move. + |.endif + | sub RB, PC // Relative to PC. + | + | cmp PC, RA + | je >3 + |2: // Move args to coroutine. + | mov RC, [PC+RB] + | mov [PC-8], RC + | sub PC, 8 + | cmp PC, RA + | jne <2 + |3: + | mov CARG2, RA + | mov CARG1, TMP1 + | call ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | + | mov L:RB, SAVE_L + | mov L:PC, TMP1 + | mov BASE, L:RB->base + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | + | cmp eax, LUA_YIELD + | ja >8 + |4: + | mov RA, L:PC->base + | mov KBASE, L:PC->top + | mov L:PC->top, RA // Clear coroutine stack. + | mov PC, KBASE + | sub PC, RA + | je >6 // No results? + | lea RD, [BASE+PC] + | shr PCd, 3 + | cmp RD, L:RB->maxstack + | ja >9 // Need to grow stack? 
+ | + | mov RB, BASE + | sub RB, RA + |5: // Move results from coroutine. + | mov RD, [RA] + | mov [RA+RB], RD + | add RA, 8 + | cmp RA, KBASE + | jne <5 + |6: + |.if resume + | lea RDd, [PCd+2] // nresults+1 = 1 + true + results. + | mov_true ITYPE // Prepend true to results. + | mov [BASE-8], ITYPE + |.else + | lea RDd, [PCd+1] // nresults+1 = 1 + results. + |.endif + |7: + | mov PC, SAVE_PC + | mov MULTRES, RDd + |.if resume + | mov RA, -8 + |.else + | xor RAd, RAd + |.endif + | test PCd, FRAME_TYPE + | jz ->BC_RET_Z + | jmp ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | mov_false ITYPE // Prepend false to results. + | mov [BASE-8], ITYPE + | mov RA, L:PC->top + | sub RA, 8 + | mov L:PC->top, RA // Clear error from coroutine stack. + | // Copy error message. + | mov RD, [RA] + | mov [BASE], RD + | mov RDd, 1+2 // nresults+1 = 1 + false + error. + | jmp <7 + |.else + | mov CARG2, L:PC + | mov CARG1, L:RB + | call extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + | // Error function does not return. + |.endif + | + |9: // Handle stack expansion on return from yield. + | mov L:RA, TMP1 + | mov L:RA->top, KBASE // Undo coroutine stack clearing. + | mov CARG2, PC + | mov CARG1, L:RB + | call extern lj_state_growstack // (lua_State *L, int n) + | mov L:PC, TMP1 + | mov BASE, L:RB->base + | jmp <4 // Retry the stack move. + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | mov L:RB, SAVE_L + | test aword L:RB->cframe, CFRAME_RESUME + | jz ->fff_fallback + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB->top, RD + | xor RDd, RDd + | mov aword L:RB->cframe, RD + | mov al, LUA_YIELD + | mov byte L:RB->status, al + | jmp ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + | .ffunc_1 math_abs + | mov RB, [BASE] + |.if DUALNUM + | checkint RB, >3 + | cmp RBd, 0; jns ->fff_resi + | neg RBd; js >2 + |->fff_resbit: + |->fff_resi: + | setint RB + |->fff_resRB: + | mov PC, [BASE-8] + | mov [BASE-16], RB + | jmp ->fff_res1 + |2: + | mov64 RB, U64x(41e00000,00000000) // 2^31. + | jmp ->fff_resRB + |3: + | ja ->fff_fallback + |.else + | checknum RB, ->fff_fallback + |.endif + | shl RB, 1 + | shr RB, 1 + | mov PC, [BASE-8] + | mov [BASE-16], RB + | jmp ->fff_res1 + | + |.ffunc_n math_sqrt, sqrtsd + |->fff_resxmm0: + | mov PC, [BASE-8] + | movsd qword [BASE-16], xmm0 + | // fallthrough + | + |->fff_res1: + | mov RDd, 1+1 + |->fff_res: + | mov MULTRES, RDd + |->fff_res_: + | test PCd, FRAME_TYPE + | jnz >7 + |5: + | cmp PC_RB, RDL // More results expected? + | ja >6 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | movzx RAd, PC_RA + | neg RA + | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8 + | ins_next + | + |6: // Fill up results with nil. + | mov aword [BASE+RD*8-24], LJ_TNIL + | add RD, 1 + | jmp <5 + | + |7: // Non-standard return case. + | mov RA, -16 // Results start at BASE+RA = BASE-16. + | jmp ->vm_return + | + |.macro math_round, func + | .ffunc math_ .. func + |.if DUALNUM + | mov RB, [BASE] + | checknumx RB, ->fff_resRB, je + | ja ->fff_fallback + |.else + | checknumtp [BASE], ->fff_fallback + |.endif + | movsd xmm0, qword [BASE] + | call ->vm_ .. func .. 
_sse + |.if DUALNUM + | cvttsd2si RBd, xmm0 + | cmp RBd, 0x80000000 + | jne ->fff_resi + | cvtsi2sd xmm1, RBd + | ucomisd xmm0, xmm1 + | jp ->fff_resxmm0 + | je ->fff_resi + |.endif + | jmp ->fff_resxmm0 + |.endmacro + | + | math_round floor + | math_round ceil + | + |.ffunc math_log + | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument. + | checknumtp [BASE], ->fff_fallback + | movsd xmm0, qword [BASE] + | mov RB, BASE + | call extern log + | mov BASE, RB + | jmp ->fff_resxmm0 + | + |.macro math_extern, func + | .ffunc_n math_ .. func + | mov RB, BASE + | call extern func + | mov BASE, RB + | jmp ->fff_resxmm0 + |.endmacro + | + |.macro math_extern2, func + | .ffunc_nn math_ .. func + | mov RB, BASE + | call extern func + | mov BASE, RB + | jmp ->fff_resxmm0 + |.endmacro + | + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |.ffunc_2 math_ldexp + | checknumtp [BASE], ->fff_fallback + | checknumtp [BASE+8], ->fff_fallback + | fld qword [BASE+8] + | fld qword [BASE] + | fscale + | fpop1 + | mov PC, [BASE-8] + | fstp qword [BASE-16] + | jmp ->fff_res1 + | + |.ffunc_n math_frexp + | lea CARG1, TMP1 + | mov RB, BASE + | call extern frexp + | mov BASE, RB + | mov RBd, TMP1d + | mov PC, [BASE-8] + | movsd qword [BASE-16], xmm0 + |.if DUALNUM + | setint RB + | mov [BASE-8], RB + |.else + | cvtsi2sd xmm1, RBd + | movsd qword [BASE-8], xmm1 + |.endif + | mov RDd, 1+2 + | jmp ->fff_res + | + |.ffunc_n math_modf + | lea CARG1, [BASE-16] + | mov PC, [BASE-8] + | mov RB, BASE + | call extern modf + | mov BASE, RB + | mov PC, [BASE-8] + | movsd qword [BASE-8], xmm0 + | mov RDd, 1+2 + | jmp ->fff_res + | + |.macro math_minmax, name, cmovop, sseop + | .ffunc name + | mov RAd, 2 + |.if DUALNUM + | mov RB, [BASE] + | checkint RB, >4 + |1: // Handle integers. + | cmp RAd, RDd; jae ->fff_resRB + | mov TMPR, [BASE+RA*8-8] + | checkint TMPR, >3 + | cmp RBd, TMPRd + | cmovop RB, TMPR + | add RAd, 1 + | jmp <1 + |3: + | ja ->fff_fallback + | // Convert intermediate result to number and continue below. + | cvtsi2sd xmm0, RBd + | jmp >6 + |4: + | ja ->fff_fallback + |.else + | checknumtp [BASE], ->fff_fallback + |.endif + | + | movsd xmm0, qword [BASE] + |5: // Handle numbers or integers. + | cmp RAd, RDd; jae ->fff_resxmm0 + |.if DUALNUM + | mov RB, [BASE+RA*8-8] + | checknumx RB, >6, jb + | ja ->fff_fallback + | cvtsi2sd xmm1, RBd + | jmp >7 + |.else + | checknumtp [BASE+RA*8-8], ->fff_fallback + |.endif + |6: + | movsd xmm1, qword [BASE+RA*8-8] + |7: + | sseop xmm0, xmm1 + | add RAd, 1 + | jmp <5 + |.endmacro + | + | math_minmax math_min, cmovg, minsd + | math_minmax math_max, cmovl, maxsd + | + |//-- String library ----------------------------------------------------- + | + |.ffunc string_byte // Only handle the 1-arg case here. + | cmp NARGS:RDd, 1+1; jne ->fff_fallback + | mov STR:RB, [BASE] + | checkstr STR:RB, ->fff_fallback + | mov PC, [BASE-8] + | cmp dword STR:RB->len, 1 + | jb ->fff_res0 // Return no results for empty string. + | movzx RBd, byte STR:RB[1] + |.if DUALNUM + | jmp ->fff_resi + |.else + | cvtsi2sd xmm0, RBd; jmp ->fff_resxmm0 + |.endif + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | cmp NARGS:RDd, 1+1; jne ->fff_fallback // *Exactly* 1 arg. 
+ |.if DUALNUM + | mov RB, [BASE] + | checkint RB, ->fff_fallback + |.else + | checknumtp [BASE], ->fff_fallback + | cvttsd2si RBd, qword [BASE] + |.endif + | cmp RBd, 255; ja ->fff_fallback + | mov TMP1d, RBd + | mov TMPRd, 1 + | lea RD, TMP1 // Points to stack. Little-endian. + |->fff_newstr: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov CARG3d, TMPRd // Zero-extended to size_t. + | mov CARG2, RD + | mov CARG1, L:RB + | mov SAVE_PC, PC + | call extern lj_str_new // (lua_State *L, char *str, size_t l) + |->fff_resstr: + | // GCstr * returned in eax (RD). + | mov BASE, L:RB->base + | mov PC, [BASE-8] + | settp STR:RD, LJ_TSTR + | mov [BASE-16], STR:RD + | jmp ->fff_res1 + | + |.ffunc string_sub + | ffgccheck + | mov TMPRd, -1 + | cmp NARGS:RDd, 1+2; jb ->fff_fallback + | jna >1 + |.if DUALNUM + | mov TMPR, [BASE+16] + | checkint TMPR, ->fff_fallback + |.else + | checknumtp [BASE+16], ->fff_fallback + | cvttsd2si TMPRd, qword [BASE+16] + |.endif + |1: + | mov STR:RB, [BASE] + | checkstr STR:RB, ->fff_fallback + |.if DUALNUM + | mov ITYPE, [BASE+8] + | mov RAd, ITYPEd // Must clear hiword for lea below. + | sar ITYPE, 47 + | cmp ITYPEd, LJ_TISNUM + | jne ->fff_fallback + |.else + | checknumtp [BASE+8], ->fff_fallback + | cvttsd2si RAd, qword [BASE+8] + |.endif + | mov RCd, STR:RB->len + | cmp RCd, TMPRd // len < end? (unsigned compare) + | jb >5 + |2: + | test RAd, RAd // start <= 0? + | jle >7 + |3: + | sub TMPRd, RAd // start > end? + | jl ->fff_emptystr + | lea RD, [STR:RB+RAd+#STR-1] + | add TMPRd, 1 + |4: + | jmp ->fff_newstr + | + |5: // Negative end or overflow. + | jl >6 + | lea TMPRd, [TMPRd+RCd+1] // end = end+(len+1) + | jmp <2 + |6: // Overflow. + | mov TMPRd, RCd // end = len + | jmp <2 + | + |7: // Negative start or underflow. + | je >8 + | add RAd, RCd // start = start+(len+1) + | add RAd, 1 + | jg <3 // start > 0? + |8: // Underflow. + | mov RAd, 1 // start = 1 + | jmp <3 + | + |->fff_emptystr: // Range underflow. + | xor TMPRd, TMPRd // Zero length. Any ptr in RD is ok. + | jmp <4 + | + |.macro ffstring_op, name + | .ffunc_1 string_ .. name + | ffgccheck + |.if X64WIN + | mov STR:TMPR, [BASE] + | checkstr STR:TMPR, ->fff_fallback + |.else + | mov STR:CARG2, [BASE] + | checkstr STR:CARG2, ->fff_fallback + |.endif + | mov L:RB, SAVE_L + | lea SBUF:CARG1, [DISPATCH+DISPATCH_GL(tmpbuf)] + | mov L:RB->base, BASE + |.if X64WIN + | mov STR:CARG2, STR:TMPR // Caveat: CARG2 == BASE + |.endif + | mov RC, SBUF:CARG1->b + | mov SBUF:CARG1->L, L:RB + | mov SBUF:CARG1->p, RC + | mov SAVE_PC, PC + | call extern lj_buf_putstr_ .. name + | mov CARG1, rax + | call extern lj_buf_tostr + | jmp ->fff_resstr + |.endmacro + | + |ffstring_op reverse + |ffstring_op lower + |ffstring_op upper + | + |//-- Bit library -------------------------------------------------------- + | + |.macro .ffunc_bit, name, kind, fdef + | fdef name + |.if kind == 2 + | sseconst_tobit xmm1, RB + |.endif + |.if DUALNUM + | mov RB, [BASE] + | checkint RB, >1 + |.if kind > 0 + | jmp >2 + |.else + | jmp ->fff_resbit + |.endif + |1: + | ja ->fff_fallback + | movd xmm0, RB + |.else + | checknumtp [BASE], ->fff_fallback + | movsd xmm0, qword [BASE] + |.endif + |.if kind < 2 + | sseconst_tobit xmm1, RB + |.endif + | addsd xmm0, xmm1 + | movd RBd, xmm0 + |2: + |.endmacro + | + |.macro .ffunc_bit, name, kind + | .ffunc_bit name, kind, .ffunc_1 + |.endmacro + | + |.ffunc_bit bit_tobit, 0 + | jmp ->fff_resbit + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name, 2 + | mov TMPRd, NARGS:RDd // Save for fallback. 
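/* A minimal C sketch of the number-to-int32 conversion used by the bit.*
 * fast functions above (sseconst_tobit + addsd + movd): adding the bias
 * 2^52 + 2^51 positions the value so that the low 32 bits of the double's
 * bit pattern hold the operand reduced to a 32-bit integer.  Assumes strict
 * IEEE arithmetic in the default round-to-nearest mode (no -ffast-math);
 * the function name is an illustrative stand-in. */
#include <stdint.h>
#include <string.h>

static int32_t tobit_sketch(double n)
{
  double biased = n + 6755399441055744.0; /* 2^52 + 2^51 */
  uint64_t bits;
  memcpy(&bits, &biased, sizeof(bits));
  return (int32_t)(uint32_t)bits;         /* movd RBd, xmm0: low 32 bits */
}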
+ | lea RD, [BASE+NARGS:RD*8-16] + |1: + | cmp RD, BASE + | jbe ->fff_resbit + |.if DUALNUM + | mov RA, [RD] + | checkint RA, >2 + | ins RBd, RAd + | sub RD, 8 + | jmp <1 + |2: + | ja ->fff_fallback_bit_op + | movd xmm0, RA + |.else + | checknumtp [RD], ->fff_fallback_bit_op + | movsd xmm0, qword [RD] + |.endif + | addsd xmm0, xmm1 + | movd RAd, xmm0 + | ins RBd, RAd + | sub RD, 8 + | jmp <1 + |.endmacro + | + |.ffunc_bit_op bit_band, and + |.ffunc_bit_op bit_bor, or + |.ffunc_bit_op bit_bxor, xor + | + |.ffunc_bit bit_bswap, 1 + | bswap RBd + | jmp ->fff_resbit + | + |.ffunc_bit bit_bnot, 1 + | not RBd + |.if DUALNUM + | jmp ->fff_resbit + |.else + |->fff_resbit: + | cvtsi2sd xmm0, RBd + | jmp ->fff_resxmm0 + |.endif + | + |->fff_fallback_bit_op: + | mov NARGS:RDd, TMPRd // Restore for fallback + | jmp ->fff_fallback + | + |.macro .ffunc_bit_sh, name, ins + |.if DUALNUM + | .ffunc_bit name, 1, .ffunc_2 + | // Note: no inline conversion from number for 2nd argument! + | mov RA, [BASE+8] + | checkint RA, ->fff_fallback + |.else + | .ffunc_nn name + | sseconst_tobit xmm2, RB + | addsd xmm0, xmm2 + | addsd xmm1, xmm2 + | movd RBd, xmm0 + | movd RAd, xmm1 + |.endif + | ins RBd, cl // Assumes RA is ecx. + | jmp ->fff_resbit + |.endmacro + | + |.ffunc_bit_sh bit_lshift, shl + |.ffunc_bit_sh bit_rshift, shr + |.ffunc_bit_sh bit_arshift, sar + |.ffunc_bit_sh bit_rol, rol + |.ffunc_bit_sh bit_ror, ror + | + |//----------------------------------------------------------------------- + | + |->fff_fallback_2: + | mov NARGS:RDd, 1+2 // Other args are ignored, anyway. + | jmp ->fff_fallback + |->fff_fallback_1: + | mov NARGS:RDd, 1+1 // Other args are ignored, anyway. + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RD = nargs+1 + | mov L:RB, SAVE_L + | mov PC, [BASE-8] // Fallback may overwrite PC. + | mov SAVE_PC, PC // Redundant (but a defined value). + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler. + | mov L:RB->top, RD + | mov CFUNC:RD, [BASE-16] + | cleartp CFUNC:RD + | cmp RA, L:RB->maxstack + | ja >5 // Need to grow stack. + | mov CARG1, L:RB + | call aword CFUNC:RD->f // (lua_State *L) + | mov BASE, L:RB->base + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | test RDd, RDd; jg ->fff_res // Returned nresults+1? + |1: + | mov RA, L:RB->top + | sub RA, BASE + | shr RAd, 3 + | test RDd, RDd + | lea NARGS:RDd, [RAd+1] + | mov LFUNC:RB, [BASE-16] + | jne ->vm_call_tail // Returned -1? + | cleartp LFUNC:RB + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | mov RA, BASE + | test PCd, FRAME_TYPE + | jnz >3 + | movzx RBd, PC_RA + | neg RB + | lea BASE, [BASE+RB*8-16] // base = base - (RB+2)*8 + | jmp ->vm_call_dispatch // Resolve again for tailcall. + |3: + | mov RB, PC + | and RB, -8 + | sub BASE, RB + | jmp ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | mov CARG2d, LUA_MINSTACK + | mov CARG1, L:RB + | call extern lj_state_growstack // (lua_State *L, int n) + | mov BASE, L:RB->base + | xor RDd, RDd // Simulate a return 0. + | jmp <1 // Dumb retry (goes through ff first). + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RD = nargs+1 + | pop RB // Must keep stack at same level. + | mov TMP1, RB // Save return address + | mov L:RB, SAVE_L + | mov SAVE_PC, PC // Redundant (but a defined value). 
+ | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | mov CARG1, L:RB + | mov L:RB->top, RD + | call extern lj_gc_step // (lua_State *L) + | mov BASE, L:RB->base + | mov RD, L:RB->top + | sub RD, BASE + | shr RDd, 3 + | add NARGS:RDd, 1 + | mov RB, TMP1 + | push RB // Restore return address. + | ret + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. + |.if JIT + | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_VMEVENT // No recording while in vmevent. + | jnz >5 + | // Decrement the hookcount for consistency, but always do the call. + | test RDL, HOOK_ACTIVE + | jnz >1 + | test RDL, LUA_MASKLINE|LUA_MASKCOUNT + | jz >1 + | dec dword [DISPATCH+DISPATCH_GL(hookcount)] + | jmp >1 + |.endif + | + |->vm_rethook: // Dispatch target for return hooks. + | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_ACTIVE // Hook already active? + | jnz >5 + | jmp >1 + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_ACTIVE // Hook already active? + | jnz >5 + | + | test RDL, LUA_MASKLINE|LUA_MASKCOUNT + | jz >5 + | dec dword [DISPATCH+DISPATCH_GL(hookcount)] + | jz >1 + | test RDL, LUA_MASKLINE + | jz >5 + |1: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov CARG2, PC // Caveat: CARG2 == BASE + | mov CARG1, L:RB + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | call extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |3: + | mov BASE, L:RB->base + |4: + | movzx RAd, PC_RA + |5: + | movzx OP, PC_OP + | movzx RDd, PC_RD + | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins. + | + |->cont_hook: // Continue from hook yield. + | add PC, 4 + | mov RA, [RB-40] + | mov MULTRES, RAd // Restore MULTRES for *M ins. + | jmp <4 + | + |->vm_hotloop: // Hot loop counter underflow. + |.if JIT + | mov LFUNC:RB, [BASE-16] // Same as curr_topL(L). + | cleartp LFUNC:RB + | mov RB, LFUNC:RB->pc + | movzx RDd, byte [RB+PC2PROTO(framesize)] + | lea RD, [BASE+RD*8] + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov CARG2, PC + | lea CARG1, [DISPATCH+GG_DISP2J] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RB + | mov SAVE_PC, PC + | call extern lj_trace_hot // (jit_State *J, const BCIns *pc) + | jmp <3 + |.endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mov SAVE_PC, PC + |.if JIT + | jmp >1 + |.endif + | + |->vm_hotcall: // Hot call counter underflow. + |.if JIT + | mov SAVE_PC, PC + | or PC, 1 // Marker for hot call. + |1: + |.endif + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov CARG2, PC + | mov CARG1, L:RB + | call extern lj_dispatch_call // (lua_State *L, const BCIns *pc) + | // ASMFunction returned in eax/rax (RD). + | mov SAVE_PC, 0 // Invalidate for subsequent line hook. + |.if JIT + | and PC, -2 + |.endif + | mov BASE, L:RB->base + | mov RA, RD + | mov RD, L:RB->top + | sub RD, BASE + | mov RB, RA + | movzx RAd, PC_RA + | shr RDd, 3 + | add NARGS:RDd, 1 + | jmp RB + | + |->cont_stitch: // Trace stitching. + |.if JIT + | // BASE = base, RC = result, RB = mbase + | mov ITYPEd, [RB-24] // Save previous trace number. + | mov TMPRd, MULTRES + | movzx RAd, PC_RA + | lea RA, [BASE+RA*8] // Call base. 
+ | sub TMPRd, 1 + | jz >2 + |1: // Move results down. + | mov RB, [RC] + | mov [RA], RB + | add RC, 8 + | add RA, 8 + | sub TMPRd, 1 + | jnz <1 + |2: + | movzx RCd, PC_RA + | movzx RBd, PC_RB + | add RC, RB + | lea RC, [BASE+RC*8-8] + |3: + | cmp RC, RA + | ja >9 // More results wanted? + | + | mov RA, [DISPATCH+DISPATCH_J(trace)] + | mov TRACE:RD, [RA+ITYPE*8] + | test TRACE:RD, TRACE:RD + | jz ->cont_nop + | movzx RDd, word TRACE:RD->link + | cmp RDd, RBd + | je ->cont_nop // Blacklisted. + | test RDd, RDd + | jne =>BC_JLOOP // Jump to stitched trace. + | + | // Stitch a new trace to the previous trace. + | mov [DISPATCH+DISPATCH_J(exitno)], RB + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov CARG2, PC + | lea CARG1, [DISPATCH+GG_DISP2J] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RB + | call extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc) + | mov BASE, L:RB->base + | jmp ->cont_nop + | + |9: // Fill up results with nil. + | mov aword [RA], LJ_TNIL + | add RA, 8 + | jmp <3 + |.endif + | + |->vm_profhook: // Dispatch target for profiler hook. +#if LJ_HASPROFILE + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov CARG2, PC // Caveat: CARG2 == BASE + | mov CARG1, L:RB + | call extern lj_dispatch_profile // (lua_State *L, const BCIns *pc) + | mov BASE, L:RB->base + | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. + | sub PC, 4 + | jmp ->cont_nop +#endif + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Called from an exit stub with the exit number on the stack. + |// The 16 bit exit number is stored with two (sign-extended) push imm8. + |->vm_exit_handler: + |.if JIT + | push r13; push r12 + | push r11; push r10; push r9; push r8 + | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp + | push rbx; push rdx; push rcx; push rax + | movzx RCd, byte [rbp-8] // Reconstruct exit number. + | mov RCH, byte [rbp-16] + | mov [rbp-8], r15; mov [rbp-16], r14 + | // Caveat: DISPATCH is rbx. + | mov DISPATCH, [ebp] + | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number. + | set_vmstate EXIT + | mov [DISPATCH+DISPATCH_J(exitno)], RC + | mov [DISPATCH+DISPATCH_J(parent)], RA + |.if X64WIN + | sub rsp, 16*8+4*8 // Room for SSE regs + save area. + |.else + | sub rsp, 16*8 // Room for SSE regs. + |.endif + | add rbp, -128 + | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14 + | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12 + | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10 + | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8 + | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6 + | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4 + | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2 + | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0 + | // Caveat: RB is rbp. + | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)] + | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RB + | mov L:RB->base, BASE + |.if X64WIN + | lea CARG2, [rsp+4*8] + |.else + | mov CARG2, rsp + |.endif + | lea CARG1, [DISPATCH+GG_DISP2J] + | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 + | call extern lj_trace_exit // (jit_State *J, ExitState *ex) + | // MULTRES or negated error code returned in eax (RD). 
+ | mov RA, L:RB->cframe + | and RA, CFRAME_RAWMASK + | mov [RA+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield). + | mov BASE, L:RB->base + | mov PC, [RA+CFRAME_OFS_PC] // Get SAVE_PC. + | jmp >1 + |.endif + |->vm_exit_interp: + | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set. + |.if JIT + | // Restore additional callee-save registers only used in compiled code. + |.if X64WIN + | lea RA, [rsp+10*16+4*8] + |1: + | movdqa xmm15, [RA-10*16] + | movdqa xmm14, [RA-9*16] + | movdqa xmm13, [RA-8*16] + | movdqa xmm12, [RA-7*16] + | movdqa xmm11, [RA-6*16] + | movdqa xmm10, [RA-5*16] + | movdqa xmm9, [RA-4*16] + | movdqa xmm8, [RA-3*16] + | movdqa xmm7, [RA-2*16] + | mov rsp, RA // Reposition stack to C frame. + | movdqa xmm6, [RA-1*16] + | mov r15, CSAVE_1 + | mov r14, CSAVE_2 + | mov r13, CSAVE_3 + | mov r12, CSAVE_4 + |.else + | lea RA, [rsp+16] + |1: + | mov r13, [RA-8] + | mov r12, [RA] + | mov rsp, RA // Reposition stack to C frame. + |.endif + | test RDd, RDd; js >9 // Check for error from exit. + | mov L:RB, SAVE_L + | mov MULTRES, RDd + | mov LFUNC:KBASE, [BASE-16] + | cleartp LFUNC:KBASE + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | mov L:RB->base, BASE + | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 + | set_vmstate INTERP + | // Modified copy of ins_next which handles function header dispatch, too. + | mov RCd, [PC] + | movzx RAd, RCH + | movzx OP, RCL + | add PC, 4 + | shr RCd, 16 + | cmp OP, BC_FUNCF // Function header? + | jb >3 + | cmp OP, BC_FUNCC+2 // Fast function? + | jae >4 + |2: + | mov RCd, MULTRES // RC/RD holds nres+1. + |3: + | jmp aword [DISPATCH+OP*8] + | + |4: // Check frame below fast function. + | mov RC, [BASE-8] + | test RCd, FRAME_TYPE + | jnz <2 // Trace stitching continuation? + | // Otherwise set KBASE for Lua function below fast function. + | movzx RCd, byte [RC-3] + | neg RC + | mov LFUNC:KBASE, [BASE+RC*8-24] + | cleartp LFUNC:KBASE + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | jmp <2 + | + |9: // Rethrow error from the right C frame. + | neg RD + | mov CARG1, L:RB + | mov CARG2, RD + | call extern lj_err_throw // (lua_State *L, int errcode) + |.endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// FP value rounding. Called by math.floor/math.ceil fast functions + |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified. + |.macro vm_round, name, mode, cond + |->name: + |->name .. _sse: + | sseconst_abs xmm2, RD + | sseconst_2p52 xmm3, RD + | movaps xmm1, xmm0 + | andpd xmm1, xmm2 // |x| + | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|. + | jbe >1 + | andnpd xmm2, xmm0 // Isolate sign bit. + |.if mode == 2 // trunc(x)? + | movaps xmm0, xmm1 + | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 + | subsd xmm1, xmm3 + | sseconst_1 xmm3, RD + | cmpsd xmm0, xmm1, 1 // |x| < result? + | andpd xmm0, xmm3 + | subsd xmm1, xmm0 // If yes, subtract -1. + | orpd xmm1, xmm2 // Merge sign bit back in. + |.else + | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 + | subsd xmm1, xmm3 + | orpd xmm1, xmm2 // Merge sign bit back in. + | .if mode == 1 // ceil(x)? + | sseconst_m1 xmm2, RD // Must subtract -1 to preserve -0. + | cmpsd xmm0, xmm1, 6 // x > result? + | .else // floor(x)? + | sseconst_1 xmm2, RD + | cmpsd xmm0, xmm1, 1 // x < result? 
+ | .endif + | andpd xmm0, xmm2 + | subsd xmm1, xmm0 // If yes, subtract +-1. + |.endif + | movaps xmm0, xmm1 + |1: + | ret + |.endmacro + | + | vm_round vm_floor, 0, 1 + | vm_round vm_ceil, 1, JIT + | vm_round vm_trunc, 2, JIT + | + |// FP modulo x%y. Called by BC_MOD* and vm_arith. + |->vm_mod: + |// Args in xmm0/xmm1, return value in xmm0. + |// Caveat: xmm0-xmm5 and RC (eax) modified! + | movaps xmm5, xmm0 + | divsd xmm0, xmm1 + | sseconst_abs xmm2, RD + | sseconst_2p52 xmm3, RD + | movaps xmm4, xmm0 + | andpd xmm4, xmm2 // |x/y| + | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|. + | jbe >1 + | andnpd xmm2, xmm0 // Isolate sign bit. + | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52 + | subsd xmm4, xmm3 + | orpd xmm4, xmm2 // Merge sign bit back in. + | sseconst_1 xmm2, RD + | cmpsd xmm0, xmm4, 1 // x/y < result? + | andpd xmm0, xmm2 + | subsd xmm4, xmm0 // If yes, subtract 1.0. + | movaps xmm0, xmm5 + | mulsd xmm1, xmm4 + | subsd xmm0, xmm1 + | ret + |1: + | mulsd xmm1, xmm0 + | movaps xmm0, xmm5 + | subsd xmm0, xmm1 + | ret + | + |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified. + |->vm_powi_sse: + | cmp eax, 1; jle >6 // i<=1? + | // Now 1 < (unsigned)i <= 0x80000000. + |1: // Handle leading zeros. + | test eax, 1; jnz >2 + | mulsd xmm0, xmm0 + | shr eax, 1 + | jmp <1 + |2: + | shr eax, 1; jz >5 + | movaps xmm1, xmm0 + |3: // Handle trailing bits. + | mulsd xmm0, xmm0 + | shr eax, 1; jz >4 + | jnc <3 + | mulsd xmm1, xmm0 + | jmp <3 + |4: + | mulsd xmm0, xmm1 + |5: + | ret + |6: + | je <5 // x^1 ==> x + | jb >7 // x^0 ==> 1 + | neg eax + | call <1 + | sseconst_1 xmm1, RD + | divsd xmm1, xmm0 + | movaps xmm0, xmm1 + | ret + |7: + | sseconst_1 xmm0, RD + | ret + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |// int lj_vm_cpuid(uint32_t f, uint32_t res[4]) + |->vm_cpuid: + | mov eax, CARG1d + | .if X64WIN; push rsi; mov rsi, CARG2; .endif + | push rbx + | cpuid + | mov [rsi], eax + | mov [rsi+4], ebx + | mov [rsi+8], ecx + | mov [rsi+12], edx + | pop rbx + | .if X64WIN; pop rsi; .endif + | ret + | + |//----------------------------------------------------------------------- + |//-- Assertions --------------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->assert_bad_for_arg_type: +#ifdef LUA_USE_ASSERT + | int3 +#endif + | int3 + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Handler for callback functions. Callback slot number in ah/al. + |->vm_ffi_callback: + |.if FFI + |.type CTSTATE, CTState, PC + | saveregs_ // ebp/rbp already saved. ebp now holds global_State *. 
+ | lea DISPATCH, [ebp+GG_G2DISP] + | mov CTSTATE, GL:ebp->ctype_state + | movzx eax, ax + | mov CTSTATE->cb.slot, eax + | mov CTSTATE->cb.gpr[0], CARG1 + | mov CTSTATE->cb.gpr[1], CARG2 + | mov CTSTATE->cb.gpr[2], CARG3 + | mov CTSTATE->cb.gpr[3], CARG4 + | movsd qword CTSTATE->cb.fpr[0], xmm0 + | movsd qword CTSTATE->cb.fpr[1], xmm1 + | movsd qword CTSTATE->cb.fpr[2], xmm2 + | movsd qword CTSTATE->cb.fpr[3], xmm3 + |.if X64WIN + | lea rax, [rsp+CFRAME_SIZE+4*8] + |.else + | lea rax, [rsp+CFRAME_SIZE] + | mov CTSTATE->cb.gpr[4], CARG5 + | mov CTSTATE->cb.gpr[5], CARG6 + | movsd qword CTSTATE->cb.fpr[4], xmm4 + | movsd qword CTSTATE->cb.fpr[5], xmm5 + | movsd qword CTSTATE->cb.fpr[6], xmm6 + | movsd qword CTSTATE->cb.fpr[7], xmm7 + |.endif + | mov CTSTATE->cb.stack, rax + | mov CARG2, rsp + | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok. + | mov CARG1, CTSTATE + | call extern lj_ccallback_enter // (CTState *cts, void *cf) + | // lua_State * returned in eax (RD). + | set_vmstate INTERP + | mov BASE, L:RD->base + | mov RD, L:RD->top + | sub RD, BASE + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | shr RD, 3 + | add RD, 1 + | ins_callt + |.endif + | + |->cont_ffi_callback: // Return from FFI callback. + |.if FFI + | mov L:RA, SAVE_L + | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)] + | mov aword CTSTATE->L, L:RA + | mov L:RA->base, BASE + | mov L:RA->top, RB + | mov CARG1, CTSTATE + | mov CARG2, RC + | call extern lj_ccallback_leave // (CTState *cts, TValue *o) + | mov rax, CTSTATE->cb.gpr[0] + | movsd xmm0, qword CTSTATE->cb.fpr[0] + | jmp ->vm_leave_unw + |.endif + | + |->vm_ffi_call: // Call C function via FFI. + | // Caveat: needs special frame unwinding, see below. + |.if FFI + | .type CCSTATE, CCallState, rbx + | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1 + | + | // Readjust stack. + | mov eax, CCSTATE->spadj + | sub rsp, rax + | + | // Copy stack slots. + | movzx ecx, byte CCSTATE->nsp + | sub ecx, 1 + | js >2 + |1: + | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)] + | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax + | sub ecx, 1 + | jns <1 + |2: + | + | movzx eax, byte CCSTATE->nfpr + | mov CARG1, CCSTATE->gpr[0] + | mov CARG2, CCSTATE->gpr[1] + | mov CARG3, CCSTATE->gpr[2] + | mov CARG4, CCSTATE->gpr[3] + |.if not X64WIN + | mov CARG5, CCSTATE->gpr[4] + | mov CARG6, CCSTATE->gpr[5] + |.endif + | test eax, eax; jz >5 + | movaps xmm0, CCSTATE->fpr[0] + | movaps xmm1, CCSTATE->fpr[1] + | movaps xmm2, CCSTATE->fpr[2] + | movaps xmm3, CCSTATE->fpr[3] + |.if not X64WIN + | cmp eax, 4; jbe >5 + | movaps xmm4, CCSTATE->fpr[4] + | movaps xmm5, CCSTATE->fpr[5] + | movaps xmm6, CCSTATE->fpr[6] + | movaps xmm7, CCSTATE->fpr[7] + |.endif + |5: + | + | call aword CCSTATE->func + | + | mov CCSTATE->gpr[0], rax + | movaps CCSTATE->fpr[0], xmm0 + |.if not X64WIN + | mov CCSTATE->gpr[1], rdx + | movaps CCSTATE->fpr[1], xmm1 + |.endif + | + | mov rbx, [rbp-8]; leave; ret + |.endif + |// Note: vm_ffi_call must be the last function in this object file! + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |// Note: aligning all instructions does not pay off. + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. 
*/ + + |.macro jmp_comp, lt, ge, le, gt, target + ||switch (op) { + ||case BC_ISLT: + | lt target + ||break; + ||case BC_ISGE: + | ge target + ||break; + ||case BC_ISLE: + | le target + ||break; + ||case BC_ISGT: + | gt target + ||break; + ||default: break; /* Shut up GCC. */ + ||} + |.endmacro + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1, RD = src2, JMP with RD = target + | ins_AD + | mov ITYPE, [BASE+RA*8] + | mov RB, [BASE+RD*8] + | mov RA, ITYPE + | mov RD, RB + | sar ITYPE, 47 + | sar RB, 47 + |.if DUALNUM + | cmp ITYPEd, LJ_TISNUM; jne >7 + | cmp RBd, LJ_TISNUM; jne >8 + | add PC, 4 + | cmp RAd, RDd + | jmp_comp jge, jl, jg, jle, >9 + |6: + | movzx RDd, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RA is not an integer. + | ja ->vmeta_comp + | // RA is a number. + | cmp RBd, LJ_TISNUM; jb >1; jne ->vmeta_comp + | // RA is a number, RD is an integer. + | cvtsi2sd xmm0, RDd + | jmp >2 + | + |8: // RA is an integer, RD is not an integer. + | ja ->vmeta_comp + | // RA is an integer, RD is a number. + | cvtsi2sd xmm1, RAd + | movd xmm0, RD + | jmp >3 + |.else + | cmp ITYPEd, LJ_TISNUM; jae ->vmeta_comp + | cmp RBd, LJ_TISNUM; jae ->vmeta_comp + |.endif + |1: + | movd xmm0, RD + |2: + | movd xmm1, RA + |3: + | add PC, 4 + | ucomisd xmm0, xmm1 + | // Unordered: all of ZF CF PF set, ordered: PF clear. + | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + |.if DUALNUM + | jmp_comp jbe, ja, jb, jae, <9 + | jmp <6 + |.else + | jmp_comp jbe, ja, jb, jae, >1 + | movzx RDd, PC_RD + | branchPC RD + |1: + | ins_next + |.endif + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | ins_AD // RA = src1, RD = src2, JMP with RD = target + | mov RB, [BASE+RD*8] + | mov ITYPE, [BASE+RA*8] + | add PC, 4 + | mov RD, RB + | mov RA, ITYPE + | sar RB, 47 + | sar ITYPE, 47 + |.if DUALNUM + | cmp RBd, LJ_TISNUM; jne >7 + | cmp ITYPEd, LJ_TISNUM; jne >8 + | cmp RDd, RAd + if (vk) { + | jne >9 + } else { + | je >9 + } + | movzx RDd, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RD is not an integer. + | ja >5 + | // RD is a number. + | movd xmm1, RD + | cmp ITYPEd, LJ_TISNUM; jb >1; jne >5 + | // RD is a number, RA is an integer. + | cvtsi2sd xmm0, RAd + | jmp >2 + | + |8: // RD is an integer, RA is not an integer. + | ja >5 + | // RD is an integer, RA is a number. + | cvtsi2sd xmm1, RDd + | jmp >1 + | + |.else + | cmp RBd, LJ_TISNUM; jae >5 + | cmp ITYPEd, LJ_TISNUM; jae >5 + | movd xmm1, RD + |.endif + |1: + | movd xmm0, RA + |2: + | ucomisd xmm0, xmm1 + |4: + iseqne_fp: + if (vk) { + | jp >2 // Unordered means not equal. + | jne >2 + } else { + | jp >2 // Unordered means not equal. + | je >1 + } + iseqne_end: + if (vk) { + |1: // EQ: Branch to the target. + | movzx RDd, PC_RD + | branchPC RD + |2: // NE: Fallthrough to next instruction. + |.if not FFI + |3: + |.endif + } else { + |.if not FFI + |3: + |.endif + |2: // NE: Branch to the target. + | movzx RDd, PC_RD + | branchPC RD + |1: // EQ: Fallthrough to next instruction. + } + if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || + op == BC_ISEQN || op == BC_ISNEN)) { + | jmp <9 + } else { + | ins_next + } + | + if (op == BC_ISEQV || op == BC_ISNEV) { + |5: // Either or both types are not numbers. + |.if FFI + | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd + | cmp ITYPEd, LJ_TCDATA; je ->vmeta_equal_cd + |.endif + | cmp RA, RD + | je <1 // Same GCobjs or pvalues? + | cmp RBd, ITYPEd + | jne <2 // Not the same type? 
+ | cmp RBd, LJ_TISTABUD
+ | ja <2 // Different objects and not table/ud?
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | cleartp TAB:RA
+ | mov TAB:RB, TAB:RA->metatable
+ | test TAB:RB, TAB:RB
+ | jz <2 // No metatable?
+ | test byte TAB:RB->nomm, 1<<MM_eq
+ | jnz <2 // Or 'no __eq' flag set?
+ if (vk) {
+ | xor RBd, RBd // ne = 0
+ } else {
+ | mov RBd, 1 // ne = 1
+ }
+ | jmp ->vmeta_equal // Handle __eq metamethod.
+ } else {
+ |.if FFI
+ |3:
+ | cmp ITYPEd, LJ_TCDATA
+ if (LJ_DUALNUM && vk) {
+ | jne <9
+ } else {
+ | jne <2
+ }
+ | jmp ->vmeta_equal_cd
+ |.endif
+ }
+ break;
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | ins_AND // RA = src, RD = str const, JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | add PC, 4
+ | checkstr RB, >3
+ | cmp RB, [KBASE+RD*8]
+ iseqne_test:
+ if (vk) {
+ | jne >2
+ } else {
+ | je >1
+ }
+ goto iseqne_end;
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | ins_AD // RA = src, RD = num const, JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | add PC, 4
+ |.if DUALNUM
+ | checkint RB, >7
+ | mov RD, [KBASE+RD*8]
+ | checkint RD, >8
+ | cmp RBd, RDd
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja >3
+ | // RA is a number.
+ | mov RD, [KBASE+RD*8]
+ | checkint RD, >1
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, RDd
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is a number.
+ | cvtsi2sd xmm0, RBd
+ | movd xmm1, RD
+ | ucomisd xmm0, xmm1
+ | jmp >4
+ |1:
+ | movd xmm0, RD
+ |.else
+ | checknum RB, >3
+ |1:
+ | movsd xmm0, qword [KBASE+RD*8]
+ |.endif
+ |2:
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |4:
+ goto iseqne_fp;
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | sar RB, 47
+ | add PC, 4
+ | cmp RBd, RDd
+ if (!LJ_HASFFI) goto iseqne_test;
+ if (vk) {
+ | jne >3
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ |3:
+ | cmp RBd, LJ_TCDATA; jne <2
+ | jmp ->vmeta_equal_cd
+ } else {
+ | je >2
+ | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | ins_AD // RA = dst or unused, RD = src, JMP with RD = target
+ | mov ITYPE, [BASE+RD*8]
+ | add PC, 4
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov RB, ITYPE
+ }
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISTC) {
+ | jae >1
+ } else {
+ | jb >1
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov [BASE+RA*8], RB
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1: // Fallthrough to the next instruction.
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | ins_AD // RA = src, RD = -type
+ | mov RB, [BASE+RA*8]
+ | sar RB, 47
+ | add RBd, RDd
+ | jne ->vmeta_istype
+ | ins_next
+ break;
+ case BC_ISNUM:
+ | ins_AD // RA = src, RD = -(TISNUM-1)
+ | checknumtp [BASE+RA*8], ->vmeta_istype
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ | mov [BASE+RA*8], RB
+ | ins_next_
+ break;
+ case BC_NOT:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ | sar RB, 47
+ | mov RCd, 2
+ | cmp RB, LJ_TISTRUECOND
+ | sbb RCd, 0
+ | shl RC, 47
+ | not RC
+ | mov [BASE+RA*8], RC
+ | ins_next
+ break;
+ case BC_UNM:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ |.if DUALNUM
+ | checkint RB, >5
+ | neg RBd
+ | jo >4
+ | setint RB
+ |9:
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |4:
+ | mov64 RB, U64x(41e00000,00000000) // 2^31.
+ | jmp <9
+ |5:
+ | ja ->vmeta_unm
+ |.else
+ | checknum RB, ->vmeta_unm
+ |.endif
+ | mov64 RD, U64x(80000000,00000000)
+ | xor RB, RD
+ |.if DUALNUM
+ | jmp <9
+ |.else
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |.endif
+ break;
+ case BC_LEN:
+ | ins_AD // RA = dst, RD = src
+ | mov RD, [BASE+RD*8]
+ | checkstr RD, >2
+ |.if DUALNUM
+ | mov RDd, dword STR:RD->len
+ |1:
+ | setint RD
+ | mov [BASE+RA*8], RD
+ |.else
+ | xorps xmm0, xmm0
+ | cvtsi2sd xmm0, dword STR:RD->len
+ |1:
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ |2:
+ | cmp ITYPEd, LJ_TTAB; jne ->vmeta_len
+ | mov TAB:CARG1, TAB:RD
+#if LJ_52
+ | mov TAB:RB, TAB:RD->metatable
+ | cmp TAB:RB, 0
+ | jnz >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | mov RB, BASE // Save BASE.
+ | call extern lj_tab_len // (GCtab *t)
+ | // Length of table returned in eax (RD).
+ |.if DUALNUM
+ | // Nothing to do.
+ |.else
+ | cvtsi2sd xmm0, RDd
+ |.endif
+ | mov BASE, RB // Restore BASE.
+ | movzx RAd, PC_RA
+ | jmp <1
+#if LJ_52
+ |9: // Check for __len.
+ | test byte TAB:RB->nomm, 1<<MM_len
+ | jnz <3
+ | jmp ->vmeta_len // 'no __len' flag NOT set: check.
+#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithpre, sseins, ssereg + | ins_ABC + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | checknumtp [BASE+RB*8], ->vmeta_arith_vn + | .if DUALNUM + | checknumtp [KBASE+RC*8], ->vmeta_arith_vn + | .endif + | movsd xmm0, qword [BASE+RB*8] + | sseins ssereg, qword [KBASE+RC*8] + || break; + ||case 1: + | checknumtp [BASE+RB*8], ->vmeta_arith_nv + | .if DUALNUM + | checknumtp [KBASE+RC*8], ->vmeta_arith_nv + | .endif + | movsd xmm0, qword [KBASE+RC*8] + | sseins ssereg, qword [BASE+RB*8] + || break; + ||default: + | checknumtp [BASE+RB*8], ->vmeta_arith_vv + | checknumtp [BASE+RC*8], ->vmeta_arith_vv + | movsd xmm0, qword [BASE+RB*8] + | sseins ssereg, qword [BASE+RC*8] + || break; + ||} + |.endmacro + | + |.macro ins_arithdn, intins + | ins_ABC + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | mov RB, [BASE+RB*8] + | mov RC, [KBASE+RC*8] + | checkint RB, ->vmeta_arith_vno + | checkint RC, ->vmeta_arith_vno + | intins RBd, RCd; jo ->vmeta_arith_vno + || break; + ||case 1: + | mov RB, [BASE+RB*8] + | mov RC, [KBASE+RC*8] + | checkint RB, ->vmeta_arith_nvo + | checkint RC, ->vmeta_arith_nvo + | intins RCd, RBd; jo ->vmeta_arith_nvo + || break; + ||default: + | mov RB, [BASE+RB*8] + | mov RC, [BASE+RC*8] + | checkint RB, ->vmeta_arith_vvo + | checkint RC, ->vmeta_arith_vvo + | intins RBd, RCd; jo ->vmeta_arith_vvo + || break; + ||} + ||if (vk == 1) { + | setint RC + | mov [BASE+RA*8], RC + ||} else { + | setint RB + | mov [BASE+RA*8], RB + ||} + | ins_next + |.endmacro + | + |.macro ins_arithpost + | movsd qword [BASE+RA*8], xmm0 + |.endmacro + | + |.macro ins_arith, sseins + | ins_arithpre sseins, xmm0 + | ins_arithpost + | ins_next + |.endmacro + | + |.macro ins_arith, intins, sseins + |.if DUALNUM + | ins_arithdn intins + |.else + | ins_arith, sseins + |.endif + |.endmacro + + | // RA = dst, RB = src1 or num const, RC = src2 or num const + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arith add, addsd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arith sub, subsd + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arith imul, mulsd + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arith divsd + break; + case BC_MODVN: + | ins_arithpre movsd, xmm1 + |->BC_MODVN_Z: + | call ->vm_mod + | ins_arithpost + | ins_next + break; + case BC_MODNV: case BC_MODVV: + | ins_arithpre movsd, xmm1 + | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. + break; + case BC_POW: + | ins_arithpre movsd, xmm1 + | mov RB, BASE + | call extern pow + | movzx RAd, PC_RA + | mov BASE, RB + | ins_arithpost + | ins_next + break; + + case BC_CAT: + | ins_ABC // RA = dst, RB = src_start, RC = src_end + | mov L:CARG1, SAVE_L + | mov L:CARG1->base, BASE + | lea CARG2, [BASE+RC*8] + | mov CARG3d, RCd + | sub CARG3d, RBd + |->BC_CAT_Z: + | mov L:RB, L:CARG1 + | mov SAVE_PC, PC + | call extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // NULL (finished) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jnz ->vmeta_binop + | movzx RBd, PC_RB // Copy result to Stk[RA] from Stk[RB]. 
+ | movzx RAd, PC_RA + | mov RC, [BASE+RB*8] + | mov [BASE+RA*8], RC + | ins_next + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | ins_AND // RA = dst, RD = str const (~) + | mov RD, [KBASE+RD*8] + | settp RD, LJ_TSTR + | mov [BASE+RA*8], RD + | ins_next + break; + case BC_KCDATA: + |.if FFI + | ins_AND // RA = dst, RD = cdata const (~) + | mov RD, [KBASE+RD*8] + | settp RD, LJ_TCDATA + | mov [BASE+RA*8], RD + | ins_next + |.endif + break; + case BC_KSHORT: + | ins_AD // RA = dst, RD = signed int16 literal + |.if DUALNUM + | movsx RDd, RDW + | setint RD + | mov [BASE+RA*8], RD + |.else + | movsx RDd, RDW // Sign-extend literal. + | cvtsi2sd xmm0, RDd + | movsd qword [BASE+RA*8], xmm0 + |.endif + | ins_next + break; + case BC_KNUM: + | ins_AD // RA = dst, RD = num const + | movsd xmm0, qword [KBASE+RD*8] + | movsd qword [BASE+RA*8], xmm0 + | ins_next + break; + case BC_KPRI: + | ins_AD // RA = dst, RD = primitive type (~) + | shl RD, 47 + | not RD + | mov [BASE+RA*8], RD + | ins_next + break; + case BC_KNIL: + | ins_AD // RA = dst_start, RD = dst_end + | lea RA, [BASE+RA*8+8] + | lea RD, [BASE+RD*8] + | mov RB, LJ_TNIL + | mov [RA-8], RB // Sets minimum 2 slots. + |1: + | mov [RA], RB + | add RA, 8 + | cmp RA, RD + | jbe <1 + | ins_next + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | ins_AD // RA = dst, RD = upvalue # + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | mov UPVAL:RB, [LFUNC:RB+RD*8+offsetof(GCfuncL, uvptr)] + | mov RB, UPVAL:RB->v + | mov RD, [RB] + | mov [BASE+RA*8], RD + | ins_next + break; + case BC_USETV: +#define TV2MARKOFS \ + ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) + | ins_AD // RA = upvalue #, RD = src + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] + | cmp byte UPVAL:RB->closed, 0 + | mov RB, UPVAL:RB->v + | mov RA, [BASE+RD*8] + | mov [RB], RA + | jz >1 + | // Check barrier for closed upvalue. + | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv) + | jnz >2 + |1: + | ins_next + | + |2: // Upvalue is black. Check if new value is collectable and white. + | mov RD, RA + | sar RD, 47 + | sub RDd, LJ_TISGCV + | cmp RDd, LJ_TNUMX - LJ_TISGCV // tvisgcv(v) + | jbe <1 + | cleartp GCOBJ:RA + | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v) + | jz <1 + | // Crossed a write barrier. Move the barrier forward. + |.if not X64WIN + | mov CARG2, RB + | mov RB, BASE // Save BASE. + |.else + | xchg CARG2, RB // Save BASE (CARG2 == BASE). + |.endif + | lea GL:CARG1, [DISPATCH+GG_DISP2G] + | call extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | mov BASE, RB // Restore BASE. + | jmp <1 + break; +#undef TV2MARKOFS + case BC_USETS: + | ins_AND // RA = upvalue #, RD = str const (~) + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] + | mov STR:RA, [KBASE+RD*8] + | mov RD, UPVAL:RB->v + | settp STR:ITYPE, STR:RA, LJ_TSTR + | mov [RD], STR:ITYPE + | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv) + | jnz >2 + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str) + | jz <1 + | cmp byte UPVAL:RB->closed, 0 + | jz <1 + | // Crossed a write barrier. Move the barrier forward. + | mov RB, BASE // Save BASE (CARG2 == BASE). 
+ | mov CARG2, RD + | lea GL:CARG1, [DISPATCH+GG_DISP2G] + | call extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | mov BASE, RB // Restore BASE. + | jmp <1 + break; + case BC_USETN: + | ins_AD // RA = upvalue #, RD = num const + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | movsd xmm0, qword [KBASE+RD*8] + | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] + | mov RA, UPVAL:RB->v + | movsd qword [RA], xmm0 + | ins_next + break; + case BC_USETP: + | ins_AD // RA = upvalue #, RD = primitive type (~) + | mov LFUNC:RB, [BASE-16] + | cleartp LFUNC:RB + | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)] + | shl RD, 47 + | not RD + | mov RA, UPVAL:RB->v + | mov [RA], RD + | ins_next + break; + case BC_UCLO: + | ins_AD // RA = level, RD = target + | branchPC RD // Do this first to free RD. + | mov L:RB, SAVE_L + | cmp dword L:RB->openupval, 0 + | je >1 + | mov L:RB->base, BASE + | lea CARG2, [BASE+RA*8] // Caveat: CARG2 == BASE + | mov L:CARG1, L:RB // Caveat: CARG1 == RA + | call extern lj_func_closeuv // (lua_State *L, TValue *level) + | mov BASE, L:RB->base + |1: + | ins_next + break; + + case BC_FNEW: + | ins_AND // RA = dst, RD = proto const (~) (holding function prototype) + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE. + | mov CARG3, [BASE-16] + | cleartp CARG3 + | mov CARG2, [KBASE+RD*8] // Fetch GCproto *. + | mov CARG1, L:RB + | mov SAVE_PC, PC + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | call extern lj_func_newL_gc + | // GCfuncL * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RAd, PC_RA + | settp LFUNC:RC, LJ_TFUNC + | mov [BASE+RA*8], LFUNC:RC + | ins_next + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + | ins_AD // RA = dst, RD = hbits|asize + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] + | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] + | mov SAVE_PC, PC + | jae >5 + |1: + | mov CARG3d, RDd + | and RDd, 0x7ff + | shr CARG3d, 11 + | cmp RDd, 0x7ff + | je >3 + |2: + | mov L:CARG1, L:RB + | mov CARG2d, RDd + | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Table * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RAd, PC_RA + | settp TAB:RC, LJ_TTAB + | mov [BASE+RA*8], TAB:RC + | ins_next + |3: // Turn 0x7ff into 0x801. + | mov RDd, 0x801 + | jmp <2 + |5: + | mov L:CARG1, L:RB + | call extern lj_gc_step_fixtop // (lua_State *L) + | movzx RDd, PC_RD + | jmp <1 + break; + case BC_TDUP: + | ins_AND // RA = dst, RD = table const (~) (holding template table) + | mov L:RB, SAVE_L + | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] + | mov SAVE_PC, PC + | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] + | mov L:RB->base, BASE + | jae >3 + |2: + | mov TAB:CARG2, [KBASE+RD*8] // Caveat: CARG2 == BASE + | mov L:CARG1, L:RB // Caveat: CARG1 == RA + | call extern lj_tab_dup // (lua_State *L, Table *kt) + | // Table * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RAd, PC_RA + | settp TAB:RC, LJ_TTAB + | mov [BASE+RA*8], TAB:RC + | ins_next + |3: + | mov L:CARG1, L:RB + | call extern lj_gc_step_fixtop // (lua_State *L) + | movzx RDd, PC_RD // Need to reload RD. 
+ | not RD
+ | jmp <2
+ break;
+
+ case BC_GGET:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*8]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_GSET:
+ | ins_AND // RA = src, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*8]
+ | jmp ->BC_TSETS_Z
+ break;
+
+ case BC_TGETV:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tgetv
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movd xmm0, RC
+ | cvttsd2si RCd, xmm0
+ | cvtsi2sd xmm1, RCd
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tgetv // Not in array part? Use fallback.
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ | mov ITYPE, [RC]
+ | cmp ITYPE, LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ |1:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
+ | jmp <1
+ |
+ |5: // String key?
+ | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tgetv
+ | cleartp STR:RC
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | ins_ABC // RA = dst, RB = table, RC = str const (~)
+ | mov TAB:RB, [BASE+RB*8]
+ | not RC
+ | mov STR:RC, [KBASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tgets
+ |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *
+ | mov TMPRd, TAB:RB->hmask
+ | and TMPRd, STR:RC->hash
+ | imul TMPRd, #NODE
+ | add NODE:TMPR, TAB:RB->node
+ | settp ITYPE, STR:RC, LJ_TSTR
+ |1:
+ | cmp NODE:TMPR->key, ITYPE
+ | jne >4
+ | // Get node value.
+ | mov ITYPE, NODE:TMPR->val
+ | cmp ITYPE, LJ_TNIL
+ | je >5 // Key found, but nil value?
+ |2:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | mov NODE:TMPR, NODE:TMPR->next
+ | test NODE:TMPR, NODE:TMPR
+ | jnz <1
+ | // End of hash chain: key not found, nil result.
+ | mov ITYPE, LJ_TNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <2 // No metatable: done.
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jnz <2 // 'no __index' flag set: done.
+ | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
+ break;
+ case BC_TGETB:
+ | ins_ABC // RA = dst, RB = table, RC = byte literal
+ | mov TAB:RB, [BASE+RB*8]
+ | checktab TAB:RB, ->vmeta_tgetb
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tgetb
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ | mov ITYPE, [RC]
+ | cmp ITYPE, LJ_TNIL
+ | je >2
+ |1:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
+ | jmp <1
+ break;
+ case BC_TGETR:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | cleartp TAB:RB
+ |.if DUALNUM
+ | mov RCd, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RCd, qword [BASE+RC*8]
+ |.endif
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tgetr // Not in array part? Use fallback.
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ |->BC_TGETR_Z:
+ | mov ITYPE, [RC]
+ |->BC_TGETR2_Z:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tsetv
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movd xmm0, RC
+ | cvttsd2si RCd, xmm0
+ | cvtsi2sd xmm1, RCd
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tsetv
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | cmp aword [RC], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ | mov RB, [BASE+RA*8]
+ | mov [RC], RB
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
+ | jmp <1
+ |
+ |5: // String key?
+ | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tsetv
+ | cleartp STR:RC
+ | jmp ->BC_TSETS_Z
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+ case BC_TSETS:
+ | ins_ABC // RA = src, RB = table, RC = str const (~)
+ | mov TAB:RB, [BASE+RB*8]
+ | not RC
+ | mov STR:RC, [KBASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tsets
+ |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *
+ | mov TMPRd, TAB:RB->hmask
+ | and TMPRd, STR:RC->hash
+ | imul TMPRd, #NODE
+ | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
+ | add NODE:TMPR, TAB:RB->node
+ | settp ITYPE, STR:RC, LJ_TSTR
+ |1:
+ | cmp NODE:TMPR->key, ITYPE
+ | jne >5
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp aword [TMPR], LJ_TNIL
+ | je >4 // Previous value is nil?
+ |2:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |3: // Set node value.
+ | mov ITYPE, [BASE+RA*8]
+ | mov [TMPR], ITYPE
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | mov TAB:ITYPE, TAB:RB->metatable
+ | test TAB:ITYPE, TAB:ITYPE
+ | jz <2
+ | test byte TAB:ITYPE->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ | jmp <2
+ |
+ |5: // Follow hash chain.
+ | mov NODE:TMPR, NODE:TMPR->next
+ | test NODE:TMPR, NODE:TMPR
+ | jnz <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz >6 // No metatable: continue.
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mov TMP1, ITYPE
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | lea CARG3, TMP1
+ | mov CARG2, TAB:RB
+ | mov SAVE_PC, PC
+ | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Handles write barrier for the new key. TValue * returned in eax (RC).
+ | mov L:CARG1, SAVE_L
+ | mov BASE, L:CARG1->base
+ | mov TMPR, rax
+ | movzx RAd, PC_RA
+ | jmp <2 // Must check write barrier for value.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, ITYPE
+ | jmp <3
+ break;
+ case BC_TSETB:
+ | ins_ABC // RA = src, RB = table, RC = byte literal
+ | mov TAB:RB, [BASE+RB*8]
+ | checktab TAB:RB, ->vmeta_tsetb
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tsetb
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | cmp aword [RC], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ | mov ITYPE, [BASE+RA*8]
+ | mov [RC], ITYPE
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
+ | jmp <1
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+ case BC_TSETR:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | cleartp TAB:RB
+ |.if DUALNUM
+ | mov RC, [BASE+RC*8]
+ |.else
+ | cvttsd2si RCd, qword [BASE+RC*8]
+ |.endif
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tsetr
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Set array slot.
+ |->BC_TSETR_Z:
+ | mov ITYPE, [BASE+RA*8]
+ | mov [RC], ITYPE
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+
+ case BC_TSETM:
+ | ins_AD // RA = base (table at base-1), RD = num const (start index)
+ |1:
+ | mov TMPRd, dword [KBASE+RD*8] // Integer constant is in lo-word.
+ | lea RA, [BASE+RA*8]
+ | mov TAB:RB, [RA-8] // Guaranteed to be a table.
+ | cleartp TAB:RB
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | mov RDd, MULTRES
+ | sub RDd, 1
+ | jz >4 // Nothing to copy?
+ | add RDd, TMPRd // Compute needed size.
+ | cmp RDd, TAB:RB->asize
+ | ja >5 // Doesn't fit into array part?
+ | sub RDd, TMPRd
+ | shl TMPRd, 3
+ | add TMPR, TAB:RB->array
+ |3: // Copy result slots to table.
+ | mov RB, [RA]
+ | add RA, 8
+ | mov [TMPR], RB
+ | add TMPR, 8
+ | sub RDd, 1
+ | jnz <3
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, TAB:RB
+ | mov CARG3d, RDd
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA // Restore RA.
+ | movzx RDd, PC_RD // Restore RD.
+ | jmp <1 // Retry.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:RB, RD
+ | jmp <2
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs
+ if (op == BC_CALLM) {
+ | add NARGS:RDd, MULTRES
+ }
+ | mov LFUNC:RB, [BASE+RA*8]
+ | checkfunc LFUNC:RB, ->vmeta_call_ra
+ | lea BASE, [BASE+RA*8+16]
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | ins_AD // RA = base, RD = extra_nargs
+ | add NARGS:RDd, MULTRES
+ | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op.
+ break;
+ case BC_CALLT:
+ | ins_AD // RA = base, RD = nargs+1
+ | lea RA, [BASE+RA*8+16]
+ | mov KBASE, BASE // Use KBASE for move + vmeta_call hint.
+ | mov LFUNC:RB, [RA-16] + | checktp_nc LFUNC:RB, LJ_TFUNC, ->vmeta_call + |->BC_CALLT_Z: + | mov PC, [BASE-8] + | test PCd, FRAME_TYPE + | jnz >7 + |1: + | mov [BASE-16], LFUNC:RB // Copy func+tag down, reloaded below. + | mov MULTRES, NARGS:RDd + | sub NARGS:RDd, 1 + | jz >3 + |2: // Move args down. + | mov RB, [RA] + | add RA, 8 + | mov [KBASE], RB + | add KBASE, 8 + | sub NARGS:RDd, 1 + | jnz <2 + | + | mov LFUNC:RB, [BASE-16] + |3: + | cleartp LFUNC:RB + | mov NARGS:RDd, MULTRES + | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function? + | ja >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function. + | test PCd, FRAME_TYPE // Lua frame below? + | jnz <4 + | movzx RAd, PC_RA + | neg RA + | mov LFUNC:KBASE, [BASE+RA*8-32] // Need to prepare KBASE. + | cleartp LFUNC:KBASE + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | jmp <4 + | + |7: // Tailcall from a vararg function. + | sub PC, FRAME_VARG + | test PCd, FRAME_TYPEP + | jnz >8 // Vararg frame below? + | sub BASE, PC // Need to relocate BASE/KBASE down. + | mov KBASE, BASE + | mov PC, [BASE-8] + | jmp <1 + |8: + | add PCd, FRAME_VARG + | jmp <1 + break; + + case BC_ITERC: + | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1) + | lea RA, [BASE+RA*8+16] // fb = base+2 + | mov RB, [RA-32] // Copy state. fb[0] = fb[-4]. + | mov RC, [RA-24] // Copy control var. fb[1] = fb[-3]. + | mov [RA], RB + | mov [RA+8], RC + | mov LFUNC:RB, [RA-40] // Copy callable. fb[-1] = fb[-5] + | mov [RA-16], LFUNC:RB + | mov NARGS:RDd, 2+1 // Handle like a regular 2-arg call. + | checkfunc LFUNC:RB, ->vmeta_call + | mov BASE, RA + | ins_call + break; + + case BC_ITERN: + | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) + |.if JIT + | // NYI: add hotloop, record BC_ITERN. + |.endif + | mov TAB:RB, [BASE+RA*8-16] + | cleartp TAB:RB + | mov RCd, [BASE+RA*8-8] // Get index from control var. + | mov TMPRd, TAB:RB->asize + | add PC, 4 + | mov ITYPE, TAB:RB->array + |1: // Traverse array part. + | cmp RCd, TMPRd; jae >5 // Index points after array part? + | cmp aword [ITYPE+RC*8], LJ_TNIL; je >4 + |.if not DUALNUM + | cvtsi2sd xmm0, RCd + |.endif + | // Copy array slot to returned value. + | mov RB, [ITYPE+RC*8] + | mov [BASE+RA*8+8], RB + | // Return array index as a numeric key. + |.if DUALNUM + | setint ITYPE, RC + | mov [BASE+RA*8], ITYPE + |.else + | movsd qword [BASE+RA*8], xmm0 + |.endif + | add RCd, 1 + | mov [BASE+RA*8-8], RCd // Update control var. + |2: + | movzx RDd, PC_RD // Get target from ITERL. + | branchPC RD + |3: + | ins_next + | + |4: // Skip holes in array part. + | add RCd, 1 + | jmp <1 + | + |5: // Traverse hash part. + | sub RCd, TMPRd + |6: + | cmp RCd, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1. + | imul ITYPEd, RCd, #NODE + | add NODE:ITYPE, TAB:RB->node + | cmp aword NODE:ITYPE->val, LJ_TNIL; je >7 + | lea TMPRd, [RCd+TMPRd+1] + | // Copy key and value from hash slot. + | mov RB, NODE:ITYPE->key + | mov RC, NODE:ITYPE->val + | mov [BASE+RA*8], RB + | mov [BASE+RA*8+8], RC + | mov [BASE+RA*8-8], TMPRd + | jmp <2 + | + |7: // Skip holes in hash part. + | add RCd, 1 + | jmp <6 + break; + + case BC_ISNEXT: + | ins_AD // RA = base, RD = target (points to ITERN) + | mov CFUNC:RB, [BASE+RA*8-24] + | checkfunc CFUNC:RB, >5 + | checktptp [BASE+RA*8-16], LJ_TTAB, >5 + | cmp aword [BASE+RA*8-8], LJ_TNIL; jne >5 + | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5 + | branchPC RD + | mov64 TMPR, U64x(fffe7fff, 00000000) + | mov [BASE+RA*8-8], TMPR // Initialize control var. 
+ |1: + | ins_next + |5: // Despecialize bytecode if any of the checks fail. + | mov PC_OP, BC_JMP + | branchPC RD + | mov byte [PC], BC_ITERC + | jmp <1 + break; + + case BC_VARG: + | ins_ABC // RA = base, RB = nresults+1, RC = numparams + | lea TMPR, [BASE+RC*8+(16+FRAME_VARG)] + | lea RA, [BASE+RA*8] + | sub TMPR, [BASE-8] + | // Note: TMPR may now be even _above_ BASE if nargs was < numparams. + | test RB, RB + | jz >5 // Copy all varargs? + | lea RB, [RA+RB*8-8] + | cmp TMPR, BASE // No vararg slots? + | jnb >2 + |1: // Copy vararg slots to destination slots. + | mov RC, [TMPR-16] + | add TMPR, 8 + | mov [RA], RC + | add RA, 8 + | cmp RA, RB // All destination slots filled? + | jnb >3 + | cmp TMPR, BASE // No more vararg slots? + | jb <1 + |2: // Fill up remainder with nil. + | mov aword [RA], LJ_TNIL + | add RA, 8 + | cmp RA, RB + | jb <2 + |3: + | ins_next + | + |5: // Copy all varargs. + | mov MULTRES, 1 // MULTRES = 0+1 + | mov RC, BASE + | sub RC, TMPR + | jbe <3 // No vararg slots? + | mov RBd, RCd + | shr RBd, 3 + | add RBd, 1 + | mov MULTRES, RBd // MULTRES = #varargs+1 + | mov L:RB, SAVE_L + | add RC, RA + | cmp RC, L:RB->maxstack + | ja >7 // Need to grow stack? + |6: // Copy all vararg slots. + | mov RC, [TMPR-16] + | add TMPR, 8 + | mov [RA], RC + | add RA, 8 + | cmp TMPR, BASE // No more vararg slots? + | jb <6 + | jmp <3 + | + |7: // Grow stack for varargs. + | mov L:RB->base, BASE + | mov L:RB->top, RA + | mov SAVE_PC, PC + | sub TMPR, BASE // Need delta, because BASE may change. + | mov TMP1hi, TMPRd + | mov CARG2d, MULTRES + | sub CARG2d, 1 + | mov CARG1, L:RB + | call extern lj_state_growstack // (lua_State *L, int n) + | mov BASE, L:RB->base + | movsxd TMPR, TMP1hi + | mov RA, L:RB->top + | add TMPR, BASE + | jmp <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | ins_AD // RA = results, RD = extra_nresults + | add RDd, MULTRES // MULTRES >=1, so RD >=1. + | // Fall through. Assumes BC_RET follows and ins_AD is a no-op. + break; + + case BC_RET: case BC_RET0: case BC_RET1: + | ins_AD // RA = results, RD = nresults+1 + if (op != BC_RET0) { + | shl RAd, 3 + } + |1: + | mov PC, [BASE-8] + | mov MULTRES, RDd // Save nresults+1. + | test PCd, FRAME_TYPE // Check frame type marker. + | jnz >7 // Not returning to a fixarg Lua func? + switch (op) { + case BC_RET: + |->BC_RET_Z: + | mov KBASE, BASE // Use KBASE for result move. + | sub RDd, 1 + | jz >3 + |2: // Move results down. + | mov RB, [KBASE+RA] + | mov [KBASE-16], RB + | add KBASE, 8 + | sub RDd, 1 + | jnz <2 + |3: + | mov RDd, MULTRES // Note: MULTRES may be >255. + | movzx RBd, PC_RB // So cannot compare with RDL! + |5: + | cmp RBd, RDd // More results expected? + | ja >6 + break; + case BC_RET1: + | mov RB, [BASE+RA] + | mov [BASE-16], RB + /* fallthrough */ + case BC_RET0: + |5: + | cmp PC_RB, RDL // More results expected? + | ja >6 + default: + break; + } + | movzx RAd, PC_RA + | neg RA + | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8 + | mov LFUNC:KBASE, [BASE-16] + | cleartp LFUNC:KBASE + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | ins_next + | + |6: // Fill up results with nil. + if (op == BC_RET) { + | mov aword [KBASE-16], LJ_TNIL // Note: relies on shifted base. + | add KBASE, 8 + } else { + | mov aword [BASE+RD*8-24], LJ_TNIL + } + | add RD, 1 + | jmp <5 + | + |7: // Non-standard return case. 
+ | lea RB, [PC-FRAME_VARG] + | test RBd, FRAME_TYPEP + | jnz ->vm_return + | // Return from vararg function: relocate BASE down and RA up. + | sub BASE, RB + if (op != BC_RET0) { + | add RA, RB + } + | jmp <1 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + |.define FOR_IDX, [RA] + |.define FOR_STOP, [RA+8] + |.define FOR_STEP, [RA+16] + |.define FOR_EXT, [RA+24] + + case BC_FORL: + |.if JIT + | hotloop RBd + |.endif + | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + | ins_AJ // RA = base, RD = target (after end of loop or start of loop) + | lea RA, [BASE+RA*8] + if (LJ_DUALNUM) { + | mov RB, FOR_IDX + | checkint RB, >9 + | mov TMPR, FOR_STOP + if (!vk) { + | checkint TMPR, ->vmeta_for + | mov ITYPE, FOR_STEP + | test ITYPEd, ITYPEd; js >5 + | sar ITYPE, 47; + | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for + } else { +#ifdef LUA_USE_ASSERT + | checkinttp FOR_STOP, ->assert_bad_for_arg_type + | checkinttp FOR_STEP, ->assert_bad_for_arg_type +#endif + | mov ITYPE, FOR_STEP + | test ITYPEd, ITYPEd; js >5 + | add RBd, ITYPEd; jo >1 + | setint RB + | mov FOR_IDX, RB + } + | cmp RBd, TMPRd + | mov FOR_EXT, RB + if (op == BC_FORI) { + | jle >7 + |1: + |6: + | branchPC RD + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RDd, PC_RD + | jle =>BC_JLOOP + |1: + |6: + } else if (op == BC_IFORL) { + | jg >7 + |6: + | branchPC RD + |1: + } else { + | jle =>BC_JLOOP + |1: + |6: + } + |7: + | ins_next + | + |5: // Invert check for negative step. + if (!vk) { + | sar ITYPE, 47; + | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for + } else { + | add RBd, ITYPEd; jo <1 + | setint RB + | mov FOR_IDX, RB + } + | cmp RBd, TMPRd + | mov FOR_EXT, RB + if (op == BC_FORI) { + | jge <7 + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RDd, PC_RD + | jge =>BC_JLOOP + } else if (op == BC_IFORL) { + | jl <7 + } else { + | jge =>BC_JLOOP + } + | jmp <6 + |9: // Fallback to FP variant. + if (!vk) { + | jae ->vmeta_for + } + } else if (!vk) { + | checknumtp FOR_IDX, ->vmeta_for + } + if (!vk) { + | checknumtp FOR_STOP, ->vmeta_for + } else { +#ifdef LUA_USE_ASSERT + | checknumtp FOR_STOP, ->assert_bad_for_arg_type + | checknumtp FOR_STEP, ->assert_bad_for_arg_type +#endif + } + | mov RB, FOR_STEP + if (!vk) { + | checknum RB, ->vmeta_for + } + | movsd xmm0, qword FOR_IDX + | movsd xmm1, qword FOR_STOP + if (vk) { + | addsd xmm0, qword FOR_STEP + | movsd qword FOR_IDX, xmm0 + | test RB, RB; js >3 + } else { + | jl >3 + } + | ucomisd xmm1, xmm0 + |1: + | movsd qword FOR_EXT, xmm0 + if (op == BC_FORI) { + |.if DUALNUM + | jnb <7 + |.else + | jnb >2 + | branchPC RD + |.endif + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RDd, PC_RD + | jnb =>BC_JLOOP + } else if (op == BC_IFORL) { + |.if DUALNUM + | jb <7 + |.else + | jb >2 + | branchPC RD + |.endif + } else { + | jnb =>BC_JLOOP + } + |.if DUALNUM + | jmp <6 + |.else + |2: + | ins_next + |.endif + | + |3: // Invert comparison if step is negative. + | ucomisd xmm0, xmm1 + | jmp <1 + break; + + case BC_ITERL: + |.if JIT + | hotloop RBd + |.endif + | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | ins_AJ // RA = base, RD = target + | lea RA, [BASE+RA*8] + | mov RB, [RA] + | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil. 
+ if (op == BC_JITERL) { + | mov [RA-8], RB + | jmp =>BC_JLOOP + } else { + | branchPC RD // Otherwise save control var + branch. + | mov [RA-8], RB + } + |1: + | ins_next + break; + + case BC_LOOP: + | ins_A // RA = base, RD = target (loop extent) + | // Note: RA/RD is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. + |.if JIT + | hotloop RBd + |.endif + | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op. + break; + + case BC_ILOOP: + | ins_A // RA = base, RD = target (loop extent) + | ins_next + break; + + case BC_JLOOP: + |.if JIT + | ins_AD // RA = base (ignored), RD = traceno + | mov RA, [DISPATCH+DISPATCH_J(trace)] + | mov TRACE:RD, [RA+RD*8] + | mov RD, TRACE:RD->mcode + | mov L:RB, SAVE_L + | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE + | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB + | // Save additional callee-save registers only used in compiled code. + |.if X64WIN + | mov CSAVE_4, r12 + | mov CSAVE_3, r13 + | mov CSAVE_2, r14 + | mov CSAVE_1, r15 + | mov RA, rsp + | sub rsp, 10*16+4*8 + | movdqa [RA-1*16], xmm6 + | movdqa [RA-2*16], xmm7 + | movdqa [RA-3*16], xmm8 + | movdqa [RA-4*16], xmm9 + | movdqa [RA-5*16], xmm10 + | movdqa [RA-6*16], xmm11 + | movdqa [RA-7*16], xmm12 + | movdqa [RA-8*16], xmm13 + | movdqa [RA-9*16], xmm14 + | movdqa [RA-10*16], xmm15 + |.else + | sub rsp, 16 + | mov [rsp+16], r12 + | mov [rsp+8], r13 + |.endif + | jmp RD + |.endif + break; + + case BC_JMP: + | ins_AJ // RA = unused, RD = target + | branchPC RD + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + /* + ** Reminder: A function may be called with func/args above L->maxstack, + ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, + ** too. This means all FUNC* ops (including fast functions) must check + ** for stack overflow _before_ adding more slots! + */ + + case BC_FUNCF: + |.if JIT + | hotcall RBd + |.endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 + | mov KBASE, [PC-4+PC2PROTO(k)] + | mov L:RB, SAVE_L + | lea RA, [BASE+RA*8] // Top of frame. + | cmp RA, L:RB->maxstack + | ja ->vm_growstack_f + | movzx RAd, byte [PC-4+PC2PROTO(numparams)] + | cmp NARGS:RDd, RAd // Check for missing parameters. + | jbe >3 + |2: + if (op == BC_JFUNCF) { + | movzx RDd, PC_RD + | jmp =>BC_JLOOP + } else { + | ins_next + } + | + |3: // Clear missing parameters. + | mov aword [BASE+NARGS:RD*8-8], LJ_TNIL + | add NARGS:RDd, 1 + | cmp NARGS:RDd, RAd + | jbe <3 + | jmp <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | int3 // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 + | lea RBd, [NARGS:RD*8+FRAME_VARG+8] + | lea RD, [BASE+NARGS:RD*8+8] + | mov LFUNC:KBASE, [BASE-16] + | mov [RD-8], RB // Store delta + FRAME_VARG. + | mov [RD-16], LFUNC:KBASE // Store copy of LFUNC. + | mov L:RB, SAVE_L + | lea RA, [RD+RA*8] + | cmp RA, L:RB->maxstack + | ja ->vm_growstack_v // Need to grow stack. + | mov RA, BASE + | mov BASE, RD + | movzx RBd, byte [PC-4+PC2PROTO(numparams)] + | test RBd, RBd + | jz >2 + | add RA, 8 + |1: // Copy fixarg slots up to new frame. 
+ | add RA, 8 + | cmp RA, BASE + | jnb >3 // Less args than parameters? + | mov KBASE, [RA-16] + | mov [RD], KBASE + | add RD, 8 + | mov aword [RA-16], LJ_TNIL // Clear old fixarg slot (help the GC). + | sub RBd, 1 + | jnz <1 + |2: + if (op == BC_JFUNCV) { + | movzx RDd, PC_RD + | jmp =>BC_JLOOP + } else { + | mov KBASE, [PC-4+PC2PROTO(k)] + | ins_next + } + | + |3: // Clear missing parameters. + | mov aword [RD], LJ_TNIL + | add RD, 8 + | sub RBd, 1 + | jnz <3 + | jmp <2 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1 + | mov CFUNC:RB, [BASE-16] + | cleartp CFUNC:RB + | mov KBASE, CFUNC:RB->f + | mov L:RB, SAVE_L + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB->base, BASE + | lea RA, [RD+8*LUA_MINSTACK] + | cmp RA, L:RB->maxstack + | mov L:RB->top, RD + if (op == BC_FUNCC) { + | mov CARG1, L:RB // Caveat: CARG1 may be RA. + } else { + | mov CARG2, KBASE + | mov CARG1, L:RB // Caveat: CARG1 may be RA. + } + | ja ->vm_growstack_c // Need to grow stack. + | set_vmstate C + if (op == BC_FUNCC) { + | call KBASE // (lua_State *L) + } else { + | // (lua_State *L, lua_CFunction f) + | call aword [DISPATCH+DISPATCH_GL(wrapf)] + } + | // nresults returned in eax (RD). + | mov BASE, L:RB->base + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | lea RA, [BASE+RD*8] + | neg RA + | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8 + | mov PC, [BASE-8] // Fetch PC of caller. + | jmp ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + dasm_growpc(Dst, BC__MAX); + build_subroutines(ctx); + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ + int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -8\n" + "\t.byte 0x10\n" + "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n" + "\t.byte 0x80+0x10\n\t.uleb128 0x1\n" + "\t.align 8\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.quad .Lbegin\n" + "\t.quad %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#if LJ_NO_UNWIND + "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */ + "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */ +#endif + "\t.align 8\n" + ".LEFDE0:\n\n", fcofs, CFRAME_SIZE); +#if LJ_HASFFI + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .Lframe0\n" + "\t.quad lj_vm_ffi_call\n" + "\t.quad %d\n" + "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.align 8\n" + ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); +#endif +#if !LJ_NO_UNWIND +#if (defined(__sun__) && defined(__svr4__)) + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n"); +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); +#endif + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -8\n" + "\t.byte 0x10\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n" + "\t.byte 0x80+0x10\n\t.uleb128 0x1\n" + "\t.align 8\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE2:\n" + "\t.long .LEFDE2-.LASFDE2\n" + ".LASFDE2:\n" + "\t.long .LASFDE2-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ + "\t.align 8\n" + ".LEFDE2:\n\n", fcofs, CFRAME_SIZE); +#if LJ_HASFFI + fprintf(ctx->fp, + ".Lframe2:\n" + "\t.long .LECIE2-.LSCIE2\n" + ".LSCIE2:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -8\n" + "\t.byte 0x10\n" + "\t.uleb128 1\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n" + "\t.byte 0x80+0x10\n\t.uleb128 0x1\n" + "\t.align 8\n" + ".LECIE2:\n\n"); + fprintf(ctx->fp, + ".LSFDE3:\n" + "\t.long .LEFDE3-.LASFDE3\n" + ".LASFDE3:\n" + "\t.long .LASFDE3-.Lframe2\n" + "\t.long lj_vm_ffi_call-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 
0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.align 8\n" + ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); +#endif +#endif + break; +#if !LJ_NO_UNWIND + /* Mental note: never let Apple design an assembler. + ** Or a linker. Or a plastic case. But I digress. + */ + case BUILD_machasm: { +#if LJ_HASFFI + int fcsize = 0; +#endif + int i; + fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); + fprintf(ctx->fp, + "EH_frame1:\n" + "\t.set L$set$x,LECIEX-LSCIEX\n" + "\t.long L$set$x\n" + "LSCIEX:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zPR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-8\n" + "\t.byte 0x10\n" + "\t.byte 6\n" /* augmentation length */ + "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ + "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n" + "\t.byte 0x80+0x10\n\t.byte 0x1\n" + "\t.align 3\n" + "LECIEX:\n\n"); + for (i = 0; i < ctx->nsym; i++) { + const char *name = ctx->sym[i].name; + int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; + if (size == 0) continue; +#if LJ_HASFFI + if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; } +#endif + fprintf(ctx->fp, + "%s.eh:\n" + "LSFDE%d:\n" + "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" + "\t.long L$set$%d\n" + "LASFDE%d:\n" + "\t.long LASFDE%d-EH_frame1\n" + "\t.long %s-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ + "\t.align 3\n" + "LEFDE%d:\n\n", + name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); + } +#if LJ_HASFFI + if (fcsize) { + fprintf(ctx->fp, + "EH_frame2:\n" + "\t.set L$set$y,LECIEY-LSCIEY\n" + "\t.long L$set$y\n" + "LSCIEY:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-8\n" + "\t.byte 0x10\n" + "\t.byte 1\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n" + "\t.byte 0x80+0x10\n\t.byte 0x1\n" + "\t.align 3\n" + "LECIEY:\n\n"); + fprintf(ctx->fp, + "_lj_vm_ffi_call.eh:\n" + "LSFDEY:\n" + "\t.set L$set$yy,LEFDEY-LASFDEY\n" + "\t.long L$set$yy\n" + "LASFDEY:\n" + "\t.long LASFDEY-EH_frame2\n" + "\t.long _lj_vm_ffi_call-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.align 3\n" + "LEFDEY:\n\n", fcsize); + } +#endif + fprintf(ctx->fp, ".subsections_via_symbols\n"); + } + break; +#endif + default: /* Difficult for other modes. */ + break; + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/vm_x86.dasc b/Build/source/libs/luajit/LuaJIT-src/src/vm_x86.dasc new file mode 100644 index 00000000000..f31e595b3f4 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/vm_x86.dasc @@ -0,0 +1,5710 @@ +|// Low-level VM code for x86 CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2015 Mike Pall. 
See Copyright Notice in luajit.h +| +|.if P64 +|.arch x64 +|.else +|.arch x86 +|.endif +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|//----------------------------------------------------------------------- +| +|.if P64 +|.define X64, 1 +|.if WIN +|.define X64WIN, 1 +|.endif +|.endif +| +|// Fixed register assignments for the interpreter. +|// This is very fragile and has many dependencies. Caveat emptor. +|.define BASE, edx // Not C callee-save, refetched anyway. +|.if not X64 +|.define KBASE, edi // Must be C callee-save. +|.define KBASEa, KBASE +|.define PC, esi // Must be C callee-save. +|.define PCa, PC +|.define DISPATCH, ebx // Must be C callee-save. +|.elif X64WIN +|.define KBASE, edi // Must be C callee-save. +|.define KBASEa, rdi +|.define PC, esi // Must be C callee-save. +|.define PCa, rsi +|.define DISPATCH, ebx // Must be C callee-save. +|.else +|.define KBASE, r15d // Must be C callee-save. +|.define KBASEa, r15 +|.define PC, ebx // Must be C callee-save. +|.define PCa, rbx +|.define DISPATCH, r14d // Must be C callee-save. +|.endif +| +|.define RA, ecx +|.define RAH, ch +|.define RAL, cl +|.define RB, ebp // Must be ebp (C callee-save). +|.define RC, eax // Must be eax. +|.define RCW, ax +|.define RCH, ah +|.define RCL, al +|.define OP, RB +|.define RD, RC +|.define RDW, RCW +|.define RDL, RCL +|.if X64 +|.define RAa, rcx +|.define RBa, rbp +|.define RCa, rax +|.define RDa, rax +|.else +|.define RAa, RA +|.define RBa, RB +|.define RCa, RC +|.define RDa, RD +|.endif +| +|.if not X64 +|.define FCARG1, ecx // x86 fastcall arguments. +|.define FCARG2, edx +|.elif X64WIN +|.define CARG1, rcx // x64/WIN64 C call arguments. +|.define CARG2, rdx +|.define CARG3, r8 +|.define CARG4, r9 +|.define CARG1d, ecx +|.define CARG2d, edx +|.define CARG3d, r8d +|.define CARG4d, r9d +|.define FCARG1, CARG1d // Upwards compatible to x86 fastcall. +|.define FCARG2, CARG2d +|.else +|.define CARG1, rdi // x64/POSIX C call arguments. +|.define CARG2, rsi +|.define CARG3, rdx +|.define CARG4, rcx +|.define CARG5, r8 +|.define CARG6, r9 +|.define CARG1d, edi +|.define CARG2d, esi +|.define CARG3d, edx +|.define CARG4d, ecx +|.define CARG5d, r8d +|.define CARG6d, r9d +|.define FCARG1, CARG1d // Simulate x86 fastcall. +|.define FCARG2, CARG2d +|.endif +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS, int +|.type TRACE, GCtrace +|.type SBUF, SBuf +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|//----------------------------------------------------------------------- +|.if not X64 // x86 stack layout. +| +|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--). +|.macro saveregs_ +| push edi; push esi; push ebx +| sub esp, CFRAME_SPACE +|.endmacro +|.macro saveregs +| push ebp; saveregs_ +|.endmacro +|.macro restoreregs +| add esp, CFRAME_SPACE +| pop ebx; pop esi; pop edi; pop ebp +|.endmacro +| +|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only. +|.define SAVE_NRES, aword [esp+aword*14] +|.define SAVE_CFRAME, aword [esp+aword*13] +|.define SAVE_L, aword [esp+aword*12] +|//----- 16 byte aligned, ^^^ arguments from C caller +|.define SAVE_RET, aword [esp+aword*11] //<-- esp entering interpreter. 
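[Editor's note, not part of the patch] The x86 stack-layout defines that begin above (and continue below) address fixed save slots as "aword [esp+aword*N]"; on x86 an aword is 4 bytes, so each define is plain offset arithmetic. A minimal C sketch of that arithmetic for the slots defined so far — the names mirror the .define lines, but nothing here is real LuaJIT code:

#include <stdio.h>

#define AWORD 4  /* address-word size on x86 */

int main(void)
{
  static const struct { const char *name; int slot; } slots[] = {
    { "SAVE_ERRF",   15 },
    { "SAVE_NRES",   14 },
    { "SAVE_CFRAME", 13 },
    { "SAVE_L",      12 },
    { "SAVE_RET",    11 },  /* <-- esp on entry to the interpreter */
  };
  unsigned i;
  for (i = 0; i < sizeof(slots)/sizeof(slots[0]); i++)
    printf("%-12s = aword [esp+%d]\n", slots[i].name, slots[i].slot * AWORD);
  return 0;
}

Running it prints the byte offsets (60, 56, 52, 48, 44) that the interpreter hard-codes for these slots, which is why the layout comments insist it must match lj_frame.h.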
+|.define SAVE_R4, aword [esp+aword*10] +|.define SAVE_R3, aword [esp+aword*9] +|.define SAVE_R2, aword [esp+aword*8] +|//----- 16 byte aligned +|.define SAVE_R1, aword [esp+aword*7] //<-- esp after register saves. +|.define SAVE_PC, aword [esp+aword*6] +|.define TMP2, aword [esp+aword*5] +|.define TMP1, aword [esp+aword*4] +|//----- 16 byte aligned +|.define ARG4, aword [esp+aword*3] +|.define ARG3, aword [esp+aword*2] +|.define ARG2, aword [esp+aword*1] +|.define ARG1, aword [esp] //<-- esp while in interpreter. +|//----- 16 byte aligned, ^^^ arguments for C callee +| +|// FPARGx overlaps ARGx and ARG(x+1) on x86. +|.define FPARG3, qword [esp+qword*1] +|.define FPARG1, qword [esp] +|// TMPQ overlaps TMP1/TMP2. ARG5/MULTRES overlap TMP1/TMP2 (and TMPQ). +|.define TMPQ, qword [esp+aword*4] +|.define TMP3, ARG4 +|.define ARG5, TMP1 +|.define TMPa, TMP1 +|.define MULTRES, TMP2 +| +|// Arguments for vm_call and vm_pcall. +|.define INARG_BASE, SAVE_CFRAME // Overwritten by SAVE_CFRAME! +| +|// Arguments for vm_cpcall. +|.define INARG_CP_CALL, SAVE_ERRF +|.define INARG_CP_UD, SAVE_NRES +|.define INARG_CP_FUNC, SAVE_CFRAME +| +|//----------------------------------------------------------------------- +|.elif X64WIN // x64/Windows stack layout +| +|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). +|.macro saveregs_ +| push rdi; push rsi; push rbx +| sub rsp, CFRAME_SPACE +|.endmacro +|.macro saveregs +| push rbp; saveregs_ +|.endmacro +|.macro restoreregs +| add rsp, CFRAME_SPACE +| pop rbx; pop rsi; pop rdi; pop rbp +|.endmacro +| +|.define SAVE_CFRAME, aword [rsp+aword*13] +|.define SAVE_PC, dword [rsp+dword*25] +|.define SAVE_L, dword [rsp+dword*24] +|.define SAVE_ERRF, dword [rsp+dword*23] +|.define SAVE_NRES, dword [rsp+dword*22] +|.define TMP2, dword [rsp+dword*21] +|.define TMP1, dword [rsp+dword*20] +|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter +|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. +|.define SAVE_R4, aword [rsp+aword*8] +|.define SAVE_R3, aword [rsp+aword*7] +|.define SAVE_R2, aword [rsp+aword*6] +|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. +|.define ARG5, aword [rsp+aword*4] +|.define CSAVE_4, aword [rsp+aword*3] +|.define CSAVE_3, aword [rsp+aword*2] +|.define CSAVE_2, aword [rsp+aword*1] +|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter. +|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee +| +|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ). +|.define TMPQ, qword [rsp+aword*10] +|.define MULTRES, TMP2 +|.define TMPa, ARG5 +|.define ARG5d, dword [rsp+aword*4] +|.define TMP3, ARG5d +| +|//----------------------------------------------------------------------- +|.else // x64/POSIX stack layout +| +|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). +|.macro saveregs_ +| push rbx; push r15; push r14 +|.if NO_UNWIND +| push r13; push r12 +|.endif +| sub rsp, CFRAME_SPACE +|.endmacro +|.macro saveregs +| push rbp; saveregs_ +|.endmacro +|.macro restoreregs +| add rsp, CFRAME_SPACE +|.if NO_UNWIND +| pop r12; pop r13 +|.endif +| pop r14; pop r15; pop rbx; pop rbp +|.endmacro +| +|//----- 16 byte aligned, +|.if NO_UNWIND +|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter. 
+|.define SAVE_R4, aword [rsp+aword*10] +|.define SAVE_R3, aword [rsp+aword*9] +|.define SAVE_R2, aword [rsp+aword*8] +|.define SAVE_R1, aword [rsp+aword*7] +|.define SAVE_RU2, aword [rsp+aword*6] +|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves. +|.else +|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. +|.define SAVE_R4, aword [rsp+aword*8] +|.define SAVE_R3, aword [rsp+aword*7] +|.define SAVE_R2, aword [rsp+aword*6] +|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. +|.endif +|.define SAVE_CFRAME, aword [rsp+aword*4] +|.define SAVE_PC, dword [rsp+dword*7] +|.define SAVE_L, dword [rsp+dword*6] +|.define SAVE_ERRF, dword [rsp+dword*5] +|.define SAVE_NRES, dword [rsp+dword*4] +|.define TMPa, aword [rsp+aword*1] +|.define TMP2, dword [rsp+dword*1] +|.define TMP1, dword [rsp] //<-- rsp while in interpreter. +|//----- 16 byte aligned +| +|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ). +|.define TMPQ, qword [rsp] +|.define TMP3, dword [rsp+aword*1] +|.define MULTRES, TMP2 +| +|.endif +| +|//----------------------------------------------------------------------- +| +|// Instruction headers. +|.macro ins_A; .endmacro +|.macro ins_AD; .endmacro +|.macro ins_AJ; .endmacro +|.macro ins_ABC; movzx RB, RCH; movzx RC, RCL; .endmacro +|.macro ins_AB_; movzx RB, RCH; .endmacro +|.macro ins_A_C; movzx RC, RCL; .endmacro +|.macro ins_AND; not RDa; .endmacro +| +|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster). +|.macro ins_NEXT +| mov RC, [PC] +| movzx RA, RCH +| movzx OP, RCL +| add PC, 4 +| shr RC, 16 +|.if X64 +| jmp aword [DISPATCH+OP*8] +|.else +| jmp aword [DISPATCH+OP*4] +|.endif +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| // Around 10%-30% slower on Core2, a lot more slower on P4. +| .macro ins_next +| jmp ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-4] = PC +| mov PC, LFUNC:RB->pc +| mov RA, [PC] +| movzx OP, RAL +| movzx RA, RAH +| add PC, 4 +|.if X64 +| jmp aword [DISPATCH+OP*8] +|.else +| jmp aword [DISPATCH+OP*4] +|.endif +|.endmacro +| +|.macro ins_call +| // BASE = new base, RB = LFUNC, RD = nargs+1 +| mov [BASE-4], PC +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to test operand types. +|.macro checktp, reg, tp; cmp dword [BASE+reg*8+4], tp; .endmacro +|.macro checknum, reg, target; checktp reg, LJ_TISNUM; jae target; .endmacro +|.macro checkint, reg, target; checktp reg, LJ_TISNUM; jne target; .endmacro +|.macro checkstr, reg, target; checktp reg, LJ_TSTR; jne target; .endmacro +|.macro checktab, reg, target; checktp reg, LJ_TTAB; jne target; .endmacro +| +|// These operands must be used with movzx. +|.define PC_OP, byte [PC-4] +|.define PC_RA, byte [PC-3] +|.define PC_RB, byte [PC-1] +|.define PC_RC, byte [PC-2] +|.define PC_RD, word [PC-2] +| +|.macro branchPC, reg +| lea PC, [PC+reg*4-BCBIAS_J*4] +|.endmacro +| +|// Assumes DISPATCH is relative to GL. 
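[Editor's note, not part of the patch] The ins_NEXT/ins_ABC macros above decode one 32-bit bytecode instruction: the opcode sits in the low byte, operand A in the second byte, operand D in the high 16 bits, and B/C are the high/low byte of D. A hedged C sketch of exactly that split (the BCIns typedef and field order are as implied by the macros; the function name is made up):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BCIns;

static void decode(BCIns ins)
{
  uint32_t op = ins & 0xff;         /* movzx OP, RCL          */
  uint32_t a  = (ins >> 8) & 0xff;  /* movzx RA, RCH          */
  uint32_t d  = ins >> 16;          /* shr RC, 16             */
  uint32_t c  = d & 0xff;           /* ins_ABC: movzx RC, RCL */
  uint32_t b  = (d >> 8) & 0xff;    /* ins_ABC: movzx RB, RCH */
  printf("op=%u A=%u B=%u C=%u D=%u\n", op, a, b, c, d);
}

int main(void)
{
  decode(0x12345678u);  /* prints op=120 A=86 B=18 C=52 D=4660 */
  return 0;
}

The replicated-dispatch comment above is why this decode sequence gets pasted at the end of every instruction instead of being a single shared tail.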
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|// Decrement hashed hotcount and trigger trace recorder if zero. +|.macro hotloop, reg +| mov reg, PC +| shr reg, 1 +| and reg, HOTCOUNT_PCMASK +| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP +| jb ->vm_hotloop +|.endmacro +| +|.macro hotcall, reg +| mov reg, PC +| shr reg, 1 +| and reg, HOTCOUNT_PCMASK +| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL +| jb ->vm_hotcall +|.endmacro +| +|// Set current VM state. +|.macro set_vmstate, st +| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st +|.endmacro +| +|// x87 compares. +|.macro fcomparepp // Compare and pop st0 >< st1. +| fucomip st1 +| fpop +|.endmacro +| +|.macro fpop1; fstp st1; .endmacro +| +|// Synthesize SSE FP constants. +|.macro sseconst_abs, reg, tmp // Synthesize abs mask. +|.if X64 +| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp +|.else +| pxor reg, reg; pcmpeqd reg, reg; psrlq reg, 1 +|.endif +|.endmacro +| +|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const. +|.if X64 +| mov64 tmp, U64x(val,00000000); movd reg, tmp +|.else +| mov tmp, 0x .. val; movd reg, tmp; pshufd reg, reg, 0x51 +|.endif +|.endmacro +| +|.macro sseconst_sign, reg, tmp // Synthesize sign mask. +| sseconst_hi reg, tmp, 80000000 +|.endmacro +|.macro sseconst_1, reg, tmp // Synthesize 1.0. +| sseconst_hi reg, tmp, 3ff00000 +|.endmacro +|.macro sseconst_m1, reg, tmp // Synthesize -1.0. +| sseconst_hi reg, tmp, bff00000 +|.endmacro +|.macro sseconst_2p52, reg, tmp // Synthesize 2^52. +| sseconst_hi reg, tmp, 43300000 +|.endmacro +|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51. +| sseconst_hi reg, tmp, 43380000 +|.endmacro +| +|// Move table write barrier back. Overwrites reg. +|.macro barrierback, tab, reg +| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab) +| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)] +| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab +| mov tab->gclist, reg +|.endmacro +| +|//----------------------------------------------------------------------- + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | test PC, FRAME_P + | jz ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | and PC, -8 + | sub BASE, PC // Restore caller base. + | lea RAa, [RA+PC-8] // Rebase RA and prepend one result. + | mov PC, [BASE-4] // Fetch PC of previous frame. + | // Prepending may overwrite the pcall frame, so do it at the end. + | mov dword [BASE+RA+4], LJ_TTRUE // Prepend true to results. + | + |->vm_returnc: + | add RD, 1 // RD = nresults+1 + | jz ->vm_unwind_yield + | mov MULTRES, RD + | test PC, FRAME_TYPE + | jz ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return + | xor PC, FRAME_C + | test PC, FRAME_TYPE + | jnz ->vm_returnp + | + | // Return to C. + | set_vmstate C + | and PC, -8 + | sub PC, BASE + | neg PC // Previous base = BASE - delta. 
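[Editor's note, not part of the patch] The hotloop/hotcall macros earlier in this hunk do hot-spot detection by hashing the bytecode PC into a small table of 16-bit counters and decrementing one per loop iteration or call; the "jb" fires when the subtraction borrows, i.e. when the counter underflows. A C sketch of that scheme, with the table size and decrement chosen as placeholder values — the real HOTCOUNT_* and GG_DISP2HOT constants live in lj_dispatch.h, not here:

#include <stdint.h>
#include <stdio.h>

#define HOTCOUNT_SIZE   64                                 /* assumed */
#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*(int)sizeof(uint16_t))
#define HOTCOUNT_LOOP   57                                 /* assumed */

static uint16_t hotcount[HOTCOUNT_SIZE];

/* Returns nonzero once the loop headed by pc has become hot. */
static int hotloop_hit(uintptr_t pc)
{
  uint16_t *c = (uint16_t *)((char *)hotcount + ((pc >> 1) & HOTCOUNT_PCMASK));
  uint16_t old = *c;
  *c = (uint16_t)(old - HOTCOUNT_LOOP);  /* sub word [DISPATCH+reg+GG_DISP2HOT], ... */
  return old < HOTCOUNT_LOOP;            /* borrow -> "jb ->vm_hotloop" */
}

int main(void)
{
  uintptr_t fake_pc = 0x1234;
  int i, iters = 0;
  for (i = 0; i < HOTCOUNT_SIZE; i++) hotcount[i] = 16 * HOTCOUNT_LOOP;
  do { iters++; } while (!hotloop_hit(fake_pc));
  printf("went hot on check %d\n", iters);  /* 17 with this budget */
  return 0;
}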
+ | + | sub RD, 1 + | jz >2 + |1: // Move results down. + |.if X64 + | mov RBa, [BASE+RA] + | mov [BASE-8], RBa + |.else + | mov RB, [BASE+RA] + | mov [BASE-8], RB + | mov RB, [BASE+RA+4] + | mov [BASE-4], RB + |.endif + | add BASE, 8 + | sub RD, 1 + | jnz <1 + |2: + | mov L:RB, SAVE_L + | mov L:RB->base, PC + |3: + | mov RD, MULTRES + | mov RA, SAVE_NRES // RA = wanted nresults+1 + |4: + | cmp RA, RD + | jne >6 // More/less results wanted? + |5: + | sub BASE, 8 + | mov L:RB->top, BASE + | + |->vm_leave_cp: + | mov RAa, SAVE_CFRAME // Restore previous C frame. + | mov L:RB->cframe, RAa + | xor eax, eax // Ok return status for vm_pcall. + | + |->vm_leave_unw: + | restoreregs + | ret + | + |6: + | jb >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | cmp BASE, L:RB->maxstack + | ja >8 + | mov dword [BASE-4], LJ_TNIL + | add BASE, 8 + | add RD, 1 + | jmp <4 + | + |7: // Less results wanted. + | test RA, RA + | jz <5 // But check for LUA_MULTRET+1. + | sub RA, RD // Negative result! + | lea BASE, [BASE+RA*8] // Correct top. + | jmp <5 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | mov L:RB->top, BASE // Save current top held in BASE (yes). + | mov MULTRES, RD // Need to fill only remainder with nil. + | mov FCARG2, RA + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->top // Need the (realloced) L->top in BASE. + | jmp <3 + | + |->vm_unwind_yield: + | mov al, LUA_YIELD + | jmp ->vm_unwind_c_eh + | + |->vm_unwind_c@8: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + |.if X64 + | mov eax, CARG2d // Error return status for vm_pcall. + | mov rsp, CARG1 + |.else + | mov eax, FCARG2 // Error return status for vm_pcall. + | mov esp, FCARG1 + |.endif + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | mov L:RB, SAVE_L + | mov GL:RB, L:RB->glref + | mov dword GL:RB->vmstate, ~LJ_VMST_C + | jmp ->vm_leave_unw + | + |->vm_unwind_rethrow: + |.if X64 and not X64WIN + | mov FCARG1, SAVE_L + | mov FCARG2, eax + | restoreregs + | jmp extern lj_err_throw@8 // (lua_State *L, int errcode) + |.endif + | + |->vm_unwind_ff@4: // Unwind C stack, return from ff pcall. + | // (void *cframe) + |.if X64 + | and CARG1, CFRAME_RAWMASK + | mov rsp, CARG1 + |.else + | and FCARG1, CFRAME_RAWMASK + | mov esp, FCARG1 + |.endif + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | mov L:RB, SAVE_L + | mov RAa, -8 // Results start at BASE+RA = BASE-8. + | mov RD, 1+1 // Really 1+2 results, incr. later. + | mov BASE, L:RB->base + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | mov PC, [BASE-4] // Fetch PC of previous frame. + | mov dword [BASE-4], LJ_TFALSE // Prepend false to error message. + | set_vmstate INTERP + | jmp ->vm_returnc // Increments RD/MULTRES and returns. + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | mov FCARG2, LUA_MINSTACK + | jmp >2 + | + |->vm_growstack_v: // Grow stack for vararg Lua function. 
+ | sub RD, 8 + | jmp >1 + | + |->vm_growstack_f: // Grow stack for fixarg Lua function. + | // BASE = new base, RD = nargs+1, RB = L, PC = first PC + | lea RD, [BASE+NARGS:RD*8-8] + |1: + | movzx RA, byte [PC-4+PC2PROTO(framesize)] + | add PC, 4 // Must point after first instruction. + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov SAVE_PC, PC + | mov FCARG2, RA + |2: + | // RB = L, L->base = new base, L->top = top + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->base + | mov RD, L:RB->top + | mov LFUNC:RB, [BASE-8] + | sub RD, BASE + | shr RD, 3 + | add NARGS:RD, 1 + | // BASE = new base, RB = LFUNC, RD = nargs+1 + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + |.if X64 + | mov L:RB, CARG1d // Caveat: CARG1d may be RA. + | mov SAVE_L, CARG1d + | mov RA, CARG2d + |.else + | mov L:RB, SAVE_L + | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! + |.endif + | mov PC, FRAME_CP + | xor RD, RD + | lea KBASEa, [esp+CFRAME_RESUME] + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | mov SAVE_PC, RD // Any value outside of bytecode is ok. + | mov SAVE_CFRAME, RDa + |.if X64 + | mov SAVE_NRES, RD + | mov SAVE_ERRF, RD + |.endif + | mov L:RB->cframe, KBASEa + | cmp byte L:RB->status, RDL + | je >2 // Initial resume (like a call). + | + | // Resume after yield (like a return). + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | mov byte L:RB->status, RDL + | mov BASE, L:RB->base + | mov RD, L:RB->top + | sub RD, RA + | shr RD, 3 + | add RD, 1 // RD = nresults+1 + | sub RA, BASE // RA = resultofs + | mov PC, [BASE-4] + | mov MULTRES, RD + | test PC, FRAME_TYPE + | jz ->BC_RET_Z + | jmp ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | mov PC, FRAME_CP + |.if X64 + | mov SAVE_ERRF, CARG4d + |.endif + | jmp >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | mov PC, FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + |.if X64 + | mov SAVE_NRES, CARG3d + | mov L:RB, CARG1d // Caveat: CARG1d may be RA. + | mov SAVE_L, CARG1d + | mov RA, CARG2d + |.else + | mov L:RB, SAVE_L + | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! + |.endif + | + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASEa + | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. + | add DISPATCH, GG_G2DISP + |.if X64 + | mov L:RB->cframe, rsp + |.else + | mov L:RB->cframe, esp + |.endif + | + |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype). + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | mov BASE, L:RB->base // BASE = old base (used in vmeta_call). 
+ | add PC, RA + | sub PC, BASE // PC = frame delta + frame type + | + | mov RD, L:RB->top + | sub RD, RA + | shr NARGS:RD, 3 + | add NARGS:RD, 1 // RD = nargs+1 + | + |->vm_call_dispatch: + | mov LFUNC:RB, [RA-8] + | cmp dword [RA-4], LJ_TFUNC + | jne ->vmeta_call // Ensure KBASE defined and != BASE. + | + |->vm_call_dispatch_f: + | mov BASE, RA + | ins_call + | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + |.if X64 + | mov L:RB, CARG1d // Caveat: CARG1d may be RA. + | mov SAVE_L, CARG1d + |.else + | mov L:RB, SAVE_L + | // Caveat: INARG_CP_* and SAVE_CFRAME/SAVE_NRES/SAVE_ERRF overlap! + | mov RC, INARG_CP_UD // Get args before they are overwritten. + | mov RA, INARG_CP_FUNC + | mov BASE, INARG_CP_CALL + |.endif + | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. + | + | mov KBASE, L:RB->stack // Compute -savestack(L, L->top). + | sub KBASE, L:RB->top + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | mov SAVE_ERRF, 0 // No error function. + | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame. + | add DISPATCH, GG_G2DISP + | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe). + | + |.if X64 + | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASEa + | mov L:RB->cframe, rsp + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | + | call CARG4 // (lua_State *L, lua_CFunction func, void *ud) + |.else + | mov ARG3, RC // Have to copy args downwards. + | mov ARG2, RA + | mov ARG1, L:RB + | + | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASE + | mov L:RB->cframe, esp + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | + | call BASE // (lua_State *L, lua_CFunction func, void *ud) + |.endif + | // TValue * (new base) or NULL returned in eax (RC). + | test RC, RC + | jz ->vm_leave_cp // No base? Just remove C frame. + | mov RA, RC + | mov PC, FRAME_CP + | jmp <2 // Else continue with the call. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES) + | add RA, BASE + | and PC, -8 + | mov RB, BASE + | sub BASE, PC // Restore caller BASE. + | mov dword [RA+RD*8-4], LJ_TNIL // Ensure one valid arg. + | mov RC, RA // ... in [RC] + | mov PC, [RB-12] // Restore PC from [cont|PC]. + |.if X64 + | movsxd RAa, dword [RB-16] // May be negative on WIN64 with debug. + |.if FFI + | cmp RA, 1 + | jbe >1 + |.endif + | lea KBASEa, qword [=>0] + | add RAa, KBASEa + |.else + | mov RA, dword [RB-16] + |.if FFI + | cmp RA, 1 + | jbe >1 + |.endif + |.endif + | mov LFUNC:KBASE, [BASE-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | // BASE = base, RC = result, RB = meta base + | jmp RAa // Jump to continuation. + | + |.if FFI + |1: + | je ->cont_ffi_callback // cont = 1: return from FFI callback. + | // cont = 0: Tail call from C function. 
+ | sub RB, BASE + | shr RB, 3 + | lea RD, [RB-1] + | jmp ->vm_call_tail + |.endif + | + |->cont_cat: // BASE = base, RC = result, RB = mbase + | movzx RA, PC_RB + | sub RB, 16 + | lea RA, [BASE+RA*8] + | sub RA, RB + | je ->cont_ra + | neg RA + | shr RA, 3 + |.if X64WIN + | mov CARG3d, RA + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | mov RCa, [RC] + | mov [RB], RCa + | mov CARG2d, RB + |.elif X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | mov CARG3d, RA + | mov RAa, [RC] + | mov [RB], RAa + | mov CARG2d, RB + |.else + | mov ARG3, RA + | mov RA, [RC+4] + | mov RC, [RC] + | mov [RB+4], RA + | mov [RB], RC + | mov ARG2, RB + |.endif + | jmp ->BC_CAT_Z + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets: + | mov TMP1, RC // RC = GCstr * + | mov TMP2, LJ_TSTR + | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2. + | cmp PC_OP, BC_GGET + | jne >1 + | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. + | mov [RA], TAB:RB // RB = GCtab * + | mov dword [RA+4], LJ_TTAB + | mov RB, RA + | jmp >2 + | + |->vmeta_tgetb: + | movzx RC, PC_RC + |.if DUALNUM + | mov TMP2, LJ_TISNUM + | mov TMP1, RC + |.else + | cvtsi2sd xmm0, RC + | movsd TMPQ, xmm0 + |.endif + | lea RCa, TMPQ // Store temp. TValue in TMPQ. + | jmp >1 + | + |->vmeta_tgetv: + | movzx RC, PC_RC // Reload TValue *k from RC. + | lea RC, [BASE+RC*8] + |1: + | movzx RB, PC_RB // Reload TValue *t from RB. + | lea RB, [BASE+RB*8] + |2: + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RB + | mov CARG3, RCa // May be 64 bit ptr to stack. + | mov L:RB, L:CARG1d + |.else + | mov ARG2, RB + | mov L:RB, SAVE_L + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // TValue * (finished) or NULL (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz >3 + |->cont_ra: // BASE = base, RC = result + | movzx RA, PC_RA + |.if X64 + | mov RBa, [RC] + | mov [BASE+RA*8], RBa + |.else + | mov RB, [RC+4] + | mov RC, [RC] + | mov [BASE+RA*8+4], RB + | mov [BASE+RA*8], RC + |.endif + | ins_next + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | mov RA, L:RB->top + | mov [RA-12], PC // [cont|PC] + | lea PC, [RA+FRAME_CONT] + | sub PC, BASE + | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here. + | mov NARGS:RD, 2+1 // 2 args for func(t, k). + | jmp ->vm_call_dispatch_f + | + |->vmeta_tgetr: + | mov FCARG1, TAB:RB + | mov RB, BASE // Save BASE. + | mov FCARG2, RC // Caveat: FCARG2 == BASE + | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key) + | // cTValue * or NULL returned in eax (RC). + | movzx RA, PC_RA + | mov BASE, RB // Restore BASE. + | test RC, RC + | jnz ->BC_TGETR_Z + | mov dword [BASE+RA*8+4], LJ_TNIL + | jmp ->BC_TGETR2_Z + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets: + | mov TMP1, RC // RC = GCstr * + | mov TMP2, LJ_TSTR + | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2. + | cmp PC_OP, BC_GSET + | jne >1 + | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. 
+ | mov [RA], TAB:RB // RB = GCtab * + | mov dword [RA+4], LJ_TTAB + | mov RB, RA + | jmp >2 + | + |->vmeta_tsetb: + | movzx RC, PC_RC + |.if DUALNUM + | mov TMP2, LJ_TISNUM + | mov TMP1, RC + |.else + | cvtsi2sd xmm0, RC + | movsd TMPQ, xmm0 + |.endif + | lea RCa, TMPQ // Store temp. TValue in TMPQ. + | jmp >1 + | + |->vmeta_tsetv: + | movzx RC, PC_RC // Reload TValue *k from RC. + | lea RC, [BASE+RC*8] + |1: + | movzx RB, PC_RB // Reload TValue *t from RB. + | lea RB, [BASE+RB*8] + |2: + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RB + | mov CARG3, RCa // May be 64 bit ptr to stack. + | mov L:RB, L:CARG1d + |.else + | mov ARG2, RB + | mov L:RB, SAVE_L + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // TValue * (finished) or NULL (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz >3 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | movzx RA, PC_RA + |.if X64 + | mov RBa, [BASE+RA*8] + | mov [RC], RBa + |.else + | mov RB, [BASE+RA*8+4] + | mov RA, [BASE+RA*8] + | mov [RC+4], RB + | mov [RC], RA + |.endif + |->cont_nop: // BASE = base, (RC = result) + | ins_next + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | mov RA, L:RB->top + | mov [RA-12], PC // [cont|PC] + | movzx RC, PC_RA + | // Copy value to third argument. + |.if X64 + | mov RBa, [BASE+RC*8] + | mov [RA+16], RBa + |.else + | mov RB, [BASE+RC*8+4] + | mov RC, [BASE+RC*8] + | mov [RA+20], RB + | mov [RA+16], RC + |.endif + | lea PC, [RA+FRAME_CONT] + | sub PC, BASE + | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here. + | mov NARGS:RD, 3+1 // 3 args for func(t, k, v). + | jmp ->vm_call_dispatch_f + | + |->vmeta_tsetr: + |.if X64WIN + | mov L:CARG1d, SAVE_L + | mov CARG3d, RC + | mov L:CARG1d->base, BASE + | xchg CARG2d, TAB:RB // Caveat: CARG2d == BASE. + |.elif X64 + | mov L:CARG1d, SAVE_L + | mov CARG2d, TAB:RB + | mov L:CARG1d->base, BASE + | mov RB, BASE // Save BASE. + | mov CARG3d, RC // Caveat: CARG3d == BASE. + |.else + | mov L:RA, SAVE_L + | mov ARG2, TAB:RB + | mov RB, BASE // Save BASE. + | mov ARG3, RC + | mov ARG1, L:RA + | mov L:RA->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key) + | // TValue * returned in eax (RC). + | movzx RA, PC_RA + | mov BASE, RB // Restore BASE. + | jmp ->BC_TSETR_Z + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d == BASE. + |.if X64WIN + | lea CARG3d, [BASE+RD*8] + | lea CARG2d, [BASE+RA*8] + |.else + | lea CARG2d, [BASE+RA*8] + | lea CARG3d, [BASE+RD*8] + |.endif + | mov CARG1d, L:RB // Caveat: CARG1d/CARG4d == RA. + | movzx CARG4d, PC_OP + |.else + | movzx RB, PC_OP + | lea RD, [BASE+RD*8] + | lea RA, [BASE+RA*8] + | mov ARG4, RB + | mov L:RB, SAVE_L + | mov ARG3, RD + | mov ARG2, RA + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // 0/1 or TValue * (metamethod) returned in eax (RC). 
+ |3: + | mov BASE, L:RB->base + | cmp RC, 1 + | ja ->vmeta_binop + |4: + | lea PC, [PC+4] + | jb >6 + |5: + | movzx RD, PC_RD + | branchPC RD + |6: + | ins_next + | + |->cont_condt: // BASE = base, RC = result + | add PC, 4 + | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is true. + | jb <5 + | jmp <6 + | + |->cont_condf: // BASE = base, RC = result + | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is false. + | jmp <4 + | + |->vmeta_equal: + | sub PC, 4 + |.if X64WIN + | mov CARG3d, RD + | mov CARG4d, RB + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d == BASE. + | mov CARG2d, RA + | mov CARG1d, L:RB // Caveat: CARG1d == RA. + |.elif X64 + | mov CARG2d, RA + | mov CARG4d, RB // Caveat: CARG4d == RA. + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG3d == BASE. + | mov CARG3d, RD + | mov CARG1d, L:RB + |.else + | mov ARG4, RB + | mov L:RB, SAVE_L + | mov ARG3, RD + | mov ARG2, RA + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // 0/1 or TValue * (metamethod) returned in eax (RC). + | jmp <3 + | + |->vmeta_equal_cd: + |.if FFI + | sub PC, 4 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG1, L:RB + | mov FCARG2, dword [PC-4] + | mov SAVE_PC, PC + | call extern lj_meta_equal_cd@8 // (lua_State *L, BCIns ins) + | // 0/1 or TValue * (metamethod) returned in eax (RC). + | jmp <3 + |.endif + | + |->vmeta_istype: + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RA + | movzx CARG3d, PC_RD + | mov L:CARG1d, L:RB + |.else + | movzx RD, PC_RD + | mov ARG2, RA + | mov L:RB, SAVE_L + | mov ARG3, RD + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp) + | mov BASE, L:RB->base + | jmp <6 + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vno: + |.if DUALNUM + | movzx RB, PC_RB + |.endif + |->vmeta_arith_vn: + | lea RC, [KBASE+RC*8] + | jmp >1 + | + |->vmeta_arith_nvo: + |.if DUALNUM + | movzx RC, PC_RC + |.endif + |->vmeta_arith_nv: + | lea RC, [KBASE+RC*8] + | lea RB, [BASE+RB*8] + | xchg RB, RC + | jmp >2 + | + |->vmeta_unm: + | lea RC, [BASE+RD*8] + | mov RB, RC + | jmp >2 + | + |->vmeta_arith_vvo: + |.if DUALNUM + | movzx RB, PC_RB + |.endif + |->vmeta_arith_vv: + | lea RC, [BASE+RC*8] + |1: + | lea RB, [BASE+RB*8] + |2: + | lea RA, [BASE+RA*8] + |.if X64WIN + | mov CARG3d, RB + | mov CARG4d, RC + | movzx RC, PC_OP + | mov ARG5d, RC + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d == BASE. + | mov CARG2d, RA + | mov CARG1d, L:RB // Caveat: CARG1d == RA. + |.elif X64 + | movzx CARG5d, PC_OP + | mov CARG2d, RA + | mov CARG4d, RC // Caveat: CARG4d == RA. + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG3d == BASE. + | mov CARG3d, RB + | mov L:RB, L:CARG1d + |.else + | mov ARG3, RB + | mov L:RB, SAVE_L + | mov ARG4, RC + | movzx RC, PC_OP + | mov ARG2, RA + | mov ARG5, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // NULL (finished) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz ->cont_nop + | + | // Call metamethod for binary op. 
+ |->vmeta_binop: + | // BASE = base, RC = new base, stack = cont/func/o1/o2 + | mov RA, RC + | sub RC, BASE + | mov [RA-12], PC // [cont|PC] + | lea PC, [RC+FRAME_CONT] + | mov NARGS:RD, 2+1 // 2 args for func(o1, o2). + | jmp ->vm_call_dispatch + | + |->vmeta_len: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | lea FCARG2, [BASE+RD*8] // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB + | mov SAVE_PC, PC + | call extern lj_meta_len@8 // (lua_State *L, TValue *o) + | // NULL (retry) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base +#if LJ_52 + | test RC, RC + | jne ->vmeta_binop // Binop call for compatibility. + | movzx RD, PC_RD + | mov TAB:FCARG1, [BASE+RD*8] + | jmp ->BC_LEN_Z +#else + | jmp ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call_ra: + | lea RA, [BASE+RA*8+8] + |->vmeta_call: // Resolve and call __call metamethod. + | // BASE = old base, RA = new base, RC = nargs+1, PC = return + | mov TMP2, RA // Save RA, RC for us. + | mov TMP1, NARGS:RD + | sub RA, 8 + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RA + | lea CARG3d, [RA+NARGS:RD*8] + | mov CARG1d, L:RB // Caveat: CARG1d may be RA. + |.else + | lea RC, [RA+NARGS:RD*8] + | mov L:RB, SAVE_L + | mov ARG2, RA + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE // This is the callers base! + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | mov BASE, L:RB->base + | mov RA, TMP2 + | mov NARGS:RD, TMP1 + | mov LFUNC:RB, [RA-8] + | add NARGS:RD, 1 + | // This is fragile. L->base must not move, KBASE must always be defined. + | cmp KBASE, BASE // Continue with CALLT if flag set. + | je ->BC_CALLT_Z + | mov BASE, RA + | ins_call // Otherwise call resolved metamethod. + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG2, RA // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA + | mov SAVE_PC, PC + | call extern lj_meta_for@8 // (lua_State *L, TValue *base) + | mov BASE, L:RB->base + | mov RC, [PC-4] + | movzx RA, RCH + | movzx OP, RCL + | shr RC, 16 + |.if X64 + | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI. + |.else + | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Retry FORI or JFORI. + |.endif + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | cmp NARGS:RD, 1+1; jb ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. 
name: + | cmp NARGS:RD, 2+1; jb ->fff_fallback + |.endmacro + | + |.macro .ffunc_nsse, name, op + | .ffunc_1 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | op xmm0, qword [BASE] + |.endmacro + | + |.macro .ffunc_nsse, name + | .ffunc_nsse name, movsd + |.endmacro + | + |.macro .ffunc_nnsse, name + | .ffunc_2 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback + | movsd xmm0, qword [BASE] + | movsd xmm1, qword [BASE+8] + |.endmacro + | + |.macro .ffunc_nnr, name + | .ffunc_2 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback + | fld qword [BASE+8] + | fld qword [BASE] + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses label 1. + |.macro ffgccheck + | mov RB, [DISPATCH+DISPATCH_GL(gc.total)] + | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)] + | jb >1 + | call ->fff_gcstep + |1: + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | mov RB, [BASE+4] + | cmp RB, LJ_TISTRUECOND; jae ->fff_fallback + | mov PC, [BASE-4] + | mov MULTRES, RD + | mov [BASE-4], RB + | mov RB, [BASE] + | mov [BASE-8], RB + | sub RD, 2 + | jz >2 + | mov RA, BASE + |1: + | add RA, 8 + |.if X64 + | mov RBa, [RA] + | mov [RA-8], RBa + |.else + | mov RB, [RA+4] + | mov [RA-4], RB + | mov RB, [RA] + | mov [RA-8], RB + |.endif + | sub RD, 1 + | jnz <1 + |2: + | mov RD, MULTRES + | jmp ->fff_res_ + | + |.ffunc_1 type + | mov RB, [BASE+4] + |.if X64 + | mov RA, RB + | sar RA, 15 + | cmp RA, -2 + | je >3 + |.endif + | mov RC, ~LJ_TNUMX + | not RB + | cmp RC, RB + | cmova RC, RB + |2: + | mov CFUNC:RB, [BASE-8] + | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))] + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TSTR + | mov [BASE-8], STR:RC + | jmp ->fff_res1 + |.if X64 + |3: + | mov RC, ~LJ_TLIGHTUD + | jmp <2 + |.endif + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | mov RB, [BASE+4] + | mov PC, [BASE-4] + | cmp RB, LJ_TTAB; jne >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | mov TAB:RB, [BASE] + | mov TAB:RB, TAB:RB->metatable + |2: + | test TAB:RB, TAB:RB + | mov dword [BASE-4], LJ_TNIL + | jz ->fff_res1 + | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable)] + | mov dword [BASE-4], LJ_TTAB // Store metatable as default result. + | mov [BASE-8], TAB:RB + | mov RA, TAB:RB->hmask + | and RA, STR:RC->hash + | imul RA, #NODE + | add NODE:RA, TAB:RB->node + |3: // Rearranged logic, because we expect _not_ to find the key. + | cmp dword NODE:RA->key.it, LJ_TSTR + | jne >4 + | cmp dword NODE:RA->key.gcr, STR:RC + | je >5 + |4: + | mov NODE:RA, NODE:RA->next + | test NODE:RA, NODE:RA + | jnz <3 + | jmp ->fff_res1 // Not found, keep default result. + |5: + | mov RB, [RA+4] + | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value. + | mov RC, [RA] + | mov [BASE-4], RB // Return value of mt.__metatable. + | mov [BASE-8], RC + | jmp ->fff_res1 + | + |6: + | cmp RB, LJ_TUDATA; je <1 + |.if X64 + | cmp RB, LJ_TNUMX; ja >8 + | cmp RB, LJ_TISNUM; jbe >7 + | mov RB, LJ_TLIGHTUD + | jmp >8 + |7: + |.else + | cmp RB, LJ_TISNUM; ja >8 + |.endif + | mov RB, LJ_TNUMX + |8: + | not RB + | mov TAB:RB, [DISPATCH+RB*4+DISPATCH_GL(gcroot[GCROOT_BASEMT])] + | jmp <2 + | + |.ffunc_2 setmetatable + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | // Fast path: no mt for table yet and not clearing the mt. 
+ | mov TAB:RB, [BASE] + | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TTAB; jne ->fff_fallback + | mov TAB:RC, [BASE+8] + | mov TAB:RB->metatable, TAB:RC + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TTAB // Return original table. + | mov [BASE-8], TAB:RB + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jz >1 + | // Possible write barrier. Table is black, but skip iswhite(mt) check. + | barrierback TAB:RB, RC + |1: + | jmp ->fff_res1 + | + |.ffunc_2 rawget + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + |.if X64WIN + | mov RB, BASE // Save BASE. + | lea CARG3d, [BASE+8] + | mov CARG2d, [BASE] // Caveat: CARG2d == BASE. + | mov CARG1d, SAVE_L + |.elif X64 + | mov RB, BASE // Save BASE. + | mov CARG2d, [BASE] + | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE. + | mov CARG1d, SAVE_L + |.else + | mov TAB:RD, [BASE] + | mov L:RB, SAVE_L + | mov ARG2, TAB:RD + | mov ARG1, L:RB + | mov RB, BASE // Save BASE. + | add BASE, 8 + | mov ARG3, BASE + |.endif + | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // cTValue * returned in eax (RD). + | mov BASE, RB // Restore BASE. + | // Copy table slot. + |.if X64 + | mov RBa, [RD] + | mov PC, [BASE-4] + | mov [BASE-8], RBa + |.else + | mov RB, [RD] + | mov RD, [RD+4] + | mov PC, [BASE-4] + | mov [BASE-8], RB + | mov [BASE-4], RD + |.endif + | jmp ->fff_res1 + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument. + | cmp dword [BASE+4], LJ_TISNUM + |.if DUALNUM + | jne >1 + | mov RB, dword [BASE]; jmp ->fff_resi + |1: + | ja ->fff_fallback + |.else + | jae ->fff_fallback + |.endif + | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0 + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | mov PC, [BASE-4] + | cmp dword [BASE+4], LJ_TSTR; jne >3 + | // A __tostring method in the string base metatable is ignored. + | mov STR:RD, [BASE] + |2: + | mov dword [BASE-4], LJ_TSTR + | mov [BASE-8], STR:RD + | jmp ->fff_res1 + |3: // Handle numbers inline, unless a number base metatable is present. + | cmp dword [BASE+4], LJ_TISNUM; ja ->fff_fallback + | cmp dword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0 + | jne ->fff_fallback + | ffgccheck // Caveat: uses label 1. + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Add frame since C call can throw. + | mov SAVE_PC, PC // Redundant (but a defined value). + |.if X64 and not X64WIN + | mov FCARG2, BASE // Otherwise: FCARG2 == BASE + |.endif + | mov L:FCARG1, L:RB + |.if DUALNUM + | call extern lj_strfmt_number@8 // (lua_State *L, cTValue *o) + |.else + | call extern lj_strfmt_num@8 // (lua_State *L, lua_Number *np) + |.endif + | // GCstr returned in eax (RD). + | mov BASE, L:RB->base + | jmp <2 + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc_1 next + | je >2 // Missing 2nd arg? + |1: + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Add frame since C call can throw. + | mov L:RB->top, BASE // Dummy frame length is ok. + | mov PC, [BASE-4] + |.if X64WIN + | lea CARG3d, [BASE+8] + | mov CARG2d, [BASE] // Caveat: CARG2d == BASE. + | mov CARG1d, L:RB + |.elif X64 + | mov CARG2d, [BASE] + | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE. 
+ | mov CARG1d, L:RB + |.else + | mov TAB:RD, [BASE] + | mov ARG2, TAB:RD + | mov ARG1, L:RB + | add BASE, 8 + | mov ARG3, BASE + |.endif + | mov SAVE_PC, PC // Needed for ITERN fallback. + | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Flag returned in eax (RD). + | mov BASE, L:RB->base + | test RD, RD; jz >3 // End of traversal? + | // Copy key and value to results. + |.if X64 + | mov RBa, [BASE+8] + | mov RDa, [BASE+16] + | mov [BASE-8], RBa + | mov [BASE], RDa + |.else + | mov RB, [BASE+8] + | mov RD, [BASE+12] + | mov [BASE-8], RB + | mov [BASE-4], RD + | mov RB, [BASE+16] + | mov RD, [BASE+20] + | mov [BASE], RB + | mov [BASE+4], RD + |.endif + |->fff_res2: + | mov RD, 1+2 + | jmp ->fff_res + |2: // Set missing 2nd arg to nil. + | mov dword [BASE+12], LJ_TNIL + | jmp <1 + |3: // End of traversal: return nil. + | mov dword [BASE-4], LJ_TNIL + | jmp ->fff_res1 + | + |.ffunc_1 pairs + | mov TAB:RB, [BASE] + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback +#if LJ_52 + | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback +#endif + | mov CFUNC:RB, [BASE-8] + | mov CFUNC:RD, CFUNC:RB->upvalue[0] + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TFUNC + | mov [BASE-8], CFUNC:RD + | mov dword [BASE+12], LJ_TNIL + | mov RD, 1+3 + | jmp ->fff_res + | + |.ffunc_2 ipairs_aux + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM + |.if DUALNUM + | jne ->fff_fallback + |.else + | jae ->fff_fallback + |.endif + | mov PC, [BASE-4] + |.if DUALNUM + | mov RD, dword [BASE+8] + | add RD, 1 + | mov dword [BASE-4], LJ_TISNUM + | mov dword [BASE-8], RD + |.else + | movsd xmm0, qword [BASE+8] + | sseconst_1 xmm1, RBa + | addsd xmm0, xmm1 + | cvttsd2si RD, xmm0 + | movsd qword [BASE-8], xmm0 + |.endif + | mov TAB:RB, [BASE] + | cmp RD, TAB:RB->asize; jae >2 // Not in array part? + | shl RD, 3 + | add RD, TAB:RB->array + |1: + | cmp dword [RD+4], LJ_TNIL; je ->fff_res0 + | // Copy array slot. + |.if X64 + | mov RBa, [RD] + | mov [BASE], RBa + |.else + | mov RB, [RD] + | mov RD, [RD+4] + | mov [BASE], RB + | mov [BASE+4], RD + |.endif + | jmp ->fff_res2 + |2: // Check for empty hash part first. Otherwise call C function. + | cmp dword TAB:RB->hmask, 0; je ->fff_res0 + | mov FCARG1, TAB:RB + | mov RB, BASE // Save BASE. + | mov FCARG2, RD // Caveat: FCARG2 == BASE + | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key) + | // cTValue * or NULL returned in eax (RD). + | mov BASE, RB + | test RD, RD + | jnz <1 + |->fff_res0: + | mov RD, 1+0 + | jmp ->fff_res + | + |.ffunc_1 ipairs + | mov TAB:RB, [BASE] + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback +#if LJ_52 + | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback +#endif + | mov CFUNC:RB, [BASE-8] + | mov CFUNC:RD, CFUNC:RB->upvalue[0] + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TFUNC + | mov [BASE-8], CFUNC:RD + |.if DUALNUM + | mov dword [BASE+12], LJ_TISNUM + | mov dword [BASE+8], 0 + |.else + | xorps xmm0, xmm0 + | movsd qword [BASE+8], xmm0 + |.endif + | mov RD, 1+3 + | jmp ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc_1 pcall + | lea RA, [BASE+8] + | sub NARGS:RD, 1 + | mov PC, 8+FRAME_PCALL + |1: + | movzx RB, byte [DISPATCH+DISPATCH_GL(hookmask)] + | shr RB, HOOK_ACTIVE_SHIFT + | and RB, 1 + | add PC, RB // Remember active hook before pcall. + | jmp ->vm_call_dispatch + | + |.ffunc_2 xpcall + | cmp dword [BASE+12], LJ_TFUNC; jne ->fff_fallback + | mov RB, [BASE+4] // Swap function and traceback. 
+ | mov [BASE+12], RB + | mov dword [BASE+4], LJ_TFUNC + | mov LFUNC:RB, [BASE] + | mov PC, [BASE+8] + | mov [BASE+8], LFUNC:RB + | mov [BASE], PC + | lea RA, [BASE+16] + | sub NARGS:RD, 2 + | mov PC, 16+FRAME_PCALL + | jmp <1 + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | mov L:RB, [BASE] + |.else + |.ffunc coroutine_wrap_aux + | mov CFUNC:RB, [BASE-8] + | mov L:RB, CFUNC:RB->upvalue[0].gcr + |.endif + | mov PC, [BASE-4] + | mov SAVE_PC, PC + |.if X64 + | mov TMP1, L:RB + |.else + | mov ARG1, L:RB + |.endif + |.if resume + | cmp dword [BASE+4], LJ_TTHREAD; jne ->fff_fallback + |.endif + | cmp aword L:RB->cframe, 0; jne ->fff_fallback + | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback + | mov RA, L:RB->top + | je >1 // Status != LUA_YIELD (i.e. 0)? + | cmp RA, L:RB->base // Check for presence of initial func. + | je ->fff_fallback + |1: + |.if resume + | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread). + |.else + | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1). + |.endif + | cmp PC, L:RB->maxstack; ja ->fff_fallback + | mov L:RB->top, PC + | + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + |.if resume + | add BASE, 8 // Keep resumed thread in stack for GC. + |.endif + | mov L:RB->top, BASE + |.if resume + | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move. + |.else + | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move. + |.endif + | sub RBa, PCa // Relative to PC. + | + | cmp PC, RA + | je >3 + |2: // Move args to coroutine. + |.if X64 + | mov RCa, [PC+RB] + | mov [PC-8], RCa + |.else + | mov RC, [PC+RB+4] + | mov [PC-4], RC + | mov RC, [PC+RB] + | mov [PC-8], RC + |.endif + | sub PC, 8 + | cmp PC, RA + | jne <2 + |3: + |.if X64 + | mov CARG2d, RA + | mov CARG1d, TMP1 + |.else + | mov ARG2, RA + | xor RA, RA + | mov ARG4, RA + | mov ARG3, RA + |.endif + | call ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | + | mov L:RB, SAVE_L + |.if X64 + | mov L:PC, TMP1 + |.else + | mov L:PC, ARG1 // The callee doesn't modify SAVE_L. + |.endif + | mov BASE, L:RB->base + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | + | cmp eax, LUA_YIELD + | ja >8 + |4: + | mov RA, L:PC->base + | mov KBASE, L:PC->top + | mov L:PC->top, RA // Clear coroutine stack. + | mov PC, KBASE + | sub PC, RA + | je >6 // No results? + | lea RD, [BASE+PC] + | shr PC, 3 + | cmp RD, L:RB->maxstack + | ja >9 // Need to grow stack? + | + | mov RB, BASE + | sub RBa, RAa + |5: // Move results from coroutine. + |.if X64 + | mov RDa, [RA] + | mov [RA+RB], RDa + |.else + | mov RD, [RA] + | mov [RA+RB], RD + | mov RD, [RA+4] + | mov [RA+RB+4], RD + |.endif + | add RA, 8 + | cmp RA, KBASE + | jne <5 + |6: + |.if resume + | lea RD, [PC+2] // nresults+1 = 1 + true + results. + | mov dword [BASE-4], LJ_TTRUE // Prepend true to results. + |.else + | lea RD, [PC+1] // nresults+1 = 1 + results. + |.endif + |7: + | mov PC, SAVE_PC + | mov MULTRES, RD + |.if resume + | mov RAa, -8 + |.else + | xor RA, RA + |.endif + | test PC, FRAME_TYPE + | jz ->BC_RET_Z + | jmp ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | mov dword [BASE-4], LJ_TFALSE // Prepend false to results. + | mov RA, L:PC->top + | sub RA, 8 + | mov L:PC->top, RA // Clear error from coroutine stack. + | // Copy error message. 
+ |.if X64 + | mov RDa, [RA] + | mov [BASE], RDa + |.else + | mov RD, [RA] + | mov [BASE], RD + | mov RD, [RA+4] + | mov [BASE+4], RD + |.endif + | mov RD, 1+2 // nresults+1 = 1 + false + error. + | jmp <7 + |.else + | mov FCARG2, L:PC + | mov FCARG1, L:RB + | call extern lj_ffh_coroutine_wrap_err@8 // (lua_State *L, lua_State *co) + | // Error function does not return. + |.endif + | + |9: // Handle stack expansion on return from yield. + |.if X64 + | mov L:RA, TMP1 + |.else + | mov L:RA, ARG1 // The callee doesn't modify SAVE_L. + |.endif + | mov L:RA->top, KBASE // Undo coroutine stack clearing. + | mov FCARG2, PC + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + |.if X64 + | mov L:PC, TMP1 + |.else + | mov L:PC, ARG1 + |.endif + | mov BASE, L:RB->base + | jmp <4 // Retry the stack move. + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | mov L:RB, SAVE_L + | test aword L:RB->cframe, CFRAME_RESUME + | jz ->fff_fallback + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB->top, RD + | xor RD, RD + | mov aword L:RB->cframe, RDa + | mov al, LUA_YIELD + | mov byte L:RB->status, al + | jmp ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + |.if not DUALNUM + |->fff_resi: // Dummy. + |.endif + | + |->fff_resn: + | mov PC, [BASE-4] + | fstp qword [BASE-8] + | jmp ->fff_res1 + | + | .ffunc_1 math_abs + |.if DUALNUM + | cmp dword [BASE+4], LJ_TISNUM; jne >2 + | mov RB, dword [BASE] + | cmp RB, 0; jns ->fff_resi + | neg RB; js >1 + |->fff_resbit: + |->fff_resi: + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TISNUM + | mov dword [BASE-8], RB + | jmp ->fff_res1 + |1: + | mov PC, [BASE-4] + | mov dword [BASE-4], 0x41e00000 // 2^31. + | mov dword [BASE-8], 0 + | jmp ->fff_res1 + |2: + | ja ->fff_fallback + |.else + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + |.endif + | movsd xmm0, qword [BASE] + | sseconst_abs xmm1, RDa + | andps xmm0, xmm1 + |->fff_resxmm0: + | mov PC, [BASE-4] + | movsd qword [BASE-8], xmm0 + | // fallthrough + | + |->fff_res1: + | mov RD, 1+1 + |->fff_res: + | mov MULTRES, RD + |->fff_res_: + | test PC, FRAME_TYPE + | jnz >7 + |5: + | cmp PC_RB, RDL // More results expected? + | ja >6 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | movzx RA, PC_RA + | not RAa // Note: ~RA = -(RA+1) + | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8 + | ins_next + | + |6: // Fill up results with nil. + | mov dword [BASE+RD*8-12], LJ_TNIL + | add RD, 1 + | jmp <5 + | + |7: // Non-standard return case. + | mov RAa, -8 // Results start at BASE+RA = BASE-8. + | jmp ->vm_return + | + |.if X64 + |.define fff_resfp, fff_resxmm0 + |.else + |.define fff_resfp, fff_resn + |.endif + | + |.macro math_round, func + | .ffunc math_ .. func + |.if DUALNUM + | cmp dword [BASE+4], LJ_TISNUM; jne >1 + | mov RB, dword [BASE]; jmp ->fff_resi + |1: + | ja ->fff_fallback + |.else + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + |.endif + | movsd xmm0, qword [BASE] + | call ->vm_ .. func .. 
_sse + |.if DUALNUM + | cvttsd2si RB, xmm0 + | cmp RB, 0x80000000 + | jne ->fff_resi + | cvtsi2sd xmm1, RB + | ucomisd xmm0, xmm1 + | jp ->fff_resxmm0 + | je ->fff_resi + |.endif + | jmp ->fff_resxmm0 + |.endmacro + | + | math_round floor + | math_round ceil + | + |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0 + | + |.ffunc math_log + | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument. + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | movsd xmm0, qword [BASE] + |.if not X64 + | movsd FPARG1, xmm0 + |.endif + | mov RB, BASE + | call extern log + | mov BASE, RB + | jmp ->fff_resfp + | + |.macro math_extern, func + | .ffunc_nsse math_ .. func + |.if not X64 + | movsd FPARG1, xmm0 + |.endif + | mov RB, BASE + | call extern func + | mov BASE, RB + | jmp ->fff_resfp + |.endmacro + | + |.macro math_extern2, func + | .ffunc_nnsse math_ .. func + |.if not X64 + | movsd FPARG1, xmm0 + | movsd FPARG3, xmm1 + |.endif + | mov RB, BASE + | call extern func + | mov BASE, RB + | jmp ->fff_resfp + |.endmacro + | + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn + | + |.ffunc_1 math_frexp + | mov RB, [BASE+4] + | cmp RB, LJ_TISNUM; jae ->fff_fallback + | mov PC, [BASE-4] + | mov RC, [BASE] + | mov [BASE-4], RB; mov [BASE-8], RC + | shl RB, 1; cmp RB, 0xffe00000; jae >3 + | or RC, RB; jz >3 + | mov RC, 1022 + | cmp RB, 0x00200000; jb >4 + |1: + | shr RB, 21; sub RB, RC // Extract and unbias exponent. + | cvtsi2sd xmm0, RB + | mov RB, [BASE-4] + | and RB, 0x800fffff // Mask off exponent. + | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0. + | mov [BASE-4], RB + |2: + | movsd qword [BASE], xmm0 + | mov RD, 1+2 + | jmp ->fff_res + |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0. + | xorps xmm0, xmm0; jmp <2 + |4: // Handle denormals by multiplying with 2^54 and adjusting the bias. + | movsd xmm0, qword [BASE] + | sseconst_hi xmm1, RBa, 43500000 // 2^54. + | mulsd xmm0, xmm1 + | movsd qword [BASE-8], xmm0 + | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1 + | + |.ffunc_nsse math_modf + | mov RB, [BASE+4] + | mov PC, [BASE-4] + | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf? + | movaps xmm4, xmm0 + | call ->vm_trunc_sse + | subsd xmm4, xmm0 + |1: + | movsd qword [BASE-8], xmm0 + | movsd qword [BASE], xmm4 + | mov RC, [BASE-4]; mov RB, [BASE+4] + | xor RC, RB; js >3 // Need to adjust sign? + |2: + | mov RD, 1+2 + | jmp ->fff_res + |3: + | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction. + | jmp <2 + |4: + | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0. + | + |.macro math_minmax, name, cmovop, sseop + | .ffunc name + | mov RA, 2 + | cmp dword [BASE+4], LJ_TISNUM + |.if DUALNUM + | jne >4 + | mov RB, dword [BASE] + |1: // Handle integers. + | cmp RA, RD; jae ->fff_resi + | cmp dword [BASE+RA*8-4], LJ_TISNUM; jne >3 + | cmp RB, dword [BASE+RA*8-8] + | cmovop RB, dword [BASE+RA*8-8] + | add RA, 1 + | jmp <1 + |3: + | ja ->fff_fallback + | // Convert intermediate result to number and continue below. + | cvtsi2sd xmm0, RB + | jmp >6 + |4: + | ja ->fff_fallback + |.else + | jae ->fff_fallback + |.endif + | + | movsd xmm0, qword [BASE] + |5: // Handle numbers or integers. 
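+ | // Fold each remaining argument into xmm0 with the macro's sseop (minsd/maxsd),
+ | // converting integer operands to doubles on the fly.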
+ | cmp RA, RD; jae ->fff_resxmm0 + | cmp dword [BASE+RA*8-4], LJ_TISNUM + |.if DUALNUM + | jb >6 + | ja ->fff_fallback + | cvtsi2sd xmm1, dword [BASE+RA*8-8] + | jmp >7 + |.else + | jae ->fff_fallback + |.endif + |6: + | movsd xmm1, qword [BASE+RA*8-8] + |7: + | sseop xmm0, xmm1 + | add RA, 1 + | jmp <5 + |.endmacro + | + | math_minmax math_min, cmovg, minsd + | math_minmax math_max, cmovl, maxsd + | + |//-- String library ----------------------------------------------------- + | + |.ffunc string_byte // Only handle the 1-arg case here. + | cmp NARGS:RD, 1+1; jne ->fff_fallback + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | mov STR:RB, [BASE] + | mov PC, [BASE-4] + | cmp dword STR:RB->len, 1 + | jb ->fff_res0 // Return no results for empty string. + | movzx RB, byte STR:RB[1] + |.if DUALNUM + | jmp ->fff_resi + |.else + | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0 + |.endif + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | cmp NARGS:RD, 1+1; jne ->fff_fallback // *Exactly* 1 arg. + | cmp dword [BASE+4], LJ_TISNUM + |.if DUALNUM + | jne ->fff_fallback + | mov RB, dword [BASE] + | cmp RB, 255; ja ->fff_fallback + | mov TMP2, RB + |.else + | jae ->fff_fallback + | cvttsd2si RB, qword [BASE] + | cmp RB, 255; ja ->fff_fallback + | mov TMP2, RB + |.endif + |.if X64 + | mov TMP3, 1 + |.else + | mov ARG3, 1 + |.endif + | lea RDa, TMP2 // Points to stack. Little-endian. + |->fff_newstr: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + |.if X64 + | mov CARG3d, TMP3 // Zero-extended to size_t. + | mov CARG2, RDa // May be 64 bit ptr to stack. + | mov CARG1d, L:RB + |.else + | mov ARG2, RD + | mov ARG1, L:RB + |.endif + | mov SAVE_PC, PC + | call extern lj_str_new // (lua_State *L, char *str, size_t l) + |->fff_resstr: + | // GCstr * returned in eax (RD). + | mov BASE, L:RB->base + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TSTR + | mov [BASE-8], STR:RD + | jmp ->fff_res1 + | + |.ffunc string_sub + | ffgccheck + | mov TMP2, -1 + | cmp NARGS:RD, 1+2; jb ->fff_fallback + | jna >1 + | cmp dword [BASE+20], LJ_TISNUM + |.if DUALNUM + | jne ->fff_fallback + | mov RB, dword [BASE+16] + | mov TMP2, RB + |.else + | jae ->fff_fallback + | cvttsd2si RB, qword [BASE+16] + | mov TMP2, RB + |.endif + |1: + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM + |.if DUALNUM + | jne ->fff_fallback + |.else + | jae ->fff_fallback + |.endif + | mov STR:RB, [BASE] + | mov TMP3, STR:RB + | mov RB, STR:RB->len + |.if DUALNUM + | mov RA, dword [BASE+8] + |.else + | cvttsd2si RA, qword [BASE+8] + |.endif + | mov RC, TMP2 + | cmp RB, RC // len < end? (unsigned compare) + | jb >5 + |2: + | test RA, RA // start <= 0? + | jle >7 + |3: + | mov STR:RB, TMP3 + | sub RC, RA // start > end? + | jl ->fff_emptystr + | lea RB, [STR:RB+RA+#STR-1] + | add RC, 1 + |4: + |.if X64 + | mov TMP3, RC + |.else + | mov ARG3, RC + |.endif + | mov RD, RB + | jmp ->fff_newstr + | + |5: // Negative end or overflow. + | jl >6 + | lea RC, [RC+RB+1] // end = end+(len+1) + | jmp <2 + |6: // Overflow. + | mov RC, RB // end = len + | jmp <2 + | + |7: // Negative start or underflow. + | je >8 + | add RA, RB // start = start+(len+1) + | add RA, 1 + | jg <3 // start > 0? + |8: // Underflow. + | mov RA, 1 // start = 1 + | jmp <3 + | + |->fff_emptystr: // Range underflow. + | xor RC, RC // Zero length. Any ptr in RB is ok. + | jmp <4 + | + |.macro ffstring_op, name + | .ffunc_1 string_ .. 
name + | ffgccheck + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | mov L:RB, SAVE_L + | lea SBUF:FCARG1, [DISPATCH+DISPATCH_GL(tmpbuf)] + | mov L:RB->base, BASE + | mov STR:FCARG2, [BASE] // Caveat: FCARG2 == BASE + | mov RC, SBUF:FCARG1->b + | mov SBUF:FCARG1->L, L:RB + | mov SBUF:FCARG1->p, RC + | mov SAVE_PC, PC + | call extern lj_buf_putstr_ .. name .. @8 + | mov FCARG1, eax + | call extern lj_buf_tostr@4 + | jmp ->fff_resstr + |.endmacro + | + |ffstring_op reverse + |ffstring_op lower + |ffstring_op upper + | + |//-- Bit library -------------------------------------------------------- + | + |.macro .ffunc_bit, name, kind, fdef + | fdef name + |.if kind == 2 + | sseconst_tobit xmm1, RBa + |.endif + | cmp dword [BASE+4], LJ_TISNUM + |.if DUALNUM + | jne >1 + | mov RB, dword [BASE] + |.if kind > 0 + | jmp >2 + |.else + | jmp ->fff_resbit + |.endif + |1: + | ja ->fff_fallback + |.else + | jae ->fff_fallback + |.endif + | movsd xmm0, qword [BASE] + |.if kind < 2 + | sseconst_tobit xmm1, RBa + |.endif + | addsd xmm0, xmm1 + | movd RB, xmm0 + |2: + |.endmacro + | + |.macro .ffunc_bit, name, kind + | .ffunc_bit name, kind, .ffunc_1 + |.endmacro + | + |.ffunc_bit bit_tobit, 0 + | jmp ->fff_resbit + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name, 2 + | mov TMP2, NARGS:RD // Save for fallback. + | lea RD, [BASE+NARGS:RD*8-16] + |1: + | cmp RD, BASE + | jbe ->fff_resbit + | cmp dword [RD+4], LJ_TISNUM + |.if DUALNUM + | jne >2 + | ins RB, dword [RD] + | sub RD, 8 + | jmp <1 + |2: + | ja ->fff_fallback_bit_op + |.else + | jae ->fff_fallback_bit_op + |.endif + | movsd xmm0, qword [RD] + | addsd xmm0, xmm1 + | movd RA, xmm0 + | ins RB, RA + | sub RD, 8 + | jmp <1 + |.endmacro + | + |.ffunc_bit_op bit_band, and + |.ffunc_bit_op bit_bor, or + |.ffunc_bit_op bit_bxor, xor + | + |.ffunc_bit bit_bswap, 1 + | bswap RB + | jmp ->fff_resbit + | + |.ffunc_bit bit_bnot, 1 + | not RB + |.if DUALNUM + | jmp ->fff_resbit + |.else + |->fff_resbit: + | cvtsi2sd xmm0, RB + | jmp ->fff_resxmm0 + |.endif + | + |->fff_fallback_bit_op: + | mov NARGS:RD, TMP2 // Restore for fallback + | jmp ->fff_fallback + | + |.macro .ffunc_bit_sh, name, ins + |.if DUALNUM + | .ffunc_bit name, 1, .ffunc_2 + | // Note: no inline conversion from number for 2nd argument! + | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback + | mov RA, dword [BASE+8] + |.else + | .ffunc_nnsse name + | sseconst_tobit xmm2, RBa + | addsd xmm0, xmm2 + | addsd xmm1, xmm2 + | movd RB, xmm0 + | movd RA, xmm1 + |.endif + | ins RB, cl // Assumes RA is ecx. + | jmp ->fff_resbit + |.endmacro + | + |.ffunc_bit_sh bit_lshift, shl + |.ffunc_bit_sh bit_rshift, shr + |.ffunc_bit_sh bit_arshift, sar + |.ffunc_bit_sh bit_rol, rol + |.ffunc_bit_sh bit_ror, ror + | + |//----------------------------------------------------------------------- + | + |->fff_fallback_2: + | mov NARGS:RD, 1+2 // Other args are ignored, anyway. + | jmp ->fff_fallback + |->fff_fallback_1: + | mov NARGS:RD, 1+1 // Other args are ignored, anyway. + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RD = nargs+1 + | mov L:RB, SAVE_L + | mov PC, [BASE-4] // Fallback may overwrite PC. + | mov SAVE_PC, PC // Redundant (but a defined value). + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler. + | mov L:RB->top, RD + | mov CFUNC:RD, [BASE-8] + | cmp RA, L:RB->maxstack + | ja >5 // Need to grow stack. 
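+ | // Call the fast function's C fallback handler; BASE is reloaded from L->base
+ | // afterwards, since the handler may modify the Lua stack.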
+ |.if X64 + | mov CARG1d, L:RB + |.else + | mov ARG1, L:RB + |.endif + | call aword CFUNC:RD->f // (lua_State *L) + | mov BASE, L:RB->base + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | test RD, RD; jg ->fff_res // Returned nresults+1? + |1: + | mov RA, L:RB->top + | sub RA, BASE + | shr RA, 3 + | test RD, RD + | lea NARGS:RD, [RA+1] + | mov LFUNC:RB, [BASE-8] + | jne ->vm_call_tail // Returned -1? + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | mov RA, BASE + | test PC, FRAME_TYPE + | jnz >3 + | movzx RB, PC_RA + | not RBa // Note: ~RB = -(RB+1) + | lea BASE, [BASE+RB*8] // base = base - (RB+1)*8 + | jmp ->vm_call_dispatch // Resolve again for tailcall. + |3: + | mov RB, PC + | and RB, -8 + | sub BASE, RB + | jmp ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | mov FCARG2, LUA_MINSTACK + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->base + | xor RD, RD // Simulate a return 0. + | jmp <1 // Dumb retry (goes through ff first). + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RD = nargs+1 + | pop RBa // Must keep stack at same level. + | mov TMPa, RBa // Save return address + | mov L:RB, SAVE_L + | mov SAVE_PC, PC // Redundant (but a defined value). + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | mov FCARG1, L:RB + | mov L:RB->top, RD + | call extern lj_gc_step@4 // (lua_State *L) + | mov BASE, L:RB->base + | mov RD, L:RB->top + | sub RD, BASE + | shr RD, 3 + | add NARGS:RD, 1 + | mov RBa, TMPa + | push RBa // Restore return address. + | ret + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. + |.if JIT + | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_VMEVENT // No recording while in vmevent. + | jnz >5 + | // Decrement the hookcount for consistency, but always do the call. + | test RDL, HOOK_ACTIVE + | jnz >1 + | test RDL, LUA_MASKLINE|LUA_MASKCOUNT + | jz >1 + | dec dword [DISPATCH+DISPATCH_GL(hookcount)] + | jmp >1 + |.endif + | + |->vm_rethook: // Dispatch target for return hooks. + | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_ACTIVE // Hook already active? + | jnz >5 + | jmp >1 + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_ACTIVE // Hook already active? + | jnz >5 + | + | test RDL, LUA_MASKLINE|LUA_MASKCOUNT + | jz >5 + | dec dword [DISPATCH+DISPATCH_GL(hookcount)] + | jz >1 + | test RDL, LUA_MASKLINE + | jz >5 + |1: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG2, PC // Caveat: FCARG2 == BASE + | mov FCARG1, L:RB + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | call extern lj_dispatch_ins@8 // (lua_State *L, const BCIns *pc) + |3: + | mov BASE, L:RB->base + |4: + | movzx RA, PC_RA + |5: + | movzx OP, PC_OP + | movzx RD, PC_RD + |.if X64 + | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins. + |.else + | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Re-dispatch to static ins. + |.endif + | + |->cont_hook: // Continue from hook yield. 
+ | add PC, 4 + | mov RA, [RB-24] + | mov MULTRES, RA // Restore MULTRES for *M ins. + | jmp <4 + | + |->vm_hotloop: // Hot loop counter underflow. + |.if JIT + | mov LFUNC:RB, [BASE-8] // Same as curr_topL(L). + | mov RB, LFUNC:RB->pc + | movzx RD, byte [RB+PC2PROTO(framesize)] + | lea RD, [BASE+RD*8] + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov FCARG2, PC + | lea FCARG1, [DISPATCH+GG_DISP2J] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa + | mov SAVE_PC, PC + | call extern lj_trace_hot@8 // (jit_State *J, const BCIns *pc) + | jmp <3 + |.endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mov SAVE_PC, PC + |.if JIT + | jmp >1 + |.endif + | + |->vm_hotcall: // Hot call counter underflow. + |.if JIT + | mov SAVE_PC, PC + | or PC, 1 // Marker for hot call. + |1: + |.endif + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov FCARG2, PC + | mov FCARG1, L:RB + | call extern lj_dispatch_call@8 // (lua_State *L, const BCIns *pc) + | // ASMFunction returned in eax/rax (RDa). + | mov SAVE_PC, 0 // Invalidate for subsequent line hook. + |.if JIT + | and PC, -2 + |.endif + | mov BASE, L:RB->base + | mov RAa, RDa + | mov RD, L:RB->top + | sub RD, BASE + | mov RBa, RAa + | movzx RA, PC_RA + | shr RD, 3 + | add NARGS:RD, 1 + | jmp RBa + | + |->cont_stitch: // Trace stitching. + |.if JIT + | // BASE = base, RC = result, RB = mbase + | mov RA, [RB-24] // Save previous trace number. + | mov TMP1, RA + | mov TMP3, DISPATCH // Need one more register. + | mov DISPATCH, MULTRES + | movzx RA, PC_RA + | lea RA, [BASE+RA*8] // Call base. + | sub DISPATCH, 1 + | jz >2 + |1: // Move results down. + |.if X64 + | mov RBa, [RC] + | mov [RA], RBa + |.else + | mov RB, [RC] + | mov [RA], RB + | mov RB, [RC+4] + | mov [RA+4], RB + |.endif + | add RC, 8 + | add RA, 8 + | sub DISPATCH, 1 + | jnz <1 + |2: + | movzx RC, PC_RA + | movzx RB, PC_RB + | add RC, RB + | lea RC, [BASE+RC*8-8] + |3: + | cmp RC, RA + | ja >9 // More results wanted? + | + | mov DISPATCH, TMP3 + | mov RB, TMP1 // Get previous trace number. + | mov RA, [DISPATCH+DISPATCH_J(trace)] + | mov TRACE:RD, [RA+RB*4] + | test TRACE:RD, TRACE:RD + | jz ->cont_nop + | movzx RD, word TRACE:RD->link + | cmp RD, RB + | je ->cont_nop // Blacklisted. + | test RD, RD + | jne =>BC_JLOOP // Jump to stitched trace. + | + | // Stitch a new trace to the previous trace. + | mov [DISPATCH+DISPATCH_J(exitno)], RB + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG2, PC + | lea FCARG1, [DISPATCH+GG_DISP2J] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa + | call extern lj_dispatch_stitch@8 // (jit_State *J, const BCIns *pc) + | mov BASE, L:RB->base + | jmp ->cont_nop + | + |9: // Fill up results with nil. + | mov dword [RA+4], LJ_TNIL + | add RA, 8 + | jmp <3 + |.endif + | + |->vm_profhook: // Dispatch target for profiler hook. +#if LJ_HASPROFILE + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG2, PC // Caveat: FCARG2 == BASE + | mov FCARG1, L:RB + | call extern lj_dispatch_profile@8 // (lua_State *L, const BCIns *pc) + | mov BASE, L:RB->base + | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction. + | sub PC, 4 + | jmp ->cont_nop +#endif + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Called from an exit stub with the exit number on the stack. 
+ |// The 16 bit exit number is stored with two (sign-extended) push imm8. + |->vm_exit_handler: + |.if JIT + |.if X64 + | push r13; push r12 + | push r11; push r10; push r9; push r8 + | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp + | push rbx; push rdx; push rcx; push rax + | movzx RC, byte [rbp-8] // Reconstruct exit number. + | mov RCH, byte [rbp-16] + | mov [rbp-8], r15; mov [rbp-16], r14 + |.else + | push ebp; lea ebp, [esp+12]; push ebp + | push ebx; push edx; push ecx; push eax + | movzx RC, byte [ebp-4] // Reconstruct exit number. + | mov RCH, byte [ebp-8] + | mov [ebp-4], edi; mov [ebp-8], esi + |.endif + | // Caveat: DISPATCH is ebx. + | mov DISPATCH, [ebp] + | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number. + | set_vmstate EXIT + | mov [DISPATCH+DISPATCH_J(exitno)], RC + | mov [DISPATCH+DISPATCH_J(parent)], RA + |.if X64 + |.if X64WIN + | sub rsp, 16*8+4*8 // Room for SSE regs + save area. + |.else + | sub rsp, 16*8 // Room for SSE regs. + |.endif + | add rbp, -128 + | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14 + | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12 + | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10 + | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8 + | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6 + | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4 + | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2 + | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0 + |.else + | sub esp, 8*8+16 // Room for SSE regs + args. + | movsd qword [ebp-40], xmm7; movsd qword [ebp-48], xmm6 + | movsd qword [ebp-56], xmm5; movsd qword [ebp-64], xmm4 + | movsd qword [ebp-72], xmm3; movsd qword [ebp-80], xmm2 + | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0 + |.endif + | // Caveat: RB is ebp. + | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)] + | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa + | mov L:RB->base, BASE + |.if X64WIN + | lea CARG2, [rsp+4*8] + |.elif X64 + | mov CARG2, rsp + |.else + | lea FCARG2, [esp+16] + |.endif + | lea FCARG1, [DISPATCH+GG_DISP2J] + | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 + | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex) + | // MULTRES or negated error code returned in eax (RD). + | mov RAa, L:RB->cframe + | and RAa, CFRAME_RAWMASK + |.if X64WIN + | // Reposition stack later. + |.elif X64 + | mov rsp, RAa // Reposition stack to C frame. + |.else + | mov esp, RAa // Reposition stack to C frame. + |.endif + | mov [RAa+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield). + | mov BASE, L:RB->base + | mov PC, [RAa+CFRAME_OFS_PC] // Get SAVE_PC. + |.if X64 + | jmp >1 + |.endif + |.endif + |->vm_exit_interp: + | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set. + |.if JIT + |.if X64 + | // Restore additional callee-save registers only used in compiled code. + |.if X64WIN + | lea RAa, [rsp+9*16+4*8] + |1: + | movdqa xmm15, [RAa-9*16] + | movdqa xmm14, [RAa-8*16] + | movdqa xmm13, [RAa-7*16] + | movdqa xmm12, [RAa-6*16] + | movdqa xmm11, [RAa-5*16] + | movdqa xmm10, [RAa-4*16] + | movdqa xmm9, [RAa-3*16] + | movdqa xmm8, [RAa-2*16] + | movdqa xmm7, [RAa-1*16] + | mov rsp, RAa // Reposition stack to C frame. + | movdqa xmm6, [RAa] + | mov r15, CSAVE_3 + | mov r14, CSAVE_4 + |.else + | add rsp, 16 // Reposition stack to C frame. + |1: + |.endif + | mov r13, TMPa + | mov r12, TMPQ + |.endif + | test RD, RD; js >9 // Check for error from exit. 
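+ | // Non-negative RD holds MULTRES from the exiting trace: reload KBASE from the
+ | // current prototype and re-enter the interpreter.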
+ | mov L:RB, SAVE_L + | mov MULTRES, RD + | mov LFUNC:KBASE, [BASE-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | mov L:RB->base, BASE + | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0 + | set_vmstate INTERP + | // Modified copy of ins_next which handles function header dispatch, too. + | mov RC, [PC] + | movzx RA, RCH + | movzx OP, RCL + | add PC, 4 + | shr RC, 16 + | cmp OP, BC_FUNCF // Function header? + | jb >3 + | cmp OP, BC_FUNCC+2 // Fast function? + | jae >4 + |2: + | mov RC, MULTRES // RC/RD holds nres+1. + |3: + |.if X64 + | jmp aword [DISPATCH+OP*8] + |.else + | jmp aword [DISPATCH+OP*4] + |.endif + | + |4: // Check frame below fast function. + | mov RC, [BASE-4] + | test RC, FRAME_TYPE + | jnz <2 // Trace stitching continuation? + | // Otherwise set KBASE for Lua function below fast function. + | movzx RC, byte [RC-3] + | not RCa + | mov LFUNC:KBASE, [BASE+RC*8-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | jmp <2 + | + |9: // Rethrow error from the right C frame. + | neg RD + | mov FCARG1, L:RB + | mov FCARG2, RD + | call extern lj_err_throw@8 // (lua_State *L, int errcode) + |.endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// FP value rounding. Called by math.floor/math.ceil fast functions + |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified. + |.macro vm_round, name, mode, cond + |->name: + |.if not X64 and cond + | movsd xmm0, qword [esp+4] + | call ->name .. _sse + | movsd qword [esp+4], xmm0 // Overwrite callee-owned arg. + | fld qword [esp+4] + | ret + |.endif + | + |->name .. _sse: + | sseconst_abs xmm2, RDa + | sseconst_2p52 xmm3, RDa + | movaps xmm1, xmm0 + | andpd xmm1, xmm2 // |x| + | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|. + | jbe >1 + | andnpd xmm2, xmm0 // Isolate sign bit. + |.if mode == 2 // trunc(x)? + | movaps xmm0, xmm1 + | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 + | subsd xmm1, xmm3 + | sseconst_1 xmm3, RDa + | cmpsd xmm0, xmm1, 1 // |x| < result? + | andpd xmm0, xmm3 + | subsd xmm1, xmm0 // If yes, subtract -1. + | orpd xmm1, xmm2 // Merge sign bit back in. + |.else + | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 + | subsd xmm1, xmm3 + | orpd xmm1, xmm2 // Merge sign bit back in. + | .if mode == 1 // ceil(x)? + | sseconst_m1 xmm2, RDa // Must subtract -1 to preserve -0. + | cmpsd xmm0, xmm1, 6 // x > result? + | .else // floor(x)? + | sseconst_1 xmm2, RDa + | cmpsd xmm0, xmm1, 1 // x < result? + | .endif + | andpd xmm0, xmm2 + | subsd xmm1, xmm0 // If yes, subtract +-1. + |.endif + | movaps xmm0, xmm1 + |1: + | ret + |.endmacro + | + | vm_round vm_floor, 0, 1 + | vm_round vm_ceil, 1, JIT + | vm_round vm_trunc, 2, JIT + | + |// FP modulo x%y. Called by BC_MOD* and vm_arith. + |->vm_mod: + |// Args in xmm0/xmm1, return value in xmm0. + |// Caveat: xmm0-xmm5 and RC (eax) modified! + | movaps xmm5, xmm0 + | divsd xmm0, xmm1 + | sseconst_abs xmm2, RDa + | sseconst_2p52 xmm3, RDa + | movaps xmm4, xmm0 + | andpd xmm4, xmm2 // |x/y| + | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|. + | jbe >1 + | andnpd xmm2, xmm0 // Isolate sign bit. + | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52 + | subsd xmm4, xmm3 + | orpd xmm4, xmm2 // Merge sign bit back in. + | sseconst_1 xmm2, RDa + | cmpsd xmm0, xmm4, 1 // x/y < result? + | andpd xmm0, xmm2 + | subsd xmm4, xmm0 // If yes, subtract 1.0. 
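+ | // xmm4 now holds floor(x/y): the +-2^52 trick rounds to the nearest integer and
+ | // the compare/subtract above corrects it downward. Below: x - floor(x/y)*y
+ | // (xmm5 holds the original x), i.e. Lua's floored modulo.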
+ | movaps xmm0, xmm5 + | mulsd xmm1, xmm4 + | subsd xmm0, xmm1 + | ret + |1: + | mulsd xmm1, xmm0 + | movaps xmm0, xmm5 + | subsd xmm0, xmm1 + | ret + | + |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified. + |->vm_powi_sse: + | cmp eax, 1; jle >6 // i<=1? + | // Now 1 < (unsigned)i <= 0x80000000. + |1: // Handle leading zeros. + | test eax, 1; jnz >2 + | mulsd xmm0, xmm0 + | shr eax, 1 + | jmp <1 + |2: + | shr eax, 1; jz >5 + | movaps xmm1, xmm0 + |3: // Handle trailing bits. + | mulsd xmm0, xmm0 + | shr eax, 1; jz >4 + | jnc <3 + | mulsd xmm1, xmm0 + | jmp <3 + |4: + | mulsd xmm0, xmm1 + |5: + | ret + |6: + | je <5 // x^1 ==> x + | jb >7 // x^0 ==> 1 + | neg eax + | call <1 + | sseconst_1 xmm1, RDa + | divsd xmm1, xmm0 + | movaps xmm0, xmm1 + | ret + |7: + | sseconst_1 xmm0, RDa + | ret + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |// int lj_vm_cpuid(uint32_t f, uint32_t res[4]) + |->vm_cpuid: + |.if X64 + | mov eax, CARG1d + | .if X64WIN; push rsi; mov rsi, CARG2; .endif + | push rbx + | cpuid + | mov [rsi], eax + | mov [rsi+4], ebx + | mov [rsi+8], ecx + | mov [rsi+12], edx + | pop rbx + | .if X64WIN; pop rsi; .endif + | ret + |.else + | pushfd + | pop edx + | mov ecx, edx + | xor edx, 0x00200000 // Toggle ID bit in flags. + | push edx + | popfd + | pushfd + | pop edx + | xor eax, eax // Zero means no features supported. + | cmp ecx, edx + | jz >1 // No ID toggle means no CPUID support. + | mov eax, [esp+4] // Argument 1 is function number. + | push edi + | push ebx + | cpuid + | mov edi, [esp+16] // Argument 2 is result area. + | mov [edi], eax + | mov [edi+4], ebx + | mov [edi+8], ecx + | mov [edi+12], edx + | pop ebx + | pop edi + |1: + | ret + |.endif + | + |//----------------------------------------------------------------------- + |//-- Assertions --------------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->assert_bad_for_arg_type: +#ifdef LUA_USE_ASSERT + | int3 +#endif + | int3 + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Handler for callback functions. Callback slot number in ah/al. + |->vm_ffi_callback: + |.if FFI + |.type CTSTATE, CTState, PC + |.if not X64 + | sub esp, 16 // Leave room for SAVE_ERRF etc. + |.endif + | saveregs_ // ebp/rbp already saved. ebp now holds global_State *. 
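+ | // Store the callback slot and incoming GPR/FPR arguments into cts->cb, then let
+ | // lj_ccallback_enter convert them to Lua stack slots for the callback function.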
+ | lea DISPATCH, [ebp+GG_G2DISP] + | mov CTSTATE, GL:ebp->ctype_state + | movzx eax, ax + | mov CTSTATE->cb.slot, eax + |.if X64 + | mov CTSTATE->cb.gpr[0], CARG1 + | mov CTSTATE->cb.gpr[1], CARG2 + | mov CTSTATE->cb.gpr[2], CARG3 + | mov CTSTATE->cb.gpr[3], CARG4 + | movsd qword CTSTATE->cb.fpr[0], xmm0 + | movsd qword CTSTATE->cb.fpr[1], xmm1 + | movsd qword CTSTATE->cb.fpr[2], xmm2 + | movsd qword CTSTATE->cb.fpr[3], xmm3 + |.if X64WIN + | lea rax, [rsp+CFRAME_SIZE+4*8] + |.else + | lea rax, [rsp+CFRAME_SIZE] + | mov CTSTATE->cb.gpr[4], CARG5 + | mov CTSTATE->cb.gpr[5], CARG6 + | movsd qword CTSTATE->cb.fpr[4], xmm4 + | movsd qword CTSTATE->cb.fpr[5], xmm5 + | movsd qword CTSTATE->cb.fpr[6], xmm6 + | movsd qword CTSTATE->cb.fpr[7], xmm7 + |.endif + | mov CTSTATE->cb.stack, rax + | mov CARG2, rsp + |.else + | lea eax, [esp+CFRAME_SIZE+16] + | mov CTSTATE->cb.gpr[0], FCARG1 + | mov CTSTATE->cb.gpr[1], FCARG2 + | mov CTSTATE->cb.stack, eax + | mov FCARG1, [esp+CFRAME_SIZE+12] // Move around misplaced retaddr/ebp. + | mov FCARG2, [esp+CFRAME_SIZE+8] + | mov SAVE_RET, FCARG1 + | mov SAVE_R4, FCARG2 + | mov FCARG2, esp + |.endif + | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok. + | mov FCARG1, CTSTATE + | call extern lj_ccallback_enter@8 // (CTState *cts, void *cf) + | // lua_State * returned in eax (RD). + | set_vmstate INTERP + | mov BASE, L:RD->base + | mov RD, L:RD->top + | sub RD, BASE + | mov LFUNC:RB, [BASE-8] + | shr RD, 3 + | add RD, 1 + | ins_callt + |.endif + | + |->cont_ffi_callback: // Return from FFI callback. + |.if FFI + | mov L:RA, SAVE_L + | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)] + | mov aword CTSTATE->L, L:RAa + | mov L:RA->base, BASE + | mov L:RA->top, RB + | mov FCARG1, CTSTATE + | mov FCARG2, RC + | call extern lj_ccallback_leave@8 // (CTState *cts, TValue *o) + |.if X64 + | mov rax, CTSTATE->cb.gpr[0] + | movsd xmm0, qword CTSTATE->cb.fpr[0] + | jmp ->vm_leave_unw + |.else + | mov L:RB, SAVE_L + | mov eax, CTSTATE->cb.gpr[0] + | mov edx, CTSTATE->cb.gpr[1] + | cmp dword CTSTATE->cb.gpr[2], 1 + | jb >7 + | je >6 + | fld qword CTSTATE->cb.fpr[0].d + | jmp >7 + |6: + | fld dword CTSTATE->cb.fpr[0].f + |7: + | mov ecx, L:RB->top + | movzx ecx, word [ecx+6] // Get stack adjustment and copy up. + | mov SAVE_L, ecx // Must be one slot above SAVE_RET + | restoreregs + | pop ecx // Move return addr from SAVE_RET. + | add esp, [esp] // Adjust stack. + | add esp, 16 + | push ecx + | ret + |.endif + |.endif + | + |->vm_ffi_call@4: // Call C function via FFI. + | // Caveat: needs special frame unwinding, see below. + |.if FFI + |.if X64 + | .type CCSTATE, CCallState, rbx + | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1 + |.else + | .type CCSTATE, CCallState, ebx + | push ebp; mov ebp, esp; push ebx; mov CCSTATE, FCARG1 + |.endif + | + | // Readjust stack. + |.if X64 + | mov eax, CCSTATE->spadj + | sub rsp, rax + |.else + | sub esp, CCSTATE->spadj + |.if WIN + | mov CCSTATE->spadj, esp + |.endif + |.endif + | + | // Copy stack slots. 
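+ | // Copy nsp outgoing stack slots from CCSTATE->stack[] into the area just
+ | // reserved (8-byte slots on x64, 4-byte slots on x86).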
+ | movzx ecx, byte CCSTATE->nsp + | sub ecx, 1 + | js >2 + |1: + |.if X64 + | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)] + | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax + |.else + | mov eax, [CCSTATE+ecx*4+offsetof(CCallState, stack)] + | mov [esp+ecx*4], eax + |.endif + | sub ecx, 1 + | jns <1 + |2: + | + |.if X64 + | movzx eax, byte CCSTATE->nfpr + | mov CARG1, CCSTATE->gpr[0] + | mov CARG2, CCSTATE->gpr[1] + | mov CARG3, CCSTATE->gpr[2] + | mov CARG4, CCSTATE->gpr[3] + |.if not X64WIN + | mov CARG5, CCSTATE->gpr[4] + | mov CARG6, CCSTATE->gpr[5] + |.endif + | test eax, eax; jz >5 + | movaps xmm0, CCSTATE->fpr[0] + | movaps xmm1, CCSTATE->fpr[1] + | movaps xmm2, CCSTATE->fpr[2] + | movaps xmm3, CCSTATE->fpr[3] + |.if not X64WIN + | cmp eax, 4; jbe >5 + | movaps xmm4, CCSTATE->fpr[4] + | movaps xmm5, CCSTATE->fpr[5] + | movaps xmm6, CCSTATE->fpr[6] + | movaps xmm7, CCSTATE->fpr[7] + |.endif + |5: + |.else + | mov FCARG1, CCSTATE->gpr[0] + | mov FCARG2, CCSTATE->gpr[1] + |.endif + | + | call aword CCSTATE->func + | + |.if X64 + | mov CCSTATE->gpr[0], rax + | movaps CCSTATE->fpr[0], xmm0 + |.if not X64WIN + | mov CCSTATE->gpr[1], rdx + | movaps CCSTATE->fpr[1], xmm1 + |.endif + |.else + | mov CCSTATE->gpr[0], eax + | mov CCSTATE->gpr[1], edx + | cmp byte CCSTATE->resx87, 1 + | jb >7 + | je >6 + | fstp qword CCSTATE->fpr[0].d[0] + | jmp >7 + |6: + | fstp dword CCSTATE->fpr[0].f[0] + |7: + |.if WIN + | sub CCSTATE->spadj, esp + |.endif + |.endif + | + |.if X64 + | mov rbx, [rbp-8]; leave; ret + |.else + | mov ebx, [ebp-4]; leave; ret + |.endif + |.endif + |// Note: vm_ffi_call must be the last function in this object file! + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |// Note: aligning all instructions does not pay off. + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + |.macro jmp_comp, lt, ge, le, gt, target + ||switch (op) { + ||case BC_ISLT: + | lt target + ||break; + ||case BC_ISGE: + | ge target + ||break; + ||case BC_ISLE: + | le target + ||break; + ||case BC_ISGT: + | gt target + ||break; + ||default: break; /* Shut up GCC. */ + ||} + |.endmacro + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1, RD = src2, JMP with RD = target + | ins_AD + |.if DUALNUM + | checkint RA, >7 + | checkint RD, >8 + | mov RB, dword [BASE+RA*8] + | add PC, 4 + | cmp RB, dword [BASE+RD*8] + | jmp_comp jge, jl, jg, jle, >9 + |6: + | movzx RD, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RA is not an integer. + | ja ->vmeta_comp + | // RA is a number. + | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp + | // RA is a number, RD is an integer. + | cvtsi2sd xmm0, dword [BASE+RD*8] + | jmp >2 + | + |8: // RA is an integer, RD is not an integer. + | ja ->vmeta_comp + | // RA is an integer, RD is a number. + | cvtsi2sd xmm1, dword [BASE+RA*8] + | movsd xmm0, qword [BASE+RD*8] + | add PC, 4 + | ucomisd xmm0, xmm1 + | jmp_comp jbe, ja, jb, jae, <9 + | jmp <6 + |.else + | checknum RA, ->vmeta_comp + | checknum RD, ->vmeta_comp + |.endif + |1: + | movsd xmm0, qword [BASE+RD*8] + |2: + | add PC, 4 + | ucomisd xmm0, qword [BASE+RA*8] + |3: + | // Unordered: all of ZF CF PF set, ordered: PF clear. 
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + |.if DUALNUM + | jmp_comp jbe, ja, jb, jae, <9 + | jmp <6 + |.else + | jmp_comp jbe, ja, jb, jae, >1 + | movzx RD, PC_RD + | branchPC RD + |1: + | ins_next + |.endif + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | ins_AD // RA = src1, RD = src2, JMP with RD = target + | mov RB, [BASE+RD*8+4] + | add PC, 4 + |.if DUALNUM + | cmp RB, LJ_TISNUM; jne >7 + | checkint RA, >8 + | mov RB, dword [BASE+RD*8] + | cmp RB, dword [BASE+RA*8] + if (vk) { + | jne >9 + } else { + | je >9 + } + | movzx RD, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RD is not an integer. + | ja >5 + | // RD is a number. + | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5 + | // RD is a number, RA is an integer. + | cvtsi2sd xmm0, dword [BASE+RA*8] + | jmp >2 + | + |8: // RD is an integer, RA is not an integer. + | ja >5 + | // RD is an integer, RA is a number. + | cvtsi2sd xmm0, dword [BASE+RD*8] + | ucomisd xmm0, qword [BASE+RA*8] + | jmp >4 + | + |.else + | cmp RB, LJ_TISNUM; jae >5 + | checknum RA, >5 + |.endif + |1: + | movsd xmm0, qword [BASE+RA*8] + |2: + | ucomisd xmm0, qword [BASE+RD*8] + |4: + iseqne_fp: + if (vk) { + | jp >2 // Unordered means not equal. + | jne >2 + } else { + | jp >2 // Unordered means not equal. + | je >1 + } + iseqne_end: + if (vk) { + |1: // EQ: Branch to the target. + | movzx RD, PC_RD + | branchPC RD + |2: // NE: Fallthrough to next instruction. + |.if not FFI + |3: + |.endif + } else { + |.if not FFI + |3: + |.endif + |2: // NE: Branch to the target. + | movzx RD, PC_RD + | branchPC RD + |1: // EQ: Fallthrough to next instruction. + } + if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || + op == BC_ISEQN || op == BC_ISNEN)) { + | jmp <9 + } else { + | ins_next + } + | + if (op == BC_ISEQV || op == BC_ISNEV) { + |5: // Either or both types are not numbers. + |.if FFI + | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd + | checktp RA, LJ_TCDATA; je ->vmeta_equal_cd + |.endif + | checktp RA, RB // Compare types. + | jne <2 // Not the same type? + | cmp RB, LJ_TISPRI + | jae <1 // Same type and primitive type? + | + | // Same types and not a primitive type. Compare GCobj or pvalue. + | mov RA, [BASE+RA*8] + | mov RD, [BASE+RD*8] + | cmp RA, RD + | je <1 // Same GCobjs or pvalues? + | cmp RB, LJ_TISTABUD + | ja <2 // Different objects and not table/ud? + |.if X64 + | cmp RB, LJ_TUDATA // And not 64 bit lightuserdata. + | jb <2 + |.endif + | + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | mov TAB:RB, TAB:RA->metatable + | test TAB:RB, TAB:RB + | jz <2 // No metatable? + | test byte TAB:RB->nomm, 1<vmeta_equal // Handle __eq metamethod. 
+ } else { + |.if FFI + |3: + | cmp RB, LJ_TCDATA + if (LJ_DUALNUM && vk) { + | jne <9 + } else { + | jne <2 + } + | jmp ->vmeta_equal_cd + |.endif + } + break; + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | ins_AND // RA = src, RD = str const, JMP with RD = target + | mov RB, [BASE+RA*8+4] + | add PC, 4 + | cmp RB, LJ_TSTR; jne >3 + | mov RA, [BASE+RA*8] + | cmp RA, [KBASE+RD*4] + iseqne_test: + if (vk) { + | jne >2 + } else { + | je >1 + } + goto iseqne_end; + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | ins_AD // RA = src, RD = num const, JMP with RD = target + | mov RB, [BASE+RA*8+4] + | add PC, 4 + |.if DUALNUM + | cmp RB, LJ_TISNUM; jne >7 + | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jne >8 + | mov RB, dword [KBASE+RD*8] + | cmp RB, dword [BASE+RA*8] + if (vk) { + | jne >9 + } else { + | je >9 + } + | movzx RD, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RA is not an integer. + | ja >3 + | // RA is a number. + | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1 + | // RA is a number, RD is an integer. + | cvtsi2sd xmm0, dword [KBASE+RD*8] + | jmp >2 + | + |8: // RA is an integer, RD is a number. + | cvtsi2sd xmm0, dword [BASE+RA*8] + | ucomisd xmm0, qword [KBASE+RD*8] + | jmp >4 + |.else + | cmp RB, LJ_TISNUM; jae >3 + |.endif + |1: + | movsd xmm0, qword [KBASE+RD*8] + |2: + | ucomisd xmm0, qword [BASE+RA*8] + |4: + goto iseqne_fp; + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target + | mov RB, [BASE+RA*8+4] + | add PC, 4 + | cmp RB, RD + if (!LJ_HASFFI) goto iseqne_test; + if (vk) { + | jne >3 + | movzx RD, PC_RD + | branchPC RD + |2: + | ins_next + |3: + | cmp RB, LJ_TCDATA; jne <2 + | jmp ->vmeta_equal_cd + } else { + | je >2 + | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd + | movzx RD, PC_RD + | branchPC RD + |2: + | ins_next + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | ins_AD // RA = dst or unused, RD = src, JMP with RD = target + | mov RB, [BASE+RD*8+4] + | add PC, 4 + | cmp RB, LJ_TISTRUECOND + if (op == BC_IST || op == BC_ISTC) { + | jae >1 + } else { + | jb >1 + } + if (op == BC_ISTC || op == BC_ISFC) { + | mov [BASE+RA*8+4], RB + | mov RB, [BASE+RD*8] + | mov [BASE+RA*8], RB + } + | movzx RD, PC_RD + | branchPC RD + |1: // Fallthrough to the next instruction. + | ins_next + break; + + case BC_ISTYPE: + | ins_AD // RA = src, RD = -type + | add RD, [BASE+RA*8+4] + | jne ->vmeta_istype + | ins_next + break; + case BC_ISNUM: + | ins_AD // RA = src, RD = -(TISNUM-1) + | checknum RA, ->vmeta_istype + | ins_next + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | ins_AD // RA = dst, RD = src + |.if X64 + | mov RBa, [BASE+RD*8] + | mov [BASE+RA*8], RBa + |.else + | mov RB, [BASE+RD*8+4] + | mov RD, [BASE+RD*8] + | mov [BASE+RA*8+4], RB + | mov [BASE+RA*8], RD + |.endif + | ins_next_ + break; + case BC_NOT: + | ins_AD // RA = dst, RD = src + | xor RB, RB + | checktp RD, LJ_TISTRUECOND + | adc RB, LJ_TTRUE + | mov [BASE+RA*8+4], RB + | ins_next + break; + case BC_UNM: + | ins_AD // RA = dst, RD = src + |.if DUALNUM + | checkint RD, >5 + | mov RB, [BASE+RD*8] + | neg RB + | jo >4 + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RB + |9: + | ins_next + |4: + | mov dword [BASE+RA*8+4], 0x41e00000 // 2^31. 
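+ | // Negating 0x80000000 overflows, so the result is stored as the number +2^31
+ | // (0x41e00000:00000000 as an IEEE-754 double).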
+ | mov dword [BASE+RA*8], 0 + | jmp <9 + |5: + | ja ->vmeta_unm + |.else + | checknum RD, ->vmeta_unm + |.endif + | movsd xmm0, qword [BASE+RD*8] + | sseconst_sign xmm1, RDa + | xorps xmm0, xmm1 + | movsd qword [BASE+RA*8], xmm0 + |.if DUALNUM + | jmp <9 + |.else + | ins_next + |.endif + break; + case BC_LEN: + | ins_AD // RA = dst, RD = src + | checkstr RD, >2 + | mov STR:RD, [BASE+RD*8] + |.if DUALNUM + | mov RD, dword STR:RD->len + |1: + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RD + |.else + | xorps xmm0, xmm0 + | cvtsi2sd xmm0, dword STR:RD->len + |1: + | movsd qword [BASE+RA*8], xmm0 + |.endif + | ins_next + |2: + | checktab RD, ->vmeta_len + | mov TAB:FCARG1, [BASE+RD*8] +#if LJ_52 + | mov TAB:RB, TAB:FCARG1->metatable + | cmp TAB:RB, 0 + | jnz >9 + |3: +#endif + |->BC_LEN_Z: + | mov RB, BASE // Save BASE. + | call extern lj_tab_len@4 // (GCtab *t) + | // Length of table returned in eax (RD). + |.if DUALNUM + | // Nothing to do. + |.else + | cvtsi2sd xmm0, RD + |.endif + | mov BASE, RB // Restore BASE. + | movzx RA, PC_RA + | jmp <1 +#if LJ_52 + |9: // Check for __len. + | test byte TAB:RB->nomm, 1<vmeta_len // 'no __len' flag NOT set: check. +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithpre, sseins, ssereg + | ins_ABC + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | checknum RB, ->vmeta_arith_vn + | .if DUALNUM + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn + | .endif + | movsd xmm0, qword [BASE+RB*8] + | sseins ssereg, qword [KBASE+RC*8] + || break; + ||case 1: + | checknum RB, ->vmeta_arith_nv + | .if DUALNUM + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv + | .endif + | movsd xmm0, qword [KBASE+RC*8] + | sseins ssereg, qword [BASE+RB*8] + || break; + ||default: + | checknum RB, ->vmeta_arith_vv + | checknum RC, ->vmeta_arith_vv + | movsd xmm0, qword [BASE+RB*8] + | sseins ssereg, qword [BASE+RC*8] + || break; + ||} + |.endmacro + | + |.macro ins_arithdn, intins + | ins_ABC + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | checkint RB, ->vmeta_arith_vn + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_vn + | mov RB, [BASE+RB*8] + | intins RB, [KBASE+RC*8]; jo ->vmeta_arith_vno + || break; + ||case 1: + | checkint RB, ->vmeta_arith_nv + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_nv + | mov RC, [KBASE+RC*8] + | intins RC, [BASE+RB*8]; jo ->vmeta_arith_nvo + || break; + ||default: + | checkint RB, ->vmeta_arith_vv + | checkint RC, ->vmeta_arith_vv + | mov RB, [BASE+RB*8] + | intins RB, [BASE+RC*8]; jo ->vmeta_arith_vvo + || break; + ||} + | mov dword [BASE+RA*8+4], LJ_TISNUM + ||if (vk == 1) { + | mov dword [BASE+RA*8], RC + ||} else { + | mov dword [BASE+RA*8], RB + ||} + | ins_next + |.endmacro + | + |.macro ins_arithpost + | movsd qword [BASE+RA*8], xmm0 + |.endmacro + | + |.macro ins_arith, sseins + | ins_arithpre sseins, xmm0 + | ins_arithpost + | ins_next + |.endmacro + | + |.macro ins_arith, intins, sseins + |.if DUALNUM + | ins_arithdn intins + |.else + | ins_arith, sseins + |.endif + |.endmacro + + | // RA = dst, RB = src1 or num const, RC = src2 or num const + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arith add, addsd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arith sub, subsd + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arith imul, mulsd + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | 
ins_arith divsd + break; + case BC_MODVN: + | ins_arithpre movsd, xmm1 + |->BC_MODVN_Z: + | call ->vm_mod + | ins_arithpost + | ins_next + break; + case BC_MODNV: case BC_MODVV: + | ins_arithpre movsd, xmm1 + | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. + break; + case BC_POW: + | ins_arithpre movsd, xmm1 + | mov RB, BASE + |.if not X64 + | movsd FPARG1, xmm0 + | movsd FPARG3, xmm1 + |.endif + | call extern pow + | movzx RA, PC_RA + | mov BASE, RB + |.if X64 + | ins_arithpost + |.else + | fstp qword [BASE+RA*8] + |.endif + | ins_next + break; + + case BC_CAT: + | ins_ABC // RA = dst, RB = src_start, RC = src_end + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | lea CARG2d, [BASE+RC*8] + | mov CARG3d, RC + | sub CARG3d, RB + |->BC_CAT_Z: + | mov L:RB, L:CARG1d + |.else + | lea RA, [BASE+RC*8] + | sub RC, RB + | mov ARG2, RA + | mov ARG3, RC + |->BC_CAT_Z: + | mov L:RB, SAVE_L + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // NULL (finished) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jnz ->vmeta_binop + | movzx RB, PC_RB // Copy result to Stk[RA] from Stk[RB]. + | movzx RA, PC_RA + |.if X64 + | mov RCa, [BASE+RB*8] + | mov [BASE+RA*8], RCa + |.else + | mov RC, [BASE+RB*8+4] + | mov RB, [BASE+RB*8] + | mov [BASE+RA*8+4], RC + | mov [BASE+RA*8], RB + |.endif + | ins_next + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | ins_AND // RA = dst, RD = str const (~) + | mov RD, [KBASE+RD*4] + | mov dword [BASE+RA*8+4], LJ_TSTR + | mov [BASE+RA*8], RD + | ins_next + break; + case BC_KCDATA: + |.if FFI + | ins_AND // RA = dst, RD = cdata const (~) + | mov RD, [KBASE+RD*4] + | mov dword [BASE+RA*8+4], LJ_TCDATA + | mov [BASE+RA*8], RD + | ins_next + |.endif + break; + case BC_KSHORT: + | ins_AD // RA = dst, RD = signed int16 literal + |.if DUALNUM + | movsx RD, RDW + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RD + |.else + | movsx RD, RDW // Sign-extend literal. + | cvtsi2sd xmm0, RD + | movsd qword [BASE+RA*8], xmm0 + |.endif + | ins_next + break; + case BC_KNUM: + | ins_AD // RA = dst, RD = num const + | movsd xmm0, qword [KBASE+RD*8] + | movsd qword [BASE+RA*8], xmm0 + | ins_next + break; + case BC_KPRI: + | ins_AND // RA = dst, RD = primitive type (~) + | mov [BASE+RA*8+4], RD + | ins_next + break; + case BC_KNIL: + | ins_AD // RA = dst_start, RD = dst_end + | lea RA, [BASE+RA*8+12] + | lea RD, [BASE+RD*8+4] + | mov RB, LJ_TNIL + | mov [RA-8], RB // Sets minimum 2 slots. 
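+ | // Only the type tags need to be written for nil; the loop below stores one tag
+ | // every 8 bytes up to and including slot RD.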
+ |1: + | mov [RA], RB + | add RA, 8 + | cmp RA, RD + | jbe <1 + | ins_next + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | ins_AD // RA = dst, RD = upvalue # + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RD*4+offsetof(GCfuncL, uvptr)] + | mov RB, UPVAL:RB->v + |.if X64 + | mov RDa, [RB] + | mov [BASE+RA*8], RDa + |.else + | mov RD, [RB+4] + | mov RB, [RB] + | mov [BASE+RA*8+4], RD + | mov [BASE+RA*8], RB + |.endif + | ins_next + break; + case BC_USETV: +#define TV2MARKOFS \ + ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) + | ins_AD // RA = upvalue #, RD = src + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | cmp byte UPVAL:RB->closed, 0 + | mov RB, UPVAL:RB->v + | mov RA, [BASE+RD*8] + | mov RD, [BASE+RD*8+4] + | mov [RB], RA + | mov [RB+4], RD + | jz >1 + | // Check barrier for closed upvalue. + | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv) + | jnz >2 + |1: + | ins_next + | + |2: // Upvalue is black. Check if new value is collectable and white. + | sub RD, LJ_TISGCV + | cmp RD, LJ_TNUMX - LJ_TISGCV // tvisgcv(v) + | jbe <1 + | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v) + | jz <1 + | // Crossed a write barrier. Move the barrier forward. + |.if X64 and not X64WIN + | mov FCARG2, RB + | mov RB, BASE // Save BASE. + |.else + | xchg FCARG2, RB // Save BASE (FCARG2 == BASE). + |.endif + | lea GL:FCARG1, [DISPATCH+GG_DISP2G] + | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv) + | mov BASE, RB // Restore BASE. + | jmp <1 + break; +#undef TV2MARKOFS + case BC_USETS: + | ins_AND // RA = upvalue #, RD = str const (~) + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | mov GCOBJ:RA, [KBASE+RD*4] + | mov RD, UPVAL:RB->v + | mov [RD], GCOBJ:RA + | mov dword [RD+4], LJ_TSTR + | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv) + | jnz >2 + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str) + | jz <1 + | cmp byte UPVAL:RB->closed, 0 + | jz <1 + | // Crossed a write barrier. Move the barrier forward. + | mov RB, BASE // Save BASE (FCARG2 == BASE). + | mov FCARG2, RD + | lea GL:FCARG1, [DISPATCH+GG_DISP2G] + | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv) + | mov BASE, RB // Restore BASE. + | jmp <1 + break; + case BC_USETN: + | ins_AD // RA = upvalue #, RD = num const + | mov LFUNC:RB, [BASE-8] + | movsd xmm0, qword [KBASE+RD*8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | mov RA, UPVAL:RB->v + | movsd qword [RA], xmm0 + | ins_next + break; + case BC_USETP: + | ins_AND // RA = upvalue #, RD = primitive type (~) + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | mov RA, UPVAL:RB->v + | mov [RA+4], RD + | ins_next + break; + case BC_UCLO: + | ins_AD // RA = level, RD = target + | branchPC RD // Do this first to free RD. + | mov L:RB, SAVE_L + | cmp dword L:RB->openupval, 0 + | je >1 + | mov L:RB->base, BASE + | lea FCARG2, [BASE+RA*8] // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA + | call extern lj_func_closeuv@8 // (lua_State *L, TValue *level) + | mov BASE, L:RB->base + |1: + | ins_next + break; + + case BC_FNEW: + | ins_AND // RA = dst, RD = proto const (~) (holding function prototype) + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. 
+ | mov CARG3d, [BASE-8] + | mov CARG2d, [KBASE+RD*4] // Fetch GCproto *. + | mov CARG1d, L:RB + |.else + | mov LFUNC:RA, [BASE-8] + | mov PROTO:RD, [KBASE+RD*4] // Fetch GCproto *. + | mov L:RB, SAVE_L + | mov ARG3, LFUNC:RA + | mov ARG2, PROTO:RD + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | call extern lj_func_newL_gc + | // GCfuncL * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RA, PC_RA + | mov [BASE+RA*8], LFUNC:RC + | mov dword [BASE+RA*8+4], LJ_TFUNC + | ins_next + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + | ins_AD // RA = dst, RD = hbits|asize + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] + | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] + | mov SAVE_PC, PC + | jae >5 + |1: + |.if X64 + | mov CARG3d, RD + | and RD, 0x7ff + | shr CARG3d, 11 + |.else + | mov RA, RD + | and RD, 0x7ff + | shr RA, 11 + | mov ARG3, RA + |.endif + | cmp RD, 0x7ff + | je >3 + |2: + |.if X64 + | mov L:CARG1d, L:RB + | mov CARG2d, RD + |.else + | mov ARG1, L:RB + | mov ARG2, RD + |.endif + | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Table * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RA, PC_RA + | mov [BASE+RA*8], TAB:RC + | mov dword [BASE+RA*8+4], LJ_TTAB + | ins_next + |3: // Turn 0x7ff into 0x801. + | mov RD, 0x801 + | jmp <2 + |5: + | mov L:FCARG1, L:RB + | call extern lj_gc_step_fixtop@4 // (lua_State *L) + | movzx RD, PC_RD + | jmp <1 + break; + case BC_TDUP: + | ins_AND // RA = dst, RD = table const (~) (holding template table) + | mov L:RB, SAVE_L + | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] + | mov SAVE_PC, PC + | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] + | mov L:RB->base, BASE + | jae >3 + |2: + | mov TAB:FCARG2, [KBASE+RD*4] // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA + | call extern lj_tab_dup@8 // (lua_State *L, Table *kt) + | // Table * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RA, PC_RA + | mov [BASE+RA*8], TAB:RC + | mov dword [BASE+RA*8+4], LJ_TTAB + | ins_next + |3: + | mov L:FCARG1, L:RB + | call extern lj_gc_step_fixtop@4 // (lua_State *L) + | movzx RD, PC_RD // Need to reload RD. + | not RDa + | jmp <2 + break; + + case BC_GGET: + | ins_AND // RA = dst, RD = str const (~) + | mov LFUNC:RB, [BASE-8] + | mov TAB:RB, LFUNC:RB->env + | mov STR:RC, [KBASE+RD*4] + | jmp ->BC_TGETS_Z + break; + case BC_GSET: + | ins_AND // RA = src, RD = str const (~) + | mov LFUNC:RB, [BASE-8] + | mov TAB:RB, LFUNC:RB->env + | mov STR:RC, [KBASE+RD*4] + | jmp ->BC_TSETS_Z + break; + + case BC_TGETV: + | ins_ABC // RA = dst, RB = table, RC = key + | checktab RB, ->vmeta_tgetv + | mov TAB:RB, [BASE+RB*8] + | + | // Integer key? + |.if DUALNUM + | checkint RC, >5 + | mov RC, dword [BASE+RC*8] + |.else + | // Convert number to int and back and compare. + | checknum RC, >5 + | movsd xmm0, qword [BASE+RC*8] + | cvttsd2si RC, xmm0 + | cvtsi2sd xmm1, RC + | ucomisd xmm0, xmm1 + | jne ->vmeta_tgetv // Generic numeric key? Use fallback. + |.endif + | cmp RC, TAB:RB->asize // Takes care of unordered, too. + | jae ->vmeta_tgetv // Not in array part? Use fallback. + | shl RC, 3 + | add RC, TAB:RB->array + | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath. + | je >2 + | // Get array slot. 
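+ | // Copy the 8-byte TValue: one 64-bit move on x64, two 32-bit moves on x86.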
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |1:
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz >3
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ |3:
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp <1
+ |
+ |5: // String key?
+ | checkstr RC, ->vmeta_tgetv
+ | mov STR:RC, [BASE+RC*8]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | ins_ABC // RA = dst, RB = table, RC = str const (~)
+ | not RCa
+ | mov STR:RC, [KBASE+RC*4]
+ | checktab RB, ->vmeta_tgets
+ | mov TAB:RB, [BASE+RB*8]
+ |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->hash
+ | imul RA, #NODE
+ | add NODE:RA, TAB:RB->node
+ |1:
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >4
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | jne >4
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp dword [RA+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >5 // Key found, but nil value?
+ | movzx RC, PC_RA
+ | // Get node value.
+ |.if X64
+ | mov RBa, [RA]
+ | mov [BASE+RC*8], RBa
+ |.else
+ | mov RB, [RA]
+ | mov RA, [RA+4]
+ | mov [BASE+RC*8], RB
+ | mov [BASE+RC*8+4], RA
+ |.endif
+ |2:
+ | ins_next
+ |
+ |3:
+ | movzx RC, PC_RA
+ | mov dword [BASE+RC*8+4], LJ_TNIL
+ | jmp <2
+ |
+ |4: // Follow hash chain.
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | mov TAB:RA, TAB:RB->metatable
+ | test TAB:RA, TAB:RA
+ | jz <3 // No metatable: done.
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jnz <3
+ | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
+ break;
+ case BC_TGETB:
+ | ins_ABC // RA = dst, RB = table, RC = byte literal
+ | checktab RB, ->vmeta_tgetb
+ | mov TAB:RB, [BASE+RB*8]
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tgetb
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ | // Get array slot.
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |1:
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz >3
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ |3:
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp <1
+ break;
+ case BC_TGETR:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ |.if DUALNUM
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RC, qword [BASE+RC*8]
+ |.endif
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tgetr // Not in array part? Use fallback.
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ |->BC_TGETR_Z:
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |->BC_TGETR2_Z:
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | checktab RB, ->vmeta_tsetv
+ | mov TAB:RB, [BASE+RB*8]
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movsd xmm0, qword [BASE+RC*8]
+ | cvttsd2si RC, xmm0
+ | cvtsi2sd xmm1, RC
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RC, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tsetv
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <1
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1
+ |
+ |5: // String key?
+ | checkstr RC, ->vmeta_tsetv
+ | mov STR:RC, [BASE+RC*8]
+ | jmp ->BC_TSETS_Z
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+ case BC_TSETS:
+ | ins_ABC // RA = src, RB = table, RC = str const (~)
+ | not RCa
+ | mov STR:RC, [KBASE+RC*4]
+ | checktab RB, ->vmeta_tsets
+ | mov TAB:RB, [BASE+RB*8]
+ |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->hash
+ | imul RA, #NODE
+ | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
+ | add NODE:RA, TAB:RB->node
+ |1:
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >5
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | jne >5
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp dword [RA+4], LJ_TNIL
+ | je >4 // Previous value is nil?
+ |2:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |3: // Set node value.
+ | movzx RC, PC_RA
+ |.if X64
+ | mov RBa, [BASE+RC*8]
+ | mov [RA], RBa
+ |.else
+ | mov RB, [BASE+RC*8+4]
+ | mov RC, [BASE+RC*8]
+ | mov [RA+4], RB
+ | mov [RA], RC
+ |.endif
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <2
+ | mov TMP1, RA // Save RA.
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ | mov RA, TMP1 // Restore RA.
+ | jmp <2
+ |
+ |5: // Follow hash chain.
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | mov TAB:RA, TAB:RB->metatable
+ | test TAB:RA, TAB:RA
+ | jz >6 // No metatable: continue.
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mov TMP1, STR:RC
+ | mov TMP2, LJ_TSTR
+ | mov TMP3, TAB:RB // Save TAB:RB for us.
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | lea CARG3, TMP1
+ | mov CARG2d, TAB:RB
+ | mov L:RB, L:CARG1d
+ |.else
+ | lea RC, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | mov ARG2, TAB:RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Handles write barrier for the new key. TValue * returned in eax (RC).
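+ | // lj_tab_newkey returned a pointer to the new value slot: reload BASE and the
+ | // saved table pointer, then store the value via label 2, which also performs
+ | // the write-barrier check.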
+ | mov BASE, L:RB->base
+ | mov TAB:RB, TMP3 // Need TAB:RB for barrier.
+ | mov RA, eax
+ | jmp <2 // Must check write barrier for value.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RC // Destroys STR:RC.
+ | jmp <3
+ break;
+ case BC_TSETB:
+ | ins_ABC // RA = src, RB = table, RC = byte literal
+ | checktab RB, ->vmeta_tsetb
+ | mov TAB:RB, [BASE+RB*8]
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tsetb
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ |.if X64
+ | mov RAa, [BASE+RA*8]
+ | mov [RC], RAa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <1
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+ case BC_TSETR:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ |.if DUALNUM
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RC, qword [BASE+RC*8]
+ |.endif
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tsetr
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | // Set array slot.
+ |->BC_TSETR_Z:
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+
+ case BC_TSETM:
+ | ins_AD // RA = base (table at base-1), RD = num const (start index)
+ | mov TMP1, KBASE // Need one more free register.
+ | mov KBASE, dword [KBASE+RD*8] // Integer constant is in lo-word.
+ |1:
+ | lea RA, [BASE+RA*8]
+ | mov TAB:RB, [RA-8] // Guaranteed to be a table.
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | mov RD, MULTRES
+ | sub RD, 1
+ | jz >4 // Nothing to copy?
+ | add RD, KBASE // Compute needed size.
+ | cmp RD, TAB:RB->asize
+ | ja >5 // Doesn't fit into array part?
+ | sub RD, KBASE
+ | shl KBASE, 3
+ | add KBASE, TAB:RB->array
+ |3: // Copy result slots to table.
+ |.if X64
+ | mov RBa, [RA]
+ | add RA, 8
+ | mov [KBASE], RBa
+ |.else
+ | mov RB, [RA]
+ | mov [KBASE], RB
+ | mov RB, [RA+4]
+ | add RA, 8
+ | mov [KBASE+4], RB
+ |.endif
+ | add KBASE, 8
+ | sub RD, 1
+ | jnz <3
+ |4:
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, TAB:RB
+ | mov CARG3d, RD
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, TAB:RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov ARG3, RD
+ | mov ARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1 // Retry.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:RB, RD + | jmp <2 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALL: case BC_CALLM: + | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs + if (op == BC_CALLM) { + | add NARGS:RD, MULTRES + } + | cmp dword [BASE+RA*8+4], LJ_TFUNC + | mov LFUNC:RB, [BASE+RA*8] + | jne ->vmeta_call_ra + | lea BASE, [BASE+RA*8+8] + | ins_call + break; + + case BC_CALLMT: + | ins_AD // RA = base, RD = extra_nargs + | add NARGS:RD, MULTRES + | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op. + break; + case BC_CALLT: + | ins_AD // RA = base, RD = nargs+1 + | lea RA, [BASE+RA*8+8] + | mov KBASE, BASE // Use KBASE for move + vmeta_call hint. + | mov LFUNC:RB, [RA-8] + | cmp dword [RA-4], LJ_TFUNC + | jne ->vmeta_call + |->BC_CALLT_Z: + | mov PC, [BASE-4] + | test PC, FRAME_TYPE + | jnz >7 + |1: + | mov [BASE-8], LFUNC:RB // Copy function down, reloaded below. + | mov MULTRES, NARGS:RD + | sub NARGS:RD, 1 + | jz >3 + |2: // Move args down. + |.if X64 + | mov RBa, [RA] + | add RA, 8 + | mov [KBASE], RBa + |.else + | mov RB, [RA] + | mov [KBASE], RB + | mov RB, [RA+4] + | add RA, 8 + | mov [KBASE+4], RB + |.endif + | add KBASE, 8 + | sub NARGS:RD, 1 + | jnz <2 + | + | mov LFUNC:RB, [BASE-8] + |3: + | mov NARGS:RD, MULTRES + | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function? + | ja >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function. + | test PC, FRAME_TYPE // Lua frame below? + | jnz <4 + | movzx RA, PC_RA + | not RAa + | mov LFUNC:KBASE, [BASE+RA*8-8] // Need to prepare KBASE. + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | jmp <4 + | + |7: // Tailcall from a vararg function. + | sub PC, FRAME_VARG + | test PC, FRAME_TYPEP + | jnz >8 // Vararg frame below? + | sub BASE, PC // Need to relocate BASE/KBASE down. + | mov KBASE, BASE + | mov PC, [BASE-4] + | jmp <1 + |8: + | add PC, FRAME_VARG + | jmp <1 + break; + + case BC_ITERC: + | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1) + | lea RA, [BASE+RA*8+8] // fb = base+1 + |.if X64 + | mov RBa, [RA-24] // Copy state. fb[0] = fb[-3]. + | mov RCa, [RA-16] // Copy control var. fb[1] = fb[-2]. + | mov [RA], RBa + | mov [RA+8], RCa + |.else + | mov RB, [RA-24] // Copy state. fb[0] = fb[-3]. + | mov RC, [RA-20] + | mov [RA], RB + | mov [RA+4], RC + | mov RB, [RA-16] // Copy control var. fb[1] = fb[-2]. + | mov RC, [RA-12] + | mov [RA+8], RB + | mov [RA+12], RC + |.endif + | mov LFUNC:RB, [RA-32] // Copy callable. fb[-1] = fb[-4] + | mov RC, [RA-28] + | mov [RA-8], LFUNC:RB + | mov [RA-4], RC + | cmp RC, LJ_TFUNC // Handle like a regular 2-arg call. + | mov NARGS:RD, 2+1 + | jne ->vmeta_call + | mov BASE, RA + | ins_call + break; + + case BC_ITERN: + | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) + |.if JIT + | // NYI: add hotloop, record BC_ITERN. + |.endif + | mov TMP1, KBASE // Need two more free registers. + | mov TMP2, DISPATCH + | mov TAB:RB, [BASE+RA*8-16] + | mov RC, [BASE+RA*8-8] // Get index from control var. + | mov DISPATCH, TAB:RB->asize + | add PC, 4 + | mov KBASE, TAB:RB->array + |1: // Traverse array part. + | cmp RC, DISPATCH; jae >5 // Index points after array part? + | cmp dword [KBASE+RC*8+4], LJ_TNIL; je >4 + |.if DUALNUM + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RC + |.else + | cvtsi2sd xmm0, RC + |.endif + | // Copy array slot to returned value. 
+ |.if X64 + | mov RBa, [KBASE+RC*8] + | mov [BASE+RA*8+8], RBa + |.else + | mov RB, [KBASE+RC*8+4] + | mov [BASE+RA*8+12], RB + | mov RB, [KBASE+RC*8] + | mov [BASE+RA*8+8], RB + |.endif + | add RC, 1 + | // Return array index as a numeric key. + |.if DUALNUM + | // See above. + |.else + | movsd qword [BASE+RA*8], xmm0 + |.endif + | mov [BASE+RA*8-8], RC // Update control var. + |2: + | movzx RD, PC_RD // Get target from ITERL. + | branchPC RD + |3: + | mov DISPATCH, TMP2 + | mov KBASE, TMP1 + | ins_next + | + |4: // Skip holes in array part. + | add RC, 1 + | jmp <1 + | + |5: // Traverse hash part. + | sub RC, DISPATCH + |6: + | cmp RC, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1. + | imul KBASE, RC, #NODE + | add NODE:KBASE, TAB:RB->node + | cmp dword NODE:KBASE->val.it, LJ_TNIL; je >7 + | lea DISPATCH, [RC+DISPATCH+1] + | // Copy key and value from hash slot. + |.if X64 + | mov RBa, NODE:KBASE->key + | mov RCa, NODE:KBASE->val + | mov [BASE+RA*8], RBa + | mov [BASE+RA*8+8], RCa + |.else + | mov RB, NODE:KBASE->key.gcr + | mov RC, NODE:KBASE->key.it + | mov [BASE+RA*8], RB + | mov [BASE+RA*8+4], RC + | mov RB, NODE:KBASE->val.gcr + | mov RC, NODE:KBASE->val.it + | mov [BASE+RA*8+8], RB + | mov [BASE+RA*8+12], RC + |.endif + | mov [BASE+RA*8-8], DISPATCH + | jmp <2 + | + |7: // Skip holes in hash part. + | add RC, 1 + | jmp <6 + break; + + case BC_ISNEXT: + | ins_AD // RA = base, RD = target (points to ITERN) + | cmp dword [BASE+RA*8-20], LJ_TFUNC; jne >5 + | mov CFUNC:RB, [BASE+RA*8-24] + | cmp dword [BASE+RA*8-12], LJ_TTAB; jne >5 + | cmp dword [BASE+RA*8-4], LJ_TNIL; jne >5 + | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5 + | branchPC RD + | mov dword [BASE+RA*8-8], 0 // Initialize control var. + | mov dword [BASE+RA*8-4], 0xfffe7fff + |1: + | ins_next + |5: // Despecialize bytecode if any of the checks fail. + | mov PC_OP, BC_JMP + | branchPC RD + | mov byte [PC], BC_ITERC + | jmp <1 + break; + + case BC_VARG: + | ins_ABC // RA = base, RB = nresults+1, RC = numparams + | mov TMP1, KBASE // Need one more free register. + | lea KBASE, [BASE+RC*8+(8+FRAME_VARG)] + | lea RA, [BASE+RA*8] + | sub KBASE, [BASE-4] + | // Note: KBASE may now be even _above_ BASE if nargs was < numparams. + | test RB, RB + | jz >5 // Copy all varargs? + | lea RB, [RA+RB*8-8] + | cmp KBASE, BASE // No vararg slots? + | jnb >2 + |1: // Copy vararg slots to destination slots. + |.if X64 + | mov RCa, [KBASE-8] + | add KBASE, 8 + | mov [RA], RCa + |.else + | mov RC, [KBASE-8] + | mov [RA], RC + | mov RC, [KBASE-4] + | add KBASE, 8 + | mov [RA+4], RC + |.endif + | add RA, 8 + | cmp RA, RB // All destination slots filled? + | jnb >3 + | cmp KBASE, BASE // No more vararg slots? + | jb <1 + |2: // Fill up remainder with nil. + | mov dword [RA+4], LJ_TNIL + | add RA, 8 + | cmp RA, RB + | jb <2 + |3: + | mov KBASE, TMP1 + | ins_next + | + |5: // Copy all varargs. + | mov MULTRES, 1 // MULTRES = 0+1 + | mov RC, BASE + | sub RC, KBASE + | jbe <3 // No vararg slots? + | mov RB, RC + | shr RB, 3 + | add RB, 1 + | mov MULTRES, RB // MULTRES = #varargs+1 + | mov L:RB, SAVE_L + | add RC, RA + | cmp RC, L:RB->maxstack + | ja >7 // Need to grow stack? + |6: // Copy all vararg slots. + |.if X64 + | mov RCa, [KBASE-8] + | add KBASE, 8 + | mov [RA], RCa + |.else + | mov RC, [KBASE-8] + | mov [RA], RC + | mov RC, [KBASE-4] + | add KBASE, 8 + | mov [RA+4], RC + |.endif + | add RA, 8 + | cmp KBASE, BASE // No more vararg slots? + | jb <6 + | jmp <3 + | + |7: // Grow stack for varargs. 
+ | mov L:RB->base, BASE + | mov L:RB->top, RA + | mov SAVE_PC, PC + | sub KBASE, BASE // Need delta, because BASE may change. + | mov FCARG2, MULTRES + | sub FCARG2, 1 + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->base + | mov RA, L:RB->top + | add KBASE, BASE + | jmp <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | ins_AD // RA = results, RD = extra_nresults + | add RD, MULTRES // MULTRES >=1, so RD >=1. + | // Fall through. Assumes BC_RET follows and ins_AD is a no-op. + break; + + case BC_RET: case BC_RET0: case BC_RET1: + | ins_AD // RA = results, RD = nresults+1 + if (op != BC_RET0) { + | shl RA, 3 + } + |1: + | mov PC, [BASE-4] + | mov MULTRES, RD // Save nresults+1. + | test PC, FRAME_TYPE // Check frame type marker. + | jnz >7 // Not returning to a fixarg Lua func? + switch (op) { + case BC_RET: + |->BC_RET_Z: + | mov KBASE, BASE // Use KBASE for result move. + | sub RD, 1 + | jz >3 + |2: // Move results down. + |.if X64 + | mov RBa, [KBASE+RA] + | mov [KBASE-8], RBa + |.else + | mov RB, [KBASE+RA] + | mov [KBASE-8], RB + | mov RB, [KBASE+RA+4] + | mov [KBASE-4], RB + |.endif + | add KBASE, 8 + | sub RD, 1 + | jnz <2 + |3: + | mov RD, MULTRES // Note: MULTRES may be >255. + | movzx RB, PC_RB // So cannot compare with RDL! + |5: + | cmp RB, RD // More results expected? + | ja >6 + break; + case BC_RET1: + |.if X64 + | mov RBa, [BASE+RA] + | mov [BASE-8], RBa + |.else + | mov RB, [BASE+RA+4] + | mov [BASE-4], RB + | mov RB, [BASE+RA] + | mov [BASE-8], RB + |.endif + /* fallthrough */ + case BC_RET0: + |5: + | cmp PC_RB, RDL // More results expected? + | ja >6 + default: + break; + } + | movzx RA, PC_RA + | not RAa // Note: ~RA = -(RA+1) + | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8 + | mov LFUNC:KBASE, [BASE-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | ins_next + | + |6: // Fill up results with nil. + if (op == BC_RET) { + | mov dword [KBASE-4], LJ_TNIL // Note: relies on shifted base. + | add KBASE, 8 + } else { + | mov dword [BASE+RD*8-12], LJ_TNIL + } + | add RD, 1 + | jmp <5 + | + |7: // Non-standard return case. + | lea RB, [PC-FRAME_VARG] + | test RB, FRAME_TYPEP + | jnz ->vm_return + | // Return from vararg function: relocate BASE down and RA up. + | sub BASE, RB + if (op != BC_RET0) { + | add RA, RB + } + | jmp <1 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + |.define FOR_IDX, [RA]; .define FOR_TIDX, dword [RA+4] + |.define FOR_STOP, [RA+8]; .define FOR_TSTOP, dword [RA+12] + |.define FOR_STEP, [RA+16]; .define FOR_TSTEP, dword [RA+20] + |.define FOR_EXT, [RA+24]; .define FOR_TEXT, dword [RA+28] + + case BC_FORL: + |.if JIT + | hotloop RB + |.endif + | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op. 
+ break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + | ins_AJ // RA = base, RD = target (after end of loop or start of loop) + | lea RA, [BASE+RA*8] + if (LJ_DUALNUM) { + | cmp FOR_TIDX, LJ_TISNUM; jne >9 + if (!vk) { + | cmp FOR_TSTOP, LJ_TISNUM; jne ->vmeta_for + | cmp FOR_TSTEP, LJ_TISNUM; jne ->vmeta_for + | mov RB, dword FOR_IDX + | cmp dword FOR_STEP, 0; jl >5 + } else { +#ifdef LUA_USE_ASSERT + | cmp FOR_TSTOP, LJ_TISNUM; jne ->assert_bad_for_arg_type + | cmp FOR_TSTEP, LJ_TISNUM; jne ->assert_bad_for_arg_type +#endif + | mov RB, dword FOR_STEP + | test RB, RB; js >5 + | add RB, dword FOR_IDX; jo >1 + | mov dword FOR_IDX, RB + } + | cmp RB, dword FOR_STOP + | mov FOR_TEXT, LJ_TISNUM + | mov dword FOR_EXT, RB + if (op == BC_FORI) { + | jle >7 + |1: + |6: + | branchPC RD + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RD, PC_RD + | jle =>BC_JLOOP + |1: + |6: + } else if (op == BC_IFORL) { + | jg >7 + |6: + | branchPC RD + |1: + } else { + | jle =>BC_JLOOP + |1: + |6: + } + |7: + | ins_next + | + |5: // Invert check for negative step. + if (vk) { + | add RB, dword FOR_IDX; jo <1 + | mov dword FOR_IDX, RB + } + | cmp RB, dword FOR_STOP + | mov FOR_TEXT, LJ_TISNUM + | mov dword FOR_EXT, RB + if (op == BC_FORI) { + | jge <7 + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RD, PC_RD + | jge =>BC_JLOOP + } else if (op == BC_IFORL) { + | jl <7 + } else { + | jge =>BC_JLOOP + } + | jmp <6 + |9: // Fallback to FP variant. + } else if (!vk) { + | cmp FOR_TIDX, LJ_TISNUM + } + if (!vk) { + | jae ->vmeta_for + | cmp FOR_TSTOP, LJ_TISNUM; jae ->vmeta_for + } else { +#ifdef LUA_USE_ASSERT + | cmp FOR_TSTOP, LJ_TISNUM; jae ->assert_bad_for_arg_type + | cmp FOR_TSTEP, LJ_TISNUM; jae ->assert_bad_for_arg_type +#endif + } + | mov RB, FOR_TSTEP // Load type/hiword of for step. + if (!vk) { + | cmp RB, LJ_TISNUM; jae ->vmeta_for + } + | movsd xmm0, qword FOR_IDX + | movsd xmm1, qword FOR_STOP + if (vk) { + | addsd xmm0, qword FOR_STEP + | movsd qword FOR_IDX, xmm0 + | test RB, RB; js >3 + } else { + | jl >3 + } + | ucomisd xmm1, xmm0 + |1: + | movsd qword FOR_EXT, xmm0 + if (op == BC_FORI) { + |.if DUALNUM + | jnb <7 + |.else + | jnb >2 + | branchPC RD + |.endif + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RD, PC_RD + | jnb =>BC_JLOOP + } else if (op == BC_IFORL) { + |.if DUALNUM + | jb <7 + |.else + | jb >2 + | branchPC RD + |.endif + } else { + | jnb =>BC_JLOOP + } + |.if DUALNUM + | jmp <6 + |.else + |2: + | ins_next + |.endif + | + |3: // Invert comparison if step is negative. + | ucomisd xmm0, xmm1 + | jmp <1 + break; + + case BC_ITERL: + |.if JIT + | hotloop RB + |.endif + | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | ins_AJ // RA = base, RD = target + | lea RA, [BASE+RA*8] + | mov RB, [RA+4] + | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil. + if (op == BC_JITERL) { + | mov [RA-4], RB + | mov RB, [RA] + | mov [RA-8], RB + | jmp =>BC_JLOOP + } else { + | branchPC RD // Otherwise save control var + branch. + | mov RD, [RA] + | mov [RA-4], RB + | mov [RA-8], RD + } + |1: + | ins_next + break; + + case BC_LOOP: + | ins_A // RA = base, RD = target (loop extent) + | // Note: RA/RD is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. 
+ |.if JIT + | hotloop RB + |.endif + | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op. + break; + + case BC_ILOOP: + | ins_A // RA = base, RD = target (loop extent) + | ins_next + break; + + case BC_JLOOP: + |.if JIT + | ins_AD // RA = base (ignored), RD = traceno + | mov RA, [DISPATCH+DISPATCH_J(trace)] + | mov TRACE:RD, [RA+RD*4] + | mov RDa, TRACE:RD->mcode + | mov L:RB, SAVE_L + | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE + | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB + | // Save additional callee-save registers only used in compiled code. + |.if X64WIN + | mov TMPQ, r12 + | mov TMPa, r13 + | mov CSAVE_4, r14 + | mov CSAVE_3, r15 + | mov RAa, rsp + | sub rsp, 9*16+4*8 + | movdqa [RAa], xmm6 + | movdqa [RAa-1*16], xmm7 + | movdqa [RAa-2*16], xmm8 + | movdqa [RAa-3*16], xmm9 + | movdqa [RAa-4*16], xmm10 + | movdqa [RAa-5*16], xmm11 + | movdqa [RAa-6*16], xmm12 + | movdqa [RAa-7*16], xmm13 + | movdqa [RAa-8*16], xmm14 + | movdqa [RAa-9*16], xmm15 + |.elif X64 + | mov TMPQ, r12 + | mov TMPa, r13 + | sub rsp, 16 + |.endif + | jmp RDa + |.endif + break; + + case BC_JMP: + | ins_AJ // RA = unused, RD = target + | branchPC RD + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + /* + ** Reminder: A function may be called with func/args above L->maxstack, + ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, + ** too. This means all FUNC* ops (including fast functions) must check + ** for stack overflow _before_ adding more slots! + */ + + case BC_FUNCF: + |.if JIT + | hotcall RB + |.endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 + | mov KBASE, [PC-4+PC2PROTO(k)] + | mov L:RB, SAVE_L + | lea RA, [BASE+RA*8] // Top of frame. + | cmp RA, L:RB->maxstack + | ja ->vm_growstack_f + | movzx RA, byte [PC-4+PC2PROTO(numparams)] + | cmp NARGS:RD, RA // Check for missing parameters. + | jbe >3 + |2: + if (op == BC_JFUNCF) { + | movzx RD, PC_RD + | jmp =>BC_JLOOP + } else { + | ins_next + } + | + |3: // Clear missing parameters. + | mov dword [BASE+NARGS:RD*8-4], LJ_TNIL + | add NARGS:RD, 1 + | cmp NARGS:RD, RA + | jbe <3 + | jmp <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | int3 // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 + | lea RB, [NARGS:RD*8+FRAME_VARG] + | lea RD, [BASE+NARGS:RD*8] + | mov LFUNC:KBASE, [BASE-8] + | mov [RD-4], RB // Store delta + FRAME_VARG. + | mov [RD-8], LFUNC:KBASE // Store copy of LFUNC. + | mov L:RB, SAVE_L + | lea RA, [RD+RA*8] + | cmp RA, L:RB->maxstack + | ja ->vm_growstack_v // Need to grow stack. + | mov RA, BASE + | mov BASE, RD + | movzx RB, byte [PC-4+PC2PROTO(numparams)] + | test RB, RB + | jz >2 + |1: // Copy fixarg slots up to new frame. + | add RA, 8 + | cmp RA, BASE + | jnb >3 // Less args than parameters? + | mov KBASE, [RA-8] + | mov [RD], KBASE + | mov KBASE, [RA-4] + | mov [RD+4], KBASE + | add RD, 8 + | mov dword [RA-4], LJ_TNIL // Clear old fixarg slot (help the GC). + | sub RB, 1 + | jnz <1 + |2: + if (op == BC_JFUNCV) { + | movzx RD, PC_RD + | jmp =>BC_JLOOP + } else { + | mov KBASE, [PC-4+PC2PROTO(k)] + | ins_next + } + | + |3: // Clear missing parameters. 
+ | mov dword [RD+4], LJ_TNIL + | add RD, 8 + | sub RB, 1 + | jnz <3 + | jmp <2 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1 + | mov CFUNC:RB, [BASE-8] + | mov KBASEa, CFUNC:RB->f + | mov L:RB, SAVE_L + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB->base, BASE + | lea RA, [RD+8*LUA_MINSTACK] + | cmp RA, L:RB->maxstack + | mov L:RB->top, RD + if (op == BC_FUNCC) { + |.if X64 + | mov CARG1d, L:RB // Caveat: CARG1d may be RA. + |.else + | mov ARG1, L:RB + |.endif + } else { + |.if X64 + | mov CARG2, KBASEa + | mov CARG1d, L:RB // Caveat: CARG1d may be RA. + |.else + | mov ARG2, KBASEa + | mov ARG1, L:RB + |.endif + } + | ja ->vm_growstack_c // Need to grow stack. + | set_vmstate C + if (op == BC_FUNCC) { + | call KBASEa // (lua_State *L) + } else { + | // (lua_State *L, lua_CFunction f) + | call aword [DISPATCH+DISPATCH_GL(wrapf)] + } + | // nresults returned in eax (RD). + | mov BASE, L:RB->base + | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB + | set_vmstate INTERP + | lea RA, [BASE+RD*8] + | neg RA + | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8 + | mov PC, [BASE-4] // Fetch PC of caller. + | jmp ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + dasm_growpc(Dst, BC__MAX); + build_subroutines(ctx); + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. */ +static void emit_asm_debug(BuildCtx *ctx) +{ + int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code); +#if LJ_64 +#define SZPTR "8" +#define BSZPTR "3" +#define REG_SP "0x7" +#define REG_RA "0x10" +#else +#define SZPTR "4" +#define BSZPTR "2" +#define REG_SP "0x4" +#define REG_RA "0x8" +#endif + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" +#if LJ_64 + "\t.quad .Lbegin\n" + "\t.quad %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#if LJ_NO_UNWIND + "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */ + "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */ +#endif +#else + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE0:\n\n", fcofs, CFRAME_SIZE); +#if LJ_HASFFI + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .Lframe0\n" +#if LJ_64 + "\t.quad lj_vm_ffi_call\n" + 
"\t.quad %d\n" + "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ +#else + "\t.long lj_vm_ffi_call\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */ + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); +#endif +#if !LJ_NO_UNWIND +#if (defined(__sun__) && defined(__svr4__)) +#if LJ_64 + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n"); +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); +#endif +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); +#endif + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE2:\n" + "\t.long .LEFDE2-.LASFDE2\n" + ".LASFDE2:\n" + "\t.long .LASFDE2-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE2:\n\n", fcofs, CFRAME_SIZE); +#if LJ_HASFFI + fprintf(ctx->fp, + ".Lframe2:\n" + "\t.long .LECIE2-.LSCIE2\n" + ".LSCIE2:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 1\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE2:\n\n"); + fprintf(ctx->fp, + ".LSFDE3:\n" + "\t.long .LEFDE3-.LASFDE3\n" + ".LASFDE3:\n" + "\t.long .LASFDE3-.Lframe2\n" + "\t.long lj_vm_ffi_call-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ +#if LJ_64 + "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ +#else + "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */ + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE3:\n\n", (int)ctx->codesz - fcofs); +#endif +#endif + break; +#if !LJ_NO_UNWIND + /* Mental note: never let Apple design an assembler. + ** Or a linker. Or a plastic case. But I digress. 
+ */ + case BUILD_machasm: { +#if LJ_HASFFI + int fcsize = 0; +#endif + int i; + fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); + fprintf(ctx->fp, + "EH_frame1:\n" + "\t.set L$set$x,LECIEX-LSCIEX\n" + "\t.long L$set$x\n" + "LSCIEX:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zPR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 6\n" /* augmentation length */ + "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ +#if LJ_64 + "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" +#else + "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */ +#endif + "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" + "\t.align " BSZPTR "\n" + "LECIEX:\n\n"); + for (i = 0; i < ctx->nsym; i++) { + const char *name = ctx->sym[i].name; + int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; + if (size == 0) continue; +#if LJ_HASFFI + if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; } +#endif + fprintf(ctx->fp, + "%s.eh:\n" + "LSFDE%d:\n" + "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" + "\t.long L$set$%d\n" + "LASFDE%d:\n" + "\t.long LASFDE%d-EH_frame1\n" + "\t.long %s-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ +#else + "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ + "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */ +#endif + "\t.align " BSZPTR "\n" + "LEFDE%d:\n\n", + name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); + } +#if LJ_HASFFI + if (fcsize) { + fprintf(ctx->fp, + "EH_frame2:\n" + "\t.set L$set$y,LECIEY-LSCIEY\n" + "\t.long L$set$y\n" + "LSCIEY:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 1\n" /* augmentation length */ +#if LJ_64 + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" +#else + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH. 
*/ +#endif + "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" + "\t.align " BSZPTR "\n" + "LECIEY:\n\n"); + fprintf(ctx->fp, + "_lj_vm_ffi_call.eh:\n" + "LSFDEY:\n" + "\t.set L$set$yy,LEFDEY-LASFDEY\n" + "\t.long L$set$yy\n" + "LASFDEY:\n" + "\t.long LASFDEY-EH_frame2\n" + "\t.long _lj_vm_ffi_call-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ +#if LJ_64 + "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */ + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ +#else + "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */ + "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ + "\t.byte 0xd\n\t.byte 0x4\n" /* def_cfa_register ebp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */ +#endif + "\t.align " BSZPTR "\n" + "LEFDEY:\n\n", fcsize); + } +#endif +#if !LJ_64 + fprintf(ctx->fp, + "\t.non_lazy_symbol_pointer\n" + "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" + ".indirect_symbol _lj_err_unwind_dwarf\n" + ".long 0\n\n"); + fprintf(ctx->fp, "\t.section __IMPORT,__jump_table,symbol_stubs,pure_instructions+self_modifying_code,5\n"); + { + const char *const *xn; + for (xn = ctx->extnames; *xn; xn++) + if (strncmp(*xn, LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) + fprintf(ctx->fp, "L_%s$stub:\n\t.indirect_symbol _%s\n\t.ascii \"\\364\\364\\364\\364\\364\"\n", *xn, *xn); + } +#endif + fprintf(ctx->fp, ".subsections_via_symbols\n"); + } + break; +#endif + default: /* Difficult for other modes. */ + break; + } +} + diff --git a/Build/source/libs/luajit/LuaJIT-src/src/xb1build.bat b/Build/source/libs/luajit/LuaJIT-src/src/xb1build.bat new file mode 100644 index 00000000000..847e84a5557 --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/xb1build.bat @@ -0,0 +1,101 @@ +@rem Script to build LuaJIT with the Xbox One SDK. +@rem Donated to the public domain. +@rem +@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler) +@rem Then cd to this directory and run this script. + +@if not defined INCLUDE goto :FAIL +@if not defined DurangoXDK goto :FAIL + +@setlocal +@echo ---- Host compiler ---- +@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /DLUAJIT_ENABLE_GC64 +@set LJLINK=link /nologo +@set LJMT=mt /nologo +@set DASMDIR=..\dynasm +@set DASM=%DASMDIR%\dynasm.lua +@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +%LJCOMPILE% host\minilua.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:minilua.exe minilua.obj +@if errorlevel 1 goto :BAD +if exist minilua.exe.manifest^ + %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe + +@rem Error out for 64 bit host compiler +@minilua +@if not errorlevel 8 goto :FAIL + +@set DASMFLAGS=-D WIN -D FFI -D P64 +minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x64.dasc +@if errorlevel 1 goto :BAD + +%LJCOMPILE% /I "." 
/I %DASMDIR% /D_DURANGO host\buildvm*.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:buildvm.exe buildvm*.obj +@if errorlevel 1 goto :BAD +if exist buildvm.exe.manifest^ + %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe + +buildvm -m peobj -o lj_vm.obj +@if errorlevel 1 goto :BAD +buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m libdef -o lj_libdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m recdef -o lj_recdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m folddef -o lj_folddef.h lj_opt_fold.c +@if errorlevel 1 goto :BAD + +@echo ---- Cross compiler ---- + +@set CWD=%cd% +@call "%DurangoXDK%\xdk\DurangoVars.cmd" XDK +@cd /D "%CWD%" +@shift + +@set LJCOMPILE="cl" /nologo /c /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /D_LIB /D_UNICODE /D_DURANGO +@set LJLIB="lib" /nologo + +@if "%1"=="debug" ( + @shift + @set LJCOMPILE=%LJCOMPILE% /Zi /MDd /Od + @set LJLINK=%LJLINK% /debug +) else ( + @set LJCOMPILE=%LJCOMPILE% /MD /O2 /DNDEBUG +) + +@if "%1"=="amalg" goto :AMALG +%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c +@if errorlevel 1 goto :BAD +%LJLIB% /OUT:luajit.lib lj_*.obj lib_*.obj +@if errorlevel 1 goto :BAD +@goto :NOAMALG +:AMALG +%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c +@if errorlevel 1 goto :BAD +%LJLIB% /OUT:luajit.lib ljamalg.obj lj_vm.obj +@if errorlevel 1 goto :BAD +:NOAMALG + +@del *.obj *.manifest minilua.exe buildvm.exe +@echo. +@echo === Successfully built LuaJIT for Xbox One === + +@goto :END +:BAD +@echo. +@echo ******************************************************* +@echo *** Build FAILED -- Please check the error messages *** +@echo ******************************************************* +@goto :END +:FAIL +@echo To run this script you must open a "Visual Studio .NET Command Prompt" +@echo (64 bit host compiler). The Xbox One SDK must be installed, too. +:END diff --git a/Build/source/libs/luajit/LuaJIT-src/src/xedkbuild.bat b/Build/source/libs/luajit/LuaJIT-src/src/xedkbuild.bat new file mode 100644 index 00000000000..240ec878daf --- /dev/null +++ b/Build/source/libs/luajit/LuaJIT-src/src/xedkbuild.bat @@ -0,0 +1,92 @@ +@rem Script to build LuaJIT with the Xbox 360 SDK. +@rem Donated to the public domain. +@rem +@rem Open a "Visual Studio .NET Command Prompt" (32 bit host compiler) +@rem Then cd to this directory and run this script. + +@if not defined INCLUDE goto :FAIL +@if not defined XEDK goto :FAIL + +@setlocal +@rem ---- Host compiler ---- +@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE +@set LJLINK=link /nologo +@set LJMT=mt /nologo +@set DASMDIR=..\dynasm +@set DASM=%DASMDIR%\dynasm.lua +@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +%LJCOMPILE% host\minilua.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:minilua.exe minilua.obj +@if errorlevel 1 goto :BAD +if exist minilua.exe.manifest^ + %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe + +@rem Error out for 64 bit host compiler +@minilua +@if errorlevel 8 goto :FAIL + +@set DASMFLAGS=-D GPR64 -D FRAME32 -D PPE -D SQRT -D DUALNUM +minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_ppc.dasc +@if errorlevel 1 goto :BAD + +%LJCOMPILE% /I "." 
/I %DASMDIR% /D_XBOX_VER=200 /DLUAJIT_TARGET=LUAJIT_ARCH_PPC host\buildvm*.c +@if errorlevel 1 goto :BAD +%LJLINK% /out:buildvm.exe buildvm*.obj +@if errorlevel 1 goto :BAD +if exist buildvm.exe.manifest^ + %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe + +buildvm -m peobj -o lj_vm.obj +@if errorlevel 1 goto :BAD +buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m libdef -o lj_libdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m recdef -o lj_recdef.h %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% +@if errorlevel 1 goto :BAD +buildvm -m folddef -o lj_folddef.h lj_opt_fold.c +@if errorlevel 1 goto :BAD + +@rem ---- Cross compiler ---- +@set LJCOMPILE="%XEDK%\bin\win32\cl" /nologo /c /MT /O2 /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /DNDEBUG /D_XBOX /D_LIB /DLUAJIT_USE_SYSMALLOC +@set LJLIB="%XEDK%\bin\win32\lib" /nologo +@set "INCLUDE=%XEDK%\include\xbox" + +@if "%1" neq "debug" goto :NODEBUG +@shift +@set "LJCOMPILE=%LJCOMPILE% /Zi" +:NODEBUG +@if "%1"=="amalg" goto :AMALG +%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c +@if errorlevel 1 goto :BAD +%LJLIB% /OUT:luajit20.lib lj_*.obj lib_*.obj +@if errorlevel 1 goto :BAD +@goto :NOAMALG +:AMALG +%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c +@if errorlevel 1 goto :BAD +%LJLIB% /OUT:luajit20.lib ljamalg.obj lj_vm.obj +@if errorlevel 1 goto :BAD +:NOAMALG + +@del *.obj *.manifest minilua.exe buildvm.exe +@echo. +@echo === Successfully built LuaJIT for Xbox 360 === + +@goto :END +:BAD +@echo. +@echo ******************************************************* +@echo *** Build FAILED -- Please check the error messages *** +@echo ******************************************************* +@goto :END +:FAIL +@echo To run this script you must open a "Visual Studio .NET Command Prompt" +@echo (32 bit host compiler). The Xbox 360 SDK must be installed, too. +:END diff --git a/Build/source/libs/luajit/Makefile.am b/Build/source/libs/luajit/Makefile.am index fcf39f6a4a3..2d62f47ad53 100644 --- a/Build/source/libs/luajit/Makefile.am +++ b/Build/source/libs/luajit/Makefile.am @@ -10,7 +10,7 @@ EXTRA_DIST = $(LUAJIT_TREE) ## Changes applied to the original source tree ## -EXTRA_DIST += $(LUAJIT_TREE)-PATCHES +EXTRA_DIST += LuaJIT-PATCHES # Files not to be distributed include $(srcdir)/../../am/dist_hook.am diff --git a/Build/source/libs/luajit/Makefile.in b/Build/source/libs/luajit/Makefile.in index a350a890298..96a68b98406 100644 --- a/Build/source/libs/luajit/Makefile.in +++ b/Build/source/libs/luajit/Makefile.in @@ -668,7 +668,7 @@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -EXTRA_DIST = $(LUAJIT_TREE) $(LUAJIT_TREE)-PATCHES jiterr.exp +EXTRA_DIST = $(LUAJIT_TREE) LuaJIT-PATCHES jiterr.exp NEVER_DIST = `find . $(NEVER_NAMES)` # Files not to be distributed diff --git a/Build/source/libs/luajit/configure b/Build/source/libs/luajit/configure index 0ab857b3470..37ea9fb1708 100755 --- a/Build/source/libs/luajit/configure +++ b/Build/source/libs/luajit/configure @@ -595,7 +595,7 @@ PACKAGE_STRING='luajit for TeX Live 2.1.0-beta1' PACKAGE_BUGREPORT='tex-k@tug.org' PACKAGE_URL='' -ac_unique_file="LuaJIT-2.1.0-beta1/src/luajit.h" +ac_unique_file="LuaJIT-src/src/luajit.h" # Factoring default headers for most tests. 
ac_includes_default="\ #include @@ -14134,7 +14134,7 @@ case $host_os in #( LJHOST='Other' ;; esac lj_save_CPPFLAGS=$CPPFLAGS -CPPFLAGS="$CPPFLAGS -I$srcdir/LuaJIT-2.1.0-beta1/src $LUAJIT_DEFINES -dM" +CPPFLAGS="$CPPFLAGS -I$srcdir/LuaJIT-src/src $LUAJIT_DEFINES -dM" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include @@ -14330,7 +14330,7 @@ fi echo timestamp >config.force fi -LUAJIT_TREE=LuaJIT-2.1.0-beta1 +LUAJIT_TREE=LuaJIT-src ac_config_headers="$ac_config_headers config.h" diff --git a/Build/source/libs/luajit/configure.ac b/Build/source/libs/luajit/configure.ac index d56af825822..68efbddbb34 100644 --- a/Build/source/libs/luajit/configure.ac +++ b/Build/source/libs/luajit/configure.ac @@ -9,7 +9,7 @@ dnl m4_include([version.ac])[] dnl define luajit_version AC_INIT([luajit for TeX Live], luajit_version, [tex-k@tug.org]) AC_PREREQ([2.65]) -AC_CONFIG_SRCDIR([LuaJIT-]luajit_version[/src/luajit.h]) +AC_CONFIG_SRCDIR([LuaJIT-src/src/luajit.h]) AC_CONFIG_AUX_DIR([../../build-aux]) AC_CONFIG_MACRO_DIRS([../../m4 m4]) @@ -76,7 +76,7 @@ AS_IF([test "x$lj_cpp" != xok], echo timestamp >config.force fi -AC_SUBST([LUAJIT_TREE], [LuaJIT-]luajit_version) +AC_SUBST([LUAJIT_TREE], [LuaJIT-src]) dnl Not used but avoids putting a zillion useless -D's on the command line. AC_CONFIG_HEADERS([config.h]) diff --git a/Build/source/libs/luajit/native/configure b/Build/source/libs/luajit/native/configure index 6c8da53c2ab..82f80de379e 100755 --- a/Build/source/libs/luajit/native/configure +++ b/Build/source/libs/luajit/native/configure @@ -585,7 +585,7 @@ PACKAGE_STRING='luajit native 2.1.0-beta1' PACKAGE_BUGREPORT='tex-k@tug.org' PACKAGE_URL='' -ac_unique_file="../LuaJIT-2.1.0-beta1/src/luajit.h" +ac_unique_file="../LuaJIT-src/src/luajit.h" # Factoring default headers for most tests. ac_includes_default="\ #include @@ -5304,7 +5304,7 @@ esac -LUAJIT_TREE=../LuaJIT-2.1.0-beta1 +LUAJIT_TREE=../LuaJIT-src ac_config_headers="$ac_config_headers config.h" diff --git a/Build/source/libs/luajit/native/configure.ac b/Build/source/libs/luajit/native/configure.ac index 773c4ec6f63..ebb6d324dca 100644 --- a/Build/source/libs/luajit/native/configure.ac +++ b/Build/source/libs/luajit/native/configure.ac @@ -9,7 +9,7 @@ dnl m4_include([../version.ac])[] dnl define luajit_version AC_INIT([luajit native], luajit_version, [tex-k@tug.org]) AC_PREREQ([2.63]) -AC_CONFIG_SRCDIR([../LuaJIT-]luajit_version[/src/luajit.h]) +AC_CONFIG_SRCDIR([../LuaJIT-src/src/luajit.h]) AC_CONFIG_AUX_DIR([../../../build-aux]) AC_CONFIG_MACRO_DIR([../../../m4]) @@ -31,7 +31,7 @@ KPSE_SEARCH_LIBS([MATH_LIB], [pow], [m]) AC_SUBST([DASM_ARCH]) -AC_SUBST([LUAJIT_TREE], [../LuaJIT-]luajit_version) +AC_SUBST([LUAJIT_TREE], [../LuaJIT-src]) dnl Not used but avoids putting a zillion useless -D's on the command line. AC_CONFIG_HEADERS([config.h]) -- cgit v1.2.3
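
Editor's note on the repaired TGET/TSET fast paths above: they hinge on LuaJIT's negative metamethod cache, the per-table nomm byte whose bits record "this table definitely has no __index / __newindex metamethod"; that is also why BC_TSETS clears it (mov byte TAB:RB->nomm, 0) before a key may be added. The standalone C sketch below mirrors that bit test under stand-in names: FakeTab, MM_INDEX_BIT and the helper functions are illustrative only, not the declarations from lj_obj.h.

/* Illustrative sketch only: FakeTab and the bit numbers are stand-ins,
** not the real declarations from lj_obj.h.
*/
#include <stdint.h>
#include <stdio.h>

enum { MM_INDEX_BIT = 0, MM_NEWINDEX_BIT = 1 };  /* stand-ins for MM_index, MM_newindex */

typedef struct FakeTab {
  uint8_t nomm;               /* bit set means "definitely no such metamethod" */
  struct FakeTab *metatable;  /* NULL if the table has no metatable */
} FakeTab;

/* Mirrors: cmp dword TAB:RB->metatable, 0 and test byte TAB:RA->nomm, 1<<MM_index */
static int must_check_index_mm(const FakeTab *t)
{
  if (t->metatable == NULL) return 0;                       /* no metatable: done */
  return (t->metatable->nomm & (1u << MM_INDEX_BIT)) == 0;  /* flag NOT set: check */
}

/* Mirrors BC_TSETS clearing the cache: a newly added key may shadow a cached miss. */
static void clear_mm_cache(FakeTab *t) { t->nomm = 0; }

int main(void)
{
  FakeTab mt = { 0, NULL };
  FakeTab t  = { 0, &mt };
  printf("%d\n", must_check_index_mm(&t));  /* 1: nothing cached yet, must check */
  mt.nomm |= 1u << MM_INDEX_BIT;            /* interpreter caches "no __index" */
  printf("%d\n", must_check_index_mm(&t));  /* 0: fast path may return nil directly */
  clear_mm_cache(&mt);                      /* a store into mt invalidates the cache */
  printf("%d\n", must_check_index_mm(&t));  /* 1: back to the slow check */
  return 0;
}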